// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 */

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

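/* Look up the team_port hanging off a port device's rx_handler_data.
 * Must run under rtnl_lock(); returns NULL if @dev is not a team port.
 */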
static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return netif_is_team_port(dev) ? port : NULL;
}

/*
 * Whether an open port device allows its address to be changed is already
 * verified in team_port_add, so callers may safely ignore this function's
 * return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
			       const unsigned char *dev_addr)
{
	struct sockaddr_storage addr;

	memcpy(addr.__data, dev_addr, port_dev->addr_len);
	addr.ss_family = port_dev->type;
	return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
				       struct team_port *port)
{
	return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
	return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
				      struct team_port *port)
{
	team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

static void team_lower_state_changed(struct team_port *port)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = port->linkup;
	info.tx_enabled = team_port_enabled(port);
	netdev_lower_state_changed(port->dev, &info);
}

static void team_refresh_port_linkup(struct team_port *port)
{
	bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
						      port->state.linkup;

	if (port->linkup != new_linkup) {
		port->linkup = new_linkup;
		team_lower_state_changed(port);
	}
}


/*******************
 * Options handling
 *******************/

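/* One team_option_inst exists per option instance: a single instance for a
 * team-wide option, one per port for a per-port option, and one per array
 * index when option->array_size is set. The changed/removed flags drive
 * netlink change notifications.
 */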
struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};

static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init)
			option->init(team, &opt_inst->info);
	}
	return 0;
}

static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kcalloc(option_count, sizeof(struct team_option *),
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--) {
		__team_option_inst_del_option(team, dst_opts[i]);
		list_del(&dst_opts[i]->list);
	}

	i = option_count;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}

static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;

	opt_inst->option->getter(team, ctx);
	return 0;
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);


/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

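/* Registered modes are tracked on mode_list as team_mode_item entries,
 * protected by mode_list_lock.
 */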
struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

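/* Mode modules call team_mode_register() from their module init. A minimal
 * sketch, modeled on the in-tree modes (the names below are illustrative,
 * not from this file):
 *
 *	static const struct team_mode my_mode = {
 *		.kind		= "mymode",
 *		.owner		= THIS_MODULE,
 *		.priv_size	= sizeof(struct my_priv),
 *		.ops		= &my_mode_ops,
 *	};
 *
 *	static int __init my_mode_init(void)
 *	{
 *		return team_mode_register(&my_mode);
 *	}
 *
 * The "kind" string must pass is_good_mode_name() and priv_size must fit
 * within TEAM_MODE_PRIV_SIZE, or registration fails with -EINVAL.
 */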
int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);

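/* Look up a mode by kind and take a reference on its owner module. If the
 * mode is not registered yet, drop the lock and try to load the
 * "team-mode-<kind>" module alias before looking again.
 */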
static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	if (!try_module_get(THIS_MODULE))
		return NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	module_put(THIS_MODULE);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->user_carrier_enabled = false;
	team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that no port is present at the time of mode
 * change. Therefore no packets are in flight and there is no need to set the
 * mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}


/*********************
 * Peers notification
 *********************/

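/* Deferred work that emits NETDEV_NOTIFY_PEERS (e.g. gratuitous ARP/NA).
 * It needs rtnl_lock, but a work item must not block on it, so it uses
 * rtnl_trylock() and reschedules itself when the lock is contended.
 */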
static void team_notify_peers_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, notify_peers.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->notify_peers.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
	if (!team->notify_peers.count || !netif_running(team->dev))
		return;
	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
	schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->notify_peers.dw);
}


/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, mcast_rejoin.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->mcast_rejoin.dw,
				      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
	if (!team->mcast_rejoin.count || !netif_running(team->dev))
		return;
	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}


/************************
 * Rx path frame handler
 ************************/

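/* Per-port rx_handler. Returning RX_HANDLER_ANOTHER makes the core re-run
 * protocol demux with skb->dev rewritten to the team device;
 * RX_HANDLER_EXACT delivers only to exact-match taps on the port device;
 * RX_HANDLER_PASS leaves the skb on the port device untouched.
 */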
/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
			/* link-local packets are mostly useful when the stack
			 * receives them on the link they arrive on.
			 */
			return RX_HANDLER_PASS;
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		u64_stats_inc(&pcpu_stats->rx_packets);
		u64_stats_add(&pcpu_stats->rx_bytes, skb->len);
		if (skb->pkt_type == PACKET_MULTICAST)
			u64_stats_inc(&pcpu_stats->rx_multicast);
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else if (res == RX_HANDLER_EXACT) {
		this_cpu_inc(team->pcpu_stats->rx_nohandler);
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}


/*************************************
 * Multiqueue Tx port select override
 *************************************/

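/* When any port has a non-zero queue_id, Tx queue override is enabled:
 * team->qom_lists holds one port list per Tx queue except queue 0 (which
 * means "no override"), so the list for queue N lives at qom_lists[N - 1].
 */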
static int team_queue_override_init(struct team *team)
{
	struct list_head *listarr;
	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
	unsigned int i;

	if (!queue_cnt)
		return 0;
	listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
				GFP_KERNEL);
	if (!listarr)
		return -ENOMEM;
	team->qom_lists = listarr;
	for (i = 0; i < queue_cnt; i++)
		INIT_LIST_HEAD(listarr++);
	return 0;
}

static void team_queue_override_fini(struct team *team)
{
	kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
	return &team->qom_lists[queue_id - 1];
}

/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
	struct list_head *qom_list;
	struct team_port *port;

	if (!team->queue_override_enabled || !skb->queue_mapping)
		return false;
	qom_list = __team_get_qom_list(team, skb->queue_mapping);
	list_for_each_entry_rcu(port, qom_list, qom_list) {
		if (!team_dev_queue_xmit(team, port, skb))
			return true;
	}
	return false;
}

static void __team_queue_override_port_del(struct team *team,
					   struct team_port *port)
{
	if (!port->queue_id)
		return;
	list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
						      struct team_port *cur)
{
	if (port->priority < cur->priority)
		return true;
	if (port->priority > cur->priority)
		return false;
	if (port->index < cur->index)
		return true;
	return false;
}

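/* Insert the port into its queue's override list, keeping the list ordered
 * so that ports the comparator above ranks higher come first, with ties
 * broken by lower port index.
 */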
static void __team_queue_override_port_add(struct team *team,
					   struct team_port *port)
{
	struct team_port *cur;
	struct list_head *qom_list;
	struct list_head *node;

	if (!port->queue_id)
		return;
	qom_list = __team_get_qom_list(team, port->queue_id);
	node = qom_list;
	list_for_each_entry(cur, qom_list, qom_list) {
		if (team_queue_override_port_has_gt_prio_than(port, cur))
			break;
		node = &cur->qom_list;
	}
	list_add_tail_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
	struct team_port *port;
	bool enabled = false;

	list_for_each_entry(port, &team->port_list, list) {
		if (port->queue_id) {
			enabled = true;
			break;
		}
	}
	if (enabled == team->queue_override_enabled)
		return;
	netdev_dbg(team->dev, "%s queue override\n",
		   enabled ? "Enabling" : "Disabling");
	team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
						  struct team_port *port)
{
	if (!port->queue_id || team_port_enabled(port))
		return;
	__team_queue_override_port_del(team, port);
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
						     struct team_port *port,
						     u16 new_queue_id)
{
	if (team_port_enabled(port)) {
		__team_queue_override_port_del(team, port);
		port->queue_id = new_queue_id;
		__team_queue_override_port_add(team, port);
		__team_queue_override_enabled_check(team);
	} else {
		port->queue_id = new_queue_id;
	}
}

static void team_queue_override_port_add(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_del(team, port);
	__team_queue_override_enabled_check(team);
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (Might be racy so reader could see incorrect ifindex when
 * processing an in-flight packet, but that is not a problem). Write guarded
 * by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	team_adjust_ops(team);
	team_queue_override_port_add(team, port);
	if (team->ops.port_enabled)
		team->ops.port_enabled(team, port);
	team_notify_peers(team);
	team_mcast_rejoin(team);
	team_lower_state_changed(port);
}

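/* Close the hole left after removing the port at rm_index: shift every
 * following enabled port down by one so the indexes stay dense and the
 * index hash stays consistent.
 */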
static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	if (!team_port_enabled(port))
		return;
	if (team->ops.port_disabled)
		team->ops.port_disabled(team, port);
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, port->index);
	port->index = -1;
	team->en_port_count--;
	team_queue_override_port_del(team, port);
	team_adjust_ops(team);
	team_lower_state_changed(port);
}

#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			   NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)

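/* Recompute the team device's feature set by merging all port features via
 * netdev_increment_features(), and track the largest port hard_header_len.
 * IFF_XMIT_DST_RELEASE is kept only if every port has it.
 */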
static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
					  NETIF_F_ALL_FOR_ALL;
	netdev_features_t enc_features = TEAM_ENC_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
							  port->dev->vlan_features,
							  TEAM_VLAN_FEATURES);
		enc_features =
			netdev_increment_features(enc_features,
						  port->dev->hw_enc_features,
						  TEAM_ENC_FEATURES);

		dst_release_flag &= port->dev->priv_flags;
		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}
	rcu_read_unlock();

	team->dev->vlan_features = vlan_features;
	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				     NETIF_F_HW_VLAN_CTAG_TX |
				     NETIF_F_HW_VLAN_STAG_TX;
	team->dev->hard_header_len = max_hard_header_len;

	team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}

static void team_compute_features(struct team *team)
{
	__team_compute_features(team);
	netdev_change_features(team->dev);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int __team_port_enable_netpoll(struct team_port *port)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, port->dev);
	if (err) {
		kfree(np);
		return err;
	}
	port->np = np;
	return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
	if (!port->team->dev->npinfo)
		return 0;

	return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
	struct netpoll *np = port->np;

	if (!np)
		return;
	port->np = NULL;

	__netpoll_free(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
{
	return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

static int team_upper_dev_link(struct team *team, struct team_port *port,
			       struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	int err;

	lag_upper_info.tx_type = team->mode->lag_tx_type;
	lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
	err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
					   &lag_upper_info, extack);
	if (err)
		return err;
	port->dev->priv_flags |= IFF_TEAM_PORT;
	return 0;
}

static void team_upper_dev_unlink(struct team *team, struct team_port *port)
{
	netdev_upper_dev_unlink(port->dev, team->dev);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev);

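/* Validate and enslave a port device: it must not be a loopback, already a
 * team port, the team itself, an upper device of the team, VLAN-challenged
 * while the team carries VLANs, or administratively up. On any later
 * failure, the steps already taken are unwound in reverse order via the
 * error labels at the bottom.
 */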
static int team_port_add(struct team *team, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
		netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
			   portname);
		return -EINVAL;
	}

	if (netif_is_team_port(port_dev)) {
		NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
		netdev_err(dev, "Device %s is already a port of a team device\n",
			   portname);
		return -EBUSY;
	}

	if (dev == port_dev) {
		NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
		netdev_err(dev, "Cannot enslave team device to itself\n");
		return -EINVAL;
	}

	if (netdev_has_upper_dev(dev, port_dev)) {
		NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
		netdev_err(dev, "Device %s is already an upper device of the team interface\n",
			   portname);
		return -EBUSY;
	}

	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
	    vlan_uses_dev(dev)) {
		NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
			   portname);
		return -EPERM;
	}

	err = team_dev_type_check_change(dev, port_dev);
	if (err)
		return err;

	if (port_dev->flags & IFF_UP) {
		NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;
	INIT_LIST_HEAD(&port->qom_list);

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev, extack);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = team_port_enable_netpoll(port);
	if (err) {
		netdev_err(dev, "Failed to enable netpoll on device %s\n",
			   portname);
		goto err_enable_netpoll;
	}

	if (!(dev->features & NETIF_F_LRO))
		dev_disable_lro(port_dev);

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = team_upper_dev_link(team, port, extack);
	if (err) {
		netdev_err(dev, "Device %s failed to set upper link\n",
			   portname);
		goto err_set_upper_link;
	}

	err = __team_option_inst_add_port(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	/* set promiscuity level to new slave */
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(port_dev, 1);
		if (err)
			goto err_set_slave_promisc;
	}

	/* set allmulti level to new slave */
	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(port_dev, 1);
		if (err) {
			if (dev->flags & IFF_PROMISC)
				dev_set_promiscuity(port_dev, -1);
			goto err_set_slave_promisc;
		}
	}

	if (dev->flags & IFF_UP) {
		netif_addr_lock_bh(dev);
		dev_uc_sync_multiple(port_dev, dev);
		dev_mc_sync_multiple(port_dev, dev);
		netif_addr_unlock_bh(dev);
	}

	port->index = -1;
	list_add_tail_rcu(&port->list, &team->port_list);
	team_port_enable(team, port);
	__team_compute_features(team);
	__team_port_change_port_added(port, !!netif_oper_up(port_dev));
	__team_options_change_check(team);

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_set_slave_promisc:
	__team_option_inst_del_port(team, port);

err_option_port_add:
	team_upper_dev_unlink(team, port);

err_set_upper_link:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	team_port_disable_netpoll(port);

err_enable_netpoll:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_dev_addr(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}

static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	team_port_disable(team, port);
	list_del_rcu(&port->list);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(port_dev, -1);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(port_dev, -1);

	team_upper_dev_unlink(team, port);
	netdev_rx_handler_unregister(port_dev);
	team_port_disable_netpoll(port);
	vlan_vids_del_by_dev(port_dev, dev);
	if (dev->flags & IFF_UP) {
		dev_uc_unsync(port_dev, dev);
		dev_mc_unsync(port_dev, dev);
	}
	dev_close(port_dev);
	team_port_leave(team, port);

	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
	__team_port_change_port_removed(port);

	team_port_set_orig_dev_addr(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	kfree_rcu(port, rcu);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}


/*****************
 * Net device ops
 *****************/

static void team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}

static void team_notify_peers_count_get(struct team *team,
					struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.count;
}

static int team_notify_peers_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->notify_peers.count = ctx->data.u32_val;
	return 0;
}

static void team_notify_peers_interval_get(struct team *team,
					   struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.interval;
}

static int team_notify_peers_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->notify_peers.interval = ctx->data.u32_val;
	return 0;
}

static void team_mcast_rejoin_count_get(struct team *team,
					struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.count;
}

static int team_mcast_rejoin_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.count = ctx->data.u32_val;
	return 0;
}

static void team_mcast_rejoin_interval_get(struct team *team,
					   struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.interval;
}

static int team_mcast_rejoin_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.interval = ctx->data.u32_val;
	return 0;
}

static void team_port_en_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

static void team_user_linkup_option_get(struct team *team,
					struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static void team_user_linkup_en_option_get(struct team *team,
					   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static void team_priority_option_get(struct team *team,
				     struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.s32_val = port->priority;
}

static int team_priority_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	s32 priority = ctx->data.s32_val;

	if (port->priority == priority)
		return 0;
	port->priority = priority;
	team_queue_override_port_prio_changed(team, port);
	return 0;
}

static void team_queue_id_option_get(struct team *team,
				     struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.u32_val = port->queue_id;
}

static int team_queue_id_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	u16 new_queue_id = ctx->data.u32_val;

	if (port->queue_id == new_queue_id)
		return 0;
	if (new_queue_id >= team->dev->real_num_tx_queues)
		return -EINVAL;
	team_queue_override_port_change_queue_id(team, port, new_queue_id);
	return 0;
}

static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "notify_peers_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_count_get,
		.setter = team_notify_peers_count_set,
	},
	{
		.name = "notify_peers_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_interval_get,
		.setter = team_notify_peers_interval_set,
	},
	{
		.name = "mcast_rejoin_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_count_get,
		.setter = team_mcast_rejoin_count_set,
	},
	{
		.name = "mcast_rejoin_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_interval_get,
		.setter = team_mcast_rejoin_interval_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
	{
		.name = "priority",
		.type = TEAM_OPTION_TYPE_S32,
		.per_port = true,
		.getter = team_priority_option_get,
		.setter = team_priority_option_set,
	},
	{
		.name = "queue_id",
		.type = TEAM_OPTION_TYPE_U32,
		.per_port = true,
		.getter = team_queue_id_option_get,
		.setter = team_queue_id_option_set,
	},
};


static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	team_set_no_mode(team);
	team->notifier_ctx = false;

	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);
	err = team_queue_override_init(team);
	if (err)
		goto err_team_queue_override_init;

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);

	team_notify_peers_init(team);
	team_mcast_rejoin_init(team);

	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	lockdep_register_key(&team->team_lock_key);
	__mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
	netdev_lockdep_set_classes(dev);

	return 0;

err_options_register:
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
err_team_queue_override_init:
	free_percpu(team->pcpu_stats);

	return err;
}

static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
	mutex_unlock(&team->lock);
	netdev_change_features(dev);
	lockdep_unregister_key(&team->team_lock_key);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
}

static int team_open(struct net_device *dev)
{
	return 0;
}

static int team_close(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list) {
		dev_uc_unsync(port->dev, dev);
		dev_mc_unsync(port->dev, dev);
	}

	return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success;
	unsigned int len = skb->len;

	tx_success = team_queue_override_transmit(team, skb);
	if (!tx_success)
		tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		u64_stats_inc(&pcpu_stats->tx_packets);
		u64_stats_add(&pcpu_stats->tx_bytes, len);
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	/*
	 * This helper function exists to help dev_pick_tx get the correct
	 * destination queue. Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the team driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/*
	 * Save the original txq to restore before passing to the driver
	 */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}

static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync_multiple(port->dev, dev);
		dev_mc_sync_multiple(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	dev_addr_set(dev, addr->sa_data);
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		if (team->ops.port_change_dev_addr)
			team->ops.port_change_dev_addr(team, port);
	mutex_unlock(&team->lock);
	return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by the team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	team->port_mtu_change_allowed = true;
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu\n",
				   port->dev->name);
			goto unwind;
		}
	}
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	return err;
}

static void
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rx_packets = u64_stats_read(&p->rx_packets);
			rx_bytes = u64_stats_read(&p->rx_bytes);
			rx_multicast = u64_stats_read(&p->rx_multicast);
			tx_packets = u64_stats_read(&p->tx_packets);
			tx_bytes = u64_stats_read(&p->tx_bytes);
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->multicast += rx_multicast;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/*
		 * rx_dropped, tx_dropped & rx_nohandler are u32,
		 * updated without syncp protection.
		 */
		rx_dropped += READ_ONCE(p->rx_dropped);
		tx_dropped += READ_ONCE(p->tx_dropped);
		rx_nohandler += READ_ONCE(p->rx_nohandler);
	}
	stats->rx_dropped = rx_dropped;
	stats->tx_dropped = tx_dropped;
	stats->rx_nohandler = rx_nohandler;
}

static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by the team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, proto, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void team_poll_controller(struct net_device *dev)
{
}

static void __team_netpoll_cleanup(struct team *team)
{
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list)
		team_port_disable_netpoll(port);
}

static void team_netpoll_cleanup(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	mutex_lock(&team->lock);
	__team_netpoll_cleanup(team);
	mutex_unlock(&team->lock);
}

static int team_netpoll_setup(struct net_device *dev,
			      struct netpoll_info *npinfo)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err = 0;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = __team_port_enable_netpoll(port);
		if (err) {
			__team_netpoll_cleanup(team);
			break;
		}
	}
	mutex_unlock(&team->lock);
	return err;
}
#endif

static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
			  struct netlink_ext_ack *extack)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev, extack);
	mutex_unlock(&team->lock);

	if (!err)
		netdev_change_features(dev);

	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);

	if (err)
		return err;

	if (netif_is_team_master(port_dev)) {
		lockdep_unregister_key(&team->team_lock_key);
		lockdep_register_key(&team->team_lock_key);
		lockdep_set_class(&team->lock, &team->team_lock_key);
	}
	netdev_change_features(dev);

	return err;
}

static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();

	features = netdev_add_tso_features(features, mask);

	return features;
}

static int team_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct team *team = netdev_priv(dev);

	team->user_carrier_enabled = true;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_select_queue	= team_select_queue,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= team_poll_controller,
	.ndo_netpoll_setup	= team_netpoll_setup,
	.ndo_netpoll_cleanup	= team_netpoll_cleanup,
#endif
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
	.ndo_change_carrier	= team_change_carrier,
	.ndo_features_check	= passthru_features_check,
};

/***********************
 * ethtool interface
 ***********************/

static void team_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

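/* Report an aggregate link: speed is the sum of the speeds of all txable
 * ports, and duplex is taken from the first txable port that reports one.
 */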
2080static int team_ethtool_get_link_ksettings(struct net_device *dev,
2081 struct ethtool_link_ksettings *cmd)
2082{
2083 struct team *team= netdev_priv(dev);
2084 unsigned long speed = 0;
2085 struct team_port *port;
2086
2087 cmd->base.duplex = DUPLEX_UNKNOWN;
2088 cmd->base.port = PORT_OTHER;
2089
2090 rcu_read_lock();
2091 list_for_each_entry_rcu(port, &team->port_list, list) {
2092 if (team_port_txable(port)) {
2093 if (port->state.speed != SPEED_UNKNOWN)
2094 speed += port->state.speed;
2095 if (cmd->base.duplex == DUPLEX_UNKNOWN &&
2096 port->state.duplex != DUPLEX_UNKNOWN)
2097 cmd->base.duplex = port->state.duplex;
2098 }
2099 }
2100 rcu_read_unlock();
2101
2102 cmd->base.speed = speed ? : SPEED_UNKNOWN;
2103
2104 return 0;
2105}
2106
2107static const struct ethtool_ops team_ethtool_ops = {
2108 .get_drvinfo = team_ethtool_get_drvinfo,
2109 .get_link = ethtool_op_get_link,
2110 .get_link_ksettings = team_ethtool_get_link_ksettings,
2111};
2112
2113/***********************
2114 * rt netlink interface
2115 ***********************/
2116
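/*
 * Clone link-layer properties from the port being enslaved so a team
 * can also run over non-Ethernet ports; for Ethernet ports the cached
 * Ethernet header_ops are restored.
 */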
static void team_setup_by_port(struct net_device *dev,
			       struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);

	if (port_dev->type == ARPHRD_ETHER)
		dev->header_ops = team->header_ops_cache;
	else
		dev->header_ops = port_dev->header_ops;
	dev->type = port_dev->type;
	dev->hard_header_len = port_dev->hard_header_len;
	dev->needed_headroom = port_dev->needed_headroom;
	dev->addr_len = port_dev->addr_len;
	dev->mtu = port_dev->mtu;
	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
	eth_hw_addr_inherit(dev, port_dev);

	if (port_dev->flags & IFF_POINTOPOINT) {
		dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
		dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
	} else if ((port_dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ==
		   (IFF_BROADCAST | IFF_MULTICAST)) {
		dev->flags |= (IFF_BROADCAST | IFF_MULTICAST);
		dev->flags &= ~(IFF_POINTOPOINT | IFF_NOARP);
	}
}

static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	char *portname = port_dev->name;
	int err;

	if (dev->type == port_dev->type)
		return 0;
	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "Device %s is of different type\n", portname);
		return -EBUSY;
	}
	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
	err = notifier_to_errno(err);
	if (err) {
		netdev_err(dev, "Refused to change device type\n");
		return err;
	}
	dev_uc_flush(dev);
	dev_mc_flush(dev);
	team_setup_by_port(dev, port_dev);
	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
	return 0;
}

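/*
 * Initialize the team net_device. It starts out as an Ethernet device
 * (team_setup_by_port() may re-type it later to match a non-Ethernet
 * port), with no queueing discipline and lockless TX, since actual
 * transmission is delegated to the ports.
 */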
static void team_setup(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	ether_setup(dev);
	dev->max_mtu = ETH_MAX_MTU;
	team->header_ops_cache = dev->header_ops;

	dev->netdev_ops = &team_netdev_ops;
	dev->ethtool_ops = &team_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = team_destructor;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_TEAM;

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Leave this up to the underlying drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;

	/* Don't allow team devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	dev->hw_features = TEAM_VLAN_FEATURES |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_HW_VLAN_STAG_RX |
			   NETIF_F_HW_VLAN_STAG_FILTER;

	dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
	dev->features |= dev->hw_features;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}

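/*
 * rtnl newlink handler. A team device is typically created with
 * something like "ip link add name team0 type team" (illustrative);
 * ports and options are then managed over generic netlink, usually by
 * teamd/libteam.
 */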
static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	return register_netdevice(dev);
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static unsigned int team_get_num_tx_queues(void)
{
	return TEAM_DEFAULT_NUM_TX_QUEUES;
}

static unsigned int team_get_num_rx_queues(void)
{
	return TEAM_DEFAULT_NUM_RX_QUEUES;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct team),
	.setup			= team_setup,
	.newlink		= team_newlink,
	.validate		= team_validate,
	.get_num_tx_queues	= team_get_num_tx_queues,
	.get_num_rx_queues	= team_get_num_rx_queues,
};


/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family;

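/* Top-level attribute policy; option and port lists are carried as
 * nested attributes below TEAM_ATTR_LIST_OPTION / TEAM_ATTR_LIST_PORT.
 */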
static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]	= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
	[TEAM_ATTR_OPTION_PORT_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_OPTION_ARRAY_INDEX]	= { .type = NLA_U32 },
};

static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be locked by the following two functions.
 * Since dev gets held here, it is ensured that dev won't disappear in
 * between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}

typedef int team_nl_send_func_t(struct sk_buff *skb,
				struct team *team, u32 portid);

static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
{
	return genlmsg_unicast(dev_net(team->dev), skb, portid);
}

static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
				       struct team_option_inst *opt_inst)
{
	struct nlattr *option_item;
	struct team_option *option = opt_inst->option;
	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
	struct team_gsetter_ctx ctx;
	int err;

	ctx.info = opt_inst_info;
	err = team_option_get(team, opt_inst, &ctx);
	if (err)
		return err;

	option_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_OPTION);
	if (!option_item)
		return -EMSGSIZE;

	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
		goto nest_cancel;
	if (opt_inst_info->port &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
			opt_inst_info->port->dev->ifindex))
		goto nest_cancel;
	if (opt_inst->option->array_size &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
			opt_inst_info->array_index))
		goto nest_cancel;

	switch (option->type) {
	case TEAM_OPTION_TYPE_U32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
			goto nest_cancel;
		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_STRING:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
			goto nest_cancel;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
				   ctx.data.str_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BINARY:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
			goto nest_cancel;
		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
			    ctx.data.bin_val.ptr))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BOOL:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
			goto nest_cancel;
		if (ctx.data.bool_val &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_S32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
			goto nest_cancel;
		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
			goto nest_cancel;
		break;
	default:
		BUG();
	}
	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
		goto nest_cancel;
	if (opt_inst->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
			goto nest_cancel;
		opt_inst->changed = false;
	}
	nla_nest_end(skb, option_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, option_item);
	return -EMSGSIZE;
}

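/*
 * Flush the skb currently being built (if any) via send_func and
 * allocate a fresh one. The fill functions below use this to emit
 * arbitrarily long dumps: whenever an skb fills up, it is sent off and
 * filling restarts in a new message.
 */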
static int __send_and_alloc_skb(struct sk_buff **pskb,
				struct team *team, u32 portid,
				team_nl_send_func_t *send_func)
{
	int err;

	if (*pskb) {
		err = send_func(*pskb, team, portid);
		if (err)
			return err;
	}
	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!*pskb)
		return -ENOMEM;
	return 0;
}

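/*
 * Fill and send TEAM_CMD_OPTIONS_GET message(s) for the selected option
 * instances. If an instance doesn't fit (-EMSGSIZE), the current skb is
 * closed and sent and filling resumes from that instance in a fresh
 * NLM_F_MULTI message; the dump is terminated with an NLMSG_DONE frame.
 */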
static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
				    int flags, team_nl_send_func_t *send_func,
				    struct list_head *sel_opt_inst_list)
{
	struct nlattr *option_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	opt_inst = list_first_entry(sel_opt_inst_list,
				    struct team_option_inst, tmp_list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_OPTIONS_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;
	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
		err = team_nl_fill_one_option_get(skb, team, opt_inst);
		if (err) {
			if (err == -EMSGSIZE) {
				if (!i)
					goto errout;
				incomplete = true;
				break;
			}
			goto errout;
		}
		i++;
	}

	nla_nest_end(skb, option_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			return err;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	nlmsg_free(skb);
	return err;
}

static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	struct team_option_inst *opt_inst;
	int err;
	LIST_HEAD(sel_opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	list_for_each_entry(opt_inst, &team->option_inst_list, list)
		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
				       NLM_F_ACK, team_nl_send_unicast,
				       &sel_opt_inst_list);

	team_nl_team_put(team);

	return err;
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list);

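/*
 * TEAM_CMD_OPTIONS_SET handler. Takes rtnl_lock before the per-team
 * lock since option setters may touch rtnl-protected state, then for
 * each nested option attribute looks up the matching option instance
 * by name, type, per-port ifindex and array index, applies the value
 * and multicasts a change event.
 */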
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;

	rtnl_lock();

	team = team_nl_team_get(info);
	if (!team) {
		err = -EINVAL;
		goto rtnl_unlock;
	}

	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		LIST_HEAD(opt_inst_list);
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested_deprecated(opt_attrs,
						  TEAM_ATTR_OPTION_MAX,
						  nl_option,
						  team_nl_option_policy,
						  info->extack);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		case NLA_S32:
			opt_type = TEAM_OPTION_TYPE_S32;
			break;
		default:
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ? true : false;
				break;
			case TEAM_OPTION_TYPE_S32:
				ctx.data.s32_val = nla_get_s32(attr_data);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
			opt_inst->changed = true;
			list_add(&opt_inst->tmp_list, &opt_inst_list);
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}

		err = team_nl_send_event_options_get(team, &opt_inst_list);
		if (err)
			break;
	}

team_put:
	team_nl_team_put(team);
rtnl_unlock:
	rtnl_unlock();
	return err;
}

static int team_nl_fill_one_port_get(struct sk_buff *skb,
				     struct team_port *port)
{
	struct nlattr *port_item;

	port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT);
	if (!port_item)
		goto nest_cancel;
	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
		goto nest_cancel;
	if (port->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
			goto nest_cancel;
		port->changed = false;
	}
	if ((port->removed &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
	    (port->state.linkup &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
		goto nest_cancel;
	nla_nest_end(skb, port_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, port_item);
	return -EMSGSIZE;
}

static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
				      int flags, team_nl_send_func_t *send_func,
				      struct team_port *one_port)
{
	struct nlattr *port_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_port *port;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	port = list_first_entry_or_null(&team->port_list,
					struct team_port, list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_PORT_LIST_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;

	/* If one port is selected, caller wants to send port list containing
	 * only this port. Otherwise go through all listed ports and send all.
	 */
	if (one_port) {
		err = team_nl_fill_one_port_get(skb, one_port);
		if (err)
			goto errout;
	} else if (port) {
		list_for_each_entry_from(port, &team->port_list, list) {
			err = team_nl_fill_one_port_get(skb, port);
			if (err) {
				if (err == -EMSGSIZE) {
					if (!i)
						goto errout;
					incomplete = true;
					break;
				}
				goto errout;
			}
			i++;
		}
	}

	nla_nest_end(skb, port_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			return err;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	nlmsg_free(skb);
	return err;
}

static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
					 NLM_F_ACK, team_nl_send_unicast, NULL);

	team_nl_team_put(team);

	return err;
}

static const struct genl_small_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = team_nl_cmd_noop,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = team_nl_cmd_options_set,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = team_nl_cmd_options_get,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = team_nl_cmd_port_list_get,
		.flags = GENL_ADMIN_PERM,
	},
};

static const struct genl_multicast_group team_nl_mcgrps[] = {
	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};

static struct genl_family team_nl_family __ro_after_init = {
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.policy		= team_nl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.small_ops	= team_nl_ops,
	.n_small_ops	= ARRAY_SIZE(team_nl_ops),
	.resv_start_op	= TEAM_CMD_PORT_LIST_GET + 1,
	.mcgrps		= team_nl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(team_nl_mcgrps),
};

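/*
 * Event variant of the send functions: instead of replying to one
 * requester, broadcast to the change-event multicast group so that
 * listeners (e.g. teamd) can track option and port state changes. The
 * portid argument is unused here.
 */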
static int team_nl_send_multicast(struct sk_buff *skb,
				  struct team *team, u32 portid)
{
	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
				       skb, 0, 0, GFP_KERNEL);
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list)
{
	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
					sel_opt_inst_list);
}

static int team_nl_send_event_port_get(struct team *team,
				       struct team_port *port)
{
	return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
					  port);
}

static int __init team_nl_init(void)
{
	return genl_register_family(&team_nl_family);
}

static void __exit team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}


/******************
 * Change checkers
 ******************/

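/*
 * Collect all option instances currently marked as changed and
 * multicast one options-get event for them; -ESRCH (no multicast
 * listeners) is not treated as an error.
 */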
static void __team_options_change_check(struct team *team)
{
	int err;
	struct team_option_inst *opt_inst;
	LIST_HEAD(sel_opt_inst_list);

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->changed)
			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	}
	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
	if (err && err != -ESRCH)
		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
			    err);
}

/* rtnl lock is held */
static void __team_port_change_send(struct team_port *port, bool linkup)
{
	int err;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_link_ksettings ecmd;

		err = __ethtool_get_link_ksettings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ecmd.base.speed;
			port->state.duplex = ecmd.base.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_get(port->team, port);
	if (err && err != -ESRCH)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
			    port->dev->name, err);
}

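/*
 * Team carrier policy: unless userspace has taken over via
 * ndo_change_carrier, the team device has carrier as long as at least
 * one port reports link up.
 */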
static void __team_carrier_check(struct team *team)
{
	struct team_port *port;
	bool team_linkup;

	if (team->user_carrier_enabled)
		return;

	team_linkup = false;
	list_for_each_entry(port, &team->port_list, list) {
		if (port->linkup) {
			team_linkup = true;
			break;
		}
	}

	if (team_linkup)
		netif_carrier_on(team->dev);
	else
		netif_carrier_off(team->dev);
}

static void __team_port_change_check(struct team_port *port, bool linkup)
{
	if (port->state.linkup != linkup)
		__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
	__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_removed(struct team_port *port)
{
	port->removed = true;
	__team_port_change_send(port, false);
	__team_carrier_check(port->team);
}

static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}


/************************************
 * Net device notifier event handler
 ************************************/

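/*
 * Netdevice notifier: tracks link state of enslaved ports, detaches a
 * port when its device unregisters, refreshes team features on
 * NETDEV_FEAT_CHANGE (notifier_ctx guards against re-entry) and vetoes
 * MTU/type changes that would desync a port from its team.
 */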
static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_oper_up(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_oper_up(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		if (!port->team->notifier_ctx) {
			port->team->notifier_ctx = true;
			team_compute_features(port->team);
			port->team->notifier_ctx = false;
		}
		break;
	case NETDEV_PRECHANGEMTU:
		/* Forbid changing the MTU of an underlying port device */
		if (!port->team->port_mtu_change_allowed)
			return NOTIFY_BAD;
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing the type of an underlying port device */
		return NOTIFY_BAD;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, port->team->dev);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};


/***********************
 * Module init and exit
 ***********************/

static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * drivers/net/team/team.c - Network team device driver
4 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/slab.h>
12#include <linux/rcupdate.h>
13#include <linux/errno.h>
14#include <linux/ctype.h>
15#include <linux/notifier.h>
16#include <linux/netdevice.h>
17#include <linux/netpoll.h>
18#include <linux/if_vlan.h>
19#include <linux/if_arp.h>
20#include <linux/socket.h>
21#include <linux/etherdevice.h>
22#include <linux/rtnetlink.h>
23#include <net/rtnetlink.h>
24#include <net/genetlink.h>
25#include <net/netlink.h>
26#include <net/sch_generic.h>
27#include <generated/utsrelease.h>
28#include <linux/if_team.h>
29
30#define DRV_NAME "team"
31
32
33/**********
34 * Helpers
35 **********/
36
37static struct team_port *team_port_get_rtnl(const struct net_device *dev)
38{
39 struct team_port *port = rtnl_dereference(dev->rx_handler_data);
40
41 return netif_is_team_port(dev) ? port : NULL;
42}
43
44/*
45 * Since the ability to change device address for open port device is tested in
46 * team_port_add, this function can be called without control of return value
47 */
48static int __set_port_dev_addr(struct net_device *port_dev,
49 const unsigned char *dev_addr)
50{
51 struct sockaddr_storage addr;
52
53 memcpy(addr.__data, dev_addr, port_dev->addr_len);
54 addr.ss_family = port_dev->type;
55 return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
56}
57
58static int team_port_set_orig_dev_addr(struct team_port *port)
59{
60 return __set_port_dev_addr(port->dev, port->orig.dev_addr);
61}
62
63static int team_port_set_team_dev_addr(struct team *team,
64 struct team_port *port)
65{
66 return __set_port_dev_addr(port->dev, team->dev->dev_addr);
67}
68
69int team_modeop_port_enter(struct team *team, struct team_port *port)
70{
71 return team_port_set_team_dev_addr(team, port);
72}
73EXPORT_SYMBOL(team_modeop_port_enter);
74
75void team_modeop_port_change_dev_addr(struct team *team,
76 struct team_port *port)
77{
78 team_port_set_team_dev_addr(team, port);
79}
80EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
81
82static void team_lower_state_changed(struct team_port *port)
83{
84 struct netdev_lag_lower_state_info info;
85
86 info.link_up = port->linkup;
87 info.tx_enabled = team_port_enabled(port);
88 netdev_lower_state_changed(port->dev, &info);
89}
90
91static void team_refresh_port_linkup(struct team_port *port)
92{
93 bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
94 port->state.linkup;
95
96 if (port->linkup != new_linkup) {
97 port->linkup = new_linkup;
98 team_lower_state_changed(port);
99 }
100}
101
102
103/*******************
104 * Options handling
105 *******************/
106
107struct team_option_inst { /* One for each option instance */
108 struct list_head list;
109 struct list_head tmp_list;
110 struct team_option *option;
111 struct team_option_inst_info info;
112 bool changed;
113 bool removed;
114};
115
116static struct team_option *__team_find_option(struct team *team,
117 const char *opt_name)
118{
119 struct team_option *option;
120
121 list_for_each_entry(option, &team->option_list, list) {
122 if (strcmp(option->name, opt_name) == 0)
123 return option;
124 }
125 return NULL;
126}
127
128static void __team_option_inst_del(struct team_option_inst *opt_inst)
129{
130 list_del(&opt_inst->list);
131 kfree(opt_inst);
132}
133
134static void __team_option_inst_del_option(struct team *team,
135 struct team_option *option)
136{
137 struct team_option_inst *opt_inst, *tmp;
138
139 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
140 if (opt_inst->option == option)
141 __team_option_inst_del(opt_inst);
142 }
143}
144
145static int __team_option_inst_add(struct team *team, struct team_option *option,
146 struct team_port *port)
147{
148 struct team_option_inst *opt_inst;
149 unsigned int array_size;
150 unsigned int i;
151 int err;
152
153 array_size = option->array_size;
154 if (!array_size)
155 array_size = 1; /* No array but still need one instance */
156
157 for (i = 0; i < array_size; i++) {
158 opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
159 if (!opt_inst)
160 return -ENOMEM;
161 opt_inst->option = option;
162 opt_inst->info.port = port;
163 opt_inst->info.array_index = i;
164 opt_inst->changed = true;
165 opt_inst->removed = false;
166 list_add_tail(&opt_inst->list, &team->option_inst_list);
167 if (option->init) {
168 err = option->init(team, &opt_inst->info);
169 if (err)
170 return err;
171 }
172
173 }
174 return 0;
175}
176
177static int __team_option_inst_add_option(struct team *team,
178 struct team_option *option)
179{
180 int err;
181
182 if (!option->per_port) {
183 err = __team_option_inst_add(team, option, NULL);
184 if (err)
185 goto inst_del_option;
186 }
187 return 0;
188
189inst_del_option:
190 __team_option_inst_del_option(team, option);
191 return err;
192}
193
194static void __team_option_inst_mark_removed_option(struct team *team,
195 struct team_option *option)
196{
197 struct team_option_inst *opt_inst;
198
199 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
200 if (opt_inst->option == option) {
201 opt_inst->changed = true;
202 opt_inst->removed = true;
203 }
204 }
205}
206
207static void __team_option_inst_del_port(struct team *team,
208 struct team_port *port)
209{
210 struct team_option_inst *opt_inst, *tmp;
211
212 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
213 if (opt_inst->option->per_port &&
214 opt_inst->info.port == port)
215 __team_option_inst_del(opt_inst);
216 }
217}
218
219static int __team_option_inst_add_port(struct team *team,
220 struct team_port *port)
221{
222 struct team_option *option;
223 int err;
224
225 list_for_each_entry(option, &team->option_list, list) {
226 if (!option->per_port)
227 continue;
228 err = __team_option_inst_add(team, option, port);
229 if (err)
230 goto inst_del_port;
231 }
232 return 0;
233
234inst_del_port:
235 __team_option_inst_del_port(team, port);
236 return err;
237}
238
239static void __team_option_inst_mark_removed_port(struct team *team,
240 struct team_port *port)
241{
242 struct team_option_inst *opt_inst;
243
244 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
245 if (opt_inst->info.port == port) {
246 opt_inst->changed = true;
247 opt_inst->removed = true;
248 }
249 }
250}
251
252static int __team_options_register(struct team *team,
253 const struct team_option *option,
254 size_t option_count)
255{
256 int i;
257 struct team_option **dst_opts;
258 int err;
259
260 dst_opts = kcalloc(option_count, sizeof(struct team_option *),
261 GFP_KERNEL);
262 if (!dst_opts)
263 return -ENOMEM;
264 for (i = 0; i < option_count; i++, option++) {
265 if (__team_find_option(team, option->name)) {
266 err = -EEXIST;
267 goto alloc_rollback;
268 }
269 dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
270 if (!dst_opts[i]) {
271 err = -ENOMEM;
272 goto alloc_rollback;
273 }
274 }
275
276 for (i = 0; i < option_count; i++) {
277 err = __team_option_inst_add_option(team, dst_opts[i]);
278 if (err)
279 goto inst_rollback;
280 list_add_tail(&dst_opts[i]->list, &team->option_list);
281 }
282
283 kfree(dst_opts);
284 return 0;
285
286inst_rollback:
287 for (i--; i >= 0; i--)
288 __team_option_inst_del_option(team, dst_opts[i]);
289
290 i = option_count;
291alloc_rollback:
292 for (i--; i >= 0; i--)
293 kfree(dst_opts[i]);
294
295 kfree(dst_opts);
296 return err;
297}
298
299static void __team_options_mark_removed(struct team *team,
300 const struct team_option *option,
301 size_t option_count)
302{
303 int i;
304
305 for (i = 0; i < option_count; i++, option++) {
306 struct team_option *del_opt;
307
308 del_opt = __team_find_option(team, option->name);
309 if (del_opt)
310 __team_option_inst_mark_removed_option(team, del_opt);
311 }
312}
313
314static void __team_options_unregister(struct team *team,
315 const struct team_option *option,
316 size_t option_count)
317{
318 int i;
319
320 for (i = 0; i < option_count; i++, option++) {
321 struct team_option *del_opt;
322
323 del_opt = __team_find_option(team, option->name);
324 if (del_opt) {
325 __team_option_inst_del_option(team, del_opt);
326 list_del(&del_opt->list);
327 kfree(del_opt);
328 }
329 }
330}
331
332static void __team_options_change_check(struct team *team);
333
334int team_options_register(struct team *team,
335 const struct team_option *option,
336 size_t option_count)
337{
338 int err;
339
340 err = __team_options_register(team, option, option_count);
341 if (err)
342 return err;
343 __team_options_change_check(team);
344 return 0;
345}
346EXPORT_SYMBOL(team_options_register);
347
348void team_options_unregister(struct team *team,
349 const struct team_option *option,
350 size_t option_count)
351{
352 __team_options_mark_removed(team, option, option_count);
353 __team_options_change_check(team);
354 __team_options_unregister(team, option, option_count);
355}
356EXPORT_SYMBOL(team_options_unregister);
357
358static int team_option_get(struct team *team,
359 struct team_option_inst *opt_inst,
360 struct team_gsetter_ctx *ctx)
361{
362 if (!opt_inst->option->getter)
363 return -EOPNOTSUPP;
364 return opt_inst->option->getter(team, ctx);
365}
366
367static int team_option_set(struct team *team,
368 struct team_option_inst *opt_inst,
369 struct team_gsetter_ctx *ctx)
370{
371 if (!opt_inst->option->setter)
372 return -EOPNOTSUPP;
373 return opt_inst->option->setter(team, ctx);
374}
375
376void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
377{
378 struct team_option_inst *opt_inst;
379
380 opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
381 opt_inst->changed = true;
382}
383EXPORT_SYMBOL(team_option_inst_set_change);
384
385void team_options_change_check(struct team *team)
386{
387 __team_options_change_check(team);
388}
389EXPORT_SYMBOL(team_options_change_check);
390
391
392/****************
393 * Mode handling
394 ****************/
395
396static LIST_HEAD(mode_list);
397static DEFINE_SPINLOCK(mode_list_lock);
398
399struct team_mode_item {
400 struct list_head list;
401 const struct team_mode *mode;
402};
403
404static struct team_mode_item *__find_mode(const char *kind)
405{
406 struct team_mode_item *mitem;
407
408 list_for_each_entry(mitem, &mode_list, list) {
409 if (strcmp(mitem->mode->kind, kind) == 0)
410 return mitem;
411 }
412 return NULL;
413}
414
415static bool is_good_mode_name(const char *name)
416{
417 while (*name != '\0') {
418 if (!isalpha(*name) && !isdigit(*name) && *name != '_')
419 return false;
420 name++;
421 }
422 return true;
423}
424
425int team_mode_register(const struct team_mode *mode)
426{
427 int err = 0;
428 struct team_mode_item *mitem;
429
430 if (!is_good_mode_name(mode->kind) ||
431 mode->priv_size > TEAM_MODE_PRIV_SIZE)
432 return -EINVAL;
433
434 mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
435 if (!mitem)
436 return -ENOMEM;
437
438 spin_lock(&mode_list_lock);
439 if (__find_mode(mode->kind)) {
440 err = -EEXIST;
441 kfree(mitem);
442 goto unlock;
443 }
444 mitem->mode = mode;
445 list_add_tail(&mitem->list, &mode_list);
446unlock:
447 spin_unlock(&mode_list_lock);
448 return err;
449}
450EXPORT_SYMBOL(team_mode_register);
451
452void team_mode_unregister(const struct team_mode *mode)
453{
454 struct team_mode_item *mitem;
455
456 spin_lock(&mode_list_lock);
457 mitem = __find_mode(mode->kind);
458 if (mitem) {
459 list_del_init(&mitem->list);
460 kfree(mitem);
461 }
462 spin_unlock(&mode_list_lock);
463}
464EXPORT_SYMBOL(team_mode_unregister);
465
466static const struct team_mode *team_mode_get(const char *kind)
467{
468 struct team_mode_item *mitem;
469 const struct team_mode *mode = NULL;
470
471 if (!try_module_get(THIS_MODULE))
472 return NULL;
473
474 spin_lock(&mode_list_lock);
475 mitem = __find_mode(kind);
476 if (!mitem) {
477 spin_unlock(&mode_list_lock);
478 request_module("team-mode-%s", kind);
479 spin_lock(&mode_list_lock);
480 mitem = __find_mode(kind);
481 }
482 if (mitem) {
483 mode = mitem->mode;
484 if (!try_module_get(mode->owner))
485 mode = NULL;
486 }
487
488 spin_unlock(&mode_list_lock);
489 module_put(THIS_MODULE);
490 return mode;
491}
492
493static void team_mode_put(const struct team_mode *mode)
494{
495 module_put(mode->owner);
496}
497
498static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
499{
500 dev_kfree_skb_any(skb);
501 return false;
502}
503
504static rx_handler_result_t team_dummy_receive(struct team *team,
505 struct team_port *port,
506 struct sk_buff *skb)
507{
508 return RX_HANDLER_ANOTHER;
509}
510
511static const struct team_mode __team_no_mode = {
512 .kind = "*NOMODE*",
513};
514
515static bool team_is_mode_set(struct team *team)
516{
517 return team->mode != &__team_no_mode;
518}
519
520static void team_set_no_mode(struct team *team)
521{
522 team->user_carrier_enabled = false;
523 team->mode = &__team_no_mode;
524}
525
526static void team_adjust_ops(struct team *team)
527{
528 /*
529 * To avoid checks in rx/tx skb paths, ensure here that non-null and
530 * correct ops are always set.
531 */
532
533 if (!team->en_port_count || !team_is_mode_set(team) ||
534 !team->mode->ops->transmit)
535 team->ops.transmit = team_dummy_transmit;
536 else
537 team->ops.transmit = team->mode->ops->transmit;
538
539 if (!team->en_port_count || !team_is_mode_set(team) ||
540 !team->mode->ops->receive)
541 team->ops.receive = team_dummy_receive;
542 else
543 team->ops.receive = team->mode->ops->receive;
544}
545
546/*
547 * We can benefit from the fact that it's ensured no port is present
548 * at the time of mode change. Therefore no packets are in fly so there's no
549 * need to set mode operations in any special way.
550 */
551static int __team_change_mode(struct team *team,
552 const struct team_mode *new_mode)
553{
554 /* Check if mode was previously set and do cleanup if so */
555 if (team_is_mode_set(team)) {
556 void (*exit_op)(struct team *team) = team->ops.exit;
557
558 /* Clear ops area so no callback is called any longer */
559 memset(&team->ops, 0, sizeof(struct team_mode_ops));
560 team_adjust_ops(team);
561
562 if (exit_op)
563 exit_op(team);
564 team_mode_put(team->mode);
565 team_set_no_mode(team);
566 /* zero private data area */
567 memset(&team->mode_priv, 0,
568 sizeof(struct team) - offsetof(struct team, mode_priv));
569 }
570
571 if (!new_mode)
572 return 0;
573
574 if (new_mode->ops->init) {
575 int err;
576
577 err = new_mode->ops->init(team);
578 if (err)
579 return err;
580 }
581
582 team->mode = new_mode;
583 memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
584 team_adjust_ops(team);
585
586 return 0;
587}
588
589static int team_change_mode(struct team *team, const char *kind)
590{
591 const struct team_mode *new_mode;
592 struct net_device *dev = team->dev;
593 int err;
594
595 if (!list_empty(&team->port_list)) {
596 netdev_err(dev, "No ports can be present during mode change\n");
597 return -EBUSY;
598 }
599
600 if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
601 netdev_err(dev, "Unable to change to the same mode the team is in\n");
602 return -EINVAL;
603 }
604
605 new_mode = team_mode_get(kind);
606 if (!new_mode) {
607 netdev_err(dev, "Mode \"%s\" not found\n", kind);
608 return -EINVAL;
609 }
610
611 err = __team_change_mode(team, new_mode);
612 if (err) {
613 netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
614 team_mode_put(new_mode);
615 return err;
616 }
617
618 netdev_info(dev, "Mode changed to \"%s\"\n", kind);
619 return 0;
620}
621
622
623/*********************
624 * Peers notification
625 *********************/
626
627static void team_notify_peers_work(struct work_struct *work)
628{
629 struct team *team;
630 int val;
631
632 team = container_of(work, struct team, notify_peers.dw.work);
633
634 if (!rtnl_trylock()) {
635 schedule_delayed_work(&team->notify_peers.dw, 0);
636 return;
637 }
638 val = atomic_dec_if_positive(&team->notify_peers.count_pending);
639 if (val < 0) {
640 rtnl_unlock();
641 return;
642 }
643 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
644 rtnl_unlock();
645 if (val)
646 schedule_delayed_work(&team->notify_peers.dw,
647 msecs_to_jiffies(team->notify_peers.interval));
648}
649
650static void team_notify_peers(struct team *team)
651{
652 if (!team->notify_peers.count || !netif_running(team->dev))
653 return;
654 atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
655 schedule_delayed_work(&team->notify_peers.dw, 0);
656}
657
658static void team_notify_peers_init(struct team *team)
659{
660 INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
661}
662
663static void team_notify_peers_fini(struct team *team)
664{
665 cancel_delayed_work_sync(&team->notify_peers.dw);
666}
667
668
669/*******************************
670 * Send multicast group rejoins
671 *******************************/
672
673static void team_mcast_rejoin_work(struct work_struct *work)
674{
675 struct team *team;
676 int val;
677
678 team = container_of(work, struct team, mcast_rejoin.dw.work);
679
680 if (!rtnl_trylock()) {
681 schedule_delayed_work(&team->mcast_rejoin.dw, 0);
682 return;
683 }
684 val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
685 if (val < 0) {
686 rtnl_unlock();
687 return;
688 }
689 call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
690 rtnl_unlock();
691 if (val)
692 schedule_delayed_work(&team->mcast_rejoin.dw,
693 msecs_to_jiffies(team->mcast_rejoin.interval));
694}
695
696static void team_mcast_rejoin(struct team *team)
697{
698 if (!team->mcast_rejoin.count || !netif_running(team->dev))
699 return;
700 atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
701 schedule_delayed_work(&team->mcast_rejoin.dw, 0);
702}
703
704static void team_mcast_rejoin_init(struct team *team)
705{
706 INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
707}
708
709static void team_mcast_rejoin_fini(struct team *team)
710{
711 cancel_delayed_work_sync(&team->mcast_rejoin.dw);
712}
713
714
715/************************
716 * Rx path frame handler
717 ************************/
718
719/* note: already called with rcu_read_lock */
720static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
721{
722 struct sk_buff *skb = *pskb;
723 struct team_port *port;
724 struct team *team;
725 rx_handler_result_t res;
726
727 skb = skb_share_check(skb, GFP_ATOMIC);
728 if (!skb)
729 return RX_HANDLER_CONSUMED;
730
731 *pskb = skb;
732
733 port = team_port_get_rcu(skb->dev);
734 team = port->team;
735 if (!team_port_enabled(port)) {
736 /* allow exact match delivery for disabled ports */
737 res = RX_HANDLER_EXACT;
738 } else {
739 res = team->ops.receive(team, port, skb);
740 }
741 if (res == RX_HANDLER_ANOTHER) {
742 struct team_pcpu_stats *pcpu_stats;
743
744 pcpu_stats = this_cpu_ptr(team->pcpu_stats);
745 u64_stats_update_begin(&pcpu_stats->syncp);
746 pcpu_stats->rx_packets++;
747 pcpu_stats->rx_bytes += skb->len;
748 if (skb->pkt_type == PACKET_MULTICAST)
749 pcpu_stats->rx_multicast++;
750 u64_stats_update_end(&pcpu_stats->syncp);
751
752 skb->dev = team->dev;
753 } else if (res == RX_HANDLER_EXACT) {
754 this_cpu_inc(team->pcpu_stats->rx_nohandler);
755 } else {
756 this_cpu_inc(team->pcpu_stats->rx_dropped);
757 }
758
759 return res;
760}
761
762
763/*************************************
764 * Multiqueue Tx port select override
765 *************************************/
766
767static int team_queue_override_init(struct team *team)
768{
769 struct list_head *listarr;
770 unsigned int queue_cnt = team->dev->num_tx_queues - 1;
771 unsigned int i;
772
773 if (!queue_cnt)
774 return 0;
775 listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
776 GFP_KERNEL);
777 if (!listarr)
778 return -ENOMEM;
779 team->qom_lists = listarr;
780 for (i = 0; i < queue_cnt; i++)
781 INIT_LIST_HEAD(listarr++);
782 return 0;
783}
784
785static void team_queue_override_fini(struct team *team)
786{
787 kfree(team->qom_lists);
788}
789
790static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
791{
792 return &team->qom_lists[queue_id - 1];
793}
794
795/*
796 * note: already called with rcu_read_lock
797 */
798static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
799{
800 struct list_head *qom_list;
801 struct team_port *port;
802
803 if (!team->queue_override_enabled || !skb->queue_mapping)
804 return false;
805 qom_list = __team_get_qom_list(team, skb->queue_mapping);
806 list_for_each_entry_rcu(port, qom_list, qom_list) {
807 if (!team_dev_queue_xmit(team, port, skb))
808 return true;
809 }
810 return false;
811}
812
813static void __team_queue_override_port_del(struct team *team,
814 struct team_port *port)
815{
816 if (!port->queue_id)
817 return;
818 list_del_rcu(&port->qom_list);
819}
820
821static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
822 struct team_port *cur)
823{
824 if (port->priority < cur->priority)
825 return true;
826 if (port->priority > cur->priority)
827 return false;
828 if (port->index < cur->index)
829 return true;
830 return false;
831}
832
833static void __team_queue_override_port_add(struct team *team,
834 struct team_port *port)
835{
836 struct team_port *cur;
837 struct list_head *qom_list;
838 struct list_head *node;
839
840 if (!port->queue_id)
841 return;
842 qom_list = __team_get_qom_list(team, port->queue_id);
843 node = qom_list;
844 list_for_each_entry(cur, qom_list, qom_list) {
845 if (team_queue_override_port_has_gt_prio_than(port, cur))
846 break;
847 node = &cur->qom_list;
848 }
849 list_add_tail_rcu(&port->qom_list, node);
850}
851
852static void __team_queue_override_enabled_check(struct team *team)
853{
854 struct team_port *port;
855 bool enabled = false;
856
857 list_for_each_entry(port, &team->port_list, list) {
858 if (port->queue_id) {
859 enabled = true;
860 break;
861 }
862 }
863 if (enabled == team->queue_override_enabled)
864 return;
865 netdev_dbg(team->dev, "%s queue override\n",
866 enabled ? "Enabling" : "Disabling");
867 team->queue_override_enabled = enabled;
868}
869
870static void team_queue_override_port_prio_changed(struct team *team,
871 struct team_port *port)
872{
873 if (!port->queue_id || team_port_enabled(port))
874 return;
875 __team_queue_override_port_del(team, port);
876 __team_queue_override_port_add(team, port);
877 __team_queue_override_enabled_check(team);
878}
879
880static void team_queue_override_port_change_queue_id(struct team *team,
881 struct team_port *port,
882 u16 new_queue_id)
883{
884 if (team_port_enabled(port)) {
885 __team_queue_override_port_del(team, port);
886 port->queue_id = new_queue_id;
887 __team_queue_override_port_add(team, port);
888 __team_queue_override_enabled_check(team);
889 } else {
890 port->queue_id = new_queue_id;
891 }
892}
893
894static void team_queue_override_port_add(struct team *team,
895 struct team_port *port)
896{
897 __team_queue_override_port_add(team, port);
898 __team_queue_override_enabled_check(team);
899}
900
901static void team_queue_override_port_del(struct team *team,
902 struct team_port *port)
903{
904 __team_queue_override_port_del(team, port);
905 __team_queue_override_enabled_check(team);
906}
907
908
909/****************
910 * Port handling
911 ****************/
912
913static bool team_port_find(const struct team *team,
914 const struct team_port *port)
915{
916 struct team_port *cur;
917
918 list_for_each_entry(cur, &team->port_list, list)
919 if (cur == port)
920 return true;
921 return false;
922}
923
924/*
925 * Enable/disable port by adding to enabled port hashlist and setting
926 * port->index (Might be racy so reader could see incorrect ifindex when
927 * processing a flying packet, but that is not a problem). Write guarded
928 * by team->lock.
929 */
930static void team_port_enable(struct team *team,
931 struct team_port *port)
932{
933 if (team_port_enabled(port))
934 return;
935 port->index = team->en_port_count++;
936 hlist_add_head_rcu(&port->hlist,
937 team_port_index_hash(team, port->index));
938 team_adjust_ops(team);
939 team_queue_override_port_add(team, port);
940 if (team->ops.port_enabled)
941 team->ops.port_enabled(team, port);
942 team_notify_peers(team);
943 team_mcast_rejoin(team);
944 team_lower_state_changed(port);
945}
946
947static void __reconstruct_port_hlist(struct team *team, int rm_index)
948{
949 int i;
950 struct team_port *port;
951
952 for (i = rm_index + 1; i < team->en_port_count; i++) {
953 port = team_get_port_by_index(team, i);
954 hlist_del_rcu(&port->hlist);
955 port->index--;
956 hlist_add_head_rcu(&port->hlist,
957 team_port_index_hash(team, port->index));
958 }
959}
960
961static void team_port_disable(struct team *team,
962 struct team_port *port)
963{
964 if (!team_port_enabled(port))
965 return;
966 if (team->ops.port_disabled)
967 team->ops.port_disabled(team, port);
968 hlist_del_rcu(&port->hlist);
969 __reconstruct_port_hlist(team, port->index);
970 port->index = -1;
971 team->en_port_count--;
972 team_queue_override_port_del(team, port);
973 team_adjust_ops(team);
974 team_lower_state_changed(port);
975}
976
977#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
978 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
979 NETIF_F_HIGHDMA | NETIF_F_LRO)
980
981#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
982 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
983
984static void __team_compute_features(struct team *team)
985{
986 struct team_port *port;
987 netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
988 NETIF_F_ALL_FOR_ALL;
989 netdev_features_t enc_features = TEAM_ENC_FEATURES;
990 unsigned short max_hard_header_len = ETH_HLEN;
991 unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
992 IFF_XMIT_DST_RELEASE_PERM;
993
994 list_for_each_entry(port, &team->port_list, list) {
995 vlan_features = netdev_increment_features(vlan_features,
996 port->dev->vlan_features,
997 TEAM_VLAN_FEATURES);
998 enc_features =
999 netdev_increment_features(enc_features,
1000 port->dev->hw_enc_features,
1001 TEAM_ENC_FEATURES);
1002
1003
1004 dst_release_flag &= port->dev->priv_flags;
1005 if (port->dev->hard_header_len > max_hard_header_len)
1006 max_hard_header_len = port->dev->hard_header_len;
1007 }
1008
1009 team->dev->vlan_features = vlan_features;
1010 team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1011 NETIF_F_HW_VLAN_CTAG_TX |
1012 NETIF_F_HW_VLAN_STAG_TX |
1013 NETIF_F_GSO_UDP_L4;
1014 team->dev->hard_header_len = max_hard_header_len;
1015
1016 team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1017 if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
1018 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1019}
1020
1021static void team_compute_features(struct team *team)
1022{
1023 mutex_lock(&team->lock);
1024 __team_compute_features(team);
1025 mutex_unlock(&team->lock);
1026 netdev_change_features(team->dev);
1027}
1028
1029static int team_port_enter(struct team *team, struct team_port *port)
1030{
1031 int err = 0;
1032
1033 dev_hold(team->dev);
1034 if (team->ops.port_enter) {
1035 err = team->ops.port_enter(team, port);
1036 if (err) {
1037 netdev_err(team->dev, "Device %s failed to enter team mode\n",
1038 port->dev->name);
1039 goto err_port_enter;
1040 }
1041 }
1042
1043 return 0;
1044
1045err_port_enter:
1046 dev_put(team->dev);
1047
1048 return err;
1049}
1050
1051static void team_port_leave(struct team *team, struct team_port *port)
1052{
1053 if (team->ops.port_leave)
1054 team->ops.port_leave(team, port);
1055 dev_put(team->dev);
1056}
1057
1058#ifdef CONFIG_NET_POLL_CONTROLLER
1059static int __team_port_enable_netpoll(struct team_port *port)
1060{
1061 struct netpoll *np;
1062 int err;
1063
1064 np = kzalloc(sizeof(*np), GFP_KERNEL);
1065 if (!np)
1066 return -ENOMEM;
1067
1068 err = __netpoll_setup(np, port->dev);
1069 if (err) {
1070 kfree(np);
1071 return err;
1072 }
1073 port->np = np;
1074 return err;
1075}
1076
1077static int team_port_enable_netpoll(struct team_port *port)
1078{
1079 if (!port->team->dev->npinfo)
1080 return 0;
1081
1082 return __team_port_enable_netpoll(port);
1083}
1084
1085static void team_port_disable_netpoll(struct team_port *port)
1086{
1087 struct netpoll *np = port->np;
1088
1089 if (!np)
1090 return;
1091 port->np = NULL;
1092
1093 __netpoll_free(np);
1094}
1095#else
1096static int team_port_enable_netpoll(struct team_port *port)
1097{
1098 return 0;
1099}
1100static void team_port_disable_netpoll(struct team_port *port)
1101{
1102}
1103#endif
1104
1105static int team_upper_dev_link(struct team *team, struct team_port *port,
1106 struct netlink_ext_ack *extack)
1107{
1108 struct netdev_lag_upper_info lag_upper_info;
1109 int err;
1110
1111 lag_upper_info.tx_type = team->mode->lag_tx_type;
1112 lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
1113 err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
1114 &lag_upper_info, extack);
1115 if (err)
1116 return err;
1117 port->dev->priv_flags |= IFF_TEAM_PORT;
1118 return 0;
1119}
1120
1121static void team_upper_dev_unlink(struct team *team, struct team_port *port)
1122{
1123 netdev_upper_dev_unlink(port->dev, team->dev);
1124 port->dev->priv_flags &= ~IFF_TEAM_PORT;
1125}
1126
1127static void __team_port_change_port_added(struct team_port *port, bool linkup);
1128static int team_dev_type_check_change(struct net_device *dev,
1129 struct net_device *port_dev);
1130
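/*
 * team_port_add() enslaves port_dev under the team device. The checks up
 * front reject loopback devices, devices that are already team ports or
 * uppers of this team, the team device itself, VLAN-challenged devices
 * while the team has VLANs configured, and devices that are still up. On
 * any later failure the labels at the bottom unwind in exact reverse
 * order of the setup steps.
 */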
1131static int team_port_add(struct team *team, struct net_device *port_dev,
1132 struct netlink_ext_ack *extack)
1133{
1134 struct net_device *dev = team->dev;
1135 struct team_port *port;
1136 char *portname = port_dev->name;
1137 int err;
1138
1139 if (port_dev->flags & IFF_LOOPBACK) {
1140 NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
1141 netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
1142 portname);
1143 return -EINVAL;
1144 }
1145
1146 if (netif_is_team_port(port_dev)) {
1147 NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
1148		netdev_err(dev, "Device %s is already a port of a team device\n",
1149			   portname);
1150 return -EBUSY;
1151 }
1152
1153 if (dev == port_dev) {
1154 NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
1155 netdev_err(dev, "Cannot enslave team device to itself\n");
1156 return -EINVAL;
1157 }
1158
1159 if (netdev_has_upper_dev(dev, port_dev)) {
1160 NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
1161 netdev_err(dev, "Device %s is already an upper device of the team interface\n",
1162 portname);
1163 return -EBUSY;
1164 }
1165
1166 if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
1167 vlan_uses_dev(dev)) {
1168 NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
1169 netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
1170 portname);
1171 return -EPERM;
1172 }
1173
1174 err = team_dev_type_check_change(dev, port_dev);
1175 if (err)
1176 return err;
1177
1178 if (port_dev->flags & IFF_UP) {
1179 NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
1180 netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
1181 portname);
1182 return -EBUSY;
1183 }
1184
1185 port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
1186 GFP_KERNEL);
1187 if (!port)
1188 return -ENOMEM;
1189
1190 port->dev = port_dev;
1191 port->team = team;
1192 INIT_LIST_HEAD(&port->qom_list);
1193
1194 port->orig.mtu = port_dev->mtu;
1195 err = dev_set_mtu(port_dev, dev->mtu);
1196 if (err) {
1197 netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
1198 goto err_set_mtu;
1199 }
1200
1201 memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
1202
1203 err = team_port_enter(team, port);
1204 if (err) {
1205 netdev_err(dev, "Device %s failed to enter team mode\n",
1206 portname);
1207 goto err_port_enter;
1208 }
1209
1210 err = dev_open(port_dev, extack);
1211 if (err) {
1212 netdev_dbg(dev, "Device %s opening failed\n",
1213 portname);
1214 goto err_dev_open;
1215 }
1216
1217 err = vlan_vids_add_by_dev(port_dev, dev);
1218 if (err) {
1219 netdev_err(dev, "Failed to add vlan ids to device %s\n",
1220 portname);
1221 goto err_vids_add;
1222 }
1223
1224 err = team_port_enable_netpoll(port);
1225 if (err) {
1226 netdev_err(dev, "Failed to enable netpoll on device %s\n",
1227 portname);
1228 goto err_enable_netpoll;
1229 }
1230
1231 if (!(dev->features & NETIF_F_LRO))
1232 dev_disable_lro(port_dev);
1233
1234 err = netdev_rx_handler_register(port_dev, team_handle_frame,
1235 port);
1236 if (err) {
1237 netdev_err(dev, "Device %s failed to register rx_handler\n",
1238 portname);
1239 goto err_handler_register;
1240 }
1241
1242 err = team_upper_dev_link(team, port, extack);
1243 if (err) {
1244 netdev_err(dev, "Device %s failed to set upper link\n",
1245 portname);
1246 goto err_set_upper_link;
1247 }
1248
1249 err = __team_option_inst_add_port(team, port);
1250 if (err) {
1251 netdev_err(dev, "Device %s failed to add per-port options\n",
1252 portname);
1253 goto err_option_port_add;
1254 }
1255
1256	/* set promiscuity level on the new slave */
1257 if (dev->flags & IFF_PROMISC) {
1258 err = dev_set_promiscuity(port_dev, 1);
1259 if (err)
1260 goto err_set_slave_promisc;
1261 }
1262
1263	/* set allmulti level on the new slave */
1264 if (dev->flags & IFF_ALLMULTI) {
1265 err = dev_set_allmulti(port_dev, 1);
1266 if (err) {
1267 if (dev->flags & IFF_PROMISC)
1268 dev_set_promiscuity(port_dev, -1);
1269 goto err_set_slave_promisc;
1270 }
1271 }
1272
1273 netif_addr_lock_bh(dev);
1274 dev_uc_sync_multiple(port_dev, dev);
1275 dev_mc_sync_multiple(port_dev, dev);
1276 netif_addr_unlock_bh(dev);
1277
1278 port->index = -1;
1279 list_add_tail_rcu(&port->list, &team->port_list);
1280 team_port_enable(team, port);
1281 __team_compute_features(team);
1282 __team_port_change_port_added(port, !!netif_oper_up(port_dev));
1283 __team_options_change_check(team);
1284
1285 netdev_info(dev, "Port device %s added\n", portname);
1286
1287 return 0;
1288
1289err_set_slave_promisc:
1290 __team_option_inst_del_port(team, port);
1291
1292err_option_port_add:
1293 team_upper_dev_unlink(team, port);
1294
1295err_set_upper_link:
1296 netdev_rx_handler_unregister(port_dev);
1297
1298err_handler_register:
1299 team_port_disable_netpoll(port);
1300
1301err_enable_netpoll:
1302 vlan_vids_del_by_dev(port_dev, dev);
1303
1304err_vids_add:
1305 dev_close(port_dev);
1306
1307err_dev_open:
1308 team_port_leave(team, port);
1309 team_port_set_orig_dev_addr(port);
1310
1311err_port_enter:
1312 dev_set_mtu(port_dev, port->orig.mtu);
1313
1314err_set_mtu:
1315 kfree(port);
1316
1317 return err;
1318}
1319
1320static void __team_port_change_port_removed(struct team_port *port);
1321
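/*
 * team_port_del() tears a port down in reverse order of team_port_add():
 * disable and unlink it, drop the promisc/allmulti bumps, unregister the
 * rx handler, restore the port's original MAC address and MTU, and free
 * the port via RCU once concurrent readers are done with it.
 */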
1322static int team_port_del(struct team *team, struct net_device *port_dev)
1323{
1324 struct net_device *dev = team->dev;
1325 struct team_port *port;
1326 char *portname = port_dev->name;
1327
1328 port = team_port_get_rtnl(port_dev);
1329 if (!port || !team_port_find(team, port)) {
1330 netdev_err(dev, "Device %s does not act as a port of this team\n",
1331 portname);
1332 return -ENOENT;
1333 }
1334
1335 team_port_disable(team, port);
1336 list_del_rcu(&port->list);
1337
1338 if (dev->flags & IFF_PROMISC)
1339 dev_set_promiscuity(port_dev, -1);
1340 if (dev->flags & IFF_ALLMULTI)
1341 dev_set_allmulti(port_dev, -1);
1342
1343 team_upper_dev_unlink(team, port);
1344 netdev_rx_handler_unregister(port_dev);
1345 team_port_disable_netpoll(port);
1346 vlan_vids_del_by_dev(port_dev, dev);
1347 dev_uc_unsync(port_dev, dev);
1348 dev_mc_unsync(port_dev, dev);
1349 dev_close(port_dev);
1350 team_port_leave(team, port);
1351
1352 __team_option_inst_mark_removed_port(team, port);
1353 __team_options_change_check(team);
1354 __team_option_inst_del_port(team, port);
1355 __team_port_change_port_removed(port);
1356
1357 team_port_set_orig_dev_addr(port);
1358 dev_set_mtu(port_dev, port->orig.mtu);
1359 kfree_rcu(port, rcu);
1360 netdev_info(dev, "Port device %s removed\n", portname);
1361 __team_compute_features(team);
1362
1363 return 0;
1364}
1365
1366
1367/*****************
1368 * Net device ops
1369 *****************/
1370
1371static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
1372{
1373 ctx->data.str_val = team->mode->kind;
1374 return 0;
1375}
1376
1377static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
1378{
1379 return team_change_mode(team, ctx->data.str_val);
1380}
1381
1382static int team_notify_peers_count_get(struct team *team,
1383 struct team_gsetter_ctx *ctx)
1384{
1385 ctx->data.u32_val = team->notify_peers.count;
1386 return 0;
1387}
1388
1389static int team_notify_peers_count_set(struct team *team,
1390 struct team_gsetter_ctx *ctx)
1391{
1392 team->notify_peers.count = ctx->data.u32_val;
1393 return 0;
1394}
1395
1396static int team_notify_peers_interval_get(struct team *team,
1397 struct team_gsetter_ctx *ctx)
1398{
1399 ctx->data.u32_val = team->notify_peers.interval;
1400 return 0;
1401}
1402
1403static int team_notify_peers_interval_set(struct team *team,
1404 struct team_gsetter_ctx *ctx)
1405{
1406 team->notify_peers.interval = ctx->data.u32_val;
1407 return 0;
1408}
1409
1410static int team_mcast_rejoin_count_get(struct team *team,
1411 struct team_gsetter_ctx *ctx)
1412{
1413 ctx->data.u32_val = team->mcast_rejoin.count;
1414 return 0;
1415}
1416
1417static int team_mcast_rejoin_count_set(struct team *team,
1418 struct team_gsetter_ctx *ctx)
1419{
1420 team->mcast_rejoin.count = ctx->data.u32_val;
1421 return 0;
1422}
1423
1424static int team_mcast_rejoin_interval_get(struct team *team,
1425 struct team_gsetter_ctx *ctx)
1426{
1427 ctx->data.u32_val = team->mcast_rejoin.interval;
1428 return 0;
1429}
1430
1431static int team_mcast_rejoin_interval_set(struct team *team,
1432 struct team_gsetter_ctx *ctx)
1433{
1434 team->mcast_rejoin.interval = ctx->data.u32_val;
1435 return 0;
1436}
1437
1438static int team_port_en_option_get(struct team *team,
1439 struct team_gsetter_ctx *ctx)
1440{
1441 struct team_port *port = ctx->info->port;
1442
1443 ctx->data.bool_val = team_port_enabled(port);
1444 return 0;
1445}
1446
1447static int team_port_en_option_set(struct team *team,
1448 struct team_gsetter_ctx *ctx)
1449{
1450 struct team_port *port = ctx->info->port;
1451
1452 if (ctx->data.bool_val)
1453 team_port_enable(team, port);
1454 else
1455 team_port_disable(team, port);
1456 return 0;
1457}
1458
1459static int team_user_linkup_option_get(struct team *team,
1460 struct team_gsetter_ctx *ctx)
1461{
1462 struct team_port *port = ctx->info->port;
1463
1464 ctx->data.bool_val = port->user.linkup;
1465 return 0;
1466}
1467
1468static void __team_carrier_check(struct team *team);
1469
1470static int team_user_linkup_option_set(struct team *team,
1471 struct team_gsetter_ctx *ctx)
1472{
1473 struct team_port *port = ctx->info->port;
1474
1475 port->user.linkup = ctx->data.bool_val;
1476 team_refresh_port_linkup(port);
1477 __team_carrier_check(port->team);
1478 return 0;
1479}
1480
1481static int team_user_linkup_en_option_get(struct team *team,
1482 struct team_gsetter_ctx *ctx)
1483{
1484 struct team_port *port = ctx->info->port;
1485
1486 ctx->data.bool_val = port->user.linkup_enabled;
1487 return 0;
1488}
1489
1490static int team_user_linkup_en_option_set(struct team *team,
1491 struct team_gsetter_ctx *ctx)
1492{
1493 struct team_port *port = ctx->info->port;
1494
1495 port->user.linkup_enabled = ctx->data.bool_val;
1496 team_refresh_port_linkup(port);
1497 __team_carrier_check(port->team);
1498 return 0;
1499}
1500
1501static int team_priority_option_get(struct team *team,
1502 struct team_gsetter_ctx *ctx)
1503{
1504 struct team_port *port = ctx->info->port;
1505
1506 ctx->data.s32_val = port->priority;
1507 return 0;
1508}
1509
1510static int team_priority_option_set(struct team *team,
1511 struct team_gsetter_ctx *ctx)
1512{
1513 struct team_port *port = ctx->info->port;
1514 s32 priority = ctx->data.s32_val;
1515
1516 if (port->priority == priority)
1517 return 0;
1518 port->priority = priority;
1519 team_queue_override_port_prio_changed(team, port);
1520 return 0;
1521}
1522
1523static int team_queue_id_option_get(struct team *team,
1524 struct team_gsetter_ctx *ctx)
1525{
1526 struct team_port *port = ctx->info->port;
1527
1528 ctx->data.u32_val = port->queue_id;
1529 return 0;
1530}
1531
1532static int team_queue_id_option_set(struct team *team,
1533 struct team_gsetter_ctx *ctx)
1534{
1535 struct team_port *port = ctx->info->port;
1536 u16 new_queue_id = ctx->data.u32_val;
1537
1538 if (port->queue_id == new_queue_id)
1539 return 0;
1540 if (new_queue_id >= team->dev->real_num_tx_queues)
1541 return -EINVAL;
1542 team_queue_override_port_change_queue_id(team, port, new_queue_id);
1543 return 0;
1544}
1545
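/*
 * Core options exposed through the generic netlink interface. Options
 * marked per_port get one instance per enslaved port; each getter/setter
 * works on a team_gsetter_ctx carrying the typed value and, for per-port
 * options, the target port.
 */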
1546static const struct team_option team_options[] = {
1547 {
1548 .name = "mode",
1549 .type = TEAM_OPTION_TYPE_STRING,
1550 .getter = team_mode_option_get,
1551 .setter = team_mode_option_set,
1552 },
1553 {
1554 .name = "notify_peers_count",
1555 .type = TEAM_OPTION_TYPE_U32,
1556 .getter = team_notify_peers_count_get,
1557 .setter = team_notify_peers_count_set,
1558 },
1559 {
1560 .name = "notify_peers_interval",
1561 .type = TEAM_OPTION_TYPE_U32,
1562 .getter = team_notify_peers_interval_get,
1563 .setter = team_notify_peers_interval_set,
1564 },
1565 {
1566 .name = "mcast_rejoin_count",
1567 .type = TEAM_OPTION_TYPE_U32,
1568 .getter = team_mcast_rejoin_count_get,
1569 .setter = team_mcast_rejoin_count_set,
1570 },
1571 {
1572 .name = "mcast_rejoin_interval",
1573 .type = TEAM_OPTION_TYPE_U32,
1574 .getter = team_mcast_rejoin_interval_get,
1575 .setter = team_mcast_rejoin_interval_set,
1576 },
1577 {
1578 .name = "enabled",
1579 .type = TEAM_OPTION_TYPE_BOOL,
1580 .per_port = true,
1581 .getter = team_port_en_option_get,
1582 .setter = team_port_en_option_set,
1583 },
1584 {
1585 .name = "user_linkup",
1586 .type = TEAM_OPTION_TYPE_BOOL,
1587 .per_port = true,
1588 .getter = team_user_linkup_option_get,
1589 .setter = team_user_linkup_option_set,
1590 },
1591 {
1592 .name = "user_linkup_enabled",
1593 .type = TEAM_OPTION_TYPE_BOOL,
1594 .per_port = true,
1595 .getter = team_user_linkup_en_option_get,
1596 .setter = team_user_linkup_en_option_set,
1597 },
1598 {
1599 .name = "priority",
1600 .type = TEAM_OPTION_TYPE_S32,
1601 .per_port = true,
1602 .getter = team_priority_option_get,
1603 .setter = team_priority_option_set,
1604 },
1605 {
1606 .name = "queue_id",
1607 .type = TEAM_OPTION_TYPE_U32,
1608 .per_port = true,
1609 .getter = team_queue_id_option_get,
1610 .setter = team_queue_id_option_set,
1611 },
1612};
1613
1614
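/*
 * ndo_init: allocate the per-cpu stats, initialize the enabled-port hash
 * and the option/port lists, register the core options above and give
 * team->lock a dedicated lockdep key so stacked team devices can be told
 * apart by lockdep.
 */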
1615static int team_init(struct net_device *dev)
1616{
1617 struct team *team = netdev_priv(dev);
1618 int i;
1619 int err;
1620
1621 team->dev = dev;
1622 team_set_no_mode(team);
1623
1624 team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
1625 if (!team->pcpu_stats)
1626 return -ENOMEM;
1627
1628 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1629 INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1630 INIT_LIST_HEAD(&team->port_list);
1631 err = team_queue_override_init(team);
1632 if (err)
1633 goto err_team_queue_override_init;
1634
1635 team_adjust_ops(team);
1636
1637 INIT_LIST_HEAD(&team->option_list);
1638 INIT_LIST_HEAD(&team->option_inst_list);
1639
1640 team_notify_peers_init(team);
1641 team_mcast_rejoin_init(team);
1642
1643 err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
1644 if (err)
1645 goto err_options_register;
1646 netif_carrier_off(dev);
1647
1648 lockdep_register_key(&team->team_lock_key);
1649 __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
1650 netdev_lockdep_set_classes(dev);
1651
1652 return 0;
1653
1654err_options_register:
1655 team_mcast_rejoin_fini(team);
1656 team_notify_peers_fini(team);
1657 team_queue_override_fini(team);
1658err_team_queue_override_init:
1659 free_percpu(team->pcpu_stats);
1660
1661 return err;
1662}
1663
1664static void team_uninit(struct net_device *dev)
1665{
1666 struct team *team = netdev_priv(dev);
1667 struct team_port *port;
1668 struct team_port *tmp;
1669
1670 mutex_lock(&team->lock);
1671 list_for_each_entry_safe(port, tmp, &team->port_list, list)
1672 team_port_del(team, port->dev);
1673
1674 __team_change_mode(team, NULL); /* cleanup */
1675 __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1676 team_mcast_rejoin_fini(team);
1677 team_notify_peers_fini(team);
1678 team_queue_override_fini(team);
1679 mutex_unlock(&team->lock);
1680 netdev_change_features(dev);
1681 lockdep_unregister_key(&team->team_lock_key);
1682}
1683
1684static void team_destructor(struct net_device *dev)
1685{
1686 struct team *team = netdev_priv(dev);
1687
1688 free_percpu(team->pcpu_stats);
1689}
1690
1691static int team_open(struct net_device *dev)
1692{
1693 return 0;
1694}
1695
1696static int team_close(struct net_device *dev)
1697{
1698 return 0;
1699}
1700
1701/*
1702 * note: already called with rcu_read_lock
1703 */
1704static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1705{
1706 struct team *team = netdev_priv(dev);
1707 bool tx_success;
1708 unsigned int len = skb->len;
1709
1710 tx_success = team_queue_override_transmit(team, skb);
1711 if (!tx_success)
1712 tx_success = team->ops.transmit(team, skb);
1713 if (tx_success) {
1714 struct team_pcpu_stats *pcpu_stats;
1715
1716 pcpu_stats = this_cpu_ptr(team->pcpu_stats);
1717 u64_stats_update_begin(&pcpu_stats->syncp);
1718 pcpu_stats->tx_packets++;
1719 pcpu_stats->tx_bytes += len;
1720 u64_stats_update_end(&pcpu_stats->syncp);
1721 } else {
1722 this_cpu_inc(team->pcpu_stats->tx_dropped);
1723 }
1724
1725 return NETDEV_TX_OK;
1726}
1727
1728static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1729 struct net_device *sb_dev)
1730{
1731 /*
1732 * This helper function exists to help dev_pick_tx get the correct
1733 * destination queue. Using a helper function skips a call to
1734 * skb_tx_hash and will put the skbs in the queue we expect on their
1735 * way down to the team driver.
1736 */
1737 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
1738
1739 /*
1740 * Save the original txq to restore before passing to the driver
1741 */
1742 qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
1743
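	/*
	 * Fold txq into the valid range; the subtraction loop is
	 * equivalent to txq %= dev->real_num_tx_queues.
	 */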
1744 if (unlikely(txq >= dev->real_num_tx_queues)) {
1745 do {
1746 txq -= dev->real_num_tx_queues;
1747 } while (txq >= dev->real_num_tx_queues);
1748 }
1749 return txq;
1750}
1751
1752static void team_change_rx_flags(struct net_device *dev, int change)
1753{
1754 struct team *team = netdev_priv(dev);
1755 struct team_port *port;
1756 int inc;
1757
1758 rcu_read_lock();
1759 list_for_each_entry_rcu(port, &team->port_list, list) {
1760 if (change & IFF_PROMISC) {
1761 inc = dev->flags & IFF_PROMISC ? 1 : -1;
1762 dev_set_promiscuity(port->dev, inc);
1763 }
1764 if (change & IFF_ALLMULTI) {
1765 inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1766 dev_set_allmulti(port->dev, inc);
1767 }
1768 }
1769 rcu_read_unlock();
1770}
1771
1772static void team_set_rx_mode(struct net_device *dev)
1773{
1774 struct team *team = netdev_priv(dev);
1775 struct team_port *port;
1776
1777 rcu_read_lock();
1778 list_for_each_entry_rcu(port, &team->port_list, list) {
1779 dev_uc_sync_multiple(port->dev, dev);
1780 dev_mc_sync_multiple(port->dev, dev);
1781 }
1782 rcu_read_unlock();
1783}
1784
1785static int team_set_mac_address(struct net_device *dev, void *p)
1786{
1787 struct sockaddr *addr = p;
1788 struct team *team = netdev_priv(dev);
1789 struct team_port *port;
1790
1791 if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1792 return -EADDRNOTAVAIL;
1793 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1794 mutex_lock(&team->lock);
1795 list_for_each_entry(port, &team->port_list, list)
1796 if (team->ops.port_change_dev_addr)
1797 team->ops.port_change_dev_addr(team, port);
1798 mutex_unlock(&team->lock);
1799 return 0;
1800}
1801
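/*
 * MTU changes are pushed down to every port. port_mtu_change_allowed is
 * the handshake with the NETDEV_PRECHANGEMTU notifier below: port MTUs
 * may only change while the team itself is driving the change.
 */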
1802static int team_change_mtu(struct net_device *dev, int new_mtu)
1803{
1804 struct team *team = netdev_priv(dev);
1805 struct team_port *port;
1806 int err;
1807
1808	/*
1809	 * Although this is a reader, it's guarded by the team lock. It's not
1810	 * possible to traverse the list in reverse under rcu_read_lock.
1811	 */
1812 mutex_lock(&team->lock);
1813 team->port_mtu_change_allowed = true;
1814 list_for_each_entry(port, &team->port_list, list) {
1815 err = dev_set_mtu(port->dev, new_mtu);
1816 if (err) {
1817			netdev_err(dev, "Device %s failed to change mtu\n",
1818				   port->dev->name);
1819 goto unwind;
1820 }
1821 }
1822 team->port_mtu_change_allowed = false;
1823 mutex_unlock(&team->lock);
1824
1825 dev->mtu = new_mtu;
1826
1827 return 0;
1828
1829unwind:
1830 list_for_each_entry_continue_reverse(port, &team->port_list, list)
1831 dev_set_mtu(port->dev, dev->mtu);
1832 team->port_mtu_change_allowed = false;
1833 mutex_unlock(&team->lock);
1834
1835 return err;
1836}
1837
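/*
 * Aggregate the per-cpu counters. The u64 packet/byte counters are read
 * under the u64_stats seqcount and retried if a writer was active; the
 * u32 drop counters are read without that protection, as noted below.
 */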
1838static void
1839team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1840{
1841 struct team *team = netdev_priv(dev);
1842 struct team_pcpu_stats *p;
1843 u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1844 u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
1845 unsigned int start;
1846 int i;
1847
1848 for_each_possible_cpu(i) {
1849 p = per_cpu_ptr(team->pcpu_stats, i);
1850 do {
1851 start = u64_stats_fetch_begin_irq(&p->syncp);
1852 rx_packets = p->rx_packets;
1853 rx_bytes = p->rx_bytes;
1854 rx_multicast = p->rx_multicast;
1855 tx_packets = p->tx_packets;
1856 tx_bytes = p->tx_bytes;
1857 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1858
1859 stats->rx_packets += rx_packets;
1860 stats->rx_bytes += rx_bytes;
1861 stats->multicast += rx_multicast;
1862 stats->tx_packets += tx_packets;
1863 stats->tx_bytes += tx_bytes;
1864 /*
1865 * rx_dropped, tx_dropped & rx_nohandler are u32,
1866 * updated without syncp protection.
1867 */
1868 rx_dropped += p->rx_dropped;
1869 tx_dropped += p->tx_dropped;
1870 rx_nohandler += p->rx_nohandler;
1871 }
1872 stats->rx_dropped = rx_dropped;
1873 stats->tx_dropped = tx_dropped;
1874 stats->rx_nohandler = rx_nohandler;
1875}
1876
1877static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1878{
1879 struct team *team = netdev_priv(dev);
1880 struct team_port *port;
1881 int err;
1882
1883	/*
1884	 * Although this is a reader, it's guarded by the team lock. It's not
1885	 * possible to traverse the list in reverse under rcu_read_lock.
1886	 */
1887 mutex_lock(&team->lock);
1888 list_for_each_entry(port, &team->port_list, list) {
1889 err = vlan_vid_add(port->dev, proto, vid);
1890 if (err)
1891 goto unwind;
1892 }
1893 mutex_unlock(&team->lock);
1894
1895 return 0;
1896
1897unwind:
1898 list_for_each_entry_continue_reverse(port, &team->port_list, list)
1899 vlan_vid_del(port->dev, proto, vid);
1900 mutex_unlock(&team->lock);
1901
1902 return err;
1903}
1904
1905static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1906{
1907 struct team *team = netdev_priv(dev);
1908 struct team_port *port;
1909
1910 mutex_lock(&team->lock);
1911 list_for_each_entry(port, &team->port_list, list)
1912 vlan_vid_del(port->dev, proto, vid);
1913 mutex_unlock(&team->lock);
1914
1915 return 0;
1916}
1917
1918#ifdef CONFIG_NET_POLL_CONTROLLER
1919static void team_poll_controller(struct net_device *dev)
1920{
1921}
1922
1923static void __team_netpoll_cleanup(struct team *team)
1924{
1925 struct team_port *port;
1926
1927 list_for_each_entry(port, &team->port_list, list)
1928 team_port_disable_netpoll(port);
1929}
1930
1931static void team_netpoll_cleanup(struct net_device *dev)
1932{
1933 struct team *team = netdev_priv(dev);
1934
1935 mutex_lock(&team->lock);
1936 __team_netpoll_cleanup(team);
1937 mutex_unlock(&team->lock);
1938}
1939
1940static int team_netpoll_setup(struct net_device *dev,
1941			      struct netpoll_info *npinfo)
1942{
1943 struct team *team = netdev_priv(dev);
1944 struct team_port *port;
1945 int err = 0;
1946
1947 mutex_lock(&team->lock);
1948 list_for_each_entry(port, &team->port_list, list) {
1949 err = __team_port_enable_netpoll(port);
1950 if (err) {
1951 __team_netpoll_cleanup(team);
1952 break;
1953 }
1954 }
1955 mutex_unlock(&team->lock);
1956 return err;
1957}
1958#endif
1959
1960static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
1961 struct netlink_ext_ack *extack)
1962{
1963 struct team *team = netdev_priv(dev);
1964 int err;
1965
1966 mutex_lock(&team->lock);
1967 err = team_port_add(team, port_dev, extack);
1968 mutex_unlock(&team->lock);
1969
1970 if (!err)
1971 netdev_change_features(dev);
1972
1973 return err;
1974}
1975
1976static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1977{
1978 struct team *team = netdev_priv(dev);
1979 int err;
1980
1981 mutex_lock(&team->lock);
1982 err = team_port_del(team, port_dev);
1983 mutex_unlock(&team->lock);
1984
1985 if (err)
1986 return err;
1987
1988 if (netif_is_team_master(port_dev)) {
1989 lockdep_unregister_key(&team->team_lock_key);
1990 lockdep_register_key(&team->team_lock_key);
1991 lockdep_set_class(&team->lock, &team->team_lock_key);
1992 }
1993 netdev_change_features(dev);
1994
1995 return err;
1996}
1997
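/*
 * ndo_fix_features: constrain the features the team may advertise to
 * what every enslaved port supports, then add back the full TSO set,
 * which the stack can always satisfy in software via GSO.
 */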
1998static netdev_features_t team_fix_features(struct net_device *dev,
1999 netdev_features_t features)
2000{
2001 struct team_port *port;
2002 struct team *team = netdev_priv(dev);
2003 netdev_features_t mask;
2004
2005 mask = features;
2006 features &= ~NETIF_F_ONE_FOR_ALL;
2007 features |= NETIF_F_ALL_FOR_ALL;
2008
2009 rcu_read_lock();
2010 list_for_each_entry_rcu(port, &team->port_list, list) {
2011 features = netdev_increment_features(features,
2012 port->dev->features,
2013 mask);
2014 }
2015 rcu_read_unlock();
2016
2017 features = netdev_add_tso_features(features, mask);
2018
2019 return features;
2020}
2021
2022static int team_change_carrier(struct net_device *dev, bool new_carrier)
2023{
2024 struct team *team = netdev_priv(dev);
2025
2026 team->user_carrier_enabled = true;
2027
2028 if (new_carrier)
2029 netif_carrier_on(dev);
2030 else
2031 netif_carrier_off(dev);
2032 return 0;
2033}
2034
2035static const struct net_device_ops team_netdev_ops = {
2036 .ndo_init = team_init,
2037 .ndo_uninit = team_uninit,
2038 .ndo_open = team_open,
2039 .ndo_stop = team_close,
2040 .ndo_start_xmit = team_xmit,
2041 .ndo_select_queue = team_select_queue,
2042 .ndo_change_rx_flags = team_change_rx_flags,
2043 .ndo_set_rx_mode = team_set_rx_mode,
2044 .ndo_set_mac_address = team_set_mac_address,
2045 .ndo_change_mtu = team_change_mtu,
2046 .ndo_get_stats64 = team_get_stats64,
2047 .ndo_vlan_rx_add_vid = team_vlan_rx_add_vid,
2048 .ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid,
2049#ifdef CONFIG_NET_POLL_CONTROLLER
2050 .ndo_poll_controller = team_poll_controller,
2051 .ndo_netpoll_setup = team_netpoll_setup,
2052 .ndo_netpoll_cleanup = team_netpoll_cleanup,
2053#endif
2054 .ndo_add_slave = team_add_slave,
2055 .ndo_del_slave = team_del_slave,
2056 .ndo_fix_features = team_fix_features,
2057 .ndo_change_carrier = team_change_carrier,
2058 .ndo_features_check = passthru_features_check,
2059};
2060
2061/***********************
2062 * ethtool interface
2063 ***********************/
2064
2065static void team_ethtool_get_drvinfo(struct net_device *dev,
2066 struct ethtool_drvinfo *drvinfo)
2067{
2068 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
2069 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
2070}
2071
2072static int team_ethtool_get_link_ksettings(struct net_device *dev,
2073 struct ethtool_link_ksettings *cmd)
2074{
2075	struct team *team = netdev_priv(dev);
2076 unsigned long speed = 0;
2077 struct team_port *port;
2078
2079 cmd->base.duplex = DUPLEX_UNKNOWN;
2080 cmd->base.port = PORT_OTHER;
2081
2082 rcu_read_lock();
2083 list_for_each_entry_rcu(port, &team->port_list, list) {
2084 if (team_port_txable(port)) {
2085 if (port->state.speed != SPEED_UNKNOWN)
2086 speed += port->state.speed;
2087 if (cmd->base.duplex == DUPLEX_UNKNOWN &&
2088 port->state.duplex != DUPLEX_UNKNOWN)
2089 cmd->base.duplex = port->state.duplex;
2090 }
2091 }
2092 rcu_read_unlock();
2093
2094 cmd->base.speed = speed ? : SPEED_UNKNOWN;
2095
2096 return 0;
2097}
2098
2099static const struct ethtool_ops team_ethtool_ops = {
2100 .get_drvinfo = team_ethtool_get_drvinfo,
2101 .get_link = ethtool_op_get_link,
2102 .get_link_ksettings = team_ethtool_get_link_ksettings,
2103};
2104
2105/***********************
2106 * rt netlink interface
2107 ***********************/
2108
2109static void team_setup_by_port(struct net_device *dev,
2110 struct net_device *port_dev)
2111{
2112 dev->header_ops = port_dev->header_ops;
2113 dev->type = port_dev->type;
2114 dev->hard_header_len = port_dev->hard_header_len;
2115 dev->needed_headroom = port_dev->needed_headroom;
2116 dev->addr_len = port_dev->addr_len;
2117 dev->mtu = port_dev->mtu;
2118 memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
2119 eth_hw_addr_inherit(dev, port_dev);
2120}
2121
2122static int team_dev_type_check_change(struct net_device *dev,
2123 struct net_device *port_dev)
2124{
2125 struct team *team = netdev_priv(dev);
2126 char *portname = port_dev->name;
2127 int err;
2128
2129 if (dev->type == port_dev->type)
2130 return 0;
2131 if (!list_empty(&team->port_list)) {
2132 netdev_err(dev, "Device %s is of different type\n", portname);
2133 return -EBUSY;
2134 }
2135 err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
2136 err = notifier_to_errno(err);
2137 if (err) {
2138 netdev_err(dev, "Refused to change device type\n");
2139 return err;
2140 }
2141 dev_uc_flush(dev);
2142 dev_mc_flush(dev);
2143 team_setup_by_port(dev, port_dev);
2144 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
2145 return 0;
2146}
2147
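/*
 * Default setup for a newly created team device: Ethernet to begin with
 * (team_dev_type_check_change() may retype it to match the first port),
 * no qdisc queue of its own, and live address changes allowed.
 */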
2148static void team_setup(struct net_device *dev)
2149{
2150 ether_setup(dev);
2151 dev->max_mtu = ETH_MAX_MTU;
2152
2153 dev->netdev_ops = &team_netdev_ops;
2154 dev->ethtool_ops = &team_ethtool_ops;
2155 dev->needs_free_netdev = true;
2156 dev->priv_destructor = team_destructor;
2157 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
2158 dev->priv_flags |= IFF_NO_QUEUE;
2159 dev->priv_flags |= IFF_TEAM;
2160
2161	/*
2162	 * Indicate that we support unicast address filtering. That way the
2163	 * core won't bring us to promiscuous mode when a unicast address is
2164	 * added; leave that up to the underlying drivers.
2165	 */
2166 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
2167
2168 dev->features |= NETIF_F_LLTX;
2169 dev->features |= NETIF_F_GRO;
2170
2171 /* Don't allow team devices to change network namespaces. */
2172 dev->features |= NETIF_F_NETNS_LOCAL;
2173
2174 dev->hw_features = TEAM_VLAN_FEATURES |
2175 NETIF_F_HW_VLAN_CTAG_RX |
2176 NETIF_F_HW_VLAN_CTAG_FILTER;
2177
2178 dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
2179 dev->features |= dev->hw_features;
2180 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2181}
2182
2183static int team_newlink(struct net *src_net, struct net_device *dev,
2184 struct nlattr *tb[], struct nlattr *data[],
2185 struct netlink_ext_ack *extack)
2186{
2187 if (tb[IFLA_ADDRESS] == NULL)
2188 eth_hw_addr_random(dev);
2189
2190 return register_netdevice(dev);
2191}
2192
2193static int team_validate(struct nlattr *tb[], struct nlattr *data[],
2194 struct netlink_ext_ack *extack)
2195{
2196 if (tb[IFLA_ADDRESS]) {
2197 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
2198 return -EINVAL;
2199 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
2200 return -EADDRNOTAVAIL;
2201 }
2202 return 0;
2203}
2204
2205static unsigned int team_get_num_tx_queues(void)
2206{
2207 return TEAM_DEFAULT_NUM_TX_QUEUES;
2208}
2209
2210static unsigned int team_get_num_rx_queues(void)
2211{
2212 return TEAM_DEFAULT_NUM_RX_QUEUES;
2213}
2214
2215static struct rtnl_link_ops team_link_ops __read_mostly = {
2216 .kind = DRV_NAME,
2217 .priv_size = sizeof(struct team),
2218 .setup = team_setup,
2219 .newlink = team_newlink,
2220 .validate = team_validate,
2221 .get_num_tx_queues = team_get_num_tx_queues,
2222 .get_num_rx_queues = team_get_num_rx_queues,
2223};
2224
2225
2226/***********************************
2227 * Generic netlink custom interface
2228 ***********************************/
2229
2230static struct genl_family team_nl_family;
2231
2232static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
2233 [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, },
2234 [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 },
2235 [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED },
2236 [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED },
2237};
2238
2239static const struct nla_policy
2240team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
2241 [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, },
2242 [TEAM_ATTR_OPTION_NAME] = {
2243 .type = NLA_STRING,
2244 .len = TEAM_STRING_MAX_LEN,
2245 },
2246 [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
2247 [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
2248 [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
2249 [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32 },
2250 [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32 },
2251};
2252
2253static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
2254{
2255 struct sk_buff *msg;
2256 void *hdr;
2257 int err;
2258
2259 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2260 if (!msg)
2261 return -ENOMEM;
2262
2263 hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
2264 &team_nl_family, 0, TEAM_CMD_NOOP);
2265 if (!hdr) {
2266 err = -EMSGSIZE;
2267 goto err_msg_put;
2268 }
2269
2270 genlmsg_end(msg, hdr);
2271
2272 return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
2273
2274err_msg_put:
2275 nlmsg_free(msg);
2276
2277 return err;
2278}
2279
2280/*
2281 * Netlink cmd functions should be locked by the following two functions.
2282 * Since dev is held here, dev is guaranteed not to disappear in between.
2283 */
2284static struct team *team_nl_team_get(struct genl_info *info)
2285{
2286 struct net *net = genl_info_net(info);
2287 int ifindex;
2288 struct net_device *dev;
2289 struct team *team;
2290
2291 if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
2292 return NULL;
2293
2294 ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
2295 dev = dev_get_by_index(net, ifindex);
2296 if (!dev || dev->netdev_ops != &team_netdev_ops) {
2297 if (dev)
2298 dev_put(dev);
2299 return NULL;
2300 }
2301
2302 team = netdev_priv(dev);
2303 mutex_lock(&team->lock);
2304 return team;
2305}
2306
2307static void team_nl_team_put(struct team *team)
2308{
2309 mutex_unlock(&team->lock);
2310 dev_put(team->dev);
2311}
2312
2313typedef int team_nl_send_func_t(struct sk_buff *skb,
2314 struct team *team, u32 portid);
2315
2316static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
2317{
2318 return genlmsg_unicast(dev_net(team->dev), skb, portid);
2319}
2320
2321static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
2322 struct team_option_inst *opt_inst)
2323{
2324 struct nlattr *option_item;
2325 struct team_option *option = opt_inst->option;
2326 struct team_option_inst_info *opt_inst_info = &opt_inst->info;
2327 struct team_gsetter_ctx ctx;
2328 int err;
2329
2330 ctx.info = opt_inst_info;
2331 err = team_option_get(team, opt_inst, &ctx);
2332 if (err)
2333 return err;
2334
2335 option_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_OPTION);
2336 if (!option_item)
2337 return -EMSGSIZE;
2338
2339 if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
2340 goto nest_cancel;
2341 if (opt_inst_info->port &&
2342 nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
2343 opt_inst_info->port->dev->ifindex))
2344 goto nest_cancel;
2345 if (opt_inst->option->array_size &&
2346 nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
2347 opt_inst_info->array_index))
2348 goto nest_cancel;
2349
2350 switch (option->type) {
2351 case TEAM_OPTION_TYPE_U32:
2352 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
2353 goto nest_cancel;
2354 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
2355 goto nest_cancel;
2356 break;
2357 case TEAM_OPTION_TYPE_STRING:
2358 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
2359 goto nest_cancel;
2360 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
2361 ctx.data.str_val))
2362 goto nest_cancel;
2363 break;
2364 case TEAM_OPTION_TYPE_BINARY:
2365 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
2366 goto nest_cancel;
2367 if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
2368 ctx.data.bin_val.ptr))
2369 goto nest_cancel;
2370 break;
2371 case TEAM_OPTION_TYPE_BOOL:
2372 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
2373 goto nest_cancel;
2374 if (ctx.data.bool_val &&
2375 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
2376 goto nest_cancel;
2377 break;
2378 case TEAM_OPTION_TYPE_S32:
2379 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
2380 goto nest_cancel;
2381 if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
2382 goto nest_cancel;
2383 break;
2384 default:
2385 BUG();
2386 }
2387 if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
2388 goto nest_cancel;
2389 if (opt_inst->changed) {
2390 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
2391 goto nest_cancel;
2392 opt_inst->changed = false;
2393 }
2394 nla_nest_end(skb, option_item);
2395 return 0;
2396
2397nest_cancel:
2398 nla_nest_cancel(skb, option_item);
2399 return -EMSGSIZE;
2400}
2401
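/*
 * Flush-and-reallocate helper for multi-part dumps: if a partially
 * filled skb is pending, push it out through send_func() first, then
 * hand the caller a fresh GENLMSG_DEFAULT_SIZE buffer to keep filling.
 */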
2402static int __send_and_alloc_skb(struct sk_buff **pskb,
2403 struct team *team, u32 portid,
2404 team_nl_send_func_t *send_func)
2405{
2406 int err;
2407
2408 if (*pskb) {
2409 err = send_func(*pskb, team, portid);
2410 if (err)
2411 return err;
2412 }
2413 *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
2414 if (!*pskb)
2415 return -ENOMEM;
2416 return 0;
2417}
2418
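/*
 * Fill option-get messages. When the selected instances don't all fit
 * into one skb, team_nl_fill_one_option_get() fails with -EMSGSIZE and
 * the loop restarts at the first unsent instance in a fresh NLM_F_MULTI
 * message; the sequence is terminated by an explicit NLMSG_DONE.
 */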
2419static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
2420 int flags, team_nl_send_func_t *send_func,
2421 struct list_head *sel_opt_inst_list)
2422{
2423 struct nlattr *option_list;
2424 struct nlmsghdr *nlh;
2425 void *hdr;
2426 struct team_option_inst *opt_inst;
2427 int err;
2428 struct sk_buff *skb = NULL;
2429 bool incomplete;
2430 int i;
2431
2432 opt_inst = list_first_entry(sel_opt_inst_list,
2433 struct team_option_inst, tmp_list);
2434
2435start_again:
2436 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2437 if (err)
2438 return err;
2439
2440 hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2441 TEAM_CMD_OPTIONS_GET);
2442 if (!hdr) {
2443 nlmsg_free(skb);
2444 return -EMSGSIZE;
2445 }
2446
2447 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2448 goto nla_put_failure;
2449 option_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_OPTION);
2450 if (!option_list)
2451 goto nla_put_failure;
2452
2453 i = 0;
2454 incomplete = false;
2455 list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
2456 err = team_nl_fill_one_option_get(skb, team, opt_inst);
2457 if (err) {
2458 if (err == -EMSGSIZE) {
2459 if (!i)
2460 goto errout;
2461 incomplete = true;
2462 break;
2463 }
2464 goto errout;
2465 }
2466 i++;
2467 }
2468
2469 nla_nest_end(skb, option_list);
2470 genlmsg_end(skb, hdr);
2471 if (incomplete)
2472 goto start_again;
2473
2474send_done:
2475 nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2476 if (!nlh) {
2477 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2478 if (err)
2479 return err;
2480 goto send_done;
2481 }
2482
2483 return send_func(skb, team, portid);
2484
2485nla_put_failure:
2486 err = -EMSGSIZE;
2487errout:
2488 nlmsg_free(skb);
2489 return err;
2490}
2491
2492static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
2493{
2494 struct team *team;
2495 struct team_option_inst *opt_inst;
2496 int err;
2497 LIST_HEAD(sel_opt_inst_list);
2498
2499 team = team_nl_team_get(info);
2500 if (!team)
2501 return -EINVAL;
2502
2503 list_for_each_entry(opt_inst, &team->option_inst_list, list)
2504 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2505 err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
2506 NLM_F_ACK, team_nl_send_unicast,
2507 &sel_opt_inst_list);
2508
2509 team_nl_team_put(team);
2510
2511 return err;
2512}
2513
2514static int team_nl_send_event_options_get(struct team *team,
2515 struct list_head *sel_opt_inst_list);
2516
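/*
 * TEAM_CMD_OPTIONS_SET: each nested TEAM_ATTR_ITEM_OPTION is parsed and
 * matched against the registered option instances by name, type, port
 * ifindex and array index. Matches are applied via team_option_set() and
 * the resulting changes are multicast back as an options-get event.
 */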
2517static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2518{
2519 struct team *team;
2520 int err = 0;
2521 int i;
2522 struct nlattr *nl_option;
2523
2524 rtnl_lock();
2525
2526 team = team_nl_team_get(info);
2527 if (!team) {
2528 err = -EINVAL;
2529 goto rtnl_unlock;
2530 }
2531
2533	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
2534		err = -EINVAL;
2535		goto team_put;
2536	}
2537
2538 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
2539 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2540 struct nlattr *attr;
2541 struct nlattr *attr_data;
2542 LIST_HEAD(opt_inst_list);
2543 enum team_option_type opt_type;
2544 int opt_port_ifindex = 0; /* != 0 for per-port options */
2545 u32 opt_array_index = 0;
2546 bool opt_is_array = false;
2547 struct team_option_inst *opt_inst;
2548 char *opt_name;
2549 bool opt_found = false;
2550
2551 if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
2552 err = -EINVAL;
2553 goto team_put;
2554 }
2555 err = nla_parse_nested_deprecated(opt_attrs,
2556 TEAM_ATTR_OPTION_MAX,
2557 nl_option,
2558 team_nl_option_policy,
2559 info->extack);
2560 if (err)
2561 goto team_put;
2562 if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
2563 !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
2564 err = -EINVAL;
2565 goto team_put;
2566 }
2567 switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
2568 case NLA_U32:
2569 opt_type = TEAM_OPTION_TYPE_U32;
2570 break;
2571 case NLA_STRING:
2572 opt_type = TEAM_OPTION_TYPE_STRING;
2573 break;
2574 case NLA_BINARY:
2575 opt_type = TEAM_OPTION_TYPE_BINARY;
2576 break;
2577 case NLA_FLAG:
2578 opt_type = TEAM_OPTION_TYPE_BOOL;
2579 break;
2580 case NLA_S32:
2581 opt_type = TEAM_OPTION_TYPE_S32;
2582 break;
2583		default:
			err = -EINVAL;
2584			goto team_put;
2585 }
2586
2587 attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
2588 if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
2589 err = -EINVAL;
2590 goto team_put;
2591 }
2592
2593 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
2594 attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
2595 if (attr)
2596 opt_port_ifindex = nla_get_u32(attr);
2597
2598 attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
2599 if (attr) {
2600 opt_is_array = true;
2601 opt_array_index = nla_get_u32(attr);
2602 }
2603
2604 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2605 struct team_option *option = opt_inst->option;
2606 struct team_gsetter_ctx ctx;
2607 struct team_option_inst_info *opt_inst_info;
2608 int tmp_ifindex;
2609
2610 opt_inst_info = &opt_inst->info;
2611 tmp_ifindex = opt_inst_info->port ?
2612 opt_inst_info->port->dev->ifindex : 0;
2613 if (option->type != opt_type ||
2614 strcmp(option->name, opt_name) ||
2615 tmp_ifindex != opt_port_ifindex ||
2616 (option->array_size && !opt_is_array) ||
2617 opt_inst_info->array_index != opt_array_index)
2618 continue;
2619 opt_found = true;
2620 ctx.info = opt_inst_info;
2621 switch (opt_type) {
2622 case TEAM_OPTION_TYPE_U32:
2623 ctx.data.u32_val = nla_get_u32(attr_data);
2624 break;
2625 case TEAM_OPTION_TYPE_STRING:
2626 if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
2627 err = -EINVAL;
2628 goto team_put;
2629 }
2630 ctx.data.str_val = nla_data(attr_data);
2631 break;
2632 case TEAM_OPTION_TYPE_BINARY:
2633 ctx.data.bin_val.len = nla_len(attr_data);
2634 ctx.data.bin_val.ptr = nla_data(attr_data);
2635 break;
2636 case TEAM_OPTION_TYPE_BOOL:
2637 ctx.data.bool_val = attr_data ? true : false;
2638 break;
2639 case TEAM_OPTION_TYPE_S32:
2640 ctx.data.s32_val = nla_get_s32(attr_data);
2641 break;
2642 default:
2643 BUG();
2644 }
2645 err = team_option_set(team, opt_inst, &ctx);
2646 if (err)
2647 goto team_put;
2648 opt_inst->changed = true;
2649 list_add(&opt_inst->tmp_list, &opt_inst_list);
2650 }
2651 if (!opt_found) {
2652 err = -ENOENT;
2653 goto team_put;
2654 }
2655
2656 err = team_nl_send_event_options_get(team, &opt_inst_list);
2657 if (err)
2658 break;
2659 }
2660
2661team_put:
2662 team_nl_team_put(team);
2663rtnl_unlock:
2664 rtnl_unlock();
2665 return err;
2666}
2667
2668static int team_nl_fill_one_port_get(struct sk_buff *skb,
2669 struct team_port *port)
2670{
2671 struct nlattr *port_item;
2672
2673 port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT);
2674 if (!port_item)
2675 goto nest_cancel;
2676 if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
2677 goto nest_cancel;
2678 if (port->changed) {
2679 if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
2680 goto nest_cancel;
2681 port->changed = false;
2682 }
2683 if ((port->removed &&
2684 nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
2685 (port->state.linkup &&
2686 nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
2687 nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
2688 nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
2689 goto nest_cancel;
2690 nla_nest_end(skb, port_item);
2691 return 0;
2692
2693nest_cancel:
2694 nla_nest_cancel(skb, port_item);
2695 return -EMSGSIZE;
2696}
2697
2698static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
2699 int flags, team_nl_send_func_t *send_func,
2700 struct team_port *one_port)
2701{
2702 struct nlattr *port_list;
2703 struct nlmsghdr *nlh;
2704 void *hdr;
2705 struct team_port *port;
2706 int err;
2707 struct sk_buff *skb = NULL;
2708 bool incomplete;
2709 int i;
2710
2711 port = list_first_entry_or_null(&team->port_list,
2712 struct team_port, list);
2713
2714start_again:
2715 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2716 if (err)
2717 return err;
2718
2719 hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2720 TEAM_CMD_PORT_LIST_GET);
2721 if (!hdr) {
2722 nlmsg_free(skb);
2723 return -EMSGSIZE;
2724 }
2725
2726 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2727 goto nla_put_failure;
2728 port_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_PORT);
2729 if (!port_list)
2730 goto nla_put_failure;
2731
2732 i = 0;
2733 incomplete = false;
2734
2735	/* If one port is selected, the caller wants a port list containing
2736	 * only this port. Otherwise go through all listed ports and send all.
2737	 */
2738 if (one_port) {
2739 err = team_nl_fill_one_port_get(skb, one_port);
2740 if (err)
2741 goto errout;
2742 } else if (port) {
2743 list_for_each_entry_from(port, &team->port_list, list) {
2744 err = team_nl_fill_one_port_get(skb, port);
2745 if (err) {
2746 if (err == -EMSGSIZE) {
2747 if (!i)
2748 goto errout;
2749 incomplete = true;
2750 break;
2751 }
2752 goto errout;
2753 }
2754 i++;
2755 }
2756 }
2757
2758 nla_nest_end(skb, port_list);
2759 genlmsg_end(skb, hdr);
2760 if (incomplete)
2761 goto start_again;
2762
2763send_done:
2764 nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2765 if (!nlh) {
2766 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2767 if (err)
2768 return err;
2769 goto send_done;
2770 }
2771
2772 return send_func(skb, team, portid);
2773
2774nla_put_failure:
2775 err = -EMSGSIZE;
2776errout:
2777 nlmsg_free(skb);
2778 return err;
2779}
2780
2781static int team_nl_cmd_port_list_get(struct sk_buff *skb,
2782 struct genl_info *info)
2783{
2784 struct team *team;
2785 int err;
2786
2787 team = team_nl_team_get(info);
2788 if (!team)
2789 return -EINVAL;
2790
2791 err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
2792 NLM_F_ACK, team_nl_send_unicast, NULL);
2793
2794 team_nl_team_put(team);
2795
2796 return err;
2797}
2798
2799static const struct genl_ops team_nl_ops[] = {
2800 {
2801 .cmd = TEAM_CMD_NOOP,
2802 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2803 .doit = team_nl_cmd_noop,
2804 },
2805 {
2806 .cmd = TEAM_CMD_OPTIONS_SET,
2807 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2808 .doit = team_nl_cmd_options_set,
2809 .flags = GENL_ADMIN_PERM,
2810 },
2811 {
2812 .cmd = TEAM_CMD_OPTIONS_GET,
2813 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2814 .doit = team_nl_cmd_options_get,
2815 .flags = GENL_ADMIN_PERM,
2816 },
2817 {
2818 .cmd = TEAM_CMD_PORT_LIST_GET,
2819 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2820 .doit = team_nl_cmd_port_list_get,
2821 .flags = GENL_ADMIN_PERM,
2822 },
2823};
2824
2825static const struct genl_multicast_group team_nl_mcgrps[] = {
2826 { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
2827};
2828
2829static struct genl_family team_nl_family __ro_after_init = {
2830 .name = TEAM_GENL_NAME,
2831 .version = TEAM_GENL_VERSION,
2832 .maxattr = TEAM_ATTR_MAX,
2833 .policy = team_nl_policy,
2834 .netnsok = true,
2835 .module = THIS_MODULE,
2836 .ops = team_nl_ops,
2837 .n_ops = ARRAY_SIZE(team_nl_ops),
2838 .mcgrps = team_nl_mcgrps,
2839 .n_mcgrps = ARRAY_SIZE(team_nl_mcgrps),
2840};
2841
2842static int team_nl_send_multicast(struct sk_buff *skb,
2843 struct team *team, u32 portid)
2844{
2845 return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
2846 skb, 0, 0, GFP_KERNEL);
2847}
2848
2849static int team_nl_send_event_options_get(struct team *team,
2850 struct list_head *sel_opt_inst_list)
2851{
2852 return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2853 sel_opt_inst_list);
2854}
2855
2856static int team_nl_send_event_port_get(struct team *team,
2857 struct team_port *port)
2858{
2859 return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
2860 port);
2861}
2862
2863static int __init team_nl_init(void)
2864{
2865 return genl_register_family(&team_nl_family);
2866}
2867
2868static void team_nl_fini(void)
2869{
2870 genl_unregister_family(&team_nl_family);
2871}
2872
2873
2874/******************
2875 * Change checkers
2876 ******************/
2877
2878static void __team_options_change_check(struct team *team)
2879{
2880 int err;
2881 struct team_option_inst *opt_inst;
2882 LIST_HEAD(sel_opt_inst_list);
2883
2884 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2885 if (opt_inst->changed)
2886 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2887 }
2888 err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2889 if (err && err != -ESRCH)
2890 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2891 err);
2892}
2893
2894/* rtnl lock is held */
2896static void __team_port_change_send(struct team_port *port, bool linkup)
2897{
2898 int err;
2899
2900 port->changed = true;
2901 port->state.linkup = linkup;
2902 team_refresh_port_linkup(port);
2903 if (linkup) {
2904 struct ethtool_link_ksettings ecmd;
2905
2906 err = __ethtool_get_link_ksettings(port->dev, &ecmd);
2907 if (!err) {
2908 port->state.speed = ecmd.base.speed;
2909 port->state.duplex = ecmd.base.duplex;
2910 goto send_event;
2911 }
2912 }
2913 port->state.speed = 0;
2914 port->state.duplex = 0;
2915
2916send_event:
2917 err = team_nl_send_event_port_get(port->team, port);
2918 if (err && err != -ESRCH)
2919 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2920 port->dev->name, err);
2922}
2923
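/*
 * The team carrier is the logical OR of its ports' link states: it is
 * asserted while at least one port is up, unless userspace has taken
 * over carrier control through ndo_change_carrier.
 */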
2924static void __team_carrier_check(struct team *team)
2925{
2926 struct team_port *port;
2927 bool team_linkup;
2928
2929 if (team->user_carrier_enabled)
2930 return;
2931
2932 team_linkup = false;
2933 list_for_each_entry(port, &team->port_list, list) {
2934 if (port->linkup) {
2935 team_linkup = true;
2936 break;
2937 }
2938 }
2939
2940 if (team_linkup)
2941 netif_carrier_on(team->dev);
2942 else
2943 netif_carrier_off(team->dev);
2944}
2945
2946static void __team_port_change_check(struct team_port *port, bool linkup)
2947{
2948 if (port->state.linkup != linkup)
2949 __team_port_change_send(port, linkup);
2950 __team_carrier_check(port->team);
2951}
2952
2953static void __team_port_change_port_added(struct team_port *port, bool linkup)
2954{
2955 __team_port_change_send(port, linkup);
2956 __team_carrier_check(port->team);
2957}
2958
2959static void __team_port_change_port_removed(struct team_port *port)
2960{
2961 port->removed = true;
2962 __team_port_change_send(port, false);
2963 __team_carrier_check(port->team);
2964}
2965
2966static void team_port_change_check(struct team_port *port, bool linkup)
2967{
2968 struct team *team = port->team;
2969
2970 mutex_lock(&team->lock);
2971 __team_port_change_check(port, linkup);
2972 mutex_unlock(&team->lock);
2973}
2974
2975
2976/************************************
2977 * Net device notifier event handler
2978 ************************************/
2979
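/*
 * Notifier for events on port devices only; everything else falls out at
 * the team_port_get_rtnl() check. This runs under rtnl, so the port
 * lookup and the team_del_slave() call on NETDEV_UNREGISTER are safe.
 */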
2980static int team_device_event(struct notifier_block *unused,
2981 unsigned long event, void *ptr)
2982{
2983 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2984 struct team_port *port;
2985
2986 port = team_port_get_rtnl(dev);
2987 if (!port)
2988 return NOTIFY_DONE;
2989
2990 switch (event) {
2991 case NETDEV_UP:
2992 if (netif_oper_up(dev))
2993 team_port_change_check(port, true);
2994 break;
2995 case NETDEV_DOWN:
2996 team_port_change_check(port, false);
2997 break;
2998 case NETDEV_CHANGE:
2999 if (netif_running(port->dev))
3000 team_port_change_check(port,
3001 !!netif_oper_up(port->dev));
3002 break;
3003 case NETDEV_UNREGISTER:
3004 team_del_slave(port->team->dev, dev);
3005 break;
3006 case NETDEV_FEAT_CHANGE:
3007 team_compute_features(port->team);
3008 break;
3009 case NETDEV_PRECHANGEMTU:
3010		/* Forbid changing the MTU of an underlying device */
3011 if (!port->team->port_mtu_change_allowed)
3012 return NOTIFY_BAD;
3013 break;
3014 case NETDEV_PRE_TYPE_CHANGE:
3015		/* Forbid changing the type of an underlying device */
3016 return NOTIFY_BAD;
3017 case NETDEV_RESEND_IGMP:
3018 /* Propagate to master device */
3019 call_netdevice_notifiers(event, port->team->dev);
3020 break;
3021 }
3022 return NOTIFY_DONE;
3023}
3024
3025static struct notifier_block team_notifier_block __read_mostly = {
3026 .notifier_call = team_device_event,
3027};
3028
3029
3030/***********************
3031 * Module init and exit
3032 ***********************/
3033
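/*
 * Module init wires up, in order: the netdevice notifier, the rtnl link
 * ops (which make "ip link add ... type team" work) and the generic
 * netlink family; module exit tears them down in reverse order.
 */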
3034static int __init team_module_init(void)
3035{
3036 int err;
3037
3038 register_netdevice_notifier(&team_notifier_block);
3039
3040 err = rtnl_link_register(&team_link_ops);
3041 if (err)
3042 goto err_rtnl_reg;
3043
3044 err = team_nl_init();
3045 if (err)
3046 goto err_nl_init;
3047
3048 return 0;
3049
3050err_nl_init:
3051 rtnl_link_unregister(&team_link_ops);
3052
3053err_rtnl_reg:
3054 unregister_netdevice_notifier(&team_notifier_block);
3055
3056 return err;
3057}
3058
3059static void __exit team_module_exit(void)
3060{
3061 team_nl_fini();
3062 rtnl_link_unregister(&team_link_ops);
3063 unregister_netdevice_notifier(&team_notifier_block);
3064}
3065
3066module_init(team_module_init);
3067module_exit(team_module_exit);
3068
3069MODULE_LICENSE("GPL v2");
3070MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
3071MODULE_DESCRIPTION("Ethernet team device driver");
3072MODULE_ALIAS_RTNL_LINK(DRV_NAME);