// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/dev_addr_lists.c - Functions for handling net device lists
 * Copyright (c) 2010 Jiri Pirko <jpirko@redhat.com>
 *
 * This file contains functions for working with unicast, multicast and device
 * address lists.
 */

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/export.h>
#include <linux/list.h>

#include "dev.h"

/*
 * General list handling functions
 */

/* Link @new into the list's rbtree, keyed by (address bytes, type).
 * Returns -EEXIST if an entry with the same key is already present.
 */
static int __hw_addr_insert(struct netdev_hw_addr_list *list,
                            struct netdev_hw_addr *new, int addr_len)
{
        struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
        struct netdev_hw_addr *ha;

        while (*ins_point) {
                int diff;

                ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
                diff = memcmp(new->addr, ha->addr, addr_len);
                if (diff == 0)
                        diff = memcmp(&new->type, &ha->type, sizeof(new->type));

                parent = *ins_point;
                if (diff < 0)
                        ins_point = &parent->rb_left;
                else if (diff > 0)
                        ins_point = &parent->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node_rcu(&new->node, parent, ins_point);
        rb_insert_color(&new->node, &list->tree);

        return 0;
}

/* Allocate and initialize an address entry; the caller is responsible for
 * linking it into the list and the rbtree.
 */
static struct netdev_hw_addr*
__hw_addr_create(const unsigned char *addr, int addr_len,
                 unsigned char addr_type, bool global, bool sync)
{
        struct netdev_hw_addr *ha;
        int alloc_size;

        alloc_size = sizeof(*ha);
        if (alloc_size < L1_CACHE_BYTES)
                alloc_size = L1_CACHE_BYTES;
        ha = kmalloc(alloc_size, GFP_ATOMIC);
        if (!ha)
                return NULL;
        memcpy(ha->addr, addr, addr_len);
        ha->type = addr_type;
        ha->refcount = 1;
        ha->global_use = global;
        ha->synced = sync ? 1 : 0;
        ha->sync_cnt = 0;

        return ha;
}

static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
                            const unsigned char *addr, int addr_len,
                            unsigned char addr_type, bool global, bool sync,
                            int sync_count, bool exclusive)
{
        struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
        struct netdev_hw_addr *ha;

        if (addr_len > MAX_ADDR_LEN)
                return -EINVAL;

        while (*ins_point) {
                int diff;

                ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
                diff = memcmp(addr, ha->addr, addr_len);
                if (diff == 0)
                        diff = memcmp(&addr_type, &ha->type, sizeof(addr_type));

                parent = *ins_point;
                if (diff < 0) {
                        ins_point = &parent->rb_left;
                } else if (diff > 0) {
                        ins_point = &parent->rb_right;
                } else {
                        if (exclusive)
                                return -EEXIST;
                        if (global) {
                                /* check if addr is already used as global */
                                if (ha->global_use)
                                        return 0;
                                else
                                        ha->global_use = true;
                        }
                        if (sync) {
                                if (ha->synced && sync_count)
                                        return -EEXIST;
                                else
                                        ha->synced++;
                        }
                        ha->refcount++;
                        return 0;
                }
        }

        ha = __hw_addr_create(addr, addr_len, addr_type, global, sync);
        if (!ha)
                return -ENOMEM;

        rb_link_node(&ha->node, parent, ins_point);
        rb_insert_color(&ha->node, &list->tree);

        list_add_tail_rcu(&ha->list, &list->list);
        list->count++;

        return 0;
}

static int __hw_addr_add(struct netdev_hw_addr_list *list,
                         const unsigned char *addr, int addr_len,
                         unsigned char addr_type)
{
        return __hw_addr_add_ex(list, addr, addr_len, addr_type, false, false,
                                0, false);
}

static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
                               struct netdev_hw_addr *ha, bool global,
                               bool sync)
{
        if (global && !ha->global_use)
                return -ENOENT;

        if (sync && !ha->synced)
                return -ENOENT;

        if (global)
                ha->global_use = false;

        if (sync)
                ha->synced--;

        if (--ha->refcount)
                return 0;

        rb_erase(&ha->node, &list->tree);

        list_del_rcu(&ha->list);
        kfree_rcu(ha, rcu_head);
        list->count--;
        return 0;
}

/* rbtree lookup by address and type; a zero @addr_type matches any type. */
static struct netdev_hw_addr *__hw_addr_lookup(struct netdev_hw_addr_list *list,
                                               const unsigned char *addr, int addr_len,
                                               unsigned char addr_type)
{
        struct rb_node *node;

        node = list->tree.rb_node;

        while (node) {
                struct netdev_hw_addr *ha = rb_entry(node, struct netdev_hw_addr, node);
                int diff = memcmp(addr, ha->addr, addr_len);

                if (diff == 0 && addr_type)
                        diff = memcmp(&addr_type, &ha->type, sizeof(addr_type));

                if (diff < 0)
                        node = node->rb_left;
                else if (diff > 0)
                        node = node->rb_right;
                else
                        return ha;
        }

        return NULL;
}

static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
                            const unsigned char *addr, int addr_len,
                            unsigned char addr_type, bool global, bool sync)
{
        struct netdev_hw_addr *ha = __hw_addr_lookup(list, addr, addr_len, addr_type);

        if (!ha)
                return -ENOENT;
        return __hw_addr_del_entry(list, ha, global, sync);
}

static int __hw_addr_del(struct netdev_hw_addr_list *list,
                         const unsigned char *addr, int addr_len,
                         unsigned char addr_type)
{
        return __hw_addr_del_ex(list, addr, addr_len, addr_type, false, false);
}

static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
                              struct netdev_hw_addr *ha,
                              int addr_len)
{
        int err;

        err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
                               false, true, ha->sync_cnt, false);
        if (err && err != -EEXIST)
                return err;

        if (!err) {
                ha->sync_cnt++;
                ha->refcount++;
        }

        return 0;
}

static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
                                 struct netdev_hw_addr_list *from_list,
                                 struct netdev_hw_addr *ha,
                                 int addr_len)
{
        int err;

        err = __hw_addr_del_ex(to_list, ha->addr, addr_len, ha->type,
                               false, true);
        if (err)
                return;
        ha->sync_cnt--;
        /* address on from list is not marked synced */
        __hw_addr_del_entry(from_list, ha, false, false);
}

static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
                                   struct netdev_hw_addr_list *from_list,
                                   int addr_len)
{
        int err = 0;
        struct netdev_hw_addr *ha, *tmp;

        list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
                if (ha->sync_cnt == ha->refcount) {
                        __hw_addr_unsync_one(to_list, from_list, ha, addr_len);
                } else {
                        err = __hw_addr_sync_one(to_list, ha, addr_len);
                        if (err)
                                break;
                }
        }
        return err;
}

/* This function only works where there is a strict 1-1 relationship
 * between the source and destination of the sync. If you ever need to
 * sync addresses to more than one destination, you need to use
 * __hw_addr_sync_multiple().
 */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
                   struct netdev_hw_addr_list *from_list,
                   int addr_len)
{
        int err = 0;
        struct netdev_hw_addr *ha, *tmp;

        list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
                if (!ha->sync_cnt) {
                        err = __hw_addr_sync_one(to_list, ha, addr_len);
                        if (err)
                                break;
                } else if (ha->refcount == 1)
                        __hw_addr_unsync_one(to_list, from_list, ha, addr_len);
        }
        return err;
}
EXPORT_SYMBOL(__hw_addr_sync);

void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
                      struct netdev_hw_addr_list *from_list,
                      int addr_len)
{
        struct netdev_hw_addr *ha, *tmp;

        list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
                if (ha->sync_cnt)
                        __hw_addr_unsync_one(to_list, from_list, ha, addr_len);
        }
}
EXPORT_SYMBOL(__hw_addr_unsync);

/**
 * __hw_addr_sync_dev - Synchronize device's multicast list
 * @list: address list to synchronize
 * @dev: device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of devices that require explicit address add/remove
 * notifications. The unsync function may be NULL, in which case
 * the addresses requiring removal will simply be removed without
 * any notification to the device.
 **/
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
                       struct net_device *dev,
                       int (*sync)(struct net_device *, const unsigned char *),
                       int (*unsync)(struct net_device *,
                                     const unsigned char *))
{
        struct netdev_hw_addr *ha, *tmp;
        int err;

        /* first go through and flush out any stale entries */
        list_for_each_entry_safe(ha, tmp, &list->list, list) {
                if (!ha->sync_cnt || ha->refcount != 1)
                        continue;

                /* if unsync is defined and fails defer unsyncing address */
                if (unsync && unsync(dev, ha->addr))
                        continue;

                ha->sync_cnt--;
                __hw_addr_del_entry(list, ha, false, false);
        }

        /* go through and sync new entries to the list */
        list_for_each_entry_safe(ha, tmp, &list->list, list) {
                if (ha->sync_cnt)
                        continue;

                err = sync(dev, ha->addr);
                if (err)
                        return err;

                ha->sync_cnt++;
                ha->refcount++;
        }

        return 0;
}
EXPORT_SYMBOL(__hw_addr_sync_dev);
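
/* An illustrative sketch (not part of the original file): how a driver's
 * ndo_set_rx_mode handler could use __hw_addr_sync_dev() to learn which
 * unicast addresses were added or removed since the last call.  A real
 * driver would program its filter hardware in the callbacks; here they
 * only log the change.  The "example_" names are hypothetical.
 */
static int example_uc_sync(struct net_device *dev, const unsigned char *addr)
{
        netdev_info(dev, "example: add unicast filter for %pM\n", addr);
        return 0;
}

static int example_uc_unsync(struct net_device *dev, const unsigned char *addr)
{
        netdev_info(dev, "example: remove unicast filter for %pM\n", addr);
        return 0;
}

static void example_set_rx_mode(struct net_device *dev)
{
        /* The core invokes ndo_set_rx_mode with dev's address list locked. */
        __hw_addr_sync_dev(&dev->uc, dev, example_uc_sync, example_uc_unsync);
}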

/**
 * __hw_addr_ref_sync_dev - Synchronize device's multicast address list taking
 *	into account references
 * @list: address list to synchronize
 * @dev: device to sync
 * @sync: function to call if an address or a reference on it should be added
 * @unsync: function to call if an address or some reference on it should be
 *	removed
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of devices that require explicit address or reference
 * add/remove notifications. The unsync function may be NULL, in which case
 * the addresses or references requiring removal will simply be removed
 * without any notification to the device. It is the responsibility of
 * the driver to identify and distribute the addresses or references among
 * its internal address tables.
 **/
int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
                           struct net_device *dev,
                           int (*sync)(struct net_device *,
                                       const unsigned char *, int),
                           int (*unsync)(struct net_device *,
                                         const unsigned char *, int))
{
        struct netdev_hw_addr *ha, *tmp;
        int err, ref_cnt;

        /* first go through and flush out any unsynced/stale entries */
        list_for_each_entry_safe(ha, tmp, &list->list, list) {
                /* sync if address is not used */
                if ((ha->sync_cnt << 1) <= ha->refcount)
                        continue;

                /* if fails defer unsyncing address */
                ref_cnt = ha->refcount - ha->sync_cnt;
                if (unsync && unsync(dev, ha->addr, ref_cnt))
                        continue;

                ha->refcount = (ref_cnt << 1) + 1;
                ha->sync_cnt = ref_cnt;
                __hw_addr_del_entry(list, ha, false, false);
        }

        /* go through and sync updated/new entries to the list */
        list_for_each_entry_safe(ha, tmp, &list->list, list) {
                /* sync if address added or reused */
                if ((ha->sync_cnt << 1) >= ha->refcount)
                        continue;

                ref_cnt = ha->refcount - ha->sync_cnt;
                err = sync(dev, ha->addr, ref_cnt);
                if (err)
                        return err;

                ha->refcount = ref_cnt << 1;
                ha->sync_cnt = ref_cnt;
        }

        return 0;
}
EXPORT_SYMBOL(__hw_addr_ref_sync_dev);
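
/* An illustrative sketch (not part of the original file): using
 * __hw_addr_ref_sync_dev() when the device cares how many references an
 * address has accumulated, for example to size an internal table.  The
 * callbacks only log here; the "example_" names are hypothetical.
 */
static int example_mc_ref_sync(struct net_device *dev,
                               const unsigned char *addr, int ref_cnt)
{
        netdev_info(dev, "example: %pM gains %d reference(s)\n", addr, ref_cnt);
        return 0;
}

static int example_mc_ref_unsync(struct net_device *dev,
                                 const unsigned char *addr, int ref_cnt)
{
        netdev_info(dev, "example: %pM drops %d reference(s)\n", addr, ref_cnt);
        return 0;
}

static void example_ref_set_rx_mode(struct net_device *dev)
{
        __hw_addr_ref_sync_dev(&dev->mc, dev, example_mc_ref_sync,
                               example_mc_ref_unsync);
}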

/**
 * __hw_addr_ref_unsync_dev - Remove synchronized addresses and their
 *	references from the device
 * @list: address list to remove the synchronized addresses (and references) from
 * @dev: device to sync
 * @unsync: function to call if an address and its references should be removed
 *
 * Remove all addresses that were added to the device by
 * __hw_addr_ref_sync_dev(). This function is intended to be called from the
 * ndo_stop or ndo_open functions of devices that require explicit address (or
 * reference) add/remove notifications. If the unsync function pointer
 * is NULL then this function can be used to just reset the sync_cnt for the
 * addresses in the list.
 **/
void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
                              struct net_device *dev,
                              int (*unsync)(struct net_device *,
                                            const unsigned char *, int))
{
        struct netdev_hw_addr *ha, *tmp;

        list_for_each_entry_safe(ha, tmp, &list->list, list) {
                if (!ha->sync_cnt)
                        continue;

                /* if fails defer unsyncing address */
                if (unsync && unsync(dev, ha->addr, ha->sync_cnt))
                        continue;

                ha->refcount -= ha->sync_cnt - 1;
                ha->sync_cnt = 0;
                __hw_addr_del_entry(list, ha, false, false);
        }
}
EXPORT_SYMBOL(__hw_addr_ref_unsync_dev);

/**
 * __hw_addr_unsync_dev - Remove synchronized addresses from device
 * @list: address list to remove synchronized addresses from
 * @dev: device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by __hw_addr_sync_dev().
 * This function is intended to be called from the ndo_stop or ndo_open
 * functions on devices that require explicit address add/remove
 * notifications. If the unsync function pointer is NULL then this function
 * can be used to just reset the sync_cnt for the addresses in the list.
 **/
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
                          struct net_device *dev,
                          int (*unsync)(struct net_device *,
                                        const unsigned char *))
{
        struct netdev_hw_addr *ha, *tmp;

        list_for_each_entry_safe(ha, tmp, &list->list, list) {
                if (!ha->sync_cnt)
                        continue;

                /* if unsync is defined and fails defer unsyncing address */
                if (unsync && unsync(dev, ha->addr))
                        continue;

                ha->sync_cnt--;
                __hw_addr_del_entry(list, ha, false, false);
        }
}
EXPORT_SYMBOL(__hw_addr_unsync_dev);
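
/* An illustrative sketch (not part of the original file): the counterpart to
 * the __hw_addr_sync_dev() example above.  On ndo_stop the driver walks the
 * list once more so every address it programmed gets an unsync callback.
 * "example_dev_stop" is hypothetical; example_uc_unsync() is reused from the
 * earlier sketch.
 */
static int example_dev_stop(struct net_device *dev)
{
        netif_addr_lock_bh(dev);
        __hw_addr_unsync_dev(&dev->uc, dev, example_uc_unsync);
        netif_addr_unlock_bh(dev);
        return 0;
}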

static void __hw_addr_flush(struct netdev_hw_addr_list *list)
{
        struct netdev_hw_addr *ha, *tmp;

        list->tree = RB_ROOT;
        list_for_each_entry_safe(ha, tmp, &list->list, list) {
                list_del_rcu(&ha->list);
                kfree_rcu(ha, rcu_head);
        }
        list->count = 0;
}

void __hw_addr_init(struct netdev_hw_addr_list *list)
{
        INIT_LIST_HEAD(&list->list);
        list->count = 0;
        list->tree = RB_ROOT;
}
EXPORT_SYMBOL(__hw_addr_init);

/*
 * Device addresses handling functions
 */

/* Check that netdev->dev_addr is not written to directly as this would
 * break the rbtree layout. All changes should go through dev_addr_set() and co.
 * Remove this check in mid-2024.
 */
void dev_addr_check(struct net_device *dev)
{
        if (!memcmp(dev->dev_addr, dev->dev_addr_shadow, MAX_ADDR_LEN))
                return;

        netdev_warn(dev, "Current addr:  %*ph\n", MAX_ADDR_LEN, dev->dev_addr);
        netdev_warn(dev, "Expected addr: %*ph\n",
                    MAX_ADDR_LEN, dev->dev_addr_shadow);
        netdev_WARN(dev, "Incorrect netdev->dev_addr\n");
}
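
/* An illustrative sketch (not part of the original file): with the rbtree
 * keyed by the address bytes, drivers must not memcpy() into dev->dev_addr
 * directly.  Going through the helpers keeps the tree and the shadow copy
 * consistent.  dev_addr_set() (and eth_hw_addr_set() for Ethernet) are the
 * in-tree helpers; the surrounding function is hypothetical.
 */
static void example_assign_mac(struct net_device *dev, const u8 *mac)
{
        /* Wrong: memcpy(dev->dev_addr, mac, dev->addr_len);
         * that bypasses the bookkeeping checked by dev_addr_check() above.
         */
        dev_addr_set(dev, mac);         /* copies dev->addr_len bytes */
}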

/**
 * dev_addr_flush - Flush device address list
 * @dev: device
 *
 * Flush device address list and reset ->dev_addr.
 *
 * The caller must hold the rtnl_mutex.
 */
void dev_addr_flush(struct net_device *dev)
{
        /* rtnl_mutex must be held here */
        dev_addr_check(dev);

        __hw_addr_flush(&dev->dev_addrs);
        dev->dev_addr = NULL;
}

/**
 * dev_addr_init - Init device address list
 * @dev: device
 *
 * Init device address list and create the first element,
 * used by ->dev_addr.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_init(struct net_device *dev)
{
        unsigned char addr[MAX_ADDR_LEN];
        struct netdev_hw_addr *ha;
        int err;

        /* rtnl_mutex must be held here */

        __hw_addr_init(&dev->dev_addrs);
        memset(addr, 0, sizeof(addr));
        err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
                            NETDEV_HW_ADDR_T_LAN);
        if (!err) {
                /*
                 * Get the first (previously created) address from the list
                 * and set dev_addr pointer to this location.
                 */
                ha = list_first_entry(&dev->dev_addrs.list,
                                      struct netdev_hw_addr, list);
                dev->dev_addr = ha->addr;
        }
        return err;
}

/* Change @len bytes of the device address starting at @offset, keeping the
 * rbtree position and the dev_addr_shadow copy in sync.  Whole-address
 * updates normally go through helpers such as dev_addr_set().
 */
void dev_addr_mod(struct net_device *dev, unsigned int offset,
                  const void *addr, size_t len)
{
        struct netdev_hw_addr *ha;

        dev_addr_check(dev);

        ha = container_of(dev->dev_addr, struct netdev_hw_addr, addr[0]);
        rb_erase(&ha->node, &dev->dev_addrs.tree);
        memcpy(&ha->addr[offset], addr, len);
        memcpy(&dev->dev_addr_shadow[offset], addr, len);
        WARN_ON(__hw_addr_insert(&dev->dev_addrs, ha, dev->addr_len));
}
EXPORT_SYMBOL(dev_addr_mod);

/**
 * dev_addr_add - Add a device address
 * @dev: device
 * @addr: address to add
 * @addr_type: address type
 *
 * Add a device address to the device or increase the reference count if
 * it already exists.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
                 unsigned char addr_type)
{
        int err;

        ASSERT_RTNL();

        err = dev_pre_changeaddr_notify(dev, addr, NULL);
        if (err)
                return err;
        err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
        if (!err)
                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
        return err;
}
EXPORT_SYMBOL(dev_addr_add);
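
/* An illustrative sketch (not part of the original file): adding and then
 * releasing a secondary hardware address.  dev_addr_add()/dev_addr_del()
 * require the RTNL lock, as asserted above.  The address value and the
 * function itself are made up for the example.
 */
static void example_secondary_addr(struct net_device *dev)
{
        static const unsigned char extra[MAX_ADDR_LEN] = {
                0x02, 0x00, 0x00, 0x00, 0x00, 0x01,    /* locally administered */
        };

        rtnl_lock();
        if (!dev_addr_add(dev, extra, NETDEV_HW_ADDR_T_LAN))
                dev_addr_del(dev, extra, NETDEV_HW_ADDR_T_LAN);
        rtnl_unlock();
}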

/**
 * dev_addr_del - Release a device address.
 * @dev: device
 * @addr: address to delete
 * @addr_type: address type
 *
 * Release reference to a device address and remove it from the device
 * if the reference count drops to zero.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
                 unsigned char addr_type)
{
        int err;
        struct netdev_hw_addr *ha;

        ASSERT_RTNL();

        /*
         * We can not remove the first address from the list because
         * dev->dev_addr points to that.
         */
        ha = list_first_entry(&dev->dev_addrs.list,
                              struct netdev_hw_addr, list);
        if (!memcmp(ha->addr, addr, dev->addr_len) &&
            ha->type == addr_type && ha->refcount == 1)
                return -ENOENT;

        err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
                            addr_type);
        if (!err)
                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
        return err;
}
EXPORT_SYMBOL(dev_addr_del);

/*
 * Unicast list handling functions
 */

/**
 * dev_uc_add_excl - Add a global secondary unicast address
 * @dev: device
 * @addr: address to add
 */
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr)
{
        int err;

        netif_addr_lock_bh(dev);
        err = __hw_addr_add_ex(&dev->uc, addr, dev->addr_len,
                               NETDEV_HW_ADDR_T_UNICAST, true, false,
                               0, true);
        if (!err)
                __dev_set_rx_mode(dev);
        netif_addr_unlock_bh(dev);
        return err;
}
EXPORT_SYMBOL(dev_uc_add_excl);

/**
 * dev_uc_add - Add a secondary unicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a secondary unicast address to the device or increase
 * the reference count if it already exists.
 */
int dev_uc_add(struct net_device *dev, const unsigned char *addr)
{
        int err;

        netif_addr_lock_bh(dev);
        err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
                            NETDEV_HW_ADDR_T_UNICAST);
        if (!err)
                __dev_set_rx_mode(dev);
        netif_addr_unlock_bh(dev);
        return err;
}
EXPORT_SYMBOL(dev_uc_add);

/**
 * dev_uc_del - Release secondary unicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a secondary unicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_uc_del(struct net_device *dev, const unsigned char *addr)
{
        int err;

        netif_addr_lock_bh(dev);
        err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
                            NETDEV_HW_ADDR_T_UNICAST);
        if (!err)
                __dev_set_rx_mode(dev);
        netif_addr_unlock_bh(dev);
        return err;
}
EXPORT_SYMBOL(dev_uc_del);

/**
 * dev_uc_sync - Synchronize device's unicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left. The source device must be
 * locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of layered software devices. This function assumes that
 * addresses will only ever be synced to the @to device and no other.
 */
int dev_uc_sync(struct net_device *to, struct net_device *from)
{
        int err = 0;

        if (to->addr_len != from->addr_len)
                return -EINVAL;

        netif_addr_lock(to);
        err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
        netif_addr_unlock(to);
        return err;
}
EXPORT_SYMBOL(dev_uc_sync);

/**
 * dev_uc_sync_multiple - Synchronize device's unicast list to another
 * device, but allow for multiple calls to sync to multiple devices.
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have been deleted from the source. The source device
 * must be locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of layered software devices. It allows for a single source
 * device to be synced to multiple destination devices.
 */
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
{
        int err = 0;

        if (to->addr_len != from->addr_len)
                return -EINVAL;

        netif_addr_lock(to);
        err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
        netif_addr_unlock(to);
        return err;
}
EXPORT_SYMBOL(dev_uc_sync_multiple);

/**
 * dev_uc_unsync - Remove synchronized addresses from the destination device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to the destination device by
 * dev_uc_sync(). This function is intended to be called from the
 * ndo_stop function of layered software devices.
 */
void dev_uc_unsync(struct net_device *to, struct net_device *from)
{
        if (to->addr_len != from->addr_len)
                return;

        /* netif_addr_lock_bh() uses lockdep subclass 0, this is okay for two
         * reasons:
         * 1) This is always called without any addr_list_lock, so as the
         *    outermost one here, it must be 0.
         * 2) This is called by some callers after unlinking the upper device,
         *    so the dev->lower_level becomes 1 again.
         * Therefore, the subclass for 'from' is 0, for 'to' is either 1 or
         * larger.
         */
        netif_addr_lock_bh(from);
        netif_addr_lock(to);
        __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
        __dev_set_rx_mode(to);
        netif_addr_unlock(to);
        netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_uc_unsync);
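
/* An illustrative sketch (not part of the original file): how a layered
 * software device (an upper device in the style of a VLAN or macvlan driver)
 * keeps its lower device's filters in step.  "example_upper_priv" and the
 * functions are hypothetical; dev_uc_sync()/dev_uc_unsync() and the mc
 * variants further below are the real API.
 */
struct example_upper_priv {
        struct net_device *lowerdev;
};

static void example_upper_set_rx_mode(struct net_device *dev)
{
        struct example_upper_priv *priv = netdev_priv(dev);

        /* Called by the core with dev's address list already locked. */
        dev_uc_sync(priv->lowerdev, dev);
        dev_mc_sync(priv->lowerdev, dev);
}

static int example_upper_stop(struct net_device *dev)
{
        struct example_upper_priv *priv = netdev_priv(dev);

        dev_uc_unsync(priv->lowerdev, dev);
        dev_mc_unsync(priv->lowerdev, dev);
        return 0;
}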

/**
 * dev_uc_flush - Flush unicast addresses
 * @dev: device
 *
 * Flush unicast addresses.
 */
void dev_uc_flush(struct net_device *dev)
{
        netif_addr_lock_bh(dev);
        __hw_addr_flush(&dev->uc);
        netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_uc_flush);

/**
 * dev_uc_init - Init unicast address list
 * @dev: device
 *
 * Init unicast address list.
 */
void dev_uc_init(struct net_device *dev)
{
        __hw_addr_init(&dev->uc);
}
EXPORT_SYMBOL(dev_uc_init);

/*
 * Multicast list handling functions
 */

/**
 * dev_mc_add_excl - Add a global secondary multicast address
 * @dev: device
 * @addr: address to add
 */
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr)
{
        int err;

        netif_addr_lock_bh(dev);
        err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
                               NETDEV_HW_ADDR_T_MULTICAST, true, false,
                               0, true);
        if (!err)
                __dev_set_rx_mode(dev);
        netif_addr_unlock_bh(dev);
        return err;
}
EXPORT_SYMBOL(dev_mc_add_excl);

static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
                        bool global)
{
        int err;

        netif_addr_lock_bh(dev);
        err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
                               NETDEV_HW_ADDR_T_MULTICAST, global, false,
                               0, false);
        if (!err)
                __dev_set_rx_mode(dev);
        netif_addr_unlock_bh(dev);
        return err;
}
/**
 * dev_mc_add - Add a multicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a multicast address to the device or increase
 * the reference count if it already exists.
 */
int dev_mc_add(struct net_device *dev, const unsigned char *addr)
{
        return __dev_mc_add(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_add);
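
/* An illustrative sketch (not part of the original file): a protocol or
 * driver joining and leaving a link-layer multicast group.  The group MAC
 * below and the function are made up; dev_mc_add()/dev_mc_del() keep a
 * reference count, so paired calls leave the list unchanged.
 */
static void example_join_leave_group(struct net_device *dev)
{
        static const unsigned char group[MAX_ADDR_LEN] = {
                0x01, 0x00, 0x5e, 0x00, 0x00, 0x42,     /* example group MAC */
        };

        if (!dev_mc_add(dev, group))
                dev_mc_del(dev, group);
}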

/**
 * dev_mc_add_global - Add a global multicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a global multicast address to the device.
 */
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr)
{
        return __dev_mc_add(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_add_global);

static int __dev_mc_del(struct net_device *dev, const unsigned char *addr,
                        bool global)
{
        int err;

        netif_addr_lock_bh(dev);
        err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
                               NETDEV_HW_ADDR_T_MULTICAST, global, false);
        if (!err)
                __dev_set_rx_mode(dev);
        netif_addr_unlock_bh(dev);
        return err;
}

/**
 * dev_mc_del - Delete a multicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a multicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_mc_del(struct net_device *dev, const unsigned char *addr)
{
        return __dev_mc_del(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_del);

/**
 * dev_mc_del_global - Delete a global multicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a multicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
{
        return __dev_mc_del(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_del_global);

/**
 * dev_mc_sync - Synchronize device's multicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left. The source device must be
 * locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of layered software devices.
 */
int dev_mc_sync(struct net_device *to, struct net_device *from)
{
        int err = 0;

        if (to->addr_len != from->addr_len)
                return -EINVAL;

        netif_addr_lock(to);
        err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
        netif_addr_unlock(to);
        return err;
}
EXPORT_SYMBOL(dev_mc_sync);

/**
 * dev_mc_sync_multiple - Synchronize device's multicast list to another
 * device, but allow for multiple calls to sync to multiple devices.
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left. The source device must be
 * locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of layered software devices. It allows for a single
 * source device to be synced to multiple destination devices.
 */
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
{
        int err = 0;

        if (to->addr_len != from->addr_len)
                return -EINVAL;

        netif_addr_lock(to);
        err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
        netif_addr_unlock(to);
        return err;
}
EXPORT_SYMBOL(dev_mc_sync_multiple);

/**
 * dev_mc_unsync - Remove synchronized addresses from the destination device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to the destination device by
 * dev_mc_sync(). This function is intended to be called from the
 * ndo_stop function of layered software devices.
 */
void dev_mc_unsync(struct net_device *to, struct net_device *from)
{
        if (to->addr_len != from->addr_len)
                return;

        /* See the above comments inside dev_uc_unsync(). */
        netif_addr_lock_bh(from);
        netif_addr_lock(to);
        __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
        __dev_set_rx_mode(to);
        netif_addr_unlock(to);
        netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_mc_unsync);

/**
 * dev_mc_flush - Flush multicast addresses
 * @dev: device
 *
 * Flush multicast addresses.
 */
void dev_mc_flush(struct net_device *dev)
{
        netif_addr_lock_bh(dev);
        __hw_addr_flush(&dev->mc);
        netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_mc_flush);

/**
 * dev_mc_init - Init multicast address list
 * @dev: device
 *
 * Init multicast address list.
 */
void dev_mc_init(struct net_device *dev)
{
        __hw_addr_init(&dev->mc);
}
EXPORT_SYMBOL(dev_mc_init);