// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <net/devlink.h>

#include "dsa_priv.h"

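/* dsa2_mutex serializes switch registration and removal. dsa_tree_list links
 * every struct dsa_switch_tree in the system; it is only modified with
 * dsa2_mutex held.
 */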
static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The only other way of traversing the tree is through
 * its ports list, which does not uniquely list the member switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
        struct raw_notifier_head *nh = &dst->nh;
        int err;

        err = raw_notifier_call_chain(nh, e, v);

        return notifier_to_errno(err);
}

/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 */
int dsa_broadcast(unsigned long e, void *v)
{
        struct dsa_switch_tree *dst;
        int err = 0;

        list_for_each_entry(dst, &dsa_tree_list, list) {
                err = dsa_tree_notify(dst, e, v);
                if (err)
                        break;
        }

        return err;
}

/**
 * dsa_lag_map() - Map LAG netdev to a linear LAG ID
 * @dst: Tree in which to record the mapping.
 * @lag: Netdev that is to be mapped to an ID.
 *
 * dsa_lag_id/dsa_lag_dev can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
{
        unsigned int id;

        if (dsa_lag_id(dst, lag) >= 0)
                /* Already mapped */
                return;

        for (id = 0; id < dst->lags_len; id++) {
                if (!dsa_lag_dev(dst, id)) {
                        dst->lags[id] = lag;
                        return;
                }
        }

        /* No IDs left, which is OK. Some drivers do not need it. The
         * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
         * returns an error for this device when joining the LAG. The
         * driver can then return -EOPNOTSUPP back to DSA, which will
         * fall back to a software LAG.
         */
}

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: Netdev that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
{
        struct dsa_port *dp;
        unsigned int id;

        dsa_lag_foreach_port(dp, dst, lag)
                /* There are remaining users of this mapping */
                return;

        dsa_lags_foreach_id(id, dst) {
                if (dsa_lag_dev(dst, id) == lag) {
                        dst->lags[id] = NULL;
                        break;
                }
        }
}

struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
        struct dsa_switch_tree *dst;
        struct dsa_port *dp;

        list_for_each_entry(dst, &dsa_tree_list, list) {
                if (dst->index != tree_index)
                        continue;

                list_for_each_entry(dp, &dst->ports, list) {
                        if (dp->ds->index != sw_index)
                                continue;

                        return dp->ds;
                }
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
        struct dsa_switch_tree *dst;

        list_for_each_entry(dst, &dsa_tree_list, list)
                if (dst->index == index)
                        return dst;

        return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
        struct dsa_switch_tree *dst;

        dst = kzalloc(sizeof(*dst), GFP_KERNEL);
        if (!dst)
                return NULL;

        dst->index = index;

        INIT_LIST_HEAD(&dst->rtable);

        INIT_LIST_HEAD(&dst->ports);

        INIT_LIST_HEAD(&dst->list);
        list_add_tail(&dst->list, &dsa_tree_list);

        kref_init(&dst->refcount);

        return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
        if (dst->tag_ops)
                dsa_tag_driver_put(dst->tag_ops);
        list_del(&dst->list);
        kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
        if (dst)
                kref_get(&dst->refcount);

        return dst;
}

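/* Get-or-create: return the tree with this index, with an extra reference
 * taken, or allocate a fresh one (kref-initialized to 1) if no switch has
 * claimed this tree index yet.
 */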
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
        struct dsa_switch_tree *dst;

        dst = dsa_tree_find(index);
        if (dst)
                return dsa_tree_get(dst);
        else
                return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
        struct dsa_switch_tree *dst;

        dst = container_of(ref, struct dsa_switch_tree, refcount);

        dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
        if (dst)
                kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
                                                   struct device_node *dn)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dp->dn == dn)
                        return dp;

        return NULL;
}

static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
                                       struct dsa_port *link_dp)
{
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst;
        struct dsa_link *dl;

        dst = ds->dst;

        list_for_each_entry(dl, &dst->rtable, list)
                if (dl->dp == dp && dl->link_dp == link_dp)
                        return dl;

        dl = kzalloc(sizeof(*dl), GFP_KERNEL);
        if (!dl)
                return NULL;

        dl->dp = dp;
        dl->link_dp = link_dp;

        INIT_LIST_HEAD(&dl->list);
        list_add_tail(&dl->list, &dst->rtable);

        return dl;
}

static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst = ds->dst;
        struct device_node *dn = dp->dn;
        struct of_phandle_iterator it;
        struct dsa_port *link_dp;
        struct dsa_link *dl;
        int err;

        of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
                link_dp = dsa_tree_find_port_by_node(dst, it.node);
                if (!link_dp) {
                        of_node_put(it.node);
                        return false;
                }

                dl = dsa_link_touch(dp, link_dp);
                if (!dl) {
                        of_node_put(it.node);
                        return false;
                }
        }

        return true;
}

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
        bool complete = true;
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list) {
                if (dsa_port_is_dsa(dp)) {
                        complete = dsa_port_setup_routing_table(dp);
                        if (!complete)
                                break;
                }
        }

        return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_cpu(dp))
                        return dp;

        return NULL;
}

static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
        struct dsa_port *cpu_dp, *dp;

        cpu_dp = dsa_tree_find_first_cpu(dst);
        if (!cpu_dp) {
                pr_err("DSA: tree %d has no CPU port\n", dst->index);
                return -EINVAL;
        }

        /* Assign the default CPU port to all ports of the fabric */
        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
                        dp->cpu_dp = cpu_dp;

        return 0;
}

static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
                        dp->cpu_dp = NULL;
}

static int dsa_port_setup(struct dsa_port *dp)
{
        struct devlink_port *dlp = &dp->devlink_port;
        bool dsa_port_link_registered = false;
        struct dsa_switch *ds = dp->ds;
        bool dsa_port_enabled = false;
        int err = 0;

        if (dp->setup)
                return 0;

        INIT_LIST_HEAD(&dp->fdbs);
        INIT_LIST_HEAD(&dp->mdbs);

        if (ds->ops->port_setup) {
                err = ds->ops->port_setup(ds, dp->index);
                if (err)
                        return err;
        }

        switch (dp->type) {
        case DSA_PORT_TYPE_UNUSED:
                dsa_port_disable(dp);
                break;
        case DSA_PORT_TYPE_CPU:
                err = dsa_port_link_register_of(dp);
                if (err)
                        break;
                dsa_port_link_registered = true;

                err = dsa_port_enable(dp, NULL);
                if (err)
                        break;
                dsa_port_enabled = true;

                break;
        case DSA_PORT_TYPE_DSA:
                err = dsa_port_link_register_of(dp);
                if (err)
                        break;
                dsa_port_link_registered = true;

                err = dsa_port_enable(dp, NULL);
                if (err)
                        break;
                dsa_port_enabled = true;

                break;
        case DSA_PORT_TYPE_USER:
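                /* Errors are deliberately ignored here: when the port has no
                 * usable "mac-address" property, dsa_slave_create() falls
                 * back to a different address source for the slave netdev.
                 */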
                of_get_mac_address(dp->dn, dp->mac);
                err = dsa_slave_create(dp);
                if (err)
                        break;

                devlink_port_type_eth_set(dlp, dp->slave);
                break;
        }

        if (err && dsa_port_enabled)
                dsa_port_disable(dp);
        if (err && dsa_port_link_registered)
                dsa_port_link_unregister_of(dp);
        if (err) {
                if (ds->ops->port_teardown)
                        ds->ops->port_teardown(ds, dp->index);
                return err;
        }

        dp->setup = true;

        return 0;
}

static int dsa_port_devlink_setup(struct dsa_port *dp)
{
        struct devlink_port *dlp = &dp->devlink_port;
        struct dsa_switch_tree *dst = dp->ds->dst;
        struct devlink_port_attrs attrs = {};
        struct devlink *dl = dp->ds->devlink;
        const unsigned char *id;
        unsigned char len;
        int err;

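        /* Use the tree index as the devlink switch ID, so every port of the
         * same fabric reports one common phys_switch_id to user space.
         */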
        id = (const unsigned char *)&dst->index;
        len = sizeof(dst->index);

        attrs.phys.port_number = dp->index;
        memcpy(attrs.switch_id.id, id, len);
        attrs.switch_id.id_len = len;
        memset(dlp, 0, sizeof(*dlp));

        switch (dp->type) {
        case DSA_PORT_TYPE_UNUSED:
                attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
                break;
        case DSA_PORT_TYPE_CPU:
                attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
                break;
        case DSA_PORT_TYPE_DSA:
                attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
                break;
        case DSA_PORT_TYPE_USER:
                attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
                break;
        }

        devlink_port_attrs_set(dlp, &attrs);
        err = devlink_port_register(dl, dlp, dp->index);

        if (!err)
                dp->devlink_port_setup = true;

        return err;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
        struct devlink_port *dlp = &dp->devlink_port;
        struct dsa_switch *ds = dp->ds;
        struct dsa_mac_addr *a, *tmp;

        if (!dp->setup)
                return;

        if (ds->ops->port_teardown)
                ds->ops->port_teardown(ds, dp->index);

        devlink_port_type_clear(dlp);

        switch (dp->type) {
        case DSA_PORT_TYPE_UNUSED:
                break;
        case DSA_PORT_TYPE_CPU:
                dsa_port_disable(dp);
                dsa_port_link_unregister_of(dp);
                break;
        case DSA_PORT_TYPE_DSA:
                dsa_port_disable(dp);
                dsa_port_link_unregister_of(dp);
                break;
        case DSA_PORT_TYPE_USER:
                if (dp->slave) {
                        dsa_slave_destroy(dp->slave);
                        dp->slave = NULL;
                }
                break;
        }

        list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
                list_del(&a->list);
                kfree(a);
        }

        list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
                list_del(&a->list);
                kfree(a);
        }

        dp->setup = false;
}

static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
        struct devlink_port *dlp = &dp->devlink_port;

        if (dp->devlink_port_setup)
                devlink_port_unregister(dlp);
        dp->devlink_port_setup = false;
}

/* Destroy the current devlink port, and create a new one which has the UNUSED
 * flavour. At this point, any call to ds->ops->port_setup has already been
 * balanced out by a call to ds->ops->port_teardown, so we know that any
 * devlink port regions the driver had are now unregistered. We then call its
 * ds->ops->port_setup again, in order for the driver to re-create them on the
 * new devlink port.
 */
static int dsa_port_reinit_as_unused(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;
        int err;

        dsa_port_devlink_teardown(dp);
        dp->type = DSA_PORT_TYPE_UNUSED;
        err = dsa_port_devlink_setup(dp);
        if (err)
                return err;

        if (ds->ops->port_setup) {
                /* On error, leave the devlink port registered,
                 * dsa_switch_teardown will clean it up later.
                 */
                err = ds->ops->port_setup(ds, dp->index);
                if (err)
                        return err;
        }

        return 0;
}

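/* The devlink ops below are thin trampolines into the switch driver's
 * optional hooks; each returns -EOPNOTSUPP when the driver does not
 * implement the corresponding operation.
 */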
static int dsa_devlink_info_get(struct devlink *dl,
                                struct devlink_info_req *req,
                                struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dsa_devlink_to_ds(dl);

        if (ds->ops->devlink_info_get)
                return ds->ops->devlink_info_get(ds, req, extack);

        return -EOPNOTSUPP;
}

static int dsa_devlink_sb_pool_get(struct devlink *dl,
                                   unsigned int sb_index, u16 pool_index,
                                   struct devlink_sb_pool_info *pool_info)
{
        struct dsa_switch *ds = dsa_devlink_to_ds(dl);

        if (!ds->ops->devlink_sb_pool_get)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
                                            pool_info);
}

static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
                                   u16 pool_index, u32 size,
                                   enum devlink_sb_threshold_type threshold_type,
                                   struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dsa_devlink_to_ds(dl);

        if (!ds->ops->devlink_sb_pool_set)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
                                            threshold_type, extack);
}

static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
                                        unsigned int sb_index, u16 pool_index,
                                        u32 *p_threshold)
{
        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
        int port = dsa_devlink_port_to_port(dlp);

        if (!ds->ops->devlink_sb_port_pool_get)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
                                                 pool_index, p_threshold);
}

static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
                                        unsigned int sb_index, u16 pool_index,
                                        u32 threshold,
                                        struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
        int port = dsa_devlink_port_to_port(dlp);

        if (!ds->ops->devlink_sb_port_pool_set)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
                                                 pool_index, threshold, extack);
}

static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
                                unsigned int sb_index, u16 tc_index,
                                enum devlink_sb_pool_type pool_type,
                                u16 *p_pool_index, u32 *p_threshold)
{
        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
        int port = dsa_devlink_port_to_port(dlp);

        if (!ds->ops->devlink_sb_tc_pool_bind_get)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
                                                    tc_index, pool_type,
                                                    p_pool_index, p_threshold);
}

static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
                                unsigned int sb_index, u16 tc_index,
                                enum devlink_sb_pool_type pool_type,
                                u16 pool_index, u32 threshold,
                                struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
        int port = dsa_devlink_port_to_port(dlp);

        if (!ds->ops->devlink_sb_tc_pool_bind_set)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
                                                    tc_index, pool_type,
                                                    pool_index, threshold,
                                                    extack);
}

static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
                                       unsigned int sb_index)
{
        struct dsa_switch *ds = dsa_devlink_to_ds(dl);

        if (!ds->ops->devlink_sb_occ_snapshot)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}

static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
                                        unsigned int sb_index)
{
        struct dsa_switch *ds = dsa_devlink_to_ds(dl);

        if (!ds->ops->devlink_sb_occ_max_clear)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}

static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
                                            unsigned int sb_index,
                                            u16 pool_index, u32 *p_cur,
                                            u32 *p_max)
{
        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
        int port = dsa_devlink_port_to_port(dlp);

        if (!ds->ops->devlink_sb_occ_port_pool_get)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
                                                     pool_index, p_cur, p_max);
}

static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
                                    unsigned int sb_index, u16 tc_index,
                                    enum devlink_sb_pool_type pool_type,
                                    u32 *p_cur, u32 *p_max)
{
        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
        int port = dsa_devlink_port_to_port(dlp);

        if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
                                                        sb_index, tc_index,
                                                        pool_type, p_cur,
                                                        p_max);
}

static const struct devlink_ops dsa_devlink_ops = {
        .info_get                = dsa_devlink_info_get,
        .sb_pool_get             = dsa_devlink_sb_pool_get,
        .sb_pool_set             = dsa_devlink_sb_pool_set,
        .sb_port_pool_get        = dsa_devlink_sb_port_pool_get,
        .sb_port_pool_set        = dsa_devlink_sb_port_pool_set,
        .sb_tc_pool_bind_get     = dsa_devlink_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set     = dsa_devlink_sb_tc_pool_bind_set,
        .sb_occ_snapshot         = dsa_devlink_sb_occ_snapshot,
        .sb_occ_max_clear        = dsa_devlink_sb_occ_max_clear,
        .sb_occ_port_pool_get    = dsa_devlink_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get = dsa_devlink_sb_occ_tc_port_bind_get,
};

static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
        const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
        struct dsa_switch_tree *dst = ds->dst;
        int port, err;

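        /* Nothing to do if the tree uses the protocol that the driver
         * reported as its default: that tagger is already in effect.
         */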
        if (tag_ops->proto == dst->default_proto)
                return 0;

        for (port = 0; port < ds->num_ports; port++) {
                if (!dsa_is_cpu_port(ds, port))
                        continue;

                err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
                if (err) {
                        dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
                                tag_ops->name, ERR_PTR(err));
                        return err;
                }
        }

        return 0;
}

static int dsa_switch_setup(struct dsa_switch *ds)
{
        struct dsa_devlink_priv *dl_priv;
        struct dsa_port *dp;
        int err;

        if (ds->setup)
                return 0;

        /* Initialize ds->phys_mii_mask before registering the slave MDIO bus
         * driver and before ops->setup() has run, since the switch drivers and
         * the slave MDIO bus driver rely on these values to decide whether or
         * not to probe PHY devices.
         */
        ds->phys_mii_mask |= dsa_user_ports(ds);

        /* Add the switch to devlink before calling setup, so that setup can
         * add dpipe tables
         */
        ds->devlink = devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv));
        if (!ds->devlink)
                return -ENOMEM;
        dl_priv = devlink_priv(ds->devlink);
        dl_priv->ds = ds;

        err = devlink_register(ds->devlink, ds->dev);
        if (err)
                goto free_devlink;

        /* Setup devlink port instances now, so that the switch
         * setup() can register regions etc. against the ports
         */
        list_for_each_entry(dp, &ds->dst->ports, list) {
                if (dp->ds == ds) {
                        err = dsa_port_devlink_setup(dp);
                        if (err)
                                goto unregister_devlink_ports;
                }
        }

        err = dsa_switch_register_notifier(ds);
        if (err)
                goto unregister_devlink_ports;

        ds->configure_vlan_while_not_filtering = true;

        err = ds->ops->setup(ds);
        if (err < 0)
                goto unregister_notifier;

        err = dsa_switch_setup_tag_protocol(ds);
        if (err)
                goto teardown;

        devlink_params_publish(ds->devlink);

        if (!ds->slave_mii_bus && ds->ops->phy_read) {
                ds->slave_mii_bus = mdiobus_alloc();
                if (!ds->slave_mii_bus) {
                        err = -ENOMEM;
                        goto teardown;
                }

                dsa_slave_mii_bus_init(ds);

                err = mdiobus_register(ds->slave_mii_bus);
                if (err < 0)
                        goto free_slave_mii_bus;
        }

        ds->setup = true;

        return 0;

free_slave_mii_bus:
        if (ds->slave_mii_bus && ds->ops->phy_read)
                mdiobus_free(ds->slave_mii_bus);
teardown:
        if (ds->ops->teardown)
                ds->ops->teardown(ds);
unregister_notifier:
        dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
        list_for_each_entry(dp, &ds->dst->ports, list)
                if (dp->ds == ds)
                        dsa_port_devlink_teardown(dp);
        devlink_unregister(ds->devlink);
free_devlink:
        devlink_free(ds->devlink);
        ds->devlink = NULL;

        return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
        struct dsa_port *dp;

        if (!ds->setup)
                return;

        if (ds->slave_mii_bus && ds->ops->phy_read) {
                mdiobus_unregister(ds->slave_mii_bus);
                mdiobus_free(ds->slave_mii_bus);
                ds->slave_mii_bus = NULL;
        }

        dsa_switch_unregister_notifier(ds);

        if (ds->ops->teardown)
                ds->ops->teardown(ds);

        if (ds->devlink) {
                list_for_each_entry(dp, &ds->dst->ports, list)
                        if (dp->ds == ds)
                                dsa_port_devlink_teardown(dp);
                devlink_unregister(ds->devlink);
                devlink_free(ds->devlink);
                ds->devlink = NULL;
        }

        ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
                        dsa_port_teardown(dp);

        dsa_flush_workqueue();

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
                        dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                dsa_switch_teardown(dp->ds);
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;
        int err;

        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_switch_setup(dp->ds);
                if (err)
                        goto teardown;
        }

        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_port_setup(dp);
                if (err) {
                        err = dsa_port_reinit_as_unused(dp);
                        if (err)
                                goto teardown;
                }
        }

        return 0;

teardown:
        dsa_tree_teardown_ports(dst);

        dsa_tree_teardown_switches(dst);

        return err;
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;
        int err;

        list_for_each_entry(dp, &dst->ports, list) {
                if (dsa_port_is_cpu(dp)) {
                        err = dsa_master_setup(dp->master, dp);
                        if (err)
                                return err;
                }
        }

        return 0;
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_cpu(dp))
                        dsa_master_teardown(dp->master);
}

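/* Size the tree-global LAG array after the largest ds->num_lag_ids
 * advertised by any member switch, so one linear ID space can serve the
 * whole fabric.
 */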
static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
        unsigned int len = 0;
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list) {
                if (dp->ds->num_lag_ids > len)
                        len = dp->ds->num_lag_ids;
        }

        if (!len)
                return 0;

        dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
        if (!dst->lags)
                return -ENOMEM;

        dst->lags_len = len;
        return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
        kfree(dst->lags);
}

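/* Bring up the whole fabric: routing table first, then the default CPU port
 * assignment, the switches with their ports, the DSA masters, and finally
 * the LAG map. dsa_tree_teardown() walks these steps in reverse.
 */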
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
        bool complete;
        int err;

        if (dst->setup) {
                pr_err("DSA: tree %d already setup! Disjoint trees?\n",
                       dst->index);
                return -EEXIST;
        }

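        /* An incomplete routing table is not an error: some member switches
         * of this tree have not probed yet. The switch that completes the
         * table will run the full setup.
         */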
        complete = dsa_tree_setup_routing_table(dst);
        if (!complete)
                return 0;

        err = dsa_tree_setup_default_cpu(dst);
        if (err)
                return err;

        err = dsa_tree_setup_switches(dst);
        if (err)
                goto teardown_default_cpu;

        err = dsa_tree_setup_master(dst);
        if (err)
                goto teardown_switches;

        err = dsa_tree_setup_lags(dst);
        if (err)
                goto teardown_master;

        dst->setup = true;

        pr_info("DSA: tree %d setup\n", dst->index);

        return 0;

teardown_master:
        dsa_tree_teardown_master(dst);
teardown_switches:
        dsa_tree_teardown_ports(dst);
        dsa_tree_teardown_switches(dst);
teardown_default_cpu:
        dsa_tree_teardown_default_cpu(dst);

        return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
        struct dsa_link *dl, *next;

        if (!dst->setup)
                return;

        dsa_tree_teardown_lags(dst);

        dsa_tree_teardown_master(dst);

        dsa_tree_teardown_ports(dst);

        dsa_tree_teardown_switches(dst);

        dsa_tree_teardown_default_cpu(dst);

        list_for_each_entry_safe(dl, next, &dst->rtable, list) {
                list_del(&dl->list);
                kfree(dl);
        }

        pr_info("DSA: tree %d torn down\n", dst->index);

        dst->setup = false;
}

/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
                              struct net_device *master,
                              const struct dsa_device_ops *tag_ops,
                              const struct dsa_device_ops *old_tag_ops)
{
        struct dsa_notifier_tag_proto_info info;
        struct dsa_port *dp;
        int err = -EBUSY;

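        /* rtnl_trylock() + restart_syscall() is the usual pattern for sysfs
         * stores that need the rtnl_mutex: rather than block (and risk
         * deadlocking against a holder of the rtnl_mutex that is waiting on
         * this attribute), make user space retry the write.
         */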
        if (!rtnl_trylock())
                return restart_syscall();

        /* At the moment we don't allow changing the tag protocol under
         * traffic. The rtnl_mutex also happens to serialize concurrent
         * attempts to change the tagging protocol. If we ever lift the IFF_UP
         * restriction, there needs to be another mutex which serializes this.
         */
        if (master->flags & IFF_UP)
                goto out_unlock;

        list_for_each_entry(dp, &dst->ports, list) {
                if (!dsa_is_user_port(dp->ds, dp->index))
                        continue;

                if (dp->slave->flags & IFF_UP)
                        goto out_unlock;
        }

        info.tag_ops = tag_ops;
        err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
        if (err)
                goto out_unwind_tagger;

        dst->tag_ops = tag_ops;

        rtnl_unlock();

        return 0;

out_unwind_tagger:
        info.tag_ops = old_tag_ops;
        dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
        rtnl_unlock();
        return err;
}

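/* Get-or-create, mirroring dsa_tree_touch(): return the existing dsa_port
 * for this switch/index pair, or allocate one and link it into the tree's
 * ports list.
 */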
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
        struct dsa_switch_tree *dst = ds->dst;
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dp->ds == ds && dp->index == index)
                        return dp;

        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (!dp)
                return NULL;

        dp->ds = ds;
        dp->index = index;

        INIT_LIST_HEAD(&dp->list);
        list_add_tail(&dp->list, &dst->ports);

        return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
        if (!name)
                name = "eth%d";

        dp->type = DSA_PORT_TYPE_USER;
        dp->name = name;

        return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
        dp->type = DSA_PORT_TYPE_DSA;

        return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
                                                  struct net_device *master)
{
        enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
        struct dsa_switch *mds, *ds = dp->ds;
        unsigned int mdp_upstream;
        struct dsa_port *mdp;

1145 * happens the switch driver may want to know if its tagging protocol
1146 * is going to work in such a configuration.
1147 */
1148 if (dsa_slave_dev_check(master)) {
1149 mdp = dsa_slave_to_port(master);
1150 mds = mdp->ds;
1151 mdp_upstream = dsa_upstream_port(mds, mdp->index);
1152 tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
1153 DSA_TAG_PROTO_NONE);
1154 }
1155
1156 /* If the master device is not itself a DSA slave in a disjoint DSA
1157 * tree, then return immediately.
1158 */
1159 return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
1160}
1161
1162static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
1163 const char *user_protocol)
1164{
1165 struct dsa_switch *ds = dp->ds;
1166 struct dsa_switch_tree *dst = ds->dst;
1167 const struct dsa_device_ops *tag_ops;
1168 enum dsa_tag_protocol default_proto;
1169
1170 /* Find out which protocol the switch would prefer. */
1171 default_proto = dsa_get_tag_protocol(dp, master);
1172 if (dst->default_proto) {
1173 if (dst->default_proto != default_proto) {
1174 dev_err(ds->dev,
1175 "A DSA switch tree can have only one tagging protocol\n");
1176 return -EINVAL;
1177 }
1178 } else {
1179 dst->default_proto = default_proto;
1180 }
1181
1182 /* See if the user wants to override that preference. */
1183 if (user_protocol) {
1184 if (!ds->ops->change_tag_protocol) {
1185 dev_err(ds->dev, "Tag protocol cannot be modified\n");
1186 return -EINVAL;
1187 }
1188
1189 tag_ops = dsa_find_tagger_by_name(user_protocol);
1190 } else {
1191 tag_ops = dsa_tag_driver_get(default_proto);
1192 }
1193
1194 if (IS_ERR(tag_ops)) {
1195 if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
1196 return -EPROBE_DEFER;
1197
1198 dev_warn(ds->dev, "No tagger for this switch\n");
1199 return PTR_ERR(tag_ops);
1200 }
1201
1202 if (dst->tag_ops) {
1203 if (dst->tag_ops != tag_ops) {
1204 dev_err(ds->dev,
1205 "A DSA switch tree can have only one tagging protocol\n");
1206
1207 dsa_tag_driver_put(tag_ops);
1208 return -EINVAL;
1209 }
1210
1211 /* In the case of multiple CPU ports per switch, the tagging
1212 * protocol is still reference-counted only per switch tree.
1213 */
1214 dsa_tag_driver_put(tag_ops);
1215 } else {
1216 dst->tag_ops = tag_ops;
1217 }
1218
1219 dp->master = master;
1220 dp->type = DSA_PORT_TYPE_CPU;
1221 dsa_port_set_tag_protocol(dp, dst->tag_ops);
1222 dp->dst = dst;
1223
1224 /* At this point, the tree may be configured to use a different
1225 * tagger than the one chosen by the switch driver during
1226 * .setup, in the case when a user selects a custom protocol
1227 * through the DT.
1228 *
1229 * This is resolved by syncing the driver with the tree in
1230 * dsa_switch_setup_tag_protocol once .setup has run and the
1231 * driver is ready to accept calls to .change_tag_protocol. If
1232 * the driver does not support the custom protocol at that
1233 * point, the tree is wholly rejected, thereby ensuring that the
1234 * tree and driver are always in agreement on the protocol to
1235 * use.
1236 */
1237 return 0;
1238}
1239
1240static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
1241{
1242 struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
1243 const char *name = of_get_property(dn, "label", NULL);
1244 bool link = of_property_read_bool(dn, "link");
1245
1246 dp->dn = dn;
1247
1248 if (ethernet) {
1249 struct net_device *master;
1250 const char *user_protocol;
1251
1252 master = of_find_net_device_by_node(ethernet);
1253 if (!master)
1254 return -EPROBE_DEFER;
1255
1256 user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
1257 return dsa_port_parse_cpu(dp, master, user_protocol);
1258 }
1259
1260 if (link)
1261 return dsa_port_parse_dsa(dp);
1262
1263 return dsa_port_parse_user(dp, name);
1264}
1265
1266static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
1267 struct device_node *dn)
1268{
1269 struct device_node *ports, *port;
1270 struct dsa_port *dp;
1271 int err = 0;
1272 u32 reg;
1273
1274 ports = of_get_child_by_name(dn, "ports");
1275 if (!ports) {
1276 /* The second possibility is "ethernet-ports" */
1277 ports = of_get_child_by_name(dn, "ethernet-ports");
1278 if (!ports) {
1279 dev_err(ds->dev, "no ports child node found\n");
1280 return -EINVAL;
1281 }
1282 }
1283
1284 for_each_available_child_of_node(ports, port) {
1285 err = of_property_read_u32(port, "reg", ®);
1286 if (err) {
1287 of_node_put(port);
1288 goto out_put_node;
1289 }
1290
1291 if (reg >= ds->num_ports) {
1292 dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
1293 port, reg, ds->num_ports);
1294 of_node_put(port);
1295 err = -EINVAL;
1296 goto out_put_node;
1297 }
1298
1299 dp = dsa_to_port(ds, reg);
1300
1301 err = dsa_port_parse_of(dp, port);
1302 if (err) {
1303 of_node_put(port);
1304 goto out_put_node;
1305 }
1306 }
1307
1308out_put_node:
1309 of_node_put(ports);
1310 return err;
1311}
1312
1313static int dsa_switch_parse_member_of(struct dsa_switch *ds,
1314 struct device_node *dn)
1315{
1316 u32 m[2] = { 0, 0 };
1317 int sz;
1318
1319 /* Don't error out if this optional property isn't found */
1320 sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
1321 if (sz < 0 && sz != -EINVAL)
1322 return sz;
1323
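        /* With the property absent, m[] stays { 0, 0 }: the switch becomes
         * member 0 of tree 0, which covers the common single-switch case.
         */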
        ds->index = m[1];

        ds->dst = dsa_tree_touch(m[0]);
        if (!ds->dst)
                return -ENOMEM;

        if (dsa_switch_find(ds->dst->index, ds->index)) {
                dev_err(ds->dev,
                        "A DSA switch with index %d already exists in tree %d\n",
                        ds->index, ds->dst->index);
                return -EEXIST;
        }

        return 0;
}

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
        struct dsa_port *dp;
        int port;

        for (port = 0; port < ds->num_ports; port++) {
                dp = dsa_port_touch(ds, port);
                if (!dp)
                        return -ENOMEM;
        }

        return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
        int err;

        err = dsa_switch_parse_member_of(ds, dn);
        if (err)
                return err;

        err = dsa_switch_touch_ports(ds);
        if (err)
                return err;

        return dsa_switch_parse_ports_of(ds, dn);
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
                          struct device *dev)
{
        if (!strcmp(name, "cpu")) {
                struct net_device *master;

                master = dsa_dev_to_net_device(dev);
                if (!master)
                        return -EPROBE_DEFER;

                dev_put(master);

                return dsa_port_parse_cpu(dp, master, NULL);
        }

        if (!strcmp(name, "dsa"))
                return dsa_port_parse_dsa(dp);

        return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
                                  struct dsa_chip_data *cd)
{
        bool valid_name_found = false;
        struct dsa_port *dp;
        struct device *dev;
        const char *name;
        unsigned int i;
        int err;

        for (i = 0; i < DSA_MAX_PORTS; i++) {
                name = cd->port_names[i];
                dev = cd->netdev[i];
                dp = dsa_to_port(ds, i);

                if (!name)
                        continue;

                err = dsa_port_parse(dp, name, dev);
                if (err)
                        return err;

                valid_name_found = true;
        }

        if (!valid_name_found && i == DSA_MAX_PORTS)
                return -EINVAL;

        return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
        int err;

        ds->cd = cd;

        /* We don't support interconnected switches nor multiple trees via
         * platform data, so this is the unique switch of the tree.
         */
        ds->index = 0;
        ds->dst = dsa_tree_touch(0);
        if (!ds->dst)
                return -ENOMEM;

        err = dsa_switch_touch_ports(ds);
        if (err)
                return err;

        return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
        struct dsa_switch_tree *dst = ds->dst;
        struct dsa_port *dp, *next;

        list_for_each_entry_safe(dp, next, &dst->ports, list) {
                if (dp->ds != ds)
                        continue;
                list_del(&dp->list);
                kfree(dp);
        }
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
        struct dsa_switch_tree *dst;
        struct dsa_chip_data *pdata;
        struct device_node *np;
        int err;

        if (!ds->dev)
                return -ENODEV;

        pdata = ds->dev->platform_data;
        np = ds->dev->of_node;

        if (!ds->num_ports)
                return -EINVAL;

        if (np) {
                err = dsa_switch_parse_of(ds, np);
                if (err)
                        dsa_switch_release_ports(ds);
        } else if (pdata) {
                err = dsa_switch_parse(ds, pdata);
                if (err)
                        dsa_switch_release_ports(ds);
        } else {
                err = -ENODEV;
        }

        if (err)
                return err;

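        /* dsa_tree_touch() took one tree reference during parsing and
         * another one is taken here; dsa_register_switch() drops one of
         * them, leaving one reference held per registered switch.
         */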
        dst = ds->dst;
        dsa_tree_get(dst);
        err = dsa_tree_setup(dst);
        if (err) {
                dsa_switch_release_ports(ds);
                dsa_tree_put(dst);
        }

        return err;
}

int dsa_register_switch(struct dsa_switch *ds)
{
        int err;

        mutex_lock(&dsa2_mutex);
        err = dsa_switch_probe(ds);
        dsa_tree_put(ds->dst);
        mutex_unlock(&dsa2_mutex);

        return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);

static void dsa_switch_remove(struct dsa_switch *ds)
{
        struct dsa_switch_tree *dst = ds->dst;

        dsa_tree_teardown(dst);
        dsa_switch_release_ports(ds);
        dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
        mutex_lock(&dsa2_mutex);
        dsa_switch_remove(ds);
        mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);