1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/dsa/dsa.c - Hardware switch handling
4 * Copyright (c) 2008-2009 Marvell Semiconductor
5 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
6 */
7
8#include <linux/device.h>
9#include <linux/list.h>
10#include <linux/platform_device.h>
11#include <linux/slab.h>
12#include <linux/module.h>
13#include <linux/notifier.h>
14#include <linux/of.h>
15#include <linux/of_mdio.h>
16#include <linux/of_platform.h>
17#include <linux/of_net.h>
18#include <linux/netdevice.h>
19#include <linux/sysfs.h>
20#include <linux/phy_fixed.h>
21#include <linux/ptp_classify.h>
22#include <linux/etherdevice.h>
23
24#include "dsa_priv.h"
25
26static LIST_HEAD(dsa_tag_drivers_list);
27static DEFINE_MUTEX(dsa_tag_drivers_lock);
28
29static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
30 struct net_device *dev)
31{
32 /* Just return the original SKB */
33 return skb;
34}
35
36static const struct dsa_device_ops none_ops = {
37 .name = "none",
38 .proto = DSA_TAG_PROTO_NONE,
39 .xmit = dsa_slave_notag_xmit,
40 .rcv = NULL,
41};
42
43DSA_TAG_DRIVER(none_ops);
44
45static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
46 struct module *owner)
47{
48 dsa_tag_driver->owner = owner;
49
50 mutex_lock(&dsa_tag_drivers_lock);
51 list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
52 mutex_unlock(&dsa_tag_drivers_lock);
53}
54
55void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
56 unsigned int count, struct module *owner)
57{
58 unsigned int i;
59
60 for (i = 0; i < count; i++)
61 dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
62}
63
64static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
65{
66 mutex_lock(&dsa_tag_drivers_lock);
67 list_del(&dsa_tag_driver->list);
68 mutex_unlock(&dsa_tag_drivers_lock);
69}
70EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);
71
72void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
73 unsigned int count)
74{
75 unsigned int i;
76
77 for (i = 0; i < count; i++)
78 dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
79}
80EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);
81
82const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
83{
84 return ops->name;
85};
86
87/* Function takes a reference on the module owning the tagger,
88 * so dsa_tag_driver_put must be called afterwards.
89 */
90const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf)
91{
92 const struct dsa_device_ops *ops = ERR_PTR(-ENOPROTOOPT);
93 struct dsa_tag_driver *dsa_tag_driver;
94
95 mutex_lock(&dsa_tag_drivers_lock);
96 list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
97 const struct dsa_device_ops *tmp = dsa_tag_driver->ops;
98
99 if (!sysfs_streq(buf, tmp->name))
100 continue;
101
102 if (!try_module_get(dsa_tag_driver->owner))
103 break;
104
105 ops = tmp;
106 break;
107 }
108 mutex_unlock(&dsa_tag_drivers_lock);
109
110 return ops;
111}
112
113const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol)
114{
115 struct dsa_tag_driver *dsa_tag_driver;
116 const struct dsa_device_ops *ops;
117 bool found = false;
118
119 request_module("%s%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);
120
121 mutex_lock(&dsa_tag_drivers_lock);
122 list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
123 ops = dsa_tag_driver->ops;
124 if (ops->proto == tag_protocol) {
125 found = true;
126 break;
127 }
128 }
129
130 if (found) {
131 if (!try_module_get(dsa_tag_driver->owner))
132 ops = ERR_PTR(-ENOPROTOOPT);
133 } else {
134 ops = ERR_PTR(-ENOPROTOOPT);
135 }
136
137 mutex_unlock(&dsa_tag_drivers_lock);
138
139 return ops;
140}
141
142void dsa_tag_driver_put(const struct dsa_device_ops *ops)
143{
144 struct dsa_tag_driver *dsa_tag_driver;
145
146 mutex_lock(&dsa_tag_drivers_lock);
147 list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
148 if (dsa_tag_driver->ops == ops) {
149 module_put(dsa_tag_driver->owner);
150 break;
151 }
152 }
153 mutex_unlock(&dsa_tag_drivers_lock);
154}
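
/* Illustrative only, not part of the original file: a minimal sketch of the
 * refcounting contract around the lookups above. Both dsa_tag_driver_get()
 * and dsa_find_tagger_by_name() take a reference on the module owning the
 * tagger, so every successful lookup must be balanced by dsa_tag_driver_put().
 * The function name below is hypothetical.
 */
static int __maybe_unused dsa_tagger_lookup_example(void)
{
	const struct dsa_device_ops *ops;

	ops = dsa_tag_driver_get(DSA_TAG_PROTO_NONE);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/* ... use ops->name, ops->xmit, ops->rcv ... */

	dsa_tag_driver_put(ops);
	return 0;
}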
155
156static int dev_is_class(struct device *dev, void *class)
157{
158 if (dev->class != NULL && !strcmp(dev->class->name, class))
159 return 1;
160
161 return 0;
162}
163
164static struct device *dev_find_class(struct device *parent, char *class)
165{
166 if (dev_is_class(parent, class)) {
167 get_device(parent);
168 return parent;
169 }
170
171 return device_find_child(parent, class, dev_is_class);
172}
173
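/* Find the "net" class device at or below @dev and return the corresponding
 * net_device with a reference held (dev_hold()); the caller is responsible
 * for dropping it with dev_put(). Returns NULL if no such device exists.
 */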
174struct net_device *dsa_dev_to_net_device(struct device *dev)
175{
176 struct device *d;
177
178 d = dev_find_class(dev, "net");
179 if (d != NULL) {
180 struct net_device *nd;
181
182 nd = to_net_dev(d);
183 dev_hold(nd);
184 put_device(d);
185
186 return nd;
187 }
188
189 return NULL;
190}
191EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);
192
193/* Determine if we should defer delivery of skb until we have a rx timestamp.
194 *
195 * Called from dsa_switch_rcv. For now, this will only work if tagging is
196 * enabled on the switch. Normally the MAC driver would retrieve the hardware
197 * timestamp when it reads the packet out of the hardware. However in a DSA
198 * switch, the DSA driver owning the interface to which the packet is
199 * delivered is never notified unless we do so here.
200 */
201static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
202 struct sk_buff *skb)
203{
204 struct dsa_switch *ds = p->dp->ds;
205 unsigned int type;
206
207 if (skb_headroom(skb) < ETH_HLEN)
208 return false;
209
210 __skb_push(skb, ETH_HLEN);
211
212 type = ptp_classify_raw(skb);
213
214 __skb_pull(skb, ETH_HLEN);
215
216 if (type == PTP_CLASS_NONE)
217 return false;
218
219 if (likely(ds->ops->port_rxtstamp))
220 return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);
221
222 return false;
223}
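
/* Illustrative only, not part of the original file: the rough shape of a
 * driver ->port_rxtstamp() hook cooperating with the deferral above. A
 * "true" return tells dsa_switch_rcv() that the driver now owns the skb;
 * once the hardware timestamp is known (typically from deferred work), the
 * driver fills skb_hwtstamps(skb)->hwtstamp and hands the skb back to the
 * stack itself, e.g. with netif_rx(). The function name is hypothetical.
 */
static bool __maybe_unused example_port_rxtstamp(struct dsa_switch *ds,
						 int port, struct sk_buff *skb,
						 unsigned int type)
{
	/* Queue the skb and kick deferred work that reads the timestamp from
	 * the hardware, then complete delivery from that work item.
	 */
	return true;
}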
224
225static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
226 struct packet_type *pt, struct net_device *unused)
227{
228 struct dsa_port *cpu_dp = dev->dsa_ptr;
229 struct sk_buff *nskb = NULL;
230 struct dsa_slave_priv *p;
231
232 if (unlikely(!cpu_dp)) {
233 kfree_skb(skb);
234 return 0;
235 }
236
237 skb = skb_unshare(skb, GFP_ATOMIC);
238 if (!skb)
239 return 0;
240
241 nskb = cpu_dp->rcv(skb, dev, pt);
242 if (!nskb) {
243 kfree_skb(skb);
244 return 0;
245 }
246
247 skb = nskb;
248 skb_push(skb, ETH_HLEN);
249 skb->pkt_type = PACKET_HOST;
250 skb->protocol = eth_type_trans(skb, skb->dev);
251
252 if (unlikely(!dsa_slave_dev_check(skb->dev))) {
253 /* Packet is to be injected directly on an upper
254 * device, e.g. a team/bond, so skip all DSA-port
255 * specific actions.
256 */
257 netif_rx(skb);
258 return 0;
259 }
260
261 p = netdev_priv(skb->dev);
262
263 if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
264 nskb = dsa_untag_bridge_pvid(skb);
265 if (!nskb) {
266 kfree_skb(skb);
267 return 0;
268 }
269 skb = nskb;
270 }
271
272 dev_sw_netstats_rx_add(skb->dev, skb->len);
273
274 if (dsa_skb_defer_rx_timestamp(p, skb))
275 return 0;
276
277 gro_cells_receive(&p->gcells, skb);
278
279 return 0;
280}
281
282#ifdef CONFIG_PM_SLEEP
283static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
284{
285 const struct dsa_port *dp = dsa_to_port(ds, p);
286
287 return dp->type == DSA_PORT_TYPE_USER && dp->slave;
288}
289
290int dsa_switch_suspend(struct dsa_switch *ds)
291{
292 int i, ret = 0;
293
294 /* Suspend slave network devices */
295 for (i = 0; i < ds->num_ports; i++) {
296 if (!dsa_is_port_initialized(ds, i))
297 continue;
298
299 ret = dsa_slave_suspend(dsa_to_port(ds, i)->slave);
300 if (ret)
301 return ret;
302 }
303
304 if (ds->ops->suspend)
305 ret = ds->ops->suspend(ds);
306
307 return ret;
308}
309EXPORT_SYMBOL_GPL(dsa_switch_suspend);
310
311int dsa_switch_resume(struct dsa_switch *ds)
312{
313 int i, ret = 0;
314
315 if (ds->ops->resume)
316 ret = ds->ops->resume(ds);
317
318 if (ret)
319 return ret;
320
321 /* Resume slave network devices */
322 for (i = 0; i < ds->num_ports; i++) {
323 if (!dsa_is_port_initialized(ds, i))
324 continue;
325
326 ret = dsa_slave_resume(dsa_to_port(ds, i)->slave);
327 if (ret)
328 return ret;
329 }
330
331 return 0;
332}
333EXPORT_SYMBOL_GPL(dsa_switch_resume);
334#endif
335
336static struct packet_type dsa_pack_type __read_mostly = {
337 .type = cpu_to_be16(ETH_P_XDSA),
338 .func = dsa_switch_rcv,
339};
340
341static struct workqueue_struct *dsa_owq;
342
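/* dsa_owq is allocated as an ordered workqueue in dsa_init_module(), so
 * items queued here run one at a time, in the order they were submitted.
 */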
343bool dsa_schedule_work(struct work_struct *work)
344{
345 return queue_work(dsa_owq, work);
346}
347
348void dsa_flush_workqueue(void)
349{
350 flush_workqueue(dsa_owq);
351}
352
353int dsa_devlink_param_get(struct devlink *dl, u32 id,
354 struct devlink_param_gset_ctx *ctx)
355{
356 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
357
358 if (!ds->ops->devlink_param_get)
359 return -EOPNOTSUPP;
360
361 return ds->ops->devlink_param_get(ds, id, ctx);
362}
363EXPORT_SYMBOL_GPL(dsa_devlink_param_get);
364
365int dsa_devlink_param_set(struct devlink *dl, u32 id,
366 struct devlink_param_gset_ctx *ctx)
367{
368 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
369
370 if (!ds->ops->devlink_param_set)
371 return -EOPNOTSUPP;
372
373 return ds->ops->devlink_param_set(ds, id, ctx);
374}
375EXPORT_SYMBOL_GPL(dsa_devlink_param_set);
376
377int dsa_devlink_params_register(struct dsa_switch *ds,
378 const struct devlink_param *params,
379 size_t params_count)
380{
381 return devlink_params_register(ds->devlink, params, params_count);
382}
383EXPORT_SYMBOL_GPL(dsa_devlink_params_register);
384
385void dsa_devlink_params_unregister(struct dsa_switch *ds,
386 const struct devlink_param *params,
387 size_t params_count)
388{
389 devlink_params_unregister(ds->devlink, params, params_count);
390}
391EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);
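
/* Illustrative only, not part of the original file: one way a switch driver
 * could describe a driver-specific devlink parameter serviced by the
 * dsa_devlink_param_get()/dsa_devlink_param_set() bridges above. The id,
 * name and type below are hypothetical:
 *
 *	static const struct devlink_param example_params[] = {
 *		DEVLINK_PARAM_DRIVER(EXAMPLE_PARAM_ID_FOO, "foo",
 *				     DEVLINK_PARAM_TYPE_BOOL,
 *				     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
 *				     dsa_devlink_param_get,
 *				     dsa_devlink_param_set, NULL),
 *	};
 *
 *	err = dsa_devlink_params_register(ds, example_params,
 *					  ARRAY_SIZE(example_params));
 */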
392
393int dsa_devlink_resource_register(struct dsa_switch *ds,
394 const char *resource_name,
395 u64 resource_size,
396 u64 resource_id,
397 u64 parent_resource_id,
398 const struct devlink_resource_size_params *size_params)
399{
400 return devlink_resource_register(ds->devlink, resource_name,
401 resource_size, resource_id,
402 parent_resource_id,
403 size_params);
404}
405EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);
406
407void dsa_devlink_resources_unregister(struct dsa_switch *ds)
408{
409 devlink_resources_unregister(ds->devlink, NULL);
410}
411EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);
412
413void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
414 u64 resource_id,
415 devlink_resource_occ_get_t *occ_get,
416 void *occ_get_priv)
417{
418 return devlink_resource_occ_get_register(ds->devlink, resource_id,
419 occ_get, occ_get_priv);
420}
421EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);
422
423void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
424 u64 resource_id)
425{
426 devlink_resource_occ_get_unregister(ds->devlink, resource_id);
427}
428EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);
429
430struct devlink_region *
431dsa_devlink_region_create(struct dsa_switch *ds,
432 const struct devlink_region_ops *ops,
433 u32 region_max_snapshots, u64 region_size)
434{
435 return devlink_region_create(ds->devlink, ops, region_max_snapshots,
436 region_size);
437}
438EXPORT_SYMBOL_GPL(dsa_devlink_region_create);
439
440struct devlink_region *
441dsa_devlink_port_region_create(struct dsa_switch *ds,
442 int port,
443 const struct devlink_port_region_ops *ops,
444 u32 region_max_snapshots, u64 region_size)
445{
446 struct dsa_port *dp = dsa_to_port(ds, port);
447
448 return devlink_port_region_create(&dp->devlink_port, ops,
449 region_max_snapshots,
450 region_size);
451}
452EXPORT_SYMBOL_GPL(dsa_devlink_port_region_create);
453
454void dsa_devlink_region_destroy(struct devlink_region *region)
455{
456 devlink_region_destroy(region);
457}
458EXPORT_SYMBOL_GPL(dsa_devlink_region_destroy);
459
460struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
461{
462 if (!netdev || !dsa_slave_dev_check(netdev))
463 return ERR_PTR(-ENODEV);
464
465 return dsa_slave_to_port(netdev);
466}
467EXPORT_SYMBOL_GPL(dsa_port_from_netdev);
468
469static int __init dsa_init_module(void)
470{
471 int rc;
472
473 dsa_owq = alloc_ordered_workqueue("dsa_ordered",
474 WQ_MEM_RECLAIM);
475 if (!dsa_owq)
476 return -ENOMEM;
477
478 rc = dsa_slave_register_notifier();
479 if (rc)
480 goto register_notifier_fail;
481
482 dev_add_pack(&dsa_pack_type);
483
484 dsa_tag_driver_register(&DSA_TAG_DRIVER_NAME(none_ops),
485 THIS_MODULE);
486
487 return 0;
488
489register_notifier_fail:
490 destroy_workqueue(dsa_owq);
491
492 return rc;
493}
494module_init(dsa_init_module);
495
496static void __exit dsa_cleanup_module(void)
497{
498 dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));
499
500 dsa_slave_unregister_notifier();
501 dev_remove_pack(&dsa_pack_type);
502 destroy_workqueue(dsa_owq);
503}
504module_exit(dsa_cleanup_module);
505
506MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
507MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
508MODULE_LICENSE("GPL");
509MODULE_ALIAS("platform:dsa");
1/*
2 * net/dsa/dsa.c - Hardware switch handling
3 * Copyright (c) 2008-2009 Marvell Semiconductor
4 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/ctype.h>
13#include <linux/device.h>
14#include <linux/hwmon.h>
15#include <linux/list.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/module.h>
19#include <net/dsa.h>
20#include <linux/of.h>
21#include <linux/of_mdio.h>
22#include <linux/of_platform.h>
23#include <linux/of_net.h>
24#include <linux/of_gpio.h>
25#include <linux/sysfs.h>
26#include <linux/phy_fixed.h>
27#include <linux/gpio/consumer.h>
28#include "dsa_priv.h"
29
30char dsa_driver_version[] = "0.1";
31
32
33/* switch driver registration ***********************************************/
34static DEFINE_MUTEX(dsa_switch_drivers_mutex);
35static LIST_HEAD(dsa_switch_drivers);
36
37void register_switch_driver(struct dsa_switch_driver *drv)
38{
39 mutex_lock(&dsa_switch_drivers_mutex);
40 list_add_tail(&drv->list, &dsa_switch_drivers);
41 mutex_unlock(&dsa_switch_drivers_mutex);
42}
43EXPORT_SYMBOL_GPL(register_switch_driver);
44
45void unregister_switch_driver(struct dsa_switch_driver *drv)
46{
47 mutex_lock(&dsa_switch_drivers_mutex);
48 list_del_init(&drv->list);
49 mutex_unlock(&dsa_switch_drivers_mutex);
50}
51EXPORT_SYMBOL_GPL(unregister_switch_driver);
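
/* Illustrative only, not part of the original file: a legacy switch driver
 * would register its dsa_switch_driver from module init and remove it on
 * exit (names hypothetical):
 *
 *	static int __init example_switch_init(void)
 *	{
 *		register_switch_driver(&example_switch_driver);
 *		return 0;
 *	}
 *	module_init(example_switch_init);
 *
 *	static void __exit example_switch_cleanup(void)
 *	{
 *		unregister_switch_driver(&example_switch_driver);
 *	}
 *	module_exit(example_switch_cleanup);
 */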
52
53static struct dsa_switch_driver *
54dsa_switch_probe(struct device *host_dev, int sw_addr, char **_name)
55{
56 struct dsa_switch_driver *ret;
57 struct list_head *list;
58 char *name;
59
60 ret = NULL;
61 name = NULL;
62
63 mutex_lock(&dsa_switch_drivers_mutex);
64 list_for_each(list, &dsa_switch_drivers) {
65 struct dsa_switch_driver *drv;
66
67 drv = list_entry(list, struct dsa_switch_driver, list);
68
69 name = drv->probe(host_dev, sw_addr);
70 if (name != NULL) {
71 ret = drv;
72 break;
73 }
74 }
75 mutex_unlock(&dsa_switch_drivers_mutex);
76
77 *_name = name;
78
79 return ret;
80}
81
82/* hwmon support ************************************************************/
83
84#ifdef CONFIG_NET_DSA_HWMON
85
86static ssize_t temp1_input_show(struct device *dev,
87 struct device_attribute *attr, char *buf)
88{
89 struct dsa_switch *ds = dev_get_drvdata(dev);
90 int temp, ret;
91
92 ret = ds->drv->get_temp(ds, &temp);
93 if (ret < 0)
94 return ret;
95
96 return sprintf(buf, "%d\n", temp * 1000);
97}
98static DEVICE_ATTR_RO(temp1_input);
99
100static ssize_t temp1_max_show(struct device *dev,
101 struct device_attribute *attr, char *buf)
102{
103 struct dsa_switch *ds = dev_get_drvdata(dev);
104 int temp, ret;
105
106 ret = ds->drv->get_temp_limit(ds, &temp);
107 if (ret < 0)
108 return ret;
109
110 return sprintf(buf, "%d\n", temp * 1000);
111}
112
113static ssize_t temp1_max_store(struct device *dev,
114 struct device_attribute *attr, const char *buf,
115 size_t count)
116{
117 struct dsa_switch *ds = dev_get_drvdata(dev);
118 int temp, ret;
119
120 ret = kstrtoint(buf, 0, &temp);
121 if (ret < 0)
122 return ret;
123
124 ret = ds->drv->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000));
125 if (ret < 0)
126 return ret;
127
128 return count;
129}
130static DEVICE_ATTR_RW(temp1_max);
131
132static ssize_t temp1_max_alarm_show(struct device *dev,
133 struct device_attribute *attr, char *buf)
134{
135 struct dsa_switch *ds = dev_get_drvdata(dev);
136 bool alarm;
137 int ret;
138
139 ret = ds->drv->get_temp_alarm(ds, &alarm);
140 if (ret < 0)
141 return ret;
142
143 return sprintf(buf, "%d\n", alarm);
144}
145static DEVICE_ATTR_RO(temp1_max_alarm);
146
147static struct attribute *dsa_hwmon_attrs[] = {
148 &dev_attr_temp1_input.attr, /* 0 */
149 &dev_attr_temp1_max.attr, /* 1 */
150 &dev_attr_temp1_max_alarm.attr, /* 2 */
151 NULL
152};
153
154static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj,
155 struct attribute *attr, int index)
156{
157 struct device *dev = container_of(kobj, struct device, kobj);
158 struct dsa_switch *ds = dev_get_drvdata(dev);
159 struct dsa_switch_driver *drv = ds->drv;
160 umode_t mode = attr->mode;
161
162 if (index == 1) {
163 if (!drv->get_temp_limit)
164 mode = 0;
165 else if (!drv->set_temp_limit)
166 mode &= ~S_IWUSR;
167 } else if (index == 2 && !drv->get_temp_alarm) {
168 mode = 0;
169 }
170 return mode;
171}
172
173static const struct attribute_group dsa_hwmon_group = {
174 .attrs = dsa_hwmon_attrs,
175 .is_visible = dsa_hwmon_attrs_visible,
176};
177__ATTRIBUTE_GROUPS(dsa_hwmon);
178
179#endif /* CONFIG_NET_DSA_HWMON */
180
181/* basic switch operations **************************************************/
182static int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct net_device *master)
183{
184 struct dsa_chip_data *cd = ds->pd;
185 struct device_node *port_dn;
186 struct phy_device *phydev;
187 int ret, port, mode;
188
189 for (port = 0; port < DSA_MAX_PORTS; port++) {
190 if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
191 continue;
192
193 port_dn = cd->port_dn[port];
194 if (of_phy_is_fixed_link(port_dn)) {
195 ret = of_phy_register_fixed_link(port_dn);
196 if (ret) {
197 netdev_err(master,
198 "failed to register fixed PHY\n");
199 return ret;
200 }
201 phydev = of_phy_find_device(port_dn);
202
203 mode = of_get_phy_mode(port_dn);
204 if (mode < 0)
205 mode = PHY_INTERFACE_MODE_NA;
206 phydev->interface = mode;
207
208 genphy_config_init(phydev);
209 genphy_read_status(phydev);
210 if (ds->drv->adjust_link)
211 ds->drv->adjust_link(ds, port, phydev);
212 }
213 }
214 return 0;
215}
216
217static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
218{
219 struct dsa_switch_driver *drv = ds->drv;
220 struct dsa_switch_tree *dst = ds->dst;
221 struct dsa_chip_data *pd = ds->pd;
222 bool valid_name_found = false;
223 int index = ds->index;
224 int i, ret;
225
226 /*
227 * Validate supplied switch configuration.
228 */
229 for (i = 0; i < DSA_MAX_PORTS; i++) {
230 char *name;
231
232 name = pd->port_names[i];
233 if (name == NULL)
234 continue;
235
236 if (!strcmp(name, "cpu")) {
237 if (dst->cpu_switch != -1) {
238 netdev_err(dst->master_netdev,
239 "multiple cpu ports?!\n");
240 ret = -EINVAL;
241 goto out;
242 }
243 dst->cpu_switch = index;
244 dst->cpu_port = i;
245 } else if (!strcmp(name, "dsa")) {
246 ds->dsa_port_mask |= 1 << i;
247 } else {
248 ds->phys_port_mask |= 1 << i;
249 }
250 valid_name_found = true;
251 }
252
253 if (!valid_name_found && i == DSA_MAX_PORTS) {
254 ret = -EINVAL;
255 goto out;
256 }
257
258 /* Make the built-in MII bus mask match the number of ports,
259 * switch drivers can override this later
260 */
261 ds->phys_mii_mask = ds->phys_port_mask;
262
263 /*
264 * If the CPU connects to this switch, set the switch tree
265 * tagging protocol to the preferred tagging format of this
266 * switch.
267 */
268 if (dst->cpu_switch == index) {
269 switch (ds->tag_protocol) {
270#ifdef CONFIG_NET_DSA_TAG_DSA
271 case DSA_TAG_PROTO_DSA:
272 dst->rcv = dsa_netdev_ops.rcv;
273 break;
274#endif
275#ifdef CONFIG_NET_DSA_TAG_EDSA
276 case DSA_TAG_PROTO_EDSA:
277 dst->rcv = edsa_netdev_ops.rcv;
278 break;
279#endif
280#ifdef CONFIG_NET_DSA_TAG_TRAILER
281 case DSA_TAG_PROTO_TRAILER:
282 dst->rcv = trailer_netdev_ops.rcv;
283 break;
284#endif
285#ifdef CONFIG_NET_DSA_TAG_BRCM
286 case DSA_TAG_PROTO_BRCM:
287 dst->rcv = brcm_netdev_ops.rcv;
288 break;
289#endif
290 case DSA_TAG_PROTO_NONE:
291 break;
292 default:
293 ret = -ENOPROTOOPT;
294 goto out;
295 }
296
297 dst->tag_protocol = ds->tag_protocol;
298 }
299
300 /*
301 * Do basic register setup.
302 */
303 ret = drv->setup(ds);
304 if (ret < 0)
305 goto out;
306
307 ret = drv->set_addr(ds, dst->master_netdev->dev_addr);
308 if (ret < 0)
309 goto out;
310
311 ds->slave_mii_bus = devm_mdiobus_alloc(parent);
312 if (ds->slave_mii_bus == NULL) {
313 ret = -ENOMEM;
314 goto out;
315 }
316 dsa_slave_mii_bus_init(ds);
317
318 ret = mdiobus_register(ds->slave_mii_bus);
319 if (ret < 0)
320 goto out;
321
322
323 /*
324 * Create network devices for physical switch ports.
325 */
326 for (i = 0; i < DSA_MAX_PORTS; i++) {
327 if (!(ds->phys_port_mask & (1 << i)))
328 continue;
329
330 ret = dsa_slave_create(ds, parent, i, pd->port_names[i]);
331 if (ret < 0) {
332 netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s): %d\n",
333 index, i, pd->port_names[i], ret);
334 ret = 0;
335 }
336 }
337
338 /* Perform configuration of the CPU and DSA ports */
339 ret = dsa_cpu_dsa_setup(ds, dst->master_netdev);
340 if (ret < 0) {
341 netdev_err(dst->master_netdev, "[%d] : can't configure CPU and DSA ports\n",
342 index);
343 ret = 0;
344 }
345
346#ifdef CONFIG_NET_DSA_HWMON
347 /* If the switch provides a temperature sensor,
348 * register with hardware monitoring subsystem.
349 * Treat registration error as non-fatal and ignore it.
350 */
351 if (drv->get_temp) {
352 const char *netname = netdev_name(dst->master_netdev);
353 char hname[IFNAMSIZ + 1];
354 int i, j;
355
356 /* Create valid hwmon 'name' attribute */
357 for (i = j = 0; i < IFNAMSIZ && netname[i]; i++) {
358 if (isalnum(netname[i]))
359 hname[j++] = netname[i];
360 }
361 hname[j] = '\0';
362 scnprintf(ds->hwmon_name, sizeof(ds->hwmon_name), "%s_dsa%d",
363 hname, index);
364 ds->hwmon_dev = hwmon_device_register_with_groups(NULL,
365 ds->hwmon_name, ds, dsa_hwmon_groups);
366 if (IS_ERR(ds->hwmon_dev))
367 ds->hwmon_dev = NULL;
368 }
369#endif /* CONFIG_NET_DSA_HWMON */
370
371 return ret;
372
373out:
374 return ret;
375}
376
377static struct dsa_switch *
378dsa_switch_setup(struct dsa_switch_tree *dst, int index,
379 struct device *parent, struct device *host_dev)
380{
381 struct dsa_chip_data *pd = dst->pd->chip + index;
382 struct dsa_switch_driver *drv;
383 struct dsa_switch *ds;
384 int ret;
385 char *name;
386
387 /*
388 * Probe for switch model.
389 */
390 drv = dsa_switch_probe(host_dev, pd->sw_addr, &name);
391 if (drv == NULL) {
392 netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n",
393 index);
394 return ERR_PTR(-EINVAL);
395 }
396 netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n",
397 index, name);
398
399
400 /*
401 * Allocate and initialise switch state.
402 */
403 ds = devm_kzalloc(parent, sizeof(*ds) + drv->priv_size, GFP_KERNEL);
404 if (ds == NULL)
405 return ERR_PTR(-ENOMEM);
406
407 ds->dst = dst;
408 ds->index = index;
409 ds->pd = pd;
410 ds->drv = drv;
411 ds->tag_protocol = drv->tag_protocol;
412 ds->master_dev = host_dev;
413
414 ret = dsa_switch_setup_one(ds, parent);
415 if (ret)
416 return ERR_PTR(ret);
417
418 return ds;
419}
420
421static void dsa_switch_destroy(struct dsa_switch *ds)
422{
423 struct device_node *port_dn;
424 struct phy_device *phydev;
425 struct dsa_chip_data *cd = ds->pd;
426 int port;
427
428#ifdef CONFIG_NET_DSA_HWMON
429 if (ds->hwmon_dev)
430 hwmon_device_unregister(ds->hwmon_dev);
431#endif
432
433 /* Destroy network devices for physical switch ports. */
434 for (port = 0; port < DSA_MAX_PORTS; port++) {
435 if (!(ds->phys_port_mask & (1 << port)))
436 continue;
437
438 if (!ds->ports[port])
439 continue;
440
441 dsa_slave_destroy(ds->ports[port]);
442 }
443
444 /* Remove any fixed link PHYs */
445 for (port = 0; port < DSA_MAX_PORTS; port++) {
446 port_dn = cd->port_dn[port];
447 if (of_phy_is_fixed_link(port_dn)) {
448 phydev = of_phy_find_device(port_dn);
449 if (phydev) {
450 phy_device_free(phydev);
451 of_node_put(port_dn);
452 fixed_phy_unregister(phydev);
453 }
454 }
455 }
456
457 mdiobus_unregister(ds->slave_mii_bus);
458}
459
460#ifdef CONFIG_PM_SLEEP
461static int dsa_switch_suspend(struct dsa_switch *ds)
462{
463 int i, ret = 0;
464
465 /* Suspend slave network devices */
466 for (i = 0; i < DSA_MAX_PORTS; i++) {
467 if (!dsa_is_port_initialized(ds, i))
468 continue;
469
470 ret = dsa_slave_suspend(ds->ports[i]);
471 if (ret)
472 return ret;
473 }
474
475 if (ds->drv->suspend)
476 ret = ds->drv->suspend(ds);
477
478 return ret;
479}
480
481static int dsa_switch_resume(struct dsa_switch *ds)
482{
483 int i, ret = 0;
484
485 if (ds->drv->resume)
486 ret = ds->drv->resume(ds);
487
488 if (ret)
489 return ret;
490
491 /* Resume slave network devices */
492 for (i = 0; i < DSA_MAX_PORTS; i++) {
493 if (!dsa_is_port_initialized(ds, i))
494 continue;
495
496 ret = dsa_slave_resume(ds->ports[i]);
497 if (ret)
498 return ret;
499 }
500
501 return 0;
502}
503#endif
504
505/* platform driver init and cleanup *****************************************/
506static int dev_is_class(struct device *dev, void *class)
507{
508 if (dev->class != NULL && !strcmp(dev->class->name, class))
509 return 1;
510
511 return 0;
512}
513
514static struct device *dev_find_class(struct device *parent, char *class)
515{
516 if (dev_is_class(parent, class)) {
517 get_device(parent);
518 return parent;
519 }
520
521 return device_find_child(parent, class, dev_is_class);
522}
523
524struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev)
525{
526 struct device *d;
527
528 d = dev_find_class(dev, "mdio_bus");
529 if (d != NULL) {
530 struct mii_bus *bus;
531
532 bus = to_mii_bus(d);
533 put_device(d);
534
535 return bus;
536 }
537
538 return NULL;
539}
540EXPORT_SYMBOL_GPL(dsa_host_dev_to_mii_bus);
541
542static struct net_device *dev_to_net_device(struct device *dev)
543{
544 struct device *d;
545
546 d = dev_find_class(dev, "net");
547 if (d != NULL) {
548 struct net_device *nd;
549
550 nd = to_net_dev(d);
551 dev_hold(nd);
552 put_device(d);
553
554 return nd;
555 }
556
557 return NULL;
558}
559
560#ifdef CONFIG_OF
561static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
562 struct dsa_chip_data *cd,
563 int chip_index, int port_index,
564 struct device_node *link)
565{
566 const __be32 *reg;
567 int link_sw_addr;
568 struct device_node *parent_sw;
569 int len;
570
571 parent_sw = of_get_parent(link);
572 if (!parent_sw)
573 return -EINVAL;
574
575 reg = of_get_property(parent_sw, "reg", &len);
576 if (!reg || (len != sizeof(*reg) * 2))
577 return -EINVAL;
578
579 /*
580 * Get the destination switch number from the second field of its 'reg'
581 * property, i.e. for "reg = <0x19 1>" sw_addr is '1'.
582 */
583 link_sw_addr = be32_to_cpup(reg + 1);
584
585 if (link_sw_addr >= pd->nr_chips)
586 return -EINVAL;
587
588 /* First time routing table allocation */
589 if (!cd->rtable) {
590 cd->rtable = kmalloc_array(pd->nr_chips, sizeof(s8),
591 GFP_KERNEL);
592 if (!cd->rtable)
593 return -ENOMEM;
594
595 /* default to no valid uplink/downlink */
596 memset(cd->rtable, -1, pd->nr_chips * sizeof(s8));
597 }
598
599 cd->rtable[link_sw_addr] = port_index;
600
601 return 0;
602}
603
604static int dsa_of_probe_links(struct dsa_platform_data *pd,
605 struct dsa_chip_data *cd,
606 int chip_index, int port_index,
607 struct device_node *port,
608 const char *port_name)
609{
610 struct device_node *link;
611 int link_index;
612 int ret;
613
614 for (link_index = 0;; link_index++) {
615 link = of_parse_phandle(port, "link", link_index);
616 if (!link)
617 break;
618
619 if (!strcmp(port_name, "dsa") && pd->nr_chips > 1) {
620 ret = dsa_of_setup_routing_table(pd, cd, chip_index,
621 port_index, link);
622 if (ret)
623 return ret;
624 }
625 }
626 return 0;
627}
628
629static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
630{
631 int i;
632 int port_index;
633
634 for (i = 0; i < pd->nr_chips; i++) {
635 port_index = 0;
636 while (port_index < DSA_MAX_PORTS) {
637 kfree(pd->chip[i].port_names[port_index]);
638 port_index++;
639 }
640 kfree(pd->chip[i].rtable);
641
642 /* Drop our reference to the MDIO bus device */
643 if (pd->chip[i].host_dev)
644 put_device(pd->chip[i].host_dev);
645 }
646 kfree(pd->chip);
647}
648
649static int dsa_of_probe(struct device *dev)
650{
651 struct device_node *np = dev->of_node;
652 struct device_node *child, *mdio, *ethernet, *port;
653 struct mii_bus *mdio_bus, *mdio_bus_switch;
654 struct net_device *ethernet_dev;
655 struct dsa_platform_data *pd;
656 struct dsa_chip_data *cd;
657 const char *port_name;
658 int chip_index, port_index;
659 const unsigned int *sw_addr, *port_reg;
660 int gpio;
661 enum of_gpio_flags of_flags;
662 unsigned long flags;
663 u32 eeprom_len;
664 int ret;
665
666 mdio = of_parse_phandle(np, "dsa,mii-bus", 0);
667 if (!mdio)
668 return -EINVAL;
669
670 mdio_bus = of_mdio_find_bus(mdio);
671 if (!mdio_bus)
672 return -EPROBE_DEFER;
673
674 ethernet = of_parse_phandle(np, "dsa,ethernet", 0);
675 if (!ethernet) {
676 ret = -EINVAL;
677 goto out_put_mdio;
678 }
679
680 ethernet_dev = of_find_net_device_by_node(ethernet);
681 if (!ethernet_dev) {
682 ret = -EPROBE_DEFER;
683 goto out_put_mdio;
684 }
685
686 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
687 if (!pd) {
688 ret = -ENOMEM;
689 goto out_put_ethernet;
690 }
691
692 dev->platform_data = pd;
693 pd->of_netdev = ethernet_dev;
694 pd->nr_chips = of_get_available_child_count(np);
695 if (pd->nr_chips > DSA_MAX_SWITCHES)
696 pd->nr_chips = DSA_MAX_SWITCHES;
697
698 pd->chip = kcalloc(pd->nr_chips, sizeof(struct dsa_chip_data),
699 GFP_KERNEL);
700 if (!pd->chip) {
701 ret = -ENOMEM;
702 goto out_free;
703 }
704
705 chip_index = -1;
706 for_each_available_child_of_node(np, child) {
707 chip_index++;
708 cd = &pd->chip[chip_index];
709
710 cd->of_node = child;
711
712 /* When assigning the host device, increment its refcount */
713 cd->host_dev = get_device(&mdio_bus->dev);
714
715 sw_addr = of_get_property(child, "reg", NULL);
716 if (!sw_addr)
717 continue;
718
719 cd->sw_addr = be32_to_cpup(sw_addr);
720 if (cd->sw_addr >= PHY_MAX_ADDR)
721 continue;
722
723 if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
724 cd->eeprom_len = eeprom_len;
725
726 mdio = of_parse_phandle(child, "mii-bus", 0);
727 if (mdio) {
728 mdio_bus_switch = of_mdio_find_bus(mdio);
729 if (!mdio_bus_switch) {
730 ret = -EPROBE_DEFER;
731 goto out_free_chip;
732 }
733
734 /* Drop the mdio_bus device ref, replacing the host
735 * device with the mdio_bus_switch device, keeping
736 * the refcount from of_mdio_find_bus() above.
737 */
738 put_device(cd->host_dev);
739 cd->host_dev = &mdio_bus_switch->dev;
740 }
741 gpio = of_get_named_gpio_flags(child, "reset-gpios", 0,
742 &of_flags);
743 if (gpio_is_valid(gpio)) {
744 flags = (of_flags == OF_GPIO_ACTIVE_LOW ?
745 GPIOF_ACTIVE_LOW : 0);
746 ret = devm_gpio_request_one(dev, gpio, flags,
747 "switch_reset");
748 if (ret)
749 goto out_free_chip;
750
751 cd->reset = gpio_to_desc(gpio);
752 gpiod_direction_output(cd->reset, 0);
753 }
754
755 for_each_available_child_of_node(child, port) {
756 port_reg = of_get_property(port, "reg", NULL);
757 if (!port_reg)
758 continue;
759
760 port_index = be32_to_cpup(port_reg);
761 if (port_index >= DSA_MAX_PORTS)
762 break;
763
764 port_name = of_get_property(port, "label", NULL);
765 if (!port_name)
766 continue;
767
768 cd->port_dn[port_index] = port;
769
770 cd->port_names[port_index] = kstrdup(port_name,
771 GFP_KERNEL);
772 if (!cd->port_names[port_index]) {
773 ret = -ENOMEM;
774 goto out_free_chip;
775 }
776
777 ret = dsa_of_probe_links(pd, cd, chip_index,
778 port_index, port, port_name);
779 if (ret)
780 goto out_free_chip;
781
782 }
783 }
784
785 /* The individual chips hold their own refcount on the mdio bus,
786 * so drop ours */
787 put_device(&mdio_bus->dev);
788
789 return 0;
790
791out_free_chip:
792 dsa_of_free_platform_data(pd);
793out_free:
794 kfree(pd);
795 dev->platform_data = NULL;
796out_put_ethernet:
797 put_device(&ethernet_dev->dev);
798out_put_mdio:
799 put_device(&mdio_bus->dev);
800 return ret;
801}
802
803static void dsa_of_remove(struct device *dev)
804{
805 struct dsa_platform_data *pd = dev->platform_data;
806
807 if (!dev->of_node)
808 return;
809
810 dsa_of_free_platform_data(pd);
811 put_device(&pd->of_netdev->dev);
812 kfree(pd);
813}
814#else
815static inline int dsa_of_probe(struct device *dev)
816{
817 return 0;
818}
819
820static inline void dsa_of_remove(struct device *dev)
821{
822}
823#endif
824
825static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
826 struct device *parent, struct dsa_platform_data *pd)
827{
828 int i;
829 unsigned configured = 0;
830
831 dst->pd = pd;
832 dst->master_netdev = dev;
833 dst->cpu_switch = -1;
834 dst->cpu_port = -1;
835
836 for (i = 0; i < pd->nr_chips; i++) {
837 struct dsa_switch *ds;
838
839 ds = dsa_switch_setup(dst, i, parent, pd->chip[i].host_dev);
840 if (IS_ERR(ds)) {
841 netdev_err(dev, "[%d]: couldn't create dsa switch instance (error %ld)\n",
842 i, PTR_ERR(ds));
843 continue;
844 }
845
846 dst->ds[i] = ds;
847
848 ++configured;
849 }
850
851 /*
852 * If no switch was found, exit cleanly
853 */
854 if (!configured)
855 return -EPROBE_DEFER;
856
857 /*
858 * If we use a tagging format that doesn't have an ethertype
859 * field, make sure that all packets from this point on get
860 * sent to the tag format's receive function.
861 */
862 wmb();
863 dev->dsa_ptr = (void *)dst;
864
865 return 0;
866}
867
868static int dsa_probe(struct platform_device *pdev)
869{
870 struct dsa_platform_data *pd = pdev->dev.platform_data;
871 struct net_device *dev;
872 struct dsa_switch_tree *dst;
873 int ret;
874
875 pr_notice_once("Distributed Switch Architecture driver version %s\n",
876 dsa_driver_version);
877
878 if (pdev->dev.of_node) {
879 ret = dsa_of_probe(&pdev->dev);
880 if (ret)
881 return ret;
882
883 pd = pdev->dev.platform_data;
884 }
885
886 if (pd == NULL || (pd->netdev == NULL && pd->of_netdev == NULL))
887 return -EINVAL;
888
889 if (pd->of_netdev) {
890 dev = pd->of_netdev;
891 dev_hold(dev);
892 } else {
893 dev = dev_to_net_device(pd->netdev);
894 }
895 if (dev == NULL) {
896 ret = -EPROBE_DEFER;
897 goto out;
898 }
899
900 if (dev->dsa_ptr != NULL) {
901 dev_put(dev);
902 ret = -EEXIST;
903 goto out;
904 }
905
906 dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
907 if (dst == NULL) {
908 dev_put(dev);
909 ret = -ENOMEM;
910 goto out;
911 }
912
913 platform_set_drvdata(pdev, dst);
914
915 ret = dsa_setup_dst(dst, dev, &pdev->dev, pd);
916 if (ret) {
917 dev_put(dev);
918 goto out;
919 }
920
921 return 0;
922
923out:
924 dsa_of_remove(&pdev->dev);
925
926 return ret;
927}
928
929static void dsa_remove_dst(struct dsa_switch_tree *dst)
930{
931 int i;
932
933 dst->master_netdev->dsa_ptr = NULL;
934
935 /* If we used a tagging format that doesn't have an ethertype
936 * field, make sure that all packets from this point get sent
937 * without the tag and go through the regular receive path.
938 */
939 wmb();
940
941 for (i = 0; i < dst->pd->nr_chips; i++) {
942 struct dsa_switch *ds = dst->ds[i];
943
944 if (ds)
945 dsa_switch_destroy(ds);
946 }
947
948 dev_put(dst->master_netdev);
949}
950
951static int dsa_remove(struct platform_device *pdev)
952{
953 struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
954
955 dsa_remove_dst(dst);
956 dsa_of_remove(&pdev->dev);
957
958 return 0;
959}
960
961static void dsa_shutdown(struct platform_device *pdev)
962{
963}
964
965static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
966 struct packet_type *pt, struct net_device *orig_dev)
967{
968 struct dsa_switch_tree *dst = dev->dsa_ptr;
969
970 if (unlikely(dst == NULL)) {
971 kfree_skb(skb);
972 return 0;
973 }
974
975 return dst->rcv(skb, dev, pt, orig_dev);
976}
977
978static struct packet_type dsa_pack_type __read_mostly = {
979 .type = cpu_to_be16(ETH_P_XDSA),
980 .func = dsa_switch_rcv,
981};
982
983static struct notifier_block dsa_netdevice_nb __read_mostly = {
984 .notifier_call = dsa_slave_netdevice_event,
985};
986
987#ifdef CONFIG_PM_SLEEP
988static int dsa_suspend(struct device *d)
989{
990 struct platform_device *pdev = to_platform_device(d);
991 struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
992 int i, ret = 0;
993
994 for (i = 0; i < dst->pd->nr_chips; i++) {
995 struct dsa_switch *ds = dst->ds[i];
996
997 if (ds != NULL)
998 ret = dsa_switch_suspend(ds);
999 }
1000
1001 return ret;
1002}
1003
1004static int dsa_resume(struct device *d)
1005{
1006 struct platform_device *pdev = to_platform_device(d);
1007 struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
1008 int i, ret = 0;
1009
1010 for (i = 0; i < dst->pd->nr_chips; i++) {
1011 struct dsa_switch *ds = dst->ds[i];
1012
1013 if (ds != NULL)
1014 ret = dsa_switch_resume(ds);
1015 }
1016
1017 return ret;
1018}
1019#endif
1020
1021static SIMPLE_DEV_PM_OPS(dsa_pm_ops, dsa_suspend, dsa_resume);
1022
1023static const struct of_device_id dsa_of_match_table[] = {
1024 { .compatible = "brcm,bcm7445-switch-v4.0" },
1025 { .compatible = "marvell,dsa", },
1026 {}
1027};
1028MODULE_DEVICE_TABLE(of, dsa_of_match_table);
1029
1030static struct platform_driver dsa_driver = {
1031 .probe = dsa_probe,
1032 .remove = dsa_remove,
1033 .shutdown = dsa_shutdown,
1034 .driver = {
1035 .name = "dsa",
1036 .of_match_table = dsa_of_match_table,
1037 .pm = &dsa_pm_ops,
1038 },
1039};
1040
1041static int __init dsa_init_module(void)
1042{
1043 int rc;
1044
1045 register_netdevice_notifier(&dsa_netdevice_nb);
1046
1047 rc = platform_driver_register(&dsa_driver);
1048 if (rc)
1049 return rc;
1050
1051 dev_add_pack(&dsa_pack_type);
1052
1053 return 0;
1054}
1055module_init(dsa_init_module);
1056
1057static void __exit dsa_cleanup_module(void)
1058{
1059 unregister_netdevice_notifier(&dsa_netdevice_nb);
1060 dev_remove_pack(&dsa_pack_type);
1061 platform_driver_unregister(&dsa_driver);
1062}
1063module_exit(dsa_cleanup_module);
1064
1065MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
1066MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
1067MODULE_LICENSE("GPL");
1068MODULE_ALIAS("platform:dsa");