// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a master device, switching frames via its switch fabric CPU port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/dsa.h>

#include "dsa.h"
#include "master.h"
#include "port.h"
#include "tag.h"

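/* The register dump exposed through the master's ethtool ops is the master's
 * own dump, followed by a synthetic drvinfo/regs header and the CPU port's
 * registers, so the reported length must cover all three parts.
 */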
static int dsa_master_get_regs_len(struct net_device *dev)
{
        struct dsa_port *cpu_dp = dev->dsa_ptr;
        const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
        struct dsa_switch *ds = cpu_dp->ds;
        int port = cpu_dp->index;
        int ret = 0;
        int len;

        if (ops->get_regs_len) {
                len = ops->get_regs_len(dev);
                if (len < 0)
                        return len;
                ret += len;
        }

        ret += sizeof(struct ethtool_drvinfo);
        ret += sizeof(struct ethtool_regs);

        if (ds->ops->get_regs_len) {
                len = ds->ops->get_regs_len(ds, port);
                if (len < 0)
                        return len;
                ret += len;
        }

        return ret;
}

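/* Dump the master's own registers first (if it implements get_regs), then
 * append a drvinfo marker identifying the "dsa" part and the CPU port's
 * registers as reported by the switch driver.
 */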
static void dsa_master_get_regs(struct net_device *dev,
                                struct ethtool_regs *regs, void *data)
{
        struct dsa_port *cpu_dp = dev->dsa_ptr;
        const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
        struct dsa_switch *ds = cpu_dp->ds;
        struct ethtool_drvinfo *cpu_info;
        struct ethtool_regs *cpu_regs;
        int port = cpu_dp->index;
        int len;

        if (ops->get_regs_len && ops->get_regs) {
                len = ops->get_regs_len(dev);
                if (len < 0)
                        return;
                regs->len = len;
                ops->get_regs(dev, regs, data);
                data += regs->len;
        }

        cpu_info = (struct ethtool_drvinfo *)data;
        strscpy(cpu_info->driver, "dsa", sizeof(cpu_info->driver));
        data += sizeof(*cpu_info);
        cpu_regs = (struct ethtool_regs *)data;
        data += sizeof(*cpu_regs);

        if (ds->ops->get_regs_len && ds->ops->get_regs) {
                len = ds->ops->get_regs_len(ds, port);
                if (len < 0)
                        return;
                cpu_regs->len = len;
                ds->ops->get_regs(ds, port, cpu_regs, data);
        }
}

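/* Report the master's own statistics first, then append the CPU port's
 * hardware counters right after them in the same data array.
 */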
static void dsa_master_get_ethtool_stats(struct net_device *dev,
                                         struct ethtool_stats *stats,
                                         uint64_t *data)
{
        struct dsa_port *cpu_dp = dev->dsa_ptr;
        const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
        struct dsa_switch *ds = cpu_dp->ds;
        int port = cpu_dp->index;
        int count = 0;

        if (ops->get_sset_count && ops->get_ethtool_stats) {
                count = ops->get_sset_count(dev, ETH_SS_STATS);
                ops->get_ethtool_stats(dev, stats, data);
        }

        if (ds->ops->get_ethtool_stats)
                ds->ops->get_ethtool_stats(ds, port, data + count);
}

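/* Same layout for PHY statistics: the master's PHY counters (via phylib or
 * its own ethtool ops) come first, followed by the CPU port's counters.
 */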
static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
                                             struct ethtool_stats *stats,
                                             uint64_t *data)
{
        struct dsa_port *cpu_dp = dev->dsa_ptr;
        const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
        struct dsa_switch *ds = cpu_dp->ds;
        int port = cpu_dp->index;
        int count = 0;

        if (dev->phydev && !ops->get_ethtool_phy_stats) {
                count = phy_ethtool_get_sset_count(dev->phydev);
                if (count >= 0)
                        phy_ethtool_get_stats(dev->phydev, stats, data);
        } else if (ops->get_sset_count && ops->get_ethtool_phy_stats) {
                count = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
                ops->get_ethtool_phy_stats(dev, stats, data);
        }

        if (count < 0)
                count = 0;

        if (ds->ops->get_ethtool_phy_stats)
                ds->ops->get_ethtool_phy_stats(ds, port, data + count);
}

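/* The string set count is the sum of the master's own count and the CPU
 * port's count, matching the concatenated stats and strings layout above.
 */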
static int dsa_master_get_sset_count(struct net_device *dev, int sset)
{
        struct dsa_port *cpu_dp = dev->dsa_ptr;
        const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
        struct dsa_switch *ds = cpu_dp->ds;
        int count = 0;

        if (sset == ETH_SS_PHY_STATS && dev->phydev &&
            !ops->get_ethtool_phy_stats)
                count = phy_ethtool_get_sset_count(dev->phydev);
        else if (ops->get_sset_count)
                count = ops->get_sset_count(dev, sset);

        if (count < 0)
                count = 0;

        if (ds->ops->get_sset_count)
                count += ds->ops->get_sset_count(ds, cpu_dp->index, sset);

        return count;
}

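/* Emit the master's own strings, then the CPU port's strings prefixed with
 * "pXX_" (XX being the CPU port index) so the two sets can be told apart in
 * the combined output.
 */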
static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
                                   uint8_t *data)
{
        struct dsa_port *cpu_dp = dev->dsa_ptr;
        const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
        struct dsa_switch *ds = cpu_dp->ds;
        int port = cpu_dp->index;
        int len = ETH_GSTRING_LEN;
        int mcount = 0, count, i;
        uint8_t pfx[4];
        uint8_t *ndata;

        snprintf(pfx, sizeof(pfx), "p%.2d", port);
        /* We do not want to be NUL-terminated, since this is a prefix */
        pfx[sizeof(pfx) - 1] = '_';

        if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
            !ops->get_ethtool_phy_stats) {
                mcount = phy_ethtool_get_sset_count(dev->phydev);
                if (mcount < 0)
                        mcount = 0;
                else
                        phy_ethtool_get_strings(dev->phydev, data);
        } else if (ops->get_sset_count && ops->get_strings) {
                mcount = ops->get_sset_count(dev, stringset);
                if (mcount < 0)
                        mcount = 0;
                ops->get_strings(dev, stringset, data);
        }

        if (ds->ops->get_strings) {
                ndata = data + mcount * len;
                /* ds->ops->get_strings() copies ETH_GSTRING_LEN bytes per
                 * string; mangle the output afterwards to prepend the CPU
                 * port prefix constructed above.
                 */
                ds->ops->get_strings(ds, port, stringset, ndata);
                count = ds->ops->get_sset_count(ds, port, stringset);
                if (count < 0)
                        return;
                for (i = 0; i < count; i++) {
                        memmove(ndata + (i * len + sizeof(pfx)),
                                ndata + i * len, len - sizeof(pfx));
                        memcpy(ndata + i * len, pfx, sizeof(pfx));
                }
        }
}

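/* Hardware timestamping ioctls are denied on the master when at least one
 * port in the tree supports timestamping the request itself (see
 * dsa_port_supports_hwtstamp()); all other ioctls are passed through to the
 * master's own ndo_eth_ioctl().
 */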
static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct dsa_port *cpu_dp = dev->dsa_ptr;
        struct dsa_switch *ds = cpu_dp->ds;
        struct dsa_switch_tree *dst;
        int err = -EOPNOTSUPP;
        struct dsa_port *dp;

        dst = ds->dst;

        switch (cmd) {
        case SIOCGHWTSTAMP:
        case SIOCSHWTSTAMP:
                /* Deny PTP operations on master if there is at least one
                 * switch in the tree that is PTP capable.
                 */
                list_for_each_entry(dp, &dst->ports, list)
                        if (dsa_port_supports_hwtstamp(dp, ifr))
                                return -EBUSY;
                break;
        }

        if (dev->netdev_ops->ndo_eth_ioctl)
                err = dev->netdev_ops->ndo_eth_ioctl(dev, ifr, cmd);

        return err;
}

static const struct dsa_netdevice_ops dsa_netdev_ops = {
        .ndo_eth_ioctl = dsa_master_ioctl,
};

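/* Overlay DSA's ethtool operations on top of the master's own, saving the
 * originals so they can be restored on teardown; LAG masters are left
 * untouched.
 */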
static int dsa_master_ethtool_setup(struct net_device *dev)
{
        struct dsa_port *cpu_dp = dev->dsa_ptr;
        struct dsa_switch *ds = cpu_dp->ds;
        struct ethtool_ops *ops;

        if (netif_is_lag_master(dev))
                return 0;

        ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        cpu_dp->orig_ethtool_ops = dev->ethtool_ops;
        if (cpu_dp->orig_ethtool_ops)
                memcpy(ops, cpu_dp->orig_ethtool_ops, sizeof(*ops));

        ops->get_regs_len = dsa_master_get_regs_len;
        ops->get_regs = dsa_master_get_regs;
        ops->get_sset_count = dsa_master_get_sset_count;
        ops->get_ethtool_stats = dsa_master_get_ethtool_stats;
        ops->get_strings = dsa_master_get_strings;
        ops->get_ethtool_phy_stats = dsa_master_get_ethtool_phy_stats;

        dev->ethtool_ops = ops;

        return 0;
}

static void dsa_master_ethtool_teardown(struct net_device *dev)
{
        struct dsa_port *cpu_dp = dev->dsa_ptr;

        if (netif_is_lag_master(dev))
                return;

        dev->ethtool_ops = cpu_dp->orig_ethtool_ops;
        cpu_dp->orig_ethtool_ops = NULL;
}

static void dsa_netdev_ops_set(struct net_device *dev,
                               const struct dsa_netdevice_ops *ops)
{
        if (netif_is_lag_master(dev))
                return;

        dev->dsa_ptr->netdev_ops = ops;
}

/* Keep the master always promiscuous if the tagging protocol requires that
 * (garbles MAC DA) or if it doesn't support unicast filtering, case in which
 * it would revert to promiscuous mode as soon as we call dev_uc_add() on it
 * anyway.
 */
static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
{
        const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;

        if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
                return;

        ASSERT_RTNL();

        dev_set_promiscuity(dev, inc);
}

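/* The "dsa/tagging" sysfs attribute on the master reports the tagging
 * protocol currently in use and, when written with a tagger name, attempts
 * to switch the whole tree over to that protocol.
 */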
static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
                            char *buf)
{
        struct net_device *dev = to_net_dev(d);
        struct dsa_port *cpu_dp = dev->dsa_ptr;

        return sprintf(buf, "%s\n",
                       dsa_tag_protocol_to_str(cpu_dp->tag_ops));
}

static ssize_t tagging_store(struct device *d, struct device_attribute *attr,
                             const char *buf, size_t count)
{
        const struct dsa_device_ops *new_tag_ops, *old_tag_ops;
        const char *end = strchrnul(buf, '\n'), *name;
        struct net_device *dev = to_net_dev(d);
        struct dsa_port *cpu_dp = dev->dsa_ptr;
        size_t len = end - buf;
        int err;

        /* Empty string passed */
        if (!len)
                return -ENOPROTOOPT;

        name = kstrndup(buf, len, GFP_KERNEL);
        if (!name)
                return -ENOMEM;

        old_tag_ops = cpu_dp->tag_ops;
        new_tag_ops = dsa_tag_driver_get_by_name(name);
        kfree(name);
        /* Bad tagger name? */
        if (IS_ERR(new_tag_ops))
                return PTR_ERR(new_tag_ops);

        if (new_tag_ops == old_tag_ops)
                /* Drop the temporarily held duplicate reference, since
                 * the DSA switch tree uses this tagger.
                 */
                goto out;

        err = dsa_tree_change_tag_proto(cpu_dp->ds->dst, new_tag_ops,
                                        old_tag_ops);
        if (err) {
                /* On failure the old tagger is restored, so we don't need the
                 * driver for the new one.
                 */
                dsa_tag_driver_put(new_tag_ops);
                return err;
        }

        /* On success we no longer need the module for the old tagging
         * protocol.
         */
out:
        dsa_tag_driver_put(old_tag_ops);
        return count;
}
static DEVICE_ATTR_RW(tagging);

static struct attribute *dsa_slave_attrs[] = {
        &dev_attr_tagging.attr,
        NULL
};

static const struct attribute_group dsa_group = {
        .name = "dsa",
        .attrs = dsa_slave_attrs,
};

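/* Restore the standard Ethernet MTU (ETH_DATA_LEN) on the master, dropping
 * the extra room that was reserved for the DSA tagging overhead.
 */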
static void dsa_master_reset_mtu(struct net_device *dev)
{
        int err;

        err = dev_set_mtu(dev, ETH_DATA_LEN);
        if (err)
                netdev_dbg(dev,
                           "Unable to reset MTU to exclude DSA overheads\n");
}

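/* Bind a master interface to its CPU port: link the devices, grow the MTU to
 * fit the tagging overhead, publish dev->dsa_ptr so the tagger's receive hook
 * takes effect, and install the ethtool/netdev overlays and the "dsa" sysfs
 * group.
 */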
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
        const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
        struct dsa_switch *ds = cpu_dp->ds;
        struct device_link *consumer_link;
        int mtu, ret;

        mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);

        /* The DSA master must use SET_NETDEV_DEV for this to work. */
        if (!netif_is_lag_master(dev)) {
                consumer_link = device_link_add(ds->dev, dev->dev.parent,
                                                DL_FLAG_AUTOREMOVE_CONSUMER);
                if (!consumer_link)
                        netdev_err(dev,
                                   "Failed to create a device link to DSA switch %s\n",
                                   dev_name(ds->dev));
        }

        /* The switch driver may not implement ->port_change_mtu(), case in
         * which dsa_slave_change_mtu() will not update the master MTU either,
         * so we need to do that here.
         */
        ret = dev_set_mtu(dev, mtu);
        if (ret)
                netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
                            ret, mtu);

        /* If we use a tagging format that doesn't have an ethertype
         * field, make sure that all packets from this point on get
         * sent to the tag format's receive function.
         */
        wmb();

        dev->dsa_ptr = cpu_dp;

        dsa_master_set_promiscuity(dev, 1);

        ret = dsa_master_ethtool_setup(dev);
        if (ret)
                goto out_err_reset_promisc;

        dsa_netdev_ops_set(dev, &dsa_netdev_ops);

        ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
        if (ret)
                goto out_err_ndo_teardown;

        return ret;

out_err_ndo_teardown:
        dsa_netdev_ops_set(dev, NULL);
        dsa_master_ethtool_teardown(dev);
out_err_reset_promisc:
        dsa_master_set_promiscuity(dev, -1);
        return ret;
}

void dsa_master_teardown(struct net_device *dev)
{
        sysfs_remove_group(&dev->dev.kobj, &dsa_group);
        dsa_netdev_ops_set(dev, NULL);
        dsa_master_ethtool_teardown(dev);
        dsa_master_reset_mtu(dev);
        dsa_master_set_promiscuity(dev, -1);

        dev->dsa_ptr = NULL;

        /* If we used a tagging format that doesn't have an ethertype
         * field, make sure that all packets from this point get sent
         * without the tag and go through the regular receive path.
         */
        wmb();
}

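/* Set up a LAG device as a DSA master: do the regular master setup once for
 * the LAG (unless another CPU port already did it), then have this CPU port
 * join the LAG.
 */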
int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
                         struct netdev_lag_upper_info *uinfo,
                         struct netlink_ext_ack *extack)
{
        bool master_setup = false;
        int err;

        if (!netdev_uses_dsa(lag_dev)) {
                err = dsa_master_setup(lag_dev, cpu_dp);
                if (err)
                        return err;

                master_setup = true;
        }

        err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack);
        if (err) {
                if (extack && !extack->_msg)
                        NL_SET_ERR_MSG_MOD(extack,
                                           "CPU port failed to join LAG");
                goto out_master_teardown;
        }

        return 0;

out_master_teardown:
        if (master_setup)
                dsa_master_teardown(lag_dev);
        return err;
}

/* Tear down a master if there isn't any other user port on it,
 * optionally also destroying LAG information.
 */
void dsa_master_lag_teardown(struct net_device *lag_dev,
                             struct dsa_port *cpu_dp)
{
        struct net_device *upper;
        struct list_head *iter;

        dsa_port_lag_leave(cpu_dp, lag_dev);

        netdev_for_each_upper_dev_rcu(lag_dev, upper, iter)
                if (dsa_slave_dev_check(upper))
                        return;

        dsa_master_teardown(lag_dev);
}