/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

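/* A netdev is usable from sysfs until it starts unregistering, i.e. while
 * reg_state is still NETREG_UNINITIALIZED or NETREG_REGISTERED.
 */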
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		if ((ret = (*set)(netdev, new)) == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

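/* name_assign_type records how the interface got its name (enumerated,
 * predictable, user-supplied, ...).  It is deliberately unreadable while
 * still NET_NAME_UNKNOWN so userspace can tell "never set" apart from any
 * real value.
 */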
static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

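/* Writing 0/1 to "carrier" forces the carrier state down/up through
 * dev_change_carrier(); this only makes sense while the device is running.
 */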
static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool) new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	if (netif_running(netdev)) {
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

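/* speed and duplex come from ethtool and must be read under RTNL.
 * rtnl_trylock() plus restart_syscall() keeps a sysfs read from sleeping
 * on a contended lock; the syscall is simply restarted until it succeeds.
 */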
static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_changes));
}
static DEVICE_ATTR_RO(carrier_changes);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int) new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int) new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	dev->tx_queue_len = new_len;
	return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	dev->gro_flush_timeout = val;
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int) new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool) proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sprintf(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct switchdev_attr attr = {
			.orig_dev = netdev,
			.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
			.flags = SWITCHDEV_F_NO_RECURSE,
		};

		ret = switchdev_port_attr_get(netdev, &attr);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len,
				      attr.u.ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static struct attribute *net_class_attrs[] = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) container_of(_attr,			\
	struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
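/* rps_cpus: hex bitmap of the CPUs allowed to run receive packet steering
 * for this rx queue.  An empty map leaves RPS disabled on the queue.
 */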
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

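/* Replace the queue's rps_map with one built from a user-supplied CPU
 * bitmap.  The old map is swapped out under rps_map_mutex and freed after
 * an RCU grace period; the rps_needed static key counts installed maps so
 * the receive fast path can skip RPS entirely when none exist.
 */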
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i)
		map->len = i;
	else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map)
		static_key_slow_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

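/* Resize the per-queue RFS flow table.  The requested count is rounded up
 * to a power of two (so table->mask can be used for cheap indexing), the
 * new table is installed under rps_dev_flow_lock, and the old one is
 * vfree'd from an RCU callback.
 */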
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else
		table = NULL;

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	       show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};

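/* kobject release: nothing can reach the queue any more, so its RPS state
 * may be torn down with rcu_dereference_protected(..., 1).  The device
 * reference taken in rx_queue_add_kobject() is dropped last.
 */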
static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
	.namespace = rx_queue_namespace
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto exit;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto exit;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

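/* Bring the rx-queue kobject population from old_num to new_num.  Growing
 * adds "rx-<n>" kobjects; if one fails, the queues added so far are torn
 * down again.  Shrinking removes group files and drops kobject references.
 */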
int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(&dev->_rx[i].kobj,
					   dev->sysfs_rx_queue_group);
		kobject_put(&dev->_rx[i].kobj);
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,		\
	struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attribute,
				  char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, "%lu", trans_timeout);
}

#ifdef CONFIG_XPS
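/* A netdev_queue carries no index of its own; recover it from the queue's
 * position in the device's _tx array.
 */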
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t show_tx_maxrate(struct netdev_queue *queue,
			       struct netdev_queue_attribute *attribute,
			       char *buf)
{
	return sprintf(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t set_tx_maxrate(struct netdev_queue *queue,
			      struct netdev_queue_attribute *attribute,
			      const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	int err, index = get_netdev_queue_index(queue);
	u32 rate = 0;

	err = kstrtou32(buf, 10, &rate);
	if (err < 0)
		return err;

	if (!rtnl_trylock())
		return restart_syscall();

	err = -EOPNOTSUPP;
	if (dev->netdev_ops->ndo_set_tx_maxrate)
		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

	rtnl_unlock();
	if (!err) {
		queue->tx_maxrate = rate;
		return len;
	}
	return err;
}

static struct netdev_queue_attribute queue_tx_maxrate =
	__ATTR(tx_maxrate, S_IRUGO | S_IWUSR,
	       show_tx_maxrate, set_tx_maxrate);
#endif

static struct netdev_queue_attribute queue_trans_timeout =
	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
		value = DQL_MAX_LIMIT;
	else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attr,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attribute,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
	       bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attr,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 struct netdev_queue_attribute *attr,	\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				struct netdev_queue_attribute *attr,	\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =	\
	__ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,		\
	       bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

static struct attribute *dql_attrs[] = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
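/* xps_cpus: report which CPUs are mapped to this tx queue by walking the
 * device's xps_maps in reverse (per-CPU map -> queue index -> CPU mask).
 */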
static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;
				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
	__ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif	/* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] = {
	&queue_trans_timeout.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
	&queue_tx_maxrate.attr,
#endif
	NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
	.namespace = netdev_queue_namespace,
};

static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto exit;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto exit;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return 0;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

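/* Create the per-device "queues" kset and populate it with rx-/tx-queue
 * kobjects for every real queue.  Any partial setup is unwound on error.
 */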
static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
	    NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
	return error;
}

static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively. */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

/*
 * netdev_release -- destroy and free a dead device.
 * Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	return dev_net(dev);
}

static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};

#ifdef CONFIG_OF_NET
static int of_dev_node_match(struct device *dev, const void *data)
{
	int ret = 0;

	if (dev->parent)
		ret = dev->parent->of_node == data;

	return ret == 0 ? dev->of_node == data : ret;
}

/*
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding with the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
	struct device *dev;

	dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
	if (!dev)
		return NULL;

	return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &(ndev->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &(ndev->dev);
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (ndev->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

int netdev_class_create_file_ns(struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}
1/*
2 * net-sysfs.c - network device class and attributes
3 *
4 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/capability.h>
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/if_arp.h>
16#include <linux/slab.h>
17#include <linux/nsproxy.h>
18#include <net/sock.h>
19#include <net/net_namespace.h>
20#include <linux/rtnetlink.h>
21#include <linux/vmalloc.h>
22#include <linux/export.h>
23#include <linux/jiffies.h>
24#include <linux/pm_runtime.h>
25
26#include "net-sysfs.h"
27
28#ifdef CONFIG_SYSFS
29static const char fmt_hex[] = "%#x\n";
30static const char fmt_long_hex[] = "%#lx\n";
31static const char fmt_dec[] = "%d\n";
32static const char fmt_udec[] = "%u\n";
33static const char fmt_ulong[] = "%lu\n";
34static const char fmt_u64[] = "%llu\n";
35
36static inline int dev_isalive(const struct net_device *dev)
37{
38 return dev->reg_state <= NETREG_REGISTERED;
39}
40
41/* use same locking rules as GIF* ioctl's */
42static ssize_t netdev_show(const struct device *dev,
43 struct device_attribute *attr, char *buf,
44 ssize_t (*format)(const struct net_device *, char *))
45{
46 struct net_device *net = to_net_dev(dev);
47 ssize_t ret = -EINVAL;
48
49 read_lock(&dev_base_lock);
50 if (dev_isalive(net))
51 ret = (*format)(net, buf);
52 read_unlock(&dev_base_lock);
53
54 return ret;
55}
56
57/* generate a show function for simple field */
58#define NETDEVICE_SHOW(field, format_string) \
59static ssize_t format_##field(const struct net_device *net, char *buf) \
60{ \
61 return sprintf(buf, format_string, net->field); \
62} \
63static ssize_t field##_show(struct device *dev, \
64 struct device_attribute *attr, char *buf) \
65{ \
66 return netdev_show(dev, attr, buf, format_##field); \
67} \
68
69#define NETDEVICE_SHOW_RO(field, format_string) \
70NETDEVICE_SHOW(field, format_string); \
71static DEVICE_ATTR_RO(field)
72
73#define NETDEVICE_SHOW_RW(field, format_string) \
74NETDEVICE_SHOW(field, format_string); \
75static DEVICE_ATTR_RW(field)
76
77/* use same locking and permission rules as SIF* ioctl's */
78static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
79 const char *buf, size_t len,
80 int (*set)(struct net_device *, unsigned long))
81{
82 struct net_device *netdev = to_net_dev(dev);
83 struct net *net = dev_net(netdev);
84 unsigned long new;
85 int ret = -EINVAL;
86
87 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
88 return -EPERM;
89
90 ret = kstrtoul(buf, 0, &new);
91 if (ret)
92 goto err;
93
94 if (!rtnl_trylock())
95 return restart_syscall();
96
97 if (dev_isalive(netdev)) {
98 if ((ret = (*set)(netdev, new)) == 0)
99 ret = len;
100 }
101 rtnl_unlock();
102 err:
103 return ret;
104}
105
106NETDEVICE_SHOW_RO(dev_id, fmt_hex);
107NETDEVICE_SHOW_RO(dev_port, fmt_dec);
108NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
109NETDEVICE_SHOW_RO(addr_len, fmt_dec);
110NETDEVICE_SHOW_RO(iflink, fmt_dec);
111NETDEVICE_SHOW_RO(ifindex, fmt_dec);
112NETDEVICE_SHOW_RO(type, fmt_dec);
113NETDEVICE_SHOW_RO(link_mode, fmt_dec);
114
115/* use same locking rules as GIFHWADDR ioctl's */
116static ssize_t address_show(struct device *dev, struct device_attribute *attr,
117 char *buf)
118{
119 struct net_device *net = to_net_dev(dev);
120 ssize_t ret = -EINVAL;
121
122 read_lock(&dev_base_lock);
123 if (dev_isalive(net))
124 ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
125 read_unlock(&dev_base_lock);
126 return ret;
127}
128static DEVICE_ATTR_RO(address);
129
130static ssize_t broadcast_show(struct device *dev,
131 struct device_attribute *attr, char *buf)
132{
133 struct net_device *net = to_net_dev(dev);
134 if (dev_isalive(net))
135 return sysfs_format_mac(buf, net->broadcast, net->addr_len);
136 return -EINVAL;
137}
138static DEVICE_ATTR_RO(broadcast);
139
140static int change_carrier(struct net_device *net, unsigned long new_carrier)
141{
142 if (!netif_running(net))
143 return -EINVAL;
144 return dev_change_carrier(net, (bool) new_carrier);
145}
146
147static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
148 const char *buf, size_t len)
149{
150 return netdev_store(dev, attr, buf, len, change_carrier);
151}
152
153static ssize_t carrier_show(struct device *dev,
154 struct device_attribute *attr, char *buf)
155{
156 struct net_device *netdev = to_net_dev(dev);
157 if (netif_running(netdev)) {
158 return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
159 }
160 return -EINVAL;
161}
162static DEVICE_ATTR_RW(carrier);
163
164static ssize_t speed_show(struct device *dev,
165 struct device_attribute *attr, char *buf)
166{
167 struct net_device *netdev = to_net_dev(dev);
168 int ret = -EINVAL;
169
170 if (!rtnl_trylock())
171 return restart_syscall();
172
173 if (netif_running(netdev)) {
174 struct ethtool_cmd cmd;
175 if (!__ethtool_get_settings(netdev, &cmd))
176 ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
177 }
178 rtnl_unlock();
179 return ret;
180}
181static DEVICE_ATTR_RO(speed);
182
183static ssize_t duplex_show(struct device *dev,
184 struct device_attribute *attr, char *buf)
185{
186 struct net_device *netdev = to_net_dev(dev);
187 int ret = -EINVAL;
188
189 if (!rtnl_trylock())
190 return restart_syscall();
191
192 if (netif_running(netdev)) {
193 struct ethtool_cmd cmd;
194 if (!__ethtool_get_settings(netdev, &cmd)) {
195 const char *duplex;
196 switch (cmd.duplex) {
197 case DUPLEX_HALF:
198 duplex = "half";
199 break;
200 case DUPLEX_FULL:
201 duplex = "full";
202 break;
203 default:
204 duplex = "unknown";
205 break;
206 }
207 ret = sprintf(buf, "%s\n", duplex);
208 }
209 }
210 rtnl_unlock();
211 return ret;
212}
213static DEVICE_ATTR_RO(duplex);
214
215static ssize_t dormant_show(struct device *dev,
216 struct device_attribute *attr, char *buf)
217{
218 struct net_device *netdev = to_net_dev(dev);
219
220 if (netif_running(netdev))
221 return sprintf(buf, fmt_dec, !!netif_dormant(netdev));
222
223 return -EINVAL;
224}
225static DEVICE_ATTR_RO(dormant);
226
227static const char *const operstates[] = {
228 "unknown",
229 "notpresent", /* currently unused */
230 "down",
231 "lowerlayerdown",
232 "testing", /* currently unused */
233 "dormant",
234 "up"
235};
236
237static ssize_t operstate_show(struct device *dev,
238 struct device_attribute *attr, char *buf)
239{
240 const struct net_device *netdev = to_net_dev(dev);
241 unsigned char operstate;
242
243 read_lock(&dev_base_lock);
244 operstate = netdev->operstate;
245 if (!netif_running(netdev))
246 operstate = IF_OPER_DOWN;
247 read_unlock(&dev_base_lock);
248
249 if (operstate >= ARRAY_SIZE(operstates))
250 return -EINVAL; /* should not happen */
251
252 return sprintf(buf, "%s\n", operstates[operstate]);
253}
254static DEVICE_ATTR_RO(operstate);
255
256static ssize_t carrier_changes_show(struct device *dev,
257 struct device_attribute *attr,
258 char *buf)
259{
260 struct net_device *netdev = to_net_dev(dev);
261 return sprintf(buf, fmt_dec,
262 atomic_read(&netdev->carrier_changes));
263}
264static DEVICE_ATTR_RO(carrier_changes);
265
266/* read-write attributes */
267
268static int change_mtu(struct net_device *net, unsigned long new_mtu)
269{
270 return dev_set_mtu(net, (int) new_mtu);
271}
272
273static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
274 const char *buf, size_t len)
275{
276 return netdev_store(dev, attr, buf, len, change_mtu);
277}
278NETDEVICE_SHOW_RW(mtu, fmt_dec);
279
280static int change_flags(struct net_device *net, unsigned long new_flags)
281{
282 return dev_change_flags(net, (unsigned int) new_flags);
283}
284
285static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
286 const char *buf, size_t len)
287{
288 return netdev_store(dev, attr, buf, len, change_flags);
289}
290NETDEVICE_SHOW_RW(flags, fmt_hex);
291
292static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
293{
294 net->tx_queue_len = new_len;
295 return 0;
296}
297
298static ssize_t tx_queue_len_store(struct device *dev,
299 struct device_attribute *attr,
300 const char *buf, size_t len)
301{
302 if (!capable(CAP_NET_ADMIN))
303 return -EPERM;
304
305 return netdev_store(dev, attr, buf, len, change_tx_queue_len);
306}
307NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);
308
309static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
310 const char *buf, size_t len)
311{
312 struct net_device *netdev = to_net_dev(dev);
313 struct net *net = dev_net(netdev);
314 size_t count = len;
315 ssize_t ret;
316
317 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
318 return -EPERM;
319
320 /* ignore trailing newline */
321 if (len > 0 && buf[len - 1] == '\n')
322 --count;
323
324 if (!rtnl_trylock())
325 return restart_syscall();
326 ret = dev_set_alias(netdev, buf, count);
327 rtnl_unlock();
328
329 return ret < 0 ? ret : len;
330}
331
332static ssize_t ifalias_show(struct device *dev,
333 struct device_attribute *attr, char *buf)
334{
335 const struct net_device *netdev = to_net_dev(dev);
336 ssize_t ret = 0;
337
338 if (!rtnl_trylock())
339 return restart_syscall();
340 if (netdev->ifalias)
341 ret = sprintf(buf, "%s\n", netdev->ifalias);
342 rtnl_unlock();
343 return ret;
344}
345static DEVICE_ATTR_RW(ifalias);
346
347static int change_group(struct net_device *net, unsigned long new_group)
348{
349 dev_set_group(net, (int) new_group);
350 return 0;
351}
352
353static ssize_t group_store(struct device *dev, struct device_attribute *attr,
354 const char *buf, size_t len)
355{
356 return netdev_store(dev, attr, buf, len, change_group);
357}
358NETDEVICE_SHOW(group, fmt_dec);
359static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
360
361static ssize_t phys_port_id_show(struct device *dev,
362 struct device_attribute *attr, char *buf)
363{
364 struct net_device *netdev = to_net_dev(dev);
365 ssize_t ret = -EINVAL;
366
367 if (!rtnl_trylock())
368 return restart_syscall();
369
370 if (dev_isalive(netdev)) {
371 struct netdev_phys_port_id ppid;
372
373 ret = dev_get_phys_port_id(netdev, &ppid);
374 if (!ret)
375 ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
376 }
377 rtnl_unlock();
378
379 return ret;
380}
381static DEVICE_ATTR_RO(phys_port_id);
382
383static struct attribute *net_class_attrs[] = {
384 &dev_attr_netdev_group.attr,
385 &dev_attr_type.attr,
386 &dev_attr_dev_id.attr,
387 &dev_attr_dev_port.attr,
388 &dev_attr_iflink.attr,
389 &dev_attr_ifindex.attr,
390 &dev_attr_addr_assign_type.attr,
391 &dev_attr_addr_len.attr,
392 &dev_attr_link_mode.attr,
393 &dev_attr_address.attr,
394 &dev_attr_broadcast.attr,
395 &dev_attr_speed.attr,
396 &dev_attr_duplex.attr,
397 &dev_attr_dormant.attr,
398 &dev_attr_operstate.attr,
399 &dev_attr_carrier_changes.attr,
400 &dev_attr_ifalias.attr,
401 &dev_attr_carrier.attr,
402 &dev_attr_mtu.attr,
403 &dev_attr_flags.attr,
404 &dev_attr_tx_queue_len.attr,
405 &dev_attr_phys_port_id.attr,
406 NULL,
407};
408ATTRIBUTE_GROUPS(net_class);
409
410/* Show a given an attribute in the statistics group */
411static ssize_t netstat_show(const struct device *d,
412 struct device_attribute *attr, char *buf,
413 unsigned long offset)
414{
415 struct net_device *dev = to_net_dev(d);
416 ssize_t ret = -EINVAL;
417
418 WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
419 offset % sizeof(u64) != 0);
420
421 read_lock(&dev_base_lock);
422 if (dev_isalive(dev)) {
423 struct rtnl_link_stats64 temp;
424 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
425
426 ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
427 }
428 read_unlock(&dev_base_lock);
429 return ret;
430}
431
432/* generate a read-only statistics attribute */
433#define NETSTAT_ENTRY(name) \
434static ssize_t name##_show(struct device *d, \
435 struct device_attribute *attr, char *buf) \
436{ \
437 return netstat_show(d, attr, buf, \
438 offsetof(struct rtnl_link_stats64, name)); \
439} \
440static DEVICE_ATTR_RO(name)
441
442NETSTAT_ENTRY(rx_packets);
443NETSTAT_ENTRY(tx_packets);
444NETSTAT_ENTRY(rx_bytes);
445NETSTAT_ENTRY(tx_bytes);
446NETSTAT_ENTRY(rx_errors);
447NETSTAT_ENTRY(tx_errors);
448NETSTAT_ENTRY(rx_dropped);
449NETSTAT_ENTRY(tx_dropped);
450NETSTAT_ENTRY(multicast);
451NETSTAT_ENTRY(collisions);
452NETSTAT_ENTRY(rx_length_errors);
453NETSTAT_ENTRY(rx_over_errors);
454NETSTAT_ENTRY(rx_crc_errors);
455NETSTAT_ENTRY(rx_frame_errors);
456NETSTAT_ENTRY(rx_fifo_errors);
457NETSTAT_ENTRY(rx_missed_errors);
458NETSTAT_ENTRY(tx_aborted_errors);
459NETSTAT_ENTRY(tx_carrier_errors);
460NETSTAT_ENTRY(tx_fifo_errors);
461NETSTAT_ENTRY(tx_heartbeat_errors);
462NETSTAT_ENTRY(tx_window_errors);
463NETSTAT_ENTRY(rx_compressed);
464NETSTAT_ENTRY(tx_compressed);
465
466static struct attribute *netstat_attrs[] = {
467 &dev_attr_rx_packets.attr,
468 &dev_attr_tx_packets.attr,
469 &dev_attr_rx_bytes.attr,
470 &dev_attr_tx_bytes.attr,
471 &dev_attr_rx_errors.attr,
472 &dev_attr_tx_errors.attr,
473 &dev_attr_rx_dropped.attr,
474 &dev_attr_tx_dropped.attr,
475 &dev_attr_multicast.attr,
476 &dev_attr_collisions.attr,
477 &dev_attr_rx_length_errors.attr,
478 &dev_attr_rx_over_errors.attr,
479 &dev_attr_rx_crc_errors.attr,
480 &dev_attr_rx_frame_errors.attr,
481 &dev_attr_rx_fifo_errors.attr,
482 &dev_attr_rx_missed_errors.attr,
483 &dev_attr_tx_aborted_errors.attr,
484 &dev_attr_tx_carrier_errors.attr,
485 &dev_attr_tx_fifo_errors.attr,
486 &dev_attr_tx_heartbeat_errors.attr,
487 &dev_attr_tx_window_errors.attr,
488 &dev_attr_rx_compressed.attr,
489 &dev_attr_tx_compressed.attr,
490 NULL
491};
492
493
494static struct attribute_group netstat_group = {
495 .name = "statistics",
496 .attrs = netstat_attrs,
497};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) container_of(_attr,		\
	struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		rcu_read_unlock();
		free_cpumask_var(mask);
		return -EINVAL;
	}
	rcu_read_unlock();

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);
	return len;
}

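/*
 * Usage sketch (paths assume an interface named eth0): steering RPS for
 * receive queue 0 to CPUs 0-3 would look like
 *
 *	echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *
 * The hex mask is parsed by bitmap_parse() above; writing 0 drops the
 * map and releases the rps_needed static key reference.
 */
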
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

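/*
 * Usage sketch (path assumes an interface named eth0):
 *
 *	echo 256 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *
 * The count is rounded up to a power of two by the mask computation
 * above, so reading the file back may report more flows than written.
 */
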
static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	       show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
	.namespace = rx_queue_namespace,
};

static int rx_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_rx_queue *queue = net->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto exit;

	if (net->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, net->sysfs_rx_queue_group);
		if (error)
			goto exit;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!net->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		if (net->sysfs_rx_queue_group)
			sysfs_remove_group(&net->_rx[i].kobj,
					   net->sysfs_rx_queue_group);
		kobject_put(&net->_rx[i].kobj);
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,		\
	struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attribute,
				  char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, "%lu\n", trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

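/*
 * Read-only, e.g. (assuming an interface named eth0):
 *
 *	cat /sys/class/net/eth0/queues/tx-0/tx_timeout
 *
 * reports the queue's accumulated transmit-timeout count.
 */
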
#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

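/*
 * So each writable BQL file accepts either a base-10 byte count or the
 * literal string "max" (shorthand for DQL_MAX_LIMIT), e.g.
 * (path assumes an interface named eth0):
 *
 *	echo max > /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max
 */
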
static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attr,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attribute,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
	       bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attr,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 struct netdev_queue_attribute *attr,	\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				struct netdev_queue_attribute *attr,	\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =	\
	__ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,		\
	       bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

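/*
 * Example (illustrative): BQL_ATTR(limit, limit) expands to a
 * bql_show_limit()/bql_set_limit() pair that reads and writes
 * queue->dql.limit through bql_show()/bql_set(), plus a
 * bql_limit_attribute exposing them as a "limit" file with mode
 * S_IRUGO | S_IWUSR.
 */
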
static struct attribute *dql_attrs[] = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
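
/*
 * netdev_queue_add_kobject() below attaches this group to every tx
 * queue, so the files land in (path assumes an interface named eth0)
 *
 *	/sys/class/net/eth0/queues/tx-<n>/byte_queue_limits/
 */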
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;
				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		free_cpumask_var(mask);
		return -EINVAL;
	}

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}

static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
	__ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
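
/*
 * Usage sketch (path assumes an interface named eth0): letting only
 * CPU 2 transmit on queue 0 would be
 *
 *	echo 4 > /sys/class/net/eth0/queues/tx-0/xps_cpus
 *
 * The mask is handed to netif_set_xps_queue(), which rebuilds the
 * device's xps_maps accordingly.
 */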
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] = {
	&queue_trans_timeout.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
#endif
	NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
	.namespace = netdev_queue_namespace,
};

static int netdev_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_queue *queue = net->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto exit;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto exit;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return 0;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = net->_tx + i;

#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

static int register_queue_kobjects(struct net_device *net)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	net->queues_kset = kset_create_and_add("queues",
					       NULL, &net->dev.kobj);
	if (!net->queues_kset)
		return -ENOMEM;
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(net, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(net, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(net, txq, 0);
	net_rx_queue_update_kobjects(net, rxq, 0);
	return error;
}

static void remove_queue_kobjects(struct net_device *net)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	net_rx_queue_update_kobjects(net, real_rx, 0);
	netdev_queue_update_kobjects(net, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(net->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

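/*
 * The resulting uevent therefore carries, e.g. (values illustrative),
 * INTERFACE=eth0 and IFINDEX=2, so udev rules can match on either.
 */
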
/*
 * netdev_release -- destroy and free a dead device.
 * Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev;

	dev = container_of(d, struct net_device, dev);
	return dev_net(dev);
}

static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(net);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);
	const struct attribute_group **groups = net->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = net;
	dev->groups = groups;

	dev_set_name(dev, "%s", net->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (net->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (net->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(net);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

int netdev_class_create_file_ns(struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}