// SPDX-License-Identifier: GPL-2.0
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>

#include "dev.h"

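/*
 * seq_file plumbing for the networking /proc entries created below:
 * /proc/net/dev, /proc/net/softnet_stat, /proc/net/ptype and
 * /proc/net/dev_mcast.
 */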
#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

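/*
 * The seq_file position for /proc/net/dev encodes both the dev_index_head
 * hash bucket (high bits) and a 1-based offset inside that bucket (low
 * BUCKET_SPACE bits), so iteration can resume exactly where it stopped.
 */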
static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct net_device *dev;
	struct hlist_head *h;
	unsigned int count = 0, offset = get_offset(*pos);

	h = &net->dev_index_head[get_bucket(*pos)];
	hlist_for_each_entry_rcu(dev, h, index_hlist) {
		if (++count == offset)
			return dev;
	}

	return NULL;
}

static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net_device *dev;
	unsigned int bucket;

	do {
		dev = dev_from_same_bucket(seq, pos);
		if (dev)
			return dev;

		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < NETDEV_HASHENTRIES);

	return NULL;
}

/*
 * This is invoked by the /proc filesystem handler to display a device
 * in detail.
 */
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
		return NULL;

	return dev_from_bucket(seq, pos);
}

static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return dev_from_bucket(seq, pos);
}

static void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

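/*
 * Emit one /proc/net/dev line for a device: eight receive counters followed
 * by eight transmit counters, matching the column header printed from
 * dev_seq_show().
 */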
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		   stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		   stats->tx_aborted_errors +
		   stats->tx_window_errors +
		   stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 * Called from the PROCfs module. This now uses the new arbitrary-sized
 * /proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-| Receive "
			 " | Transmit\n"
			 " face |bytes packets errs drop fifo frame "
			 "compressed multicast|bytes packets errs "
			 "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->input_pkt_queue);
}

static u32 softnet_process_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->process_queue);
}

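/* Advance *pos to the next online CPU and return its softnet_data,
 * or NULL once *pos runs past nr_cpu_ids.
 */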
static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

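/*
 * One /proc/net/softnet_stat line per online CPU: processed, dropped,
 * time_squeeze, six always-zero placeholders kept for removed fields
 * (fastroute, cpu_collision), received_rps, flow_limit_count, the total
 * backlog length, the owning CPU id and the two backlog queue lengths,
 * all printed as %08x.
 */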
static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;
	u32 input_qlen = softnet_input_pkt_queue_len(sd);
	u32 process_qlen = softnet_process_queue_len(sd);
	unsigned int flow_limit_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl)
		flow_limit_count = fl->count;
	rcu_read_unlock();
#endif

	/* the index is the CPU id owning this sd. Since offline CPUs are not
	 * displayed, it would otherwise not be trivial for user-space to map
	 * the data to a specific CPU.
	 */
	seq_printf(seq,
		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
		   "%08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   0, /* was cpu_collision */
		   sd->received_rps, flow_limit_count,
		   input_qlen + process_qlen, (int)seq->index,
		   input_qlen, process_qlen);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = dev_seq_show,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next = softnet_seq_next,
	.stop = softnet_seq_stop,
	.show = softnet_seq_show,
};

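/*
 * /proc/net/ptype walks the packet handlers in three stages: the per-device
 * ptype_all lists, the global ptype_all list, and finally each ptype_base
 * hash chain. ptype_get_idx() and ptype_seq_next() follow the same order.
 */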
static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
{
	struct list_head *ptype_list = NULL;
	struct packet_type *pt = NULL;
	struct net_device *dev;
	loff_t i = 0;
	int t;

	for_each_netdev_rcu(seq_file_net(seq), dev) {
		ptype_list = &dev->ptype_all;
		list_for_each_entry_rcu(pt, ptype_list, list) {
			if (i == pos)
				return pt;
			++i;
		}
	}

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev;
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(seq, 0);

	pt = v;
	nxt = pt->list.next;
	if (pt->dev) {
		if (nxt != &pt->dev->ptype_all)
			goto found;

		dev = pt->dev;
		for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
			if (!list_empty(&dev->ptype_all)) {
				nxt = dev->ptype_all.next;
				goto found;
			}
		}

		nxt = ptype_all.next;
		goto ptype_all;
	}

	if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device Function\n");
	else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
		 (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %ps\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next = ptype_seq_next,
	.stop = ptype_seq_stop,
	.show = ptype_seq_show,
};

static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
			     sizeof(struct seq_net_private)))
		goto out;
	if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
			     &softnet_seq_ops))
		goto out_dev;
	if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
			     sizeof(struct seq_net_private)))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	remove_proc_entry("ptype", net->proc_net);
out_softnet:
	remove_proc_entry("softnet_stat", net->proc_net);
out_dev:
	remove_proc_entry("dev", net->proc_net);
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	remove_proc_entry("ptype", net->proc_net);
	remove_proc_entry("softnet_stat", net->proc_net);
	remove_proc_entry("dev", net->proc_net);
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

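/*
 * Each /proc/net/dev_mcast line reports ifindex, device name, reference
 * count, global use count and the multicast address as a hex string.
 */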
static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
	struct netdev_hw_addr *ha;
	struct net_device *dev = v;

	if (v == SEQ_START_TOKEN)
		return 0;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev) {
		seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
			   dev->ifindex, dev->name,
			   ha->refcount, ha->global_use,
			   (int)dev->addr_len, ha->addr);
	}
	netif_addr_unlock_bh(dev);
	return 0;
}

static const struct seq_operations dev_mc_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = dev_mc_seq_show,
};

static int __net_init dev_mc_net_init(struct net *net)
{
	if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
			     sizeof(struct seq_net_private)))
		return -ENOMEM;
	return 0;
}

static void __net_exit dev_mc_net_exit(struct net *net)
{
	remove_proc_entry("dev_mcast", net->proc_net);
}

static struct pernet_operations __net_initdata dev_mc_net_ops = {
	.init = dev_mc_net_init,
	.exit = dev_mc_net_exit,
};

int __init dev_proc_init(void)
{
	int ret = register_pernet_subsys(&dev_proc_ops);
	if (!ret)
		return register_pernet_subsys(&dev_mc_net_ops);
	return ret;
}
// SPDX-License-Identifier: GPL-2.0
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>
#include <net/hotdata.h>

#include "dev.h"

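/*
 * Resume /proc/net/dev iteration at the first device whose ifindex is not
 * below *pos, using for_each_netdev_dump(), and store the matched ifindex
 * back into *pos so the walk can continue from there.
 */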
static void *dev_seq_from_index(struct seq_file *seq, loff_t *pos)
{
	unsigned long ifindex = *pos;
	struct net_device *dev;

	for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
		*pos = dev->ifindex;
		return dev;
	}
	return NULL;
}

static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	return dev_seq_from_index(seq, pos);
}

static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return dev_seq_from_index(seq, pos);
}

static void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		   stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		   stats->tx_aborted_errors +
		   stats->tx_window_errors +
		   stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 * Called from the PROCfs module. This now uses the new arbitrary-sized
 * /proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-| Receive "
			 " | Transmit\n"
			 " face |bytes packets errs drop fifo frame "
			 "compressed multicast|bytes packets errs "
			 "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->input_pkt_queue);
}

static u32 softnet_process_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->process_queue);
}

static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;
	u32 input_qlen = softnet_input_pkt_queue_len(sd);
	u32 process_qlen = softnet_process_queue_len(sd);
	unsigned int flow_limit_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl)
		flow_limit_count = fl->count;
	rcu_read_unlock();
#endif

	/* the index is the CPU id owning this sd. Since offline CPUs are not
	 * displayed, it would otherwise not be trivial for user-space to map
	 * the data to a specific CPU.
	 */
	seq_printf(seq,
		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
		   "%08x %08x\n",
		   sd->processed, atomic_read(&sd->dropped),
		   sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   0, /* was cpu_collision */
		   sd->received_rps, flow_limit_count,
		   input_qlen + process_qlen, (int)seq->index,
		   input_qlen, process_qlen);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = dev_seq_show,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next = softnet_seq_next,
	.stop = softnet_seq_stop,
	.show = softnet_seq_show,
};

static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
{
	struct list_head *ptype_list = NULL;
	struct packet_type *pt = NULL;
	struct net_device *dev;
	loff_t i = 0;
	int t;

	for_each_netdev_rcu(seq_file_net(seq), dev) {
		ptype_list = &dev->ptype_all;
		list_for_each_entry_rcu(pt, ptype_list, list) {
			if (i == pos)
				return pt;
			++i;
		}
	}

	list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev;
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(seq, 0);

	pt = v;
	nxt = pt->list.next;
	if (pt->dev) {
		if (nxt != &pt->dev->ptype_all)
			goto found;

		dev = pt->dev;
		for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
			if (!list_empty(&dev->ptype_all)) {
				nxt = dev->ptype_all.next;
				goto found;
			}
		}

		nxt = net_hotdata.ptype_all.next;
		goto ptype_all;
	}

	if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
		if (nxt != &net_hotdata.ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device Function\n");
	else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
		 (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %ps\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next = ptype_seq_next,
	.stop = ptype_seq_stop,
	.show = ptype_seq_show,
};

static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
			     sizeof(struct seq_net_private)))
		goto out;
	if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
			     &softnet_seq_ops))
		goto out_dev;
	if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
			     sizeof(struct seq_net_private)))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	remove_proc_entry("ptype", net->proc_net);
out_softnet:
	remove_proc_entry("softnet_stat", net->proc_net);
out_dev:
	remove_proc_entry("dev", net->proc_net);
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	remove_proc_entry("ptype", net->proc_net);
	remove_proc_entry("softnet_stat", net->proc_net);
	remove_proc_entry("dev", net->proc_net);
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
	struct netdev_hw_addr *ha;
	struct net_device *dev = v;

	if (v == SEQ_START_TOKEN)
		return 0;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev) {
		seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
			   dev->ifindex, dev->name,
			   ha->refcount, ha->global_use,
			   (int)dev->addr_len, ha->addr);
	}
	netif_addr_unlock_bh(dev);
	return 0;
}

static const struct seq_operations dev_mc_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = dev_mc_seq_show,
};

static int __net_init dev_mc_net_init(struct net *net)
{
	if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
			     sizeof(struct seq_net_private)))
		return -ENOMEM;
	return 0;
}

static void __net_exit dev_mc_net_exit(struct net *net)
{
	remove_proc_entry("dev_mcast", net->proc_net);
}

static struct pernet_operations __net_initdata dev_mc_net_ops = {
	.init = dev_mc_net_init,
	.exit = dev_mc_net_exit,
};

int __init dev_proc_init(void)
{
	int ret = register_pernet_subsys(&dev_proc_ops);
	if (!ret)
		return register_pernet_subsys(&dev_mc_net_ops);
	return ret;
}