// SPDX-License-Identifier: GPL-2.0
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>

#include "dev.h"

#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

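/*
 * The seq_file position packs a dev_index_head bucket number in its high
 * bits and a 1-based offset within that bucket in its low bits, so a
 * /proc/net/dev read can resume at the right device across read() calls.
 */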
static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct net_device *dev;
	struct hlist_head *h;
	unsigned int count = 0, offset = get_offset(*pos);

	h = &net->dev_index_head[get_bucket(*pos)];
	hlist_for_each_entry_rcu(dev, h, index_hlist) {
		if (++count == offset)
			return dev;
	}

	return NULL;
}

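/*
 * Return the device at *pos, scanning forward through the index hash
 * buckets; *pos is advanced to the first slot of the next bucket whenever
 * the current bucket is exhausted.
 */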
static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net_device *dev;
	unsigned int bucket;

	do {
		dev = dev_from_same_bucket(seq, pos);
		if (dev)
			return dev;

		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < NETDEV_HASHENTRIES);

	return NULL;
}

/*
 * This is invoked by the /proc filesystem handler to display a device
 * in detail.
 */
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
		return NULL;

	return dev_from_bucket(seq, pos);
}

static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return dev_from_bucket(seq, pos);
}

static void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

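/*
 * Emit one line of /proc/net/dev for @dev. Several related counters are
 * folded together to preserve the historical column layout: missed frames
 * are added to the drop count and the various receive framing errors are
 * reported under "frame".
 */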
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 * Called from the PROCfs module. This now uses the new arbitrary sized
 * /proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "   drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

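/*
 * Total number of packets queued for this CPU's backlog: frames still in
 * input_pkt_queue plus those already moved to process_queue.
 */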
static u32 softnet_backlog_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->input_pkt_queue) +
	       skb_queue_len_lockless(&sd->process_queue);
}

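/*
 * Map a seq_file position onto the softnet_data of the next online CPU at
 * or after *pos. Offline CPUs are skipped, so consecutive output lines may
 * belong to non-consecutive CPU ids.
 */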
static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;
	unsigned int flow_limit_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl)
		flow_limit_count = fl->count;
	rcu_read_unlock();
#endif

	/* the index is the id of the CPU owning this sd. Since offline CPUs
	 * are not displayed, it would otherwise not be trivial for user space
	 * to map each line to a specific CPU.
	 */
	seq_printf(seq,
		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   0, /* was cpu_collision */
		   sd->received_rps, flow_limit_count,
		   softnet_backlog_len(sd), (int)seq->index);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = dev_seq_show,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next = softnet_seq_next,
	.stop = softnet_seq_stop,
	.show = softnet_seq_show,
};

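/*
 * Find the packet_type entry at logical position @pos. Entries are counted
 * in the same order ptype_seq_next walks them: the per-device ptype_all
 * list of every netdev in this namespace first, then the global ptype_all
 * list, then each ptype_base hash bucket.
 */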
static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
{
	struct list_head *ptype_list = NULL;
	struct packet_type *pt = NULL;
	struct net_device *dev;
	loff_t i = 0;
	int t;

	for_each_netdev_rcu(seq_file_net(seq), dev) {
		ptype_list = &dev->ptype_all;
		list_for_each_entry_rcu(pt, ptype_list, list) {
			if (i == pos)
				return pt;
			++i;
		}
	}

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

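/*
 * Advance from the current entry without rescanning from the start: finish
 * the list @v lives on, then fall through to the next stage in the same
 * order ptype_get_idx uses (remaining per-device lists, the global
 * ptype_all list, then the ptype_base hash buckets).
 */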
static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev;
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(seq, 0);

	pt = v;
	nxt = pt->list.next;
	if (pt->dev) {
		if (nxt != &pt->dev->ptype_all)
			goto found;

		dev = pt->dev;
		for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
			if (!list_empty(&dev->ptype_all)) {
				nxt = dev->ptype_all.next;
				goto found;
			}
		}

		nxt = ptype_all.next;
		goto ptype_all;
	}

	if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

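/*
 * Print one handler, filtered by namespace: an entry is shown only if its
 * owning socket's netns (af_packet_net, when set) and its bound device's
 * netns (when bound) both match seq_file_net(seq).
 */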
static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
		 (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %ps\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next = ptype_seq_next,
	.stop = ptype_seq_stop,
	.show = ptype_seq_show,
};

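/*
 * Create the per-namespace /proc/net entries: "dev", "softnet_stat" and
 * "ptype", plus the wireless-extensions entries; on any failure unwind the
 * entries already created and return -ENOMEM.
 */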
static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
			     sizeof(struct seq_net_private)))
		goto out;
	if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
			     &softnet_seq_ops))
		goto out_dev;
	if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
			     sizeof(struct seq_net_private)))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	remove_proc_entry("ptype", net->proc_net);
out_softnet:
	remove_proc_entry("softnet_stat", net->proc_net);
out_dev:
	remove_proc_entry("dev", net->proc_net);
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	remove_proc_entry("ptype", net->proc_net);
	remove_proc_entry("softnet_stat", net->proc_net);
	remove_proc_entry("dev", net->proc_net);
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

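/*
 * One line per multicast address of @dev: ifindex, name, reference count,
 * whether the address was added globally, and the raw hardware address.
 */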
static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
	struct netdev_hw_addr *ha;
	struct net_device *dev = v;

	if (v == SEQ_START_TOKEN)
		return 0;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev) {
		seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
			   dev->ifindex, dev->name,
			   ha->refcount, ha->global_use,
			   (int)dev->addr_len, ha->addr);
	}
	netif_addr_unlock_bh(dev);
	return 0;
}

static const struct seq_operations dev_mc_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = dev_mc_seq_show,
};

static int __net_init dev_mc_net_init(struct net *net)
{
	if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
			     sizeof(struct seq_net_private)))
		return -ENOMEM;
	return 0;
}

static void __net_exit dev_mc_net_exit(struct net *net)
{
	remove_proc_entry("dev_mcast", net->proc_net);
}

static struct pernet_operations __net_initdata dev_mc_net_ops = {
	.init = dev_mc_net_init,
	.exit = dev_mc_net_exit,
};

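/*
 * Register both pernet_operations at boot so every network namespace gets
 * its /proc/net/dev, softnet_stat, ptype and dev_mcast files. Note that the
 * first registration is not unwound if the second one fails; the error is
 * simply returned to the caller.
 */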
int __init dev_proc_init(void)
{
	int ret = register_pernet_subsys(&dev_proc_ops);
	if (!ret)
		return register_pernet_subsys(&dev_mc_net_ops);
	return ret;
}