Linux v3.15
 
  1#include <linux/netdevice.h>
  2#include <linux/proc_fs.h>
  3#include <linux/seq_file.h>
  4#include <net/wext.h>
  5
  6#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
  7
  8#define get_bucket(x) ((x) >> BUCKET_SPACE)
  9#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
 10#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
 11
 12extern struct list_head ptype_all __read_mostly;
 13extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 14
 15static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
 16{
 17	struct net *net = seq_file_net(seq);
 18	struct net_device *dev;
 19	struct hlist_head *h;
 20	unsigned int count = 0, offset = get_offset(*pos);
 21
 22	h = &net->dev_name_head[get_bucket(*pos)];
 23	hlist_for_each_entry_rcu(dev, h, name_hlist) {
 24		if (++count == offset)
 25			return dev;
 26	}
 27
 28	return NULL;
 29}
 30
 31static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
 32{
 33	struct net_device *dev;
 34	unsigned int bucket;
 35
 36	do {
 37		dev = dev_from_same_bucket(seq, pos);
 38		if (dev)
 39			return dev;
 40
 41		bucket = get_bucket(*pos) + 1;
 42		*pos = set_bucket_offset(bucket, 1);
 43	} while (bucket < NETDEV_HASHENTRIES);
 44
 45	return NULL;
 46}
 47
 48/*
 49 *	This is invoked by the /proc filesystem handler to display a device
 50 *	in detail.
 51 */
 52static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
 53	__acquires(RCU)
 54{
 55	rcu_read_lock();
 56	if (!*pos)
 57		return SEQ_START_TOKEN;
 58
 59	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
 60		return NULL;
 61
 62	return dev_from_bucket(seq, pos);
 63}
 64
 65static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 66{
 67	++*pos;
 68	return dev_from_bucket(seq, pos);
 69}
 70
 71static void dev_seq_stop(struct seq_file *seq, void *v)
 72	__releases(RCU)
 73{
 74	rcu_read_unlock();
 75}
 76
 77static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 78{
 79	struct rtnl_link_stats64 temp;
 80	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 81
 82	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
 83		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
 84		   dev->name, stats->rx_bytes, stats->rx_packets,
 85		   stats->rx_errors,
 86		   stats->rx_dropped + stats->rx_missed_errors,
 87		   stats->rx_fifo_errors,
 88		   stats->rx_length_errors + stats->rx_over_errors +
 89		    stats->rx_crc_errors + stats->rx_frame_errors,
 90		   stats->rx_compressed, stats->multicast,
 91		   stats->tx_bytes, stats->tx_packets,
 92		   stats->tx_errors, stats->tx_dropped,
 93		   stats->tx_fifo_errors, stats->collisions,
 94		   stats->tx_carrier_errors +
 95		    stats->tx_aborted_errors +
 96		    stats->tx_window_errors +
 97		    stats->tx_heartbeat_errors,
 98		   stats->tx_compressed);
 99}
100
101/*
102 *	Called from the PROCfs module. This now uses the new arbitrary sized
103 *	/proc/net interface to create /proc/net/dev
104 */
105static int dev_seq_show(struct seq_file *seq, void *v)
106{
107	if (v == SEQ_START_TOKEN)
108		seq_puts(seq, "Inter-|   Receive                            "
109			      "                    |  Transmit\n"
110			      " face |bytes    packets errs drop fifo frame "
111			      "compressed multicast|bytes    packets errs "
112			      "drop fifo colls carrier compressed\n");
113	else
114		dev_seq_printf_stats(seq, v);
115	return 0;
116}
117
118static struct softnet_data *softnet_get_online(loff_t *pos)
119{
120	struct softnet_data *sd = NULL;
121
122	while (*pos < nr_cpu_ids)
123		if (cpu_online(*pos)) {
124			sd = &per_cpu(softnet_data, *pos);
125			break;
126		} else
127			++*pos;
128	return sd;
129}
130
131static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
132{
133	return softnet_get_online(pos);
134}
135
136static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
137{
138	++*pos;
139	return softnet_get_online(pos);
140}
141
142static void softnet_seq_stop(struct seq_file *seq, void *v)
143{
144}
145
146static int softnet_seq_show(struct seq_file *seq, void *v)
147{
148	struct softnet_data *sd = v;
149	unsigned int flow_limit_count = 0;
150
151#ifdef CONFIG_NET_FLOW_LIMIT
152	struct sd_flow_limit *fl;
153
154	rcu_read_lock();
155	fl = rcu_dereference(sd->flow_limit);
156	if (fl)
157		flow_limit_count = fl->count;
158	rcu_read_unlock();
159#endif
160
161	seq_printf(seq,
162		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
163		   sd->processed, sd->dropped, sd->time_squeeze, 0,
164		   0, 0, 0, 0, /* was fastroute */
165		   sd->cpu_collision, sd->received_rps, flow_limit_count);
166	return 0;
167}
168
169static const struct seq_operations dev_seq_ops = {
170	.start = dev_seq_start,
171	.next  = dev_seq_next,
172	.stop  = dev_seq_stop,
173	.show  = dev_seq_show,
174};
175
176static int dev_seq_open(struct inode *inode, struct file *file)
177{
178	return seq_open_net(inode, file, &dev_seq_ops,
179			    sizeof(struct seq_net_private));
180}
181
182static const struct file_operations dev_seq_fops = {
183	.owner	 = THIS_MODULE,
184	.open    = dev_seq_open,
185	.read    = seq_read,
186	.llseek  = seq_lseek,
187	.release = seq_release_net,
188};
189
190static const struct seq_operations softnet_seq_ops = {
191	.start = softnet_seq_start,
192	.next  = softnet_seq_next,
193	.stop  = softnet_seq_stop,
194	.show  = softnet_seq_show,
195};
196
197static int softnet_seq_open(struct inode *inode, struct file *file)
198{
199	return seq_open(file, &softnet_seq_ops);
200}
201
202static const struct file_operations softnet_seq_fops = {
203	.owner	 = THIS_MODULE,
204	.open    = softnet_seq_open,
205	.read    = seq_read,
206	.llseek  = seq_lseek,
207	.release = seq_release,
208};
209
210static void *ptype_get_idx(loff_t pos)
211{
212	struct packet_type *pt = NULL;
213	loff_t i = 0;
214	int t;
215
216	list_for_each_entry_rcu(pt, &ptype_all, list) {
217		if (i == pos)
218			return pt;
219		++i;
220	}
221
222	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
223		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
224			if (i == pos)
225				return pt;
226			++i;
227		}
228	}
229	return NULL;
230}
231
232static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
233	__acquires(RCU)
234{
235	rcu_read_lock();
236	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
237}
238
239static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
240{
241	struct packet_type *pt;
242	struct list_head *nxt;
243	int hash;
244
245	++*pos;
246	if (v == SEQ_START_TOKEN)
247		return ptype_get_idx(0);
248
249	pt = v;
250	nxt = pt->list.next;
251	if (pt->type == htons(ETH_P_ALL)) {
252		if (nxt != &ptype_all)
253			goto found;
254		hash = 0;
255		nxt = ptype_base[0].next;
256	} else
257		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
258
259	while (nxt == &ptype_base[hash]) {
260		if (++hash >= PTYPE_HASH_SIZE)
261			return NULL;
262		nxt = ptype_base[hash].next;
263	}
264found:
265	return list_entry(nxt, struct packet_type, list);
266}
267
268static void ptype_seq_stop(struct seq_file *seq, void *v)
269	__releases(RCU)
270{
271	rcu_read_unlock();
272}
273
274static int ptype_seq_show(struct seq_file *seq, void *v)
275{
276	struct packet_type *pt = v;
277
278	if (v == SEQ_START_TOKEN)
279		seq_puts(seq, "Type Device      Function\n");
280	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
281		if (pt->type == htons(ETH_P_ALL))
282			seq_puts(seq, "ALL ");
283		else
284			seq_printf(seq, "%04x", ntohs(pt->type));
285
286		seq_printf(seq, " %-8s %pf\n",
287			   pt->dev ? pt->dev->name : "", pt->func);
288	}
289
290	return 0;
291}
292
293static const struct seq_operations ptype_seq_ops = {
294	.start = ptype_seq_start,
295	.next  = ptype_seq_next,
296	.stop  = ptype_seq_stop,
297	.show  = ptype_seq_show,
298};
299
300static int ptype_seq_open(struct inode *inode, struct file *file)
301{
302	return seq_open_net(inode, file, &ptype_seq_ops,
303			sizeof(struct seq_net_private));
304}
305
306static const struct file_operations ptype_seq_fops = {
307	.owner	 = THIS_MODULE,
308	.open    = ptype_seq_open,
309	.read    = seq_read,
310	.llseek  = seq_lseek,
311	.release = seq_release_net,
312};
313
314
315static int __net_init dev_proc_net_init(struct net *net)
316{
317	int rc = -ENOMEM;
318
319	if (!proc_create("dev", S_IRUGO, net->proc_net, &dev_seq_fops))
320		goto out;
321	if (!proc_create("softnet_stat", S_IRUGO, net->proc_net,
322			 &softnet_seq_fops))
323		goto out_dev;
324	if (!proc_create("ptype", S_IRUGO, net->proc_net, &ptype_seq_fops))
325		goto out_softnet;
326
327	if (wext_proc_init(net))
328		goto out_ptype;
329	rc = 0;
330out:
331	return rc;
332out_ptype:
333	remove_proc_entry("ptype", net->proc_net);
334out_softnet:
335	remove_proc_entry("softnet_stat", net->proc_net);
336out_dev:
337	remove_proc_entry("dev", net->proc_net);
338	goto out;
339}
340
341static void __net_exit dev_proc_net_exit(struct net *net)
342{
343	wext_proc_exit(net);
344
345	remove_proc_entry("ptype", net->proc_net);
346	remove_proc_entry("softnet_stat", net->proc_net);
347	remove_proc_entry("dev", net->proc_net);
348}
349
350static struct pernet_operations __net_initdata dev_proc_ops = {
351	.init = dev_proc_net_init,
352	.exit = dev_proc_net_exit,
353};
354
355static int dev_mc_seq_show(struct seq_file *seq, void *v)
356{
357	struct netdev_hw_addr *ha;
358	struct net_device *dev = v;
359
360	if (v == SEQ_START_TOKEN)
361		return 0;
362
363	netif_addr_lock_bh(dev);
364	netdev_for_each_mc_addr(ha, dev) {
365		int i;
366
367		seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
368			   dev->name, ha->refcount, ha->global_use);
369
370		for (i = 0; i < dev->addr_len; i++)
371			seq_printf(seq, "%02x", ha->addr[i]);
372
373		seq_putc(seq, '\n');
374	}
375	netif_addr_unlock_bh(dev);
376	return 0;
377}
378
379static const struct seq_operations dev_mc_seq_ops = {
380	.start = dev_seq_start,
381	.next  = dev_seq_next,
382	.stop  = dev_seq_stop,
383	.show  = dev_mc_seq_show,
384};
385
386static int dev_mc_seq_open(struct inode *inode, struct file *file)
387{
388	return seq_open_net(inode, file, &dev_mc_seq_ops,
389			    sizeof(struct seq_net_private));
390}
391
392static const struct file_operations dev_mc_seq_fops = {
393	.owner	 = THIS_MODULE,
394	.open    = dev_mc_seq_open,
395	.read    = seq_read,
396	.llseek  = seq_lseek,
397	.release = seq_release_net,
398};
399
400static int __net_init dev_mc_net_init(struct net *net)
401{
402	if (!proc_create("dev_mcast", 0, net->proc_net, &dev_mc_seq_fops))
403		return -ENOMEM;
404	return 0;
405}
406
407static void __net_exit dev_mc_net_exit(struct net *net)
408{
409	remove_proc_entry("dev_mcast", net->proc_net);
410}
411
412static struct pernet_operations __net_initdata dev_mc_net_ops = {
413	.init = dev_mc_net_init,
414	.exit = dev_mc_net_exit,
415};
416
417int __init dev_proc_init(void)
418{
419	int ret = register_pernet_subsys(&dev_proc_ops);
420	if (!ret)
421		return register_pernet_subsys(&dev_mc_net_ops);
422	return ret;
423}
Linux v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/netdevice.h>
  3#include <linux/proc_fs.h>
  4#include <linux/seq_file.h>
  5#include <net/wext.h>
  6
  7#include "dev.h"
  8
  9#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
 10
 11#define get_bucket(x) ((x) >> BUCKET_SPACE)
 12#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
 13#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
 14
 15static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
 16{
 17	struct net *net = seq_file_net(seq);
 18	struct net_device *dev;
 19	struct hlist_head *h;
 20	unsigned int count = 0, offset = get_offset(*pos);
 21
 22	h = &net->dev_index_head[get_bucket(*pos)];
 23	hlist_for_each_entry_rcu(dev, h, index_hlist) {
 24		if (++count == offset)
 25			return dev;
 26	}
 27
 28	return NULL;
 29}
 30
 31static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
 32{
 33	struct net_device *dev;
 34	unsigned int bucket;
 35
 36	do {
 37		dev = dev_from_same_bucket(seq, pos);
 38		if (dev)
 39			return dev;
 40
 41		bucket = get_bucket(*pos) + 1;
 42		*pos = set_bucket_offset(bucket, 1);
 43	} while (bucket < NETDEV_HASHENTRIES);
 44
 45	return NULL;
 46}
 47
 48/*
 49 *	This is invoked by the /proc filesystem handler to display a device
 50 *	in detail.
 51 */
 52static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
 53	__acquires(RCU)
 54{
 55	rcu_read_lock();
 56	if (!*pos)
 57		return SEQ_START_TOKEN;
 58
 59	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
 60		return NULL;
 61
 62	return dev_from_bucket(seq, pos);
 63}
 64
 65static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 66{
 67	++*pos;
 68	return dev_from_bucket(seq, pos);
 69}
 70
 71static void dev_seq_stop(struct seq_file *seq, void *v)
 72	__releases(RCU)
 73{
 74	rcu_read_unlock();
 75}
 76
 77static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 78{
 79	struct rtnl_link_stats64 temp;
 80	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 81
 82	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
 83		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
 84		   dev->name, stats->rx_bytes, stats->rx_packets,
 85		   stats->rx_errors,
 86		   stats->rx_dropped + stats->rx_missed_errors,
 87		   stats->rx_fifo_errors,
 88		   stats->rx_length_errors + stats->rx_over_errors +
 89		    stats->rx_crc_errors + stats->rx_frame_errors,
 90		   stats->rx_compressed, stats->multicast,
 91		   stats->tx_bytes, stats->tx_packets,
 92		   stats->tx_errors, stats->tx_dropped,
 93		   stats->tx_fifo_errors, stats->collisions,
 94		   stats->tx_carrier_errors +
 95		    stats->tx_aborted_errors +
 96		    stats->tx_window_errors +
 97		    stats->tx_heartbeat_errors,
 98		   stats->tx_compressed);
 99}
100
101/*
102 *	Called from the PROCfs module. This now uses the new arbitrary sized
103 *	/proc/net interface to create /proc/net/dev
104 */
105static int dev_seq_show(struct seq_file *seq, void *v)
106{
107	if (v == SEQ_START_TOKEN)
108		seq_puts(seq, "Inter-|   Receive                            "
109			      "                    |  Transmit\n"
110			      " face |bytes    packets errs drop fifo frame "
111			      "compressed multicast|bytes    packets errs "
112			      "drop fifo colls carrier compressed\n");
113	else
114		dev_seq_printf_stats(seq, v);
115	return 0;
116}
117
118static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
119{
120	return skb_queue_len_lockless(&sd->input_pkt_queue);
121}
122
123static u32 softnet_process_queue_len(struct softnet_data *sd)
124{
125	return skb_queue_len_lockless(&sd->process_queue);
126}
127
128static struct softnet_data *softnet_get_online(loff_t *pos)
129{
130	struct softnet_data *sd = NULL;
131
132	while (*pos < nr_cpu_ids)
133		if (cpu_online(*pos)) {
134			sd = &per_cpu(softnet_data, *pos);
135			break;
136		} else
137			++*pos;
138	return sd;
139}
140
141static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
142{
143	return softnet_get_online(pos);
144}
145
146static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
147{
148	++*pos;
149	return softnet_get_online(pos);
150}
151
152static void softnet_seq_stop(struct seq_file *seq, void *v)
153{
154}
155
156static int softnet_seq_show(struct seq_file *seq, void *v)
157{
158	struct softnet_data *sd = v;
159	u32 input_qlen = softnet_input_pkt_queue_len(sd);
160	u32 process_qlen = softnet_process_queue_len(sd);
161	unsigned int flow_limit_count = 0;
162
163#ifdef CONFIG_NET_FLOW_LIMIT
164	struct sd_flow_limit *fl;
165
166	rcu_read_lock();
167	fl = rcu_dereference(sd->flow_limit);
168	if (fl)
169		flow_limit_count = fl->count;
170	rcu_read_unlock();
171#endif
172
173	/* the index is the CPU id owning this sd. Since offline CPUs are not
174	 * displayed, it would otherwise not be trivial for user space to map
175	 * the data to a specific CPU.
176	 */
177	seq_printf(seq,
178		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
179		   "%08x %08x\n",
180		   sd->processed, sd->dropped, sd->time_squeeze, 0,
181		   0, 0, 0, 0, /* was fastroute */
182		   0,	/* was cpu_collision */
183		   sd->received_rps, flow_limit_count,
184		   input_qlen + process_qlen, (int)seq->index,
185		   input_qlen, process_qlen);
186	return 0;
187}
188
189static const struct seq_operations dev_seq_ops = {
190	.start = dev_seq_start,
191	.next  = dev_seq_next,
192	.stop  = dev_seq_stop,
193	.show  = dev_seq_show,
194};
195
196static const struct seq_operations softnet_seq_ops = {
197	.start = softnet_seq_start,
198	.next  = softnet_seq_next,
199	.stop  = softnet_seq_stop,
200	.show  = softnet_seq_show,
201};
202
203static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
204{
205	struct list_head *ptype_list = NULL;
206	struct packet_type *pt = NULL;
207	struct net_device *dev;
208	loff_t i = 0;
209	int t;
210
211	for_each_netdev_rcu(seq_file_net(seq), dev) {
212		ptype_list = &dev->ptype_all;
213		list_for_each_entry_rcu(pt, ptype_list, list) {
214			if (i == pos)
215				return pt;
216			++i;
217		}
218	}
219
220	list_for_each_entry_rcu(pt, &ptype_all, list) {
221		if (i == pos)
222			return pt;
223		++i;
224	}
225
226	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
227		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
228			if (i == pos)
229				return pt;
230			++i;
231		}
232	}
233	return NULL;
234}
235
236static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
237	__acquires(RCU)
238{
239	rcu_read_lock();
240	return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
241}
242
243static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
244{
245	struct net_device *dev;
246	struct packet_type *pt;
247	struct list_head *nxt;
248	int hash;
249
250	++*pos;
251	if (v == SEQ_START_TOKEN)
252		return ptype_get_idx(seq, 0);
253
254	pt = v;
255	nxt = pt->list.next;
256	if (pt->dev) {
257		if (nxt != &pt->dev->ptype_all)
258			goto found;
259
260		dev = pt->dev;
261		for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
262			if (!list_empty(&dev->ptype_all)) {
263				nxt = dev->ptype_all.next;
264				goto found;
265			}
266		}
267
268		nxt = ptype_all.next;
269		goto ptype_all;
270	}
271
272	if (pt->type == htons(ETH_P_ALL)) {
273ptype_all:
274		if (nxt != &ptype_all)
275			goto found;
276		hash = 0;
277		nxt = ptype_base[0].next;
278	} else
279		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
280
281	while (nxt == &ptype_base[hash]) {
282		if (++hash >= PTYPE_HASH_SIZE)
283			return NULL;
284		nxt = ptype_base[hash].next;
285	}
286found:
287	return list_entry(nxt, struct packet_type, list);
288}
289
290static void ptype_seq_stop(struct seq_file *seq, void *v)
291	__releases(RCU)
292{
293	rcu_read_unlock();
294}
295
296static int ptype_seq_show(struct seq_file *seq, void *v)
297{
298	struct packet_type *pt = v;
299
300	if (v == SEQ_START_TOKEN)
301		seq_puts(seq, "Type Device      Function\n");
302	else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
303		 (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
304		if (pt->type == htons(ETH_P_ALL))
305			seq_puts(seq, "ALL ");
306		else
307			seq_printf(seq, "%04x", ntohs(pt->type));
308
309		seq_printf(seq, " %-8s %ps\n",
310			   pt->dev ? pt->dev->name : "", pt->func);
311	}
312
313	return 0;
314}
315
316static const struct seq_operations ptype_seq_ops = {
317	.start = ptype_seq_start,
318	.next  = ptype_seq_next,
319	.stop  = ptype_seq_stop,
320	.show  = ptype_seq_show,
321};
322
323static int __net_init dev_proc_net_init(struct net *net)
324{
325	int rc = -ENOMEM;
326
327	if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
328			sizeof(struct seq_net_private)))
329		goto out;
330	if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
331			 &softnet_seq_ops))
332		goto out_dev;
333	if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
334			sizeof(struct seq_net_private)))
335		goto out_softnet;
336
337	if (wext_proc_init(net))
338		goto out_ptype;
339	rc = 0;
340out:
341	return rc;
342out_ptype:
343	remove_proc_entry("ptype", net->proc_net);
344out_softnet:
345	remove_proc_entry("softnet_stat", net->proc_net);
346out_dev:
347	remove_proc_entry("dev", net->proc_net);
348	goto out;
349}
350
351static void __net_exit dev_proc_net_exit(struct net *net)
352{
353	wext_proc_exit(net);
354
355	remove_proc_entry("ptype", net->proc_net);
356	remove_proc_entry("softnet_stat", net->proc_net);
357	remove_proc_entry("dev", net->proc_net);
358}
359
360static struct pernet_operations __net_initdata dev_proc_ops = {
361	.init = dev_proc_net_init,
362	.exit = dev_proc_net_exit,
363};
364
365static int dev_mc_seq_show(struct seq_file *seq, void *v)
366{
367	struct netdev_hw_addr *ha;
368	struct net_device *dev = v;
369
370	if (v == SEQ_START_TOKEN)
371		return 0;
372
373	netif_addr_lock_bh(dev);
374	netdev_for_each_mc_addr(ha, dev) {
375		seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
376			   dev->ifindex, dev->name,
377			   ha->refcount, ha->global_use,
378			   (int)dev->addr_len, ha->addr);
379	}
380	netif_addr_unlock_bh(dev);
381	return 0;
382}
383
384static const struct seq_operations dev_mc_seq_ops = {
385	.start = dev_seq_start,
386	.next  = dev_seq_next,
387	.stop  = dev_seq_stop,
388	.show  = dev_mc_seq_show,
389};
390
391static int __net_init dev_mc_net_init(struct net *net)
392{
393	if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
394			sizeof(struct seq_net_private)))
395		return -ENOMEM;
396	return 0;
397}
398
399static void __net_exit dev_mc_net_exit(struct net *net)
400{
401	remove_proc_entry("dev_mcast", net->proc_net);
402}
403
404static struct pernet_operations __net_initdata dev_mc_net_ops = {
405	.init = dev_mc_net_init,
406	.exit = dev_mc_net_exit,
407};
408
409int __init dev_proc_init(void)
410{
411	int ret = register_pernet_subsys(&dev_proc_ops);
412	if (!ret)
413		return register_pernet_subsys(&dev_mc_net_ops);
414	return ret;
415}
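Compared with v3.15, the v6.8 softnet_seq_show() above prints fifteen %08x columns per online CPU instead of eleven, and exports seq->index (the id of the CPU owning the softnet_data entry) as the 13th column so that user space can attribute each row to a CPU even though offline CPUs are skipped. A minimal user-space sketch of reading that 15-column layout (illustrative only, column meanings taken from the seq_printf() call above):

#include <stdio.h>

int main(void)
{
	char line[256];
	unsigned int c[15];
	FILE *fp = fopen("/proc/net/softnet_stat", "r");

	if (!fp)
		return 1;
	while (fgets(line, sizeof(line), fp)) {
		/* processed, dropped, time_squeeze, five unused zeros, a zero that
		 * was cpu_collision, received_rps, flow_limit_count, backlog total,
		 * CPU id, input_pkt_queue length, process_queue length
		 */
		if (sscanf(line, "%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x",
			   &c[0], &c[1], &c[2], &c[3], &c[4], &c[5], &c[6], &c[7],
			   &c[8], &c[9], &c[10], &c[11], &c[12], &c[13], &c[14]) == 15)
			printf("cpu=%u processed=%u dropped=%u time_squeeze=%u rps=%u flow_limit=%u backlog=%u\n",
			       c[12], c[0], c[1], c[2], c[9], c[10], c[11]);
	}
	fclose(fp);
	return 0;
}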