v4.6
 
/*
 * net/core/gen_stats.c
 *
 *             This program is free software; you can redistribute it and/or
 *             modify it under the terms of the GNU General Public License
 *             as published by the Free Software Foundation; either version
 *             2 of the License, or (at your option) any later version.
 *
 * Authors:  Thomas Graf <tgraf@suug.ch>
 *           Jamal Hadi Salim
 *           Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * See Documentation/networking/gen_stats.txt
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/gen_stats.h>
#include <net/netlink.h>
#include <net/gen_stats.h>


static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
{
	if (nla_put(d->skb, type, size, buf))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	spin_unlock_bh(d->lock);
	return -1;
}

/**
 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock
 * @d: dumping handle
 *
 * Initializes the dumping handle, grabs the statistics lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistics TLVs.
 *
 * The dumping handle is marked to be in backward compatibility mode, telling
 * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
	int xstats_type, spinlock_t *lock, struct gnet_dump *d)
	__acquires(lock)
{
	memset(d, 0, sizeof(*d));

	spin_lock_bh(lock);
	d->lock = lock;
	if (type)
		d->tail = (struct nlattr *)skb_tail_pointer(skb);
	d->skb = skb;
	d->compat_tc_stats = tc_stats_type;
	d->compat_xstats = xstats_type;

	if (d->tail)
		return gnet_stats_copy(d, type, NULL, 0);

	return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);

/**
 * gnet_stats_start_copy - start dumping procedure
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
 *
 * Initializes the dumping handle, grabs the statistics lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistics TLVs.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
	struct gnet_dump *d)
{
	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d);
}
EXPORT_SYMBOL(gnet_stats_start_copy);

static void
__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
			    struct gnet_stats_basic_cpu __percpu *cpu)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes;
		u32 packets;

		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = bcpu->bstats.bytes;
			packets = bcpu->bstats.packets;
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		bstats->bytes += bytes;
		bstats->packets += packets;
	}
}

void
__gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_basic_cpu __percpu *cpu,
			struct gnet_stats_basic_packed *b)
{
	if (cpu) {
		__gnet_stats_copy_basic_cpu(bstats, cpu);
	} else {
		bstats->bytes = b->bytes;
		bstats->packets = b->packets;
	}
}
EXPORT_SYMBOL(__gnet_stats_copy_basic);

/**
 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
 * @d: dumping handle
 * @cpu: per cpu basic statistics, may be NULL
 * @b: basic statistics
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic(struct gnet_dump *d,
		      struct gnet_stats_basic_cpu __percpu *cpu,
		      struct gnet_stats_basic_packed *b)
{
	struct gnet_stats_basic_packed bstats = {0};

	__gnet_stats_copy_basic(&bstats, cpu, b);

	if (d->compat_tc_stats) {
		d->tc_stats.bytes = bstats.bytes;
		d->tc_stats.packets = bstats.packets;
	}

	if (d->tail) {
		struct gnet_stats_basic sb;

		memset(&sb, 0, sizeof(sb));
		sb.bytes = bstats.bytes;
		sb.packets = bstats.packets;
		return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb));
	}
	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_basic);

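/* Usage sketch (not part of this file): gnet_stats_copy_basic() serves both
 * per-CPU and plain counters.  A qdisc-style caller passes its percpu
 * pointer when it keeps lockless per-CPU counters, or NULL plus the plain
 * struct otherwise.  Field names follow struct Qdisc from
 * <net/sch_generic.h>; qdisc_is_percpu_stats() is the usual test.
 */
static int example_dump_basic(struct gnet_dump *d, struct Qdisc *q)
{
	if (qdisc_is_percpu_stats(q))
		/* fold all per-CPU counters under their u64_stats syncp */
		return gnet_stats_copy_basic(d, q->cpu_bstats, &q->bstats);

	/* plain counters: @b is read directly */
	return gnet_stats_copy_basic(d, NULL, &q->bstats);
}
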
/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @b: basic statistics
 * @r: rate estimator statistics
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
			 const struct gnet_stats_basic_packed *b,
			 struct gnet_stats_rate_est64 *r)
{
	struct gnet_stats_rate_est est;
	int res;

	if (b && !gen_estimator_active(b, r))
		return 0;

	est.bps = min_t(u64, UINT_MAX, r->bps);
	/* we have some time before reaching 2^32 packets per second */
	est.pps = r->pps;

	if (d->compat_tc_stats) {
		d->tc_stats.bps = est.bps;
		d->tc_stats.pps = est.pps;
	}

	if (d->tail) {
		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est));
		if (res < 0 || est.bps == r->bps)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r));
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);

static void
__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
			    const struct gnet_stats_queue __percpu *q)
{
	int i;

	for_each_possible_cpu(i) {
		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);

		qstats->qlen = 0;
		qstats->backlog += qcpu->backlog;
		qstats->drops += qcpu->drops;
		qstats->requeues += qcpu->requeues;
		qstats->overlimits += qcpu->overlimits;
	}
}

static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
				    const struct gnet_stats_queue __percpu *cpu,
				    const struct gnet_stats_queue *q,
				    __u32 qlen)
{
	if (cpu) {
		__gnet_stats_copy_queue_cpu(qstats, cpu);
	} else {
		qstats->qlen = q->qlen;
		qstats->backlog = q->backlog;
		qstats->drops = q->drops;
		qstats->requeues = q->requeues;
		qstats->overlimits = q->overlimits;
	}

	qstats->qlen = qlen;
}

/**
 * gnet_stats_copy_queue - copy queue statistics into statistics TLV
 * @d: dumping handle
 * @cpu_q: per cpu queue statistics
 * @q: queue statistics
 * @qlen: queue length statistics
 *
 * Appends the queue statistics to the top level TLV created by
 * gnet_stats_start_copy(). Per cpu queue statistics are used if they
 * are available.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_queue(struct gnet_dump *d,
		      struct gnet_stats_queue __percpu *cpu_q,
		      struct gnet_stats_queue *q, __u32 qlen)
{
	struct gnet_stats_queue qstats = {0};

	__gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);

	if (d->compat_tc_stats) {
		d->tc_stats.drops = qstats.drops;
		d->tc_stats.qlen = qstats.qlen;
		d->tc_stats.backlog = qstats.backlog;
		d->tc_stats.overlimits = qstats.overlimits;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_QUEUE,
				       &qstats, sizeof(qstats));

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_queue);

/**
 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
 * @d: dumping handle
 * @st: application specific statistics data
 * @len: length of data
 *
 * Appends the application specific statistics to the top level TLV created by
 * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
 * handle is in backward compatibility mode.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
	if (d->compat_xstats) {
		d->xstats = kmemdup(st, len, GFP_ATOMIC);
		if (!d->xstats)
			goto err_out;
		d->xstats_len = len;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_APP, st, len);

	return 0;

err_out:
	d->xstats_len = 0;
	spin_unlock_bh(d->lock);
	return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);

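/* Usage sketch (not part of this file): a qdisc typically hands its private
 * xstats struct to gnet_stats_copy_app() from its dump_stats callback.  The
 * struct below is hypothetical; any flat struct the peer userspace tool
 * knows how to parse works.
 */
struct example_xstats {
	__u32 early_drops;
	__u32 marks;
};

static int example_dump_xstats(struct gnet_dump *d,
			       struct example_xstats *xs)
{
	/* emits TCA_STATS_APP and, in compat mode, keeps a kmemdup()'d copy
	 * for the legacy xstats TLV written by gnet_stats_finish_copy()
	 */
	return gnet_stats_copy_app(d, xs, sizeof(*xs));
}
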
/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	if (d->tail)
		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
			sizeof(d->tc_stats)) < 0)
			return -1;

	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
			d->xstats_len) < 0)
			return -1;
	}

	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	spin_unlock_bh(d->lock);
	return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);
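
/* Usage sketch (not part of this file): the API is used as a bracket of
 * start / copy / finish calls.  Condensed from the shape of tc_fill_qdisc()
 * in net/sched/sch_api.c as of v4.6, with error paths collapsed.
 */
static int example_dump_stats(struct Qdisc *q, struct sk_buff *skb)
{
	struct gnet_dump d;

	/* opens the TCA_STATS2 container and takes the stats lock;
	 * TCA_STATS/TCA_XSTATS enable backward compatibility mode */
	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		return -1;

	if (gnet_stats_copy_basic(&d, q->cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, q->cpu_qstats, &q->qstats,
				  q->q.qlen) < 0)
		return -1;

	/* fixes up the container length, emits compat TLVs, drops the lock */
	return gnet_stats_finish_copy(&d);
}
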
v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/gen_stats.c
 *
 * Authors:  Thomas Graf <tgraf@suug.ch>
 *           Jamal Hadi Salim
 *           Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * See Documentation/networking/gen_stats.txt
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/gen_stats.h>
#include <net/netlink.h>
#include <net/gen_stats.h>


static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
{
	if (nla_put_64bit(d->skb, type, size, buf, padattr))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return -1;
}

/**
 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistics lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistics TLVs.
 *
 * The dumping handle is marked to be in backward compatibility mode, telling
 * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
			     int xstats_type, spinlock_t *lock,
			     struct gnet_dump *d, int padattr)
	__acquires(lock)
{
	memset(d, 0, sizeof(*d));

	if (type)
		d->tail = (struct nlattr *)skb_tail_pointer(skb);
	d->skb = skb;
	d->compat_tc_stats = tc_stats_type;
	d->compat_xstats = xstats_type;
	d->padattr = padattr;
	if (lock) {
		d->lock = lock;
		spin_lock_bh(lock);
	}
	if (d->tail) {
		int ret = gnet_stats_copy(d, type, NULL, 0, padattr);

		/* The initial attribute added in gnet_stats_copy() may be
		 * preceded by a padding attribute, in which case d->tail will
		 * end up pointing at the padding instead of the real attribute.
		 * Fix this so gnet_stats_finish_copy() adjusts the length of
		 * the right attribute.
		 */
		if (ret == 0 && d->tail->nla_type == padattr)
			d->tail = (struct nlattr *)((char *)d->tail +
						    NLA_ALIGN(d->tail->nla_len));
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);

/**
 * gnet_stats_start_copy - start dumping procedure
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistics lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistics TLVs.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
		      struct gnet_dump *d, int padattr)
{
	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
}
EXPORT_SYMBOL(gnet_stats_start_copy);

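/* Usage sketch (not part of this file): two v5.4 differences show up in the
 * start call.  Attributes are emitted with nla_put_64bit(), so tc callers
 * pass TCA_PAD as the padding attribute, and the lock is now optional: a
 * NULL lock gives a lockless dump for callers that serialize by other means
 * (the shape of the v5.4 tc_fill_qdisc() call).
 */
static int example_start_dump(struct sk_buff *skb, struct gnet_dump *d)
{
	return gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					    TCA_XSTATS, NULL, d, TCA_PAD);
}
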
static void
__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
			    struct gnet_stats_basic_cpu __percpu *cpu)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes;
		u32 packets;

		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = bcpu->bstats.bytes;
			packets = bcpu->bstats.packets;
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		bstats->bytes += bytes;
		bstats->packets += packets;
	}
}

void
__gnet_stats_copy_basic(const seqcount_t *running,
			struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_basic_cpu __percpu *cpu,
			struct gnet_stats_basic_packed *b)
{
	unsigned int seq;

	if (cpu) {
		__gnet_stats_copy_basic_cpu(bstats, cpu);
		return;
	}
	do {
		if (running)
			seq = read_seqcount_begin(running);
		bstats->bytes = b->bytes;
		bstats->packets = b->packets;
	} while (running && read_seqcount_retry(running, seq));
}
EXPORT_SYMBOL(__gnet_stats_copy_basic);

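/* Usage sketch (not part of this file): the new @running argument lets a
 * lockless caller read bytes/packets as a consistent pair; the loop above
 * retries while a writer holds the seqcount.  A qdisc-style caller is
 * assumed below, with qdisc_root_sleeping_running() as the v5.4 helper
 * qdiscs use to find the right seqcount.
 */
static void example_snapshot_basic(struct Qdisc *q,
				   struct gnet_stats_basic_packed *snap)
{
	/* retried under the qdisc's running seqcount when no lock is held */
	__gnet_stats_copy_basic(qdisc_root_sleeping_running(q), snap,
				q->cpu_bstats, &q->bstats);
}
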
static int
___gnet_stats_copy_basic(const seqcount_t *running,
			 struct gnet_dump *d,
			 struct gnet_stats_basic_cpu __percpu *cpu,
			 struct gnet_stats_basic_packed *b,
			 int type)
{
	struct gnet_stats_basic_packed bstats = {0};

	__gnet_stats_copy_basic(running, &bstats, cpu, b);

	if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
		d->tc_stats.bytes = bstats.bytes;
		d->tc_stats.packets = bstats.packets;
	}

	if (d->tail) {
		struct gnet_stats_basic sb;

		memset(&sb, 0, sizeof(sb));
		sb.bytes = bstats.bytes;
		sb.packets = bstats.packets;
		return gnet_stats_copy(d, type, &sb, sizeof(sb),
				       TCA_STATS_PAD);
	}
	return 0;
}

/**
 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
 * @running: seqcount_t pointer
 * @d: dumping handle
 * @cpu: per cpu basic statistics, may be NULL
 * @b: basic statistics
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic(const seqcount_t *running,
		      struct gnet_dump *d,
		      struct gnet_stats_basic_cpu __percpu *cpu,
		      struct gnet_stats_basic_packed *b)
{
	return ___gnet_stats_copy_basic(running, d, cpu, b,
					TCA_STATS_BASIC);
}
EXPORT_SYMBOL(gnet_stats_copy_basic);

/**
 * gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV
 * @running: seqcount_t pointer
 * @d: dumping handle
 * @cpu: per cpu basic statistics, may be NULL
 * @b: basic statistics
 *
 * Appends the basic hardware statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic_hw(const seqcount_t *running,
			 struct gnet_dump *d,
			 struct gnet_stats_basic_cpu __percpu *cpu,
			 struct gnet_stats_basic_packed *b)
{
	return ___gnet_stats_copy_basic(running, d, cpu, b,
					TCA_STATS_BASIC_HW);
}
EXPORT_SYMBOL(gnet_stats_copy_basic_hw);

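/* Usage sketch (not part of this file): the _hw variant differs only in the
 * TLV type it emits (TCA_STATS_BASIC_HW), letting offloading drivers report
 * hardware counters next to the software ones; only TCA_STATS_BASIC feeds
 * the compat struct tc_stats.  Separate sw/hw counter sets are assumed.
 */
static int example_dump_sw_and_hw(const seqcount_t *running,
				  struct gnet_dump *d,
				  struct gnet_stats_basic_packed *sw,
				  struct gnet_stats_basic_packed *hw)
{
	if (gnet_stats_copy_basic(running, d, NULL, sw) < 0)
		return -1;
	return gnet_stats_copy_basic_hw(running, d, NULL, hw);
}
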
/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @rate_est: rate estimator
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
			 struct net_rate_estimator __rcu **rate_est)
{
	struct gnet_stats_rate_est64 sample;
	struct gnet_stats_rate_est est;
	int res;

	if (!gen_estimator_read(rate_est, &sample))
		return 0;
	est.bps = min_t(u64, UINT_MAX, sample.bps);
	/* we have some time before reaching 2^32 packets per second */
	est.pps = sample.pps;

	if (d->compat_tc_stats) {
		d->tc_stats.bps = est.bps;
		d->tc_stats.pps = est.pps;
	}

	if (d->tail) {
		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
				      TCA_STATS_PAD);
		if (res < 0 || est.bps == sample.bps)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, &sample,
				       sizeof(sample), TCA_STATS_PAD);
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);

static void
__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
			    const struct gnet_stats_queue __percpu *q)
{
	int i;

	for_each_possible_cpu(i) {
		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);

		qstats->qlen = 0;
		qstats->backlog += qcpu->backlog;
		qstats->drops += qcpu->drops;
		qstats->requeues += qcpu->requeues;
		qstats->overlimits += qcpu->overlimits;
	}
}

void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
			     const struct gnet_stats_queue __percpu *cpu,
			     const struct gnet_stats_queue *q,
			     __u32 qlen)
{
	if (cpu) {
		__gnet_stats_copy_queue_cpu(qstats, cpu);
	} else {
		qstats->qlen = q->qlen;
		qstats->backlog = q->backlog;
		qstats->drops = q->drops;
		qstats->requeues = q->requeues;
		qstats->overlimits = q->overlimits;
	}

	qstats->qlen = qlen;
}
EXPORT_SYMBOL(__gnet_stats_copy_queue);

/**
 * gnet_stats_copy_queue - copy queue statistics into statistics TLV
 * @d: dumping handle
 * @cpu_q: per cpu queue statistics
 * @q: queue statistics
 * @qlen: queue length statistics
 *
 * Appends the queue statistics to the top level TLV created by
 * gnet_stats_start_copy(). Per cpu queue statistics are used if they
 * are available.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_queue(struct gnet_dump *d,
		      struct gnet_stats_queue __percpu *cpu_q,
		      struct gnet_stats_queue *q, __u32 qlen)
{
	struct gnet_stats_queue qstats = {0};

	__gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);

	if (d->compat_tc_stats) {
		d->tc_stats.drops = qstats.drops;
		d->tc_stats.qlen = qstats.qlen;
		d->tc_stats.backlog = qstats.backlog;
		d->tc_stats.overlimits = qstats.overlimits;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_QUEUE,
				       &qstats, sizeof(qstats),
				       TCA_STATS_PAD);

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_queue);

/**
 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
 * @d: dumping handle
 * @st: application specific statistics data
 * @len: length of data
 *
 * Appends the application specific statistics to the top level TLV created by
 * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
 * handle is in backward compatibility mode.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
	if (d->compat_xstats) {
		d->xstats = kmemdup(st, len, GFP_ATOMIC);
		if (!d->xstats)
			goto err_out;
		d->xstats_len = len;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_APP, st, len,
				       TCA_STATS_PAD);

	return 0;

err_out:
	if (d->lock)
		spin_unlock_bh(d->lock);
	d->xstats_len = 0;
	return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);

/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	if (d->tail)
		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
				    sizeof(d->tc_stats), d->padattr) < 0)
			return -1;

	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
				    d->xstats_len, d->padattr) < 0)
			return -1;
	}

	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);
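
/* Usage sketch (not part of this file): the v5.4 calling sequence is the
 * same start / copy / finish bracket as v4.6, apart from the padding
 * attribute, the optional (here NULL) lock, the running seqcount, and the
 * rate estimator now being read through a struct net_rate_estimator
 * pointer.  Condensed from the shape of tc_fill_qdisc() in
 * net/sched/sch_api.c; qdisc_qlen_sum() folds per-CPU queue lengths.
 */
static int example_dump_stats(struct Qdisc *q, struct sk_buff *skb)
{
	struct gnet_dump d;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, NULL, &d, TCA_PAD) < 0)
		return -1;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q), &d,
				  q->cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, q->cpu_qstats, &q->qstats,
				  qdisc_qlen_sum(q)) < 0)
		return -1;

	return gnet_stats_finish_copy(&d);
}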