Linux Audio

Check our new training course

Loading...
v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * net/psample/psample.c - Netlink channel for packet sampling
  4 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
  5 */
  6
  7#include <linux/types.h>
  8#include <linux/kernel.h>
  9#include <linux/skbuff.h>
 10#include <linux/module.h>
 11#include <linux/timekeeping.h>
 12#include <net/net_namespace.h>
 13#include <net/sock.h>
 14#include <net/netlink.h>
 15#include <net/genetlink.h>
 16#include <net/psample.h>
 17#include <linux/spinlock.h>
 18#include <net/ip_tunnels.h>
 19#include <net/dst_metadata.h>
 20
 21#define PSAMPLE_MAX_PACKET_SIZE 0xffff
 22
 23static LIST_HEAD(psample_groups_list);
 24static DEFINE_SPINLOCK(psample_groups_lock);
 25
 26/* multicast groups */
 27enum psample_nl_multicast_groups {
 28	PSAMPLE_NL_MCGRP_CONFIG,
 29	PSAMPLE_NL_MCGRP_SAMPLE,
 30};
 31
 32static const struct genl_multicast_group psample_nl_mcgrps[] = {
 33	[PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
 34	[PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
 35};
 36
 37static struct genl_family psample_nl_family __ro_after_init;
 38
 39static int psample_group_nl_fill(struct sk_buff *msg,
 40				 struct psample_group *group,
 41				 enum psample_command cmd, u32 portid, u32 seq,
 42				 int flags)
 43{
 44	void *hdr;
 45	int ret;
 46
 47	hdr = genlmsg_put(msg, portid, seq, &psample_nl_family, flags, cmd);
 48	if (!hdr)
 49		return -EMSGSIZE;
 50
 51	ret = nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
 52	if (ret < 0)
 53		goto error;
 54
 55	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount);
 56	if (ret < 0)
 57		goto error;
 58
 59	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq);
 60	if (ret < 0)
 61		goto error;
 62
 63	genlmsg_end(msg, hdr);
 64	return 0;
 65
 66error:
 67	genlmsg_cancel(msg, hdr);
 68	return -EMSGSIZE;
 69}
 70
 71static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
 72					   struct netlink_callback *cb)
 73{
 74	struct psample_group *group;
 75	int start = cb->args[0];
 76	int idx = 0;
 77	int err;
 78
 79	spin_lock_bh(&psample_groups_lock);
 80	list_for_each_entry(group, &psample_groups_list, list) {
 81		if (!net_eq(group->net, sock_net(msg->sk)))
 82			continue;
 83		if (idx < start) {
 84			idx++;
 85			continue;
 86		}
 87		err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP,
 88					    NETLINK_CB(cb->skb).portid,
 89					    cb->nlh->nlmsg_seq, NLM_F_MULTI);
 90		if (err)
 91			break;
 92		idx++;
 93	}
 94
 95	spin_unlock_bh(&psample_groups_lock);
 96	cb->args[0] = idx;
 97	return msg->len;
 98}
 99
/* Generic netlink operations table.  GET_GROUP is dump-only (no doit) and
 * carries no privilege requirement, so group listing is open to all users.
 */
static const struct genl_small_ops psample_nl_ops[] = {
	{
		.cmd = PSAMPLE_CMD_GET_GROUP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = psample_nl_cmd_get_group_dumpit,
		/* can be retrieved by unprivileged users */
	}
};
108
/* The "psample" generic netlink family.  netnsok makes the family usable
 * from any network namespace; registered at module init.
 */
static struct genl_family psample_nl_family __ro_after_init = {
	.name		= PSAMPLE_GENL_NAME,
	.version	= PSAMPLE_GENL_VERSION,
	.maxattr	= PSAMPLE_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.mcgrps		= psample_nl_mcgrps,
	.small_ops	= psample_nl_ops,
	.n_small_ops	= ARRAY_SIZE(psample_nl_ops),
	.resv_start_op	= PSAMPLE_CMD_GET_GROUP + 1,
	.n_mcgrps	= ARRAY_SIZE(psample_nl_mcgrps),
};
121
122static void psample_group_notify(struct psample_group *group,
123				 enum psample_command cmd)
124{
125	struct sk_buff *msg;
126	int err;
127
128	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
129	if (!msg)
130		return;
131
132	err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI);
133	if (!err)
134		genlmsg_multicast_netns(&psample_nl_family, group->net, msg, 0,
135					PSAMPLE_NL_MCGRP_CONFIG, GFP_ATOMIC);
136	else
137		nlmsg_free(msg);
138}
139
/* Allocate a new group for (@net, @group_num), link it into the global list
 * and announce it to CONFIG listeners.  Caller must hold
 * psample_groups_lock.  Returns NULL on allocation failure.
 */
static struct psample_group *psample_group_create(struct net *net,
						  u32 group_num)
{
	struct psample_group *group;

	group = kzalloc(sizeof(*group), GFP_ATOMIC);
	if (!group)
		return NULL;

	group->net = net;
	group->group_num = group_num;
	list_add_tail(&group->list, &psample_groups_list);

	psample_group_notify(group, PSAMPLE_CMD_NEW_GROUP);
	return group;
}
156
/* Announce deletion, unlink @group and free it after an RCU grace period
 * (kfree_rcu).  Caller must hold psample_groups_lock.
 */
static void psample_group_destroy(struct psample_group *group)
{
	psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
	list_del(&group->list);
	kfree_rcu(group, rcu);
}
163
164static struct psample_group *
165psample_group_lookup(struct net *net, u32 group_num)
166{
167	struct psample_group *group;
168
169	list_for_each_entry(group, &psample_groups_list, list)
170		if ((group->group_num == group_num) && (group->net == net))
171			return group;
172	return NULL;
173}
174
175struct psample_group *psample_group_get(struct net *net, u32 group_num)
176{
177	struct psample_group *group;
178
179	spin_lock_bh(&psample_groups_lock);
180
181	group = psample_group_lookup(net, group_num);
182	if (!group) {
183		group = psample_group_create(net, group_num);
184		if (!group)
185			goto out;
186	}
187	group->refcount++;
188
189out:
190	spin_unlock_bh(&psample_groups_lock);
191	return group;
192}
193EXPORT_SYMBOL_GPL(psample_group_get);
194
/* Take an additional reference on an already-held @group. */
void psample_group_take(struct psample_group *group)
{
	spin_lock_bh(&psample_groups_lock);
	group->refcount++;
	spin_unlock_bh(&psample_groups_lock);
}
EXPORT_SYMBOL_GPL(psample_group_take);
202
/* Drop a reference on @group; the last put destroys the group. */
void psample_group_put(struct psample_group *group)
{
	spin_lock_bh(&psample_groups_lock);

	if (--group->refcount == 0)
		psample_group_destroy(group);

	spin_unlock_bh(&psample_groups_lock);
}
EXPORT_SYMBOL_GPL(psample_group_put);
213
214#ifdef CONFIG_INET
/* Emit the individual PSAMPLE_TUNNEL_KEY_ATTR_* attributes describing
 * @tun_info into @skb.  Optional fields (addresses, TOS, ports, flags,
 * options) are emitted only when set; TTL is always emitted.  Returns 0 or
 * -EMSGSIZE.  Any change here must be mirrored in psample_tunnel_meta_len().
 */
static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
			      struct ip_tunnel_info *tun_info)
{
	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
	const void *tun_opts = ip_tunnel_info_opts(tun_info);
	const struct ip_tunnel_key *tun_key = &tun_info->key;
	int tun_opts_len = tun_info->options_len;

	if (tun_key->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, PSAMPLE_TUNNEL_KEY_ATTR_ID, tun_key->tun_id,
			 PSAMPLE_TUNNEL_KEY_ATTR_PAD))
		return -EMSGSIZE;

	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE))
		return -EMSGSIZE;

	/* Address family decides which src/dst attribute pair is used. */
	switch (tun_proto) {
	case AF_INET:
		if (tun_key->u.ipv4.src &&
		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_SRC,
				    tun_key->u.ipv4.src))
			return -EMSGSIZE;
		if (tun_key->u.ipv4.dst &&
		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_DST,
				    tun_key->u.ipv4.dst))
			return -EMSGSIZE;
		break;
	case AF_INET6:
		if (!ipv6_addr_any(&tun_key->u.ipv6.src) &&
		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_SRC,
				     &tun_key->u.ipv6.src))
			return -EMSGSIZE;
		if (!ipv6_addr_any(&tun_key->u.ipv6.dst) &&
		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_DST,
				     &tun_key->u.ipv6.dst))
			return -EMSGSIZE;
		break;
	}
	if (tun_key->tos &&
	    nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TOS, tun_key->tos))
		return -EMSGSIZE;
	/* TTL is unconditional: zero is a meaningful value. */
	if (nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TTL, tun_key->ttl))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_CSUM) &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;
	if (tun_key->tp_src &&
	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src))
		return -EMSGSIZE;
	if (tun_key->tp_dst &&
	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_OAM) &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_OAM))
		return -EMSGSIZE;
	/* Tunnel options are typed by flag: GENEVE takes precedence over
	 * ERSPAN if (unexpectedly) both are set.
	 */
	if (tun_opts_len) {
		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT &&
		    nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_GENEVE_OPTS,
			    tun_opts_len, tun_opts))
			return -EMSGSIZE;
		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT &&
			 nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
				 tun_opts_len, tun_opts))
			return -EMSGSIZE;
	}

	return 0;
}
287
288static int psample_ip_tun_to_nlattr(struct sk_buff *skb,
289			    struct ip_tunnel_info *tun_info)
290{
291	struct nlattr *nla;
292	int err;
293
294	nla = nla_nest_start_noflag(skb, PSAMPLE_ATTR_TUNNEL);
295	if (!nla)
296		return -EMSGSIZE;
297
298	err = __psample_ip_tun_to_nlattr(skb, tun_info);
299	if (err) {
300		nla_nest_cancel(skb, nla);
301		return err;
302	}
303
304	nla_nest_end(skb, nla);
305
306	return 0;
307}
308
/* Worst-case netlink size of the tunnel metadata for @tun_info, including
 * the PSAMPLE_ATTR_TUNNEL nest header.  Each term mirrors one attribute
 * emitted by __psample_ip_tun_to_nlattr(); keep the two in sync.
 */
static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
{
	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
	const struct ip_tunnel_key *tun_key = &tun_info->key;
	int tun_opts_len = tun_info->options_len;
	int sum = nla_total_size(0);	/* PSAMPLE_ATTR_TUNNEL */

	if (tun_key->tun_flags & TUNNEL_KEY)
		sum += nla_total_size_64bit(sizeof(u64));

	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE)
		sum += nla_total_size(0);

	switch (tun_proto) {
	case AF_INET:
		if (tun_key->u.ipv4.src)
			sum += nla_total_size(sizeof(u32));
		if (tun_key->u.ipv4.dst)
			sum += nla_total_size(sizeof(u32));
		break;
	case AF_INET6:
		if (!ipv6_addr_any(&tun_key->u.ipv6.src))
			sum += nla_total_size(sizeof(struct in6_addr));
		if (!ipv6_addr_any(&tun_key->u.ipv6.dst))
			sum += nla_total_size(sizeof(struct in6_addr));
		break;
	}
	if (tun_key->tos)
		sum += nla_total_size(sizeof(u8));
	sum += nla_total_size(sizeof(u8));	/* TTL */
	if (tun_key->tun_flags & TUNNEL_DONT_FRAGMENT)
		sum += nla_total_size(0);
	if (tun_key->tun_flags & TUNNEL_CSUM)
		sum += nla_total_size(0);
	if (tun_key->tp_src)
		sum += nla_total_size(sizeof(u16));
	if (tun_key->tp_dst)
		sum += nla_total_size(sizeof(u16));
	if (tun_key->tun_flags & TUNNEL_OAM)
		sum += nla_total_size(0);
	if (tun_opts_len) {
		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT)
			sum += nla_total_size(tun_opts_len);
		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT)
			sum += nla_total_size(tun_opts_len);
	}

	return sum;
}
358#endif
359
/* Deliver one sampled packet on the SAMPLE multicast group.
 *
 * @group:       sampling group (supplies group_num, seq counter and netns).
 * @skb:         the sampled packet; at most md->trunc_size bytes are copied.
 * @sample_rate: caller's 1-in-N sampling rate, reported verbatim.
 * @md:          per-packet metadata (ifindexes, tc, occupancy, latency).
 *
 * Runs in atomic context (GFP_ATOMIC); on any failure the sample is dropped
 * with a ratelimited error message.
 */
void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
			   u32 sample_rate, const struct psample_metadata *md)
{
	ktime_t tstamp = ktime_get_real();
	int out_ifindex = md->out_ifindex;
	int in_ifindex = md->in_ifindex;
	u32 trunc_size = md->trunc_size;
#ifdef CONFIG_INET
	struct ip_tunnel_info *tun_info;
#endif
	struct sk_buff *nl_skb;
	int data_len;
	int meta_len;
	void *data;
	int ret;

	/* Worst-case size of all metadata attributes; used to size the
	 * notification skb and to clamp the copied payload below.
	 */
	meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
		   (out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
		   (md->out_tc_valid ? nla_total_size(sizeof(u16)) : 0) +
		   (md->out_tc_occ_valid ? nla_total_size_64bit(sizeof(u64)) : 0) +
		   (md->latency_valid ? nla_total_size_64bit(sizeof(u64)) : 0) +
		   nla_total_size(sizeof(u32)) +	/* sample_rate */
		   nla_total_size(sizeof(u32)) +	/* orig_size */
		   nla_total_size(sizeof(u32)) +	/* group_num */
		   nla_total_size(sizeof(u32)) +	/* seq */
		   nla_total_size_64bit(sizeof(u64)) +	/* timestamp */
		   nla_total_size(sizeof(u16));		/* protocol */

#ifdef CONFIG_INET
	tun_info = skb_tunnel_info(skb);
	if (tun_info)
		meta_len += psample_tunnel_meta_len(tun_info);
#endif

	/* Truncate the payload so metadata + data attribute (header and
	 * alignment included) never exceed PSAMPLE_MAX_PACKET_SIZE.
	 */
	data_len = min(skb->len, trunc_size);
	if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE)
		data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
			    - NLA_ALIGNTO;

	nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);
	if (unlikely(!nl_skb))
		return;

	data = genlmsg_put(nl_skb, 0, 0, &psample_nl_family, 0,
			   PSAMPLE_CMD_SAMPLE);
	if (unlikely(!data))
		goto error;

	if (in_ifindex) {
		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_IIFINDEX, in_ifindex);
		if (unlikely(ret < 0))
			goto error;
	}

	if (out_ifindex) {
		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OIFINDEX, out_ifindex);
		if (unlikely(ret < 0))
			goto error;
	}

	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_RATE, sample_rate);
	if (unlikely(ret < 0))
		goto error;

	/* Original (pre-truncation) packet length. */
	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len);
	if (unlikely(ret < 0))
		goto error;

	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
	if (unlikely(ret < 0))
		goto error;

	/* Per-group sequence number, post-incremented for the next sample. */
	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_GROUP_SEQ, group->seq++);
	if (unlikely(ret < 0))
		goto error;

	if (md->out_tc_valid) {
		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OUT_TC, md->out_tc);
		if (unlikely(ret < 0))
			goto error;
	}

	if (md->out_tc_occ_valid) {
		ret = nla_put_u64_64bit(nl_skb, PSAMPLE_ATTR_OUT_TC_OCC,
					md->out_tc_occ, PSAMPLE_ATTR_PAD);
		if (unlikely(ret < 0))
			goto error;
	}

	if (md->latency_valid) {
		ret = nla_put_u64_64bit(nl_skb, PSAMPLE_ATTR_LATENCY,
					md->latency, PSAMPLE_ATTR_PAD);
		if (unlikely(ret < 0))
			goto error;
	}

	ret = nla_put_u64_64bit(nl_skb, PSAMPLE_ATTR_TIMESTAMP,
				ktime_to_ns(tstamp), PSAMPLE_ATTR_PAD);
	if (unlikely(ret < 0))
		goto error;

	ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_PROTO,
			  be16_to_cpu(skb->protocol));
	if (unlikely(ret < 0))
		goto error;

	/* The data attribute is built by hand (skb_put + header fill) so the
	 * payload can be copied directly from a possibly non-linear @skb
	 * with skb_copy_bits().
	 */
	if (data_len) {
		int nla_len = nla_total_size(data_len);
		struct nlattr *nla;

		nla = skb_put(nl_skb, nla_len);
		nla->nla_type = PSAMPLE_ATTR_DATA;
		nla->nla_len = nla_attr_size(data_len);

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			goto error;
	}

#ifdef CONFIG_INET
	if (tun_info) {
		ret = psample_ip_tun_to_nlattr(nl_skb, tun_info);
		if (unlikely(ret < 0))
			goto error;
	}
#endif

	genlmsg_end(nl_skb, data);
	genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
				PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);

	return;
error:
	pr_err_ratelimited("Could not create psample log message\n");
	nlmsg_free(nl_skb);
}
EXPORT_SYMBOL_GPL(psample_sample_packet);
496
/* Register the psample generic netlink family on module load. */
static int __init psample_module_init(void)
{
	return genl_register_family(&psample_nl_family);
}
501
/* Unregister the family on module unload. */
static void __exit psample_module_exit(void)
{
	genl_unregister_family(&psample_nl_family);
}
506
507module_init(psample_module_init);
508module_exit(psample_module_exit);
509
510MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
511MODULE_DESCRIPTION("netlink channel for packet sampling");
512MODULE_LICENSE("GPL v2");
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * net/psample/psample.c - Netlink channel for packet sampling
  4 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
  5 */
  6
  7#include <linux/types.h>
  8#include <linux/kernel.h>
  9#include <linux/skbuff.h>
 10#include <linux/module.h>
 
 11#include <net/net_namespace.h>
 12#include <net/sock.h>
 13#include <net/netlink.h>
 14#include <net/genetlink.h>
 15#include <net/psample.h>
 16#include <linux/spinlock.h>
 
 
 17
 18#define PSAMPLE_MAX_PACKET_SIZE 0xffff
 19
 20static LIST_HEAD(psample_groups_list);
 21static DEFINE_SPINLOCK(psample_groups_lock);
 22
 23/* multicast groups */
 24enum psample_nl_multicast_groups {
 25	PSAMPLE_NL_MCGRP_CONFIG,
 26	PSAMPLE_NL_MCGRP_SAMPLE,
 27};
 28
 29static const struct genl_multicast_group psample_nl_mcgrps[] = {
 30	[PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
 31	[PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
 32};
 33
 34static struct genl_family psample_nl_family __ro_after_init;
 35
/* Fill one group-description message (group number, refcount, sequence
 * counter) wrapped in a genetlink header for @cmd.  Returns 0 or -EMSGSIZE.
 */
static int psample_group_nl_fill(struct sk_buff *msg,
				 struct psample_group *group,
				 enum psample_command cmd, u32 portid, u32 seq,
				 int flags)
{
	void *hdr;
	int ret;

	hdr = genlmsg_put(msg, portid, seq, &psample_nl_family, flags, cmd);
	if (!hdr)
		return -EMSGSIZE;

	ret = nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
	if (ret < 0)
		goto error;

	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount);
	if (ret < 0)
		goto error;

	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq);
	if (ret < 0)
		goto error;

	genlmsg_end(msg, hdr);
	return 0;

error:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
 67
/* Dump all sampling groups visible in the requester's netns.  Resumable:
 * cb->args[0] stores the index to restart from on the next dump call.
 */
static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
					   struct netlink_callback *cb)
{
	struct psample_group *group;
	int start = cb->args[0];
	int idx = 0;
	int err;

	spin_lock_bh(&psample_groups_lock);
	list_for_each_entry(group, &psample_groups_list, list) {
		/* Only groups in the caller's netns are visible. */
		if (!net_eq(group->net, sock_net(msg->sk)))
			continue;
		if (idx < start) {
			idx++;
			continue;
		}
		err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP,
					    NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			break;
		idx++;
	}

	spin_unlock_bh(&psample_groups_lock);
	cb->args[0] = idx;
	return msg->len;
}
 96
/* Generic netlink operations table; GET_GROUP is dump-only and open to
 * unprivileged users.
 */
static const struct genl_ops psample_nl_ops[] = {
	{
		.cmd = PSAMPLE_CMD_GET_GROUP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = psample_nl_cmd_get_group_dumpit,
		/* can be retrieved by unprivileged users */
	}
};
105
/* The "psample" generic netlink family; netnsok makes it usable from any
 * network namespace.  Registered at module init.
 */
static struct genl_family psample_nl_family __ro_after_init = {
	.name		= PSAMPLE_GENL_NAME,
	.version	= PSAMPLE_GENL_VERSION,
	.maxattr	= PSAMPLE_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.mcgrps		= psample_nl_mcgrps,
	.ops		= psample_nl_ops,
	.n_ops		= ARRAY_SIZE(psample_nl_ops),
	.n_mcgrps	= ARRAY_SIZE(psample_nl_mcgrps),
};
117
/* Broadcast a group create/delete event (@cmd) on the CONFIG multicast
 * group.  Best effort: failures silently drop the notification.
 */
static void psample_group_notify(struct psample_group *group,
				 enum psample_command cmd)
{
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!msg)
		return;

	err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI);
	if (!err)
		genlmsg_multicast_netns(&psample_nl_family, group->net, msg, 0,
					PSAMPLE_NL_MCGRP_CONFIG, GFP_ATOMIC);
	else
		nlmsg_free(msg);
}
135
/* Allocate a group for (@net, @group_num), link it into the global list and
 * announce it.  Caller must hold psample_groups_lock.  NULL on OOM.
 */
static struct psample_group *psample_group_create(struct net *net,
						  u32 group_num)
{
	struct psample_group *group;

	group = kzalloc(sizeof(*group), GFP_ATOMIC);
	if (!group)
		return NULL;

	group->net = net;
	group->group_num = group_num;
	list_add_tail(&group->list, &psample_groups_list);

	psample_group_notify(group, PSAMPLE_CMD_NEW_GROUP);
	return group;
}
152
/* Announce deletion, unlink @group and free it after an RCU grace period.
 * Caller must hold psample_groups_lock.
 */
static void psample_group_destroy(struct psample_group *group)
{
	psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
	list_del(&group->list);
	kfree_rcu(group, rcu);
}
159
/* Find the group matching both @group_num and @net, or NULL.  Caller must
 * hold psample_groups_lock.
 */
static struct psample_group *
psample_group_lookup(struct net *net, u32 group_num)
{
	struct psample_group *group;

	list_for_each_entry(group, &psample_groups_list, list)
		if ((group->group_num == group_num) && (group->net == net))
			return group;
	return NULL;
}
170
/* Look up the group for (@net, @group_num), creating it on first use, and
 * take a reference.  Returns NULL only if creation fails.
 */
struct psample_group *psample_group_get(struct net *net, u32 group_num)
{
	struct psample_group *group;

	spin_lock_bh(&psample_groups_lock);

	group = psample_group_lookup(net, group_num);
	if (!group) {
		group = psample_group_create(net, group_num);
		if (!group)
			goto out;
	}
	group->refcount++;

out:
	spin_unlock_bh(&psample_groups_lock);
	return group;
}
EXPORT_SYMBOL_GPL(psample_group_get);
190
/* Take an additional reference on an already-held @group. */
void psample_group_take(struct psample_group *group)
{
	spin_lock_bh(&psample_groups_lock);
	group->refcount++;
	spin_unlock_bh(&psample_groups_lock);
}
EXPORT_SYMBOL_GPL(psample_group_take);
198
/* Drop a reference on @group; the last put destroys the group. */
void psample_group_put(struct psample_group *group)
{
	spin_lock_bh(&psample_groups_lock);

	if (--group->refcount == 0)
		psample_group_destroy(group);

	spin_unlock_bh(&psample_groups_lock);
}
EXPORT_SYMBOL_GPL(psample_group_put);
209
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
210void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
211			   u32 trunc_size, int in_ifindex, int out_ifindex,
212			   u32 sample_rate)
213{
 
 
 
 
 
 
 
214	struct sk_buff *nl_skb;
215	int data_len;
216	int meta_len;
217	void *data;
218	int ret;
219
220	meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
221		   (out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
 
 
 
222		   nla_total_size(sizeof(u32)) +	/* sample_rate */
223		   nla_total_size(sizeof(u32)) +	/* orig_size */
224		   nla_total_size(sizeof(u32)) +	/* group_num */
225		   nla_total_size(sizeof(u32));		/* seq */
 
 
 
 
 
 
 
 
226
227	data_len = min(skb->len, trunc_size);
228	if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE)
229		data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
230			    - NLA_ALIGNTO;
231
232	nl_skb = genlmsg_new(meta_len + data_len, GFP_ATOMIC);
233	if (unlikely(!nl_skb))
234		return;
235
236	data = genlmsg_put(nl_skb, 0, 0, &psample_nl_family, 0,
237			   PSAMPLE_CMD_SAMPLE);
238	if (unlikely(!data))
239		goto error;
240
241	if (in_ifindex) {
242		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_IIFINDEX, in_ifindex);
243		if (unlikely(ret < 0))
244			goto error;
245	}
246
247	if (out_ifindex) {
248		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OIFINDEX, out_ifindex);
249		if (unlikely(ret < 0))
250			goto error;
251	}
252
253	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_RATE, sample_rate);
254	if (unlikely(ret < 0))
255		goto error;
256
257	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len);
258	if (unlikely(ret < 0))
259		goto error;
260
261	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
262	if (unlikely(ret < 0))
263		goto error;
264
265	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_GROUP_SEQ, group->seq++);
266	if (unlikely(ret < 0))
267		goto error;
268
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
269	if (data_len) {
270		int nla_len = nla_total_size(data_len);
271		struct nlattr *nla;
272
273		nla = skb_put(nl_skb, nla_len);
274		nla->nla_type = PSAMPLE_ATTR_DATA;
275		nla->nla_len = nla_attr_size(data_len);
276
277		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
278			goto error;
279	}
 
 
 
 
 
 
 
 
280
281	genlmsg_end(nl_skb, data);
282	genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
283				PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);
284
285	return;
286error:
287	pr_err_ratelimited("Could not create psample log message\n");
288	nlmsg_free(nl_skb);
289}
290EXPORT_SYMBOL_GPL(psample_sample_packet);
291
/* Register the psample generic netlink family on module load. */
static int __init psample_module_init(void)
{
	return genl_register_family(&psample_nl_family);
}
296
/* Unregister the family on module unload. */
static void __exit psample_module_exit(void)
{
	genl_unregister_family(&psample_nl_family);
}
301
302module_init(psample_module_init);
303module_exit(psample_module_exit);
304
305MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
306MODULE_DESCRIPTION("netlink channel for packet sampling");
307MODULE_LICENSE("GPL v2");