v5.9 (net/psample/psample.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * net/psample/psample.c - Netlink channel for packet sampling
  4 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
  5 */
  6
  7#include <linux/types.h>
  8#include <linux/kernel.h>
  9#include <linux/skbuff.h>
 10#include <linux/module.h>
 11#include <net/net_namespace.h>
 12#include <net/sock.h>
 13#include <net/netlink.h>
 14#include <net/genetlink.h>
 15#include <net/psample.h>
 16#include <linux/spinlock.h>
 17#include <net/ip_tunnels.h>
 18#include <net/dst_metadata.h>
 19
 20#define PSAMPLE_MAX_PACKET_SIZE 0xffff
 21
 22static LIST_HEAD(psample_groups_list);
 23static DEFINE_SPINLOCK(psample_groups_lock);
 24
 25/* multicast groups */
 26enum psample_nl_multicast_groups {
 27	PSAMPLE_NL_MCGRP_CONFIG,
 28	PSAMPLE_NL_MCGRP_SAMPLE,
 29};
 30
 31static const struct genl_multicast_group psample_nl_mcgrps[] = {
 32	[PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
 33	[PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
 34};
 35
 36static struct genl_family psample_nl_family __ro_after_init;
 37
 38static int psample_group_nl_fill(struct sk_buff *msg,
 39				 struct psample_group *group,
 40				 enum psample_command cmd, u32 portid, u32 seq,
 41				 int flags)
 42{
 43	void *hdr;
 44	int ret;
 45
 46	hdr = genlmsg_put(msg, portid, seq, &psample_nl_family, flags, cmd);
 47	if (!hdr)
 48		return -EMSGSIZE;
 49
 50	ret = nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
 51	if (ret < 0)
 52		goto error;
 53
 54	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount);
 55	if (ret < 0)
 56		goto error;
 57
 58	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq);
 59	if (ret < 0)
 60		goto error;
 61
 62	genlmsg_end(msg, hdr);
 63	return 0;
 64
 65error:
 66	genlmsg_cancel(msg, hdr);
 67	return -EMSGSIZE;
 68}
 69
 70static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
 71					   struct netlink_callback *cb)
 72{
 73	struct psample_group *group;
 74	int start = cb->args[0];
 75	int idx = 0;
 76	int err;
 77
 78	spin_lock_bh(&psample_groups_lock);
 79	list_for_each_entry(group, &psample_groups_list, list) {
 80		if (!net_eq(group->net, sock_net(msg->sk)))
 81			continue;
 82		if (idx < start) {
 83			idx++;
 84			continue;
 85		}
 86		err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP,
 87					    NETLINK_CB(cb->skb).portid,
 88					    cb->nlh->nlmsg_seq, NLM_F_MULTI);
 89		if (err)
 90			break;
 91		idx++;
 92	}
 93
 94	spin_unlock_bh(&psample_groups_lock);
 95	cb->args[0] = idx;
 96	return msg->len;
 97}
 98
 99static const struct genl_ops psample_nl_ops[] = {
100	{
101		.cmd = PSAMPLE_CMD_GET_GROUP,
102		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
103		.dumpit = psample_nl_cmd_get_group_dumpit,
104		/* can be retrieved by unprivileged users */
105	}
106};
107
108static struct genl_family psample_nl_family __ro_after_init = {
109	.name		= PSAMPLE_GENL_NAME,
110	.version	= PSAMPLE_GENL_VERSION,
111	.maxattr	= PSAMPLE_ATTR_MAX,
112	.netnsok	= true,
113	.module		= THIS_MODULE,
114	.mcgrps		= psample_nl_mcgrps,
115	.ops		= psample_nl_ops,
116	.n_ops		= ARRAY_SIZE(psample_nl_ops),
117	.n_mcgrps	= ARRAY_SIZE(psample_nl_mcgrps),
118};
119
120static void psample_group_notify(struct psample_group *group,
121				 enum psample_command cmd)
122{
123	struct sk_buff *msg;
124	int err;
125
126	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
127	if (!msg)
128		return;
129
130	err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI);
131	if (!err)
132		genlmsg_multicast_netns(&psample_nl_family, group->net, msg, 0,
133					PSAMPLE_NL_MCGRP_CONFIG, GFP_ATOMIC);
134	else
135		nlmsg_free(msg);
136}
137
138static struct psample_group *psample_group_create(struct net *net,
139						  u32 group_num)
140{
141	struct psample_group *group;
142
143	group = kzalloc(sizeof(*group), GFP_ATOMIC);
144	if (!group)
145		return NULL;
146
147	group->net = net;
148	group->group_num = group_num;
149	list_add_tail(&group->list, &psample_groups_list);
150
151	psample_group_notify(group, PSAMPLE_CMD_NEW_GROUP);
152	return group;
153}
154
155static void psample_group_destroy(struct psample_group *group)
156{
157	psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
158	list_del(&group->list);
159	kfree_rcu(group, rcu);
160}
161
162static struct psample_group *
163psample_group_lookup(struct net *net, u32 group_num)
164{
165	struct psample_group *group;
166
167	list_for_each_entry(group, &psample_groups_list, list)
168		if ((group->group_num == group_num) && (group->net == net))
169			return group;
170	return NULL;
171}
172
173struct psample_group *psample_group_get(struct net *net, u32 group_num)
174{
175	struct psample_group *group;
176
177	spin_lock_bh(&psample_groups_lock);
178
179	group = psample_group_lookup(net, group_num);
180	if (!group) {
181		group = psample_group_create(net, group_num);
182		if (!group)
183			goto out;
184	}
185	group->refcount++;
186
187out:
188	spin_unlock_bh(&psample_groups_lock);
189	return group;
190}
191EXPORT_SYMBOL_GPL(psample_group_get);
192
193void psample_group_take(struct psample_group *group)
194{
195	spin_lock_bh(&psample_groups_lock);
196	group->refcount++;
197	spin_unlock_bh(&psample_groups_lock);
198}
199EXPORT_SYMBOL_GPL(psample_group_take);
200
201void psample_group_put(struct psample_group *group)
202{
203	spin_lock_bh(&psample_groups_lock);
204
205	if (--group->refcount == 0)
206		psample_group_destroy(group);
207
208	spin_unlock_bh(&psample_groups_lock);
209}
210EXPORT_SYMBOL_GPL(psample_group_put);
211
212#ifdef CONFIG_INET
213static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
214			      struct ip_tunnel_info *tun_info)
215{
216	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
217	const void *tun_opts = ip_tunnel_info_opts(tun_info);
218	const struct ip_tunnel_key *tun_key = &tun_info->key;
219	int tun_opts_len = tun_info->options_len;
220
221	if (tun_key->tun_flags & TUNNEL_KEY &&
222	    nla_put_be64(skb, PSAMPLE_TUNNEL_KEY_ATTR_ID, tun_key->tun_id,
223			 PSAMPLE_TUNNEL_KEY_ATTR_PAD))
224		return -EMSGSIZE;
225
226	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE &&
227	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE))
228		return -EMSGSIZE;
229
230	switch (tun_proto) {
231	case AF_INET:
232		if (tun_key->u.ipv4.src &&
233		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_SRC,
234				    tun_key->u.ipv4.src))
235			return -EMSGSIZE;
236		if (tun_key->u.ipv4.dst &&
237		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_DST,
238				    tun_key->u.ipv4.dst))
239			return -EMSGSIZE;
240		break;
241	case AF_INET6:
242		if (!ipv6_addr_any(&tun_key->u.ipv6.src) &&
243		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_SRC,
244				     &tun_key->u.ipv6.src))
245			return -EMSGSIZE;
246		if (!ipv6_addr_any(&tun_key->u.ipv6.dst) &&
247		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_DST,
248				     &tun_key->u.ipv6.dst))
249			return -EMSGSIZE;
250		break;
251	}
252	if (tun_key->tos &&
253	    nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TOS, tun_key->tos))
254		return -EMSGSIZE;
255	if (nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TTL, tun_key->ttl))
256		return -EMSGSIZE;
257	if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
258	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
259		return -EMSGSIZE;
260	if ((tun_key->tun_flags & TUNNEL_CSUM) &&
261	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_CSUM))
262		return -EMSGSIZE;
263	if (tun_key->tp_src &&
264	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src))
265		return -EMSGSIZE;
266	if (tun_key->tp_dst &&
267	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst))
268		return -EMSGSIZE;
269	if ((tun_key->tun_flags & TUNNEL_OAM) &&
270	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_OAM))
271		return -EMSGSIZE;
272	if (tun_opts_len) {
273		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT &&
274		    nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_GENEVE_OPTS,
275			    tun_opts_len, tun_opts))
276			return -EMSGSIZE;
277		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT &&
278			 nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
279				 tun_opts_len, tun_opts))
280			return -EMSGSIZE;
281	}
282
283	return 0;
284}
285
286static int psample_ip_tun_to_nlattr(struct sk_buff *skb,
287			    struct ip_tunnel_info *tun_info)
288{
289	struct nlattr *nla;
290	int err;
291
292	nla = nla_nest_start_noflag(skb, PSAMPLE_ATTR_TUNNEL);
293	if (!nla)
294		return -EMSGSIZE;
295
296	err = __psample_ip_tun_to_nlattr(skb, tun_info);
297	if (err) {
298		nla_nest_cancel(skb, nla);
299		return err;
300	}
301
302	nla_nest_end(skb, nla);
303
304	return 0;
305}
306
307static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
308{
309	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
310	const struct ip_tunnel_key *tun_key = &tun_info->key;
311	int tun_opts_len = tun_info->options_len;
312	int sum = 0;
313
314	if (tun_key->tun_flags & TUNNEL_KEY)
315		sum += nla_total_size(sizeof(u64));
316
317	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE)
318		sum += nla_total_size(0);
319
320	switch (tun_proto) {
321	case AF_INET:
322		if (tun_key->u.ipv4.src)
323			sum += nla_total_size(sizeof(u32));
324		if (tun_key->u.ipv4.dst)
325			sum += nla_total_size(sizeof(u32));
326		break;
327	case AF_INET6:
328		if (!ipv6_addr_any(&tun_key->u.ipv6.src))
329			sum += nla_total_size(sizeof(struct in6_addr));
330		if (!ipv6_addr_any(&tun_key->u.ipv6.dst))
331			sum += nla_total_size(sizeof(struct in6_addr));
332		break;
333	}
334	if (tun_key->tos)
335		sum += nla_total_size(sizeof(u8));
336	sum += nla_total_size(sizeof(u8));	/* TTL */
337	if (tun_key->tun_flags & TUNNEL_DONT_FRAGMENT)
338		sum += nla_total_size(0);
339	if (tun_key->tun_flags & TUNNEL_CSUM)
340		sum += nla_total_size(0);
341	if (tun_key->tp_src)
342		sum += nla_total_size(sizeof(u16));
343	if (tun_key->tp_dst)
344		sum += nla_total_size(sizeof(u16));
345	if (tun_key->tun_flags & TUNNEL_OAM)
346		sum += nla_total_size(0);
347	if (tun_opts_len) {
348		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT)
349			sum += nla_total_size(tun_opts_len);
350		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT)
351			sum += nla_total_size(tun_opts_len);
352	}
353
354	return sum;
355}
356#endif
357
358void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
359			   u32 trunc_size, int in_ifindex, int out_ifindex,
360			   u32 sample_rate)
361{
362#ifdef CONFIG_INET
363	struct ip_tunnel_info *tun_info;
364#endif
365	struct sk_buff *nl_skb;
366	int data_len;
367	int meta_len;
368	void *data;
369	int ret;
370
371	meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
372		   (out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
373		   nla_total_size(sizeof(u32)) +	/* sample_rate */
374		   nla_total_size(sizeof(u32)) +	/* orig_size */
375		   nla_total_size(sizeof(u32)) +	/* group_num */
376		   nla_total_size(sizeof(u32));		/* seq */
377
378#ifdef CONFIG_INET
379	tun_info = skb_tunnel_info(skb);
380	if (tun_info)
381		meta_len += psample_tunnel_meta_len(tun_info);
382#endif
383
384	data_len = min(skb->len, trunc_size);
385	if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE)
386		data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
387			    - NLA_ALIGNTO;
388
389	nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);
390	if (unlikely(!nl_skb))
391		return;
392
393	data = genlmsg_put(nl_skb, 0, 0, &psample_nl_family, 0,
394			   PSAMPLE_CMD_SAMPLE);
395	if (unlikely(!data))
396		goto error;
397
398	if (in_ifindex) {
399		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_IIFINDEX, in_ifindex);
400		if (unlikely(ret < 0))
401			goto error;
402	}
403
404	if (out_ifindex) {
405		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OIFINDEX, out_ifindex);
406		if (unlikely(ret < 0))
407			goto error;
408	}
409
410	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_RATE, sample_rate);
411	if (unlikely(ret < 0))
412		goto error;
413
414	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len);
415	if (unlikely(ret < 0))
416		goto error;
417
418	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
419	if (unlikely(ret < 0))
420		goto error;
421
422	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_GROUP_SEQ, group->seq++);
423	if (unlikely(ret < 0))
424		goto error;
425
426	if (data_len) {
427		int nla_len = nla_total_size(data_len);
428		struct nlattr *nla;
429
430		nla = skb_put(nl_skb, nla_len);
431		nla->nla_type = PSAMPLE_ATTR_DATA;
432		nla->nla_len = nla_attr_size(data_len);
433
434		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
435			goto error;
436	}
437
438#ifdef CONFIG_INET
439	if (tun_info) {
440		ret = psample_ip_tun_to_nlattr(nl_skb, tun_info);
441		if (unlikely(ret < 0))
442			goto error;
443	}
444#endif
445
446	genlmsg_end(nl_skb, data);
447	genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
448				PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);
449
450	return;
451error:
452	pr_err_ratelimited("Could not create psample log message\n");
453	nlmsg_free(nl_skb);
454}
455EXPORT_SYMBOL_GPL(psample_sample_packet);
456
457static int __init psample_module_init(void)
458{
459	return genl_register_family(&psample_nl_family);
460}
461
462static void __exit psample_module_exit(void)
463{
464	genl_unregister_family(&psample_nl_family);
465}
466
467module_init(psample_module_init);
468module_exit(psample_module_exit);
469
470MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
471MODULE_DESCRIPTION("netlink channel for packet sampling");
472MODULE_LICENSE("GPL v2");
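
The exported helpers above (psample_group_get(), psample_group_take(), psample_group_put() and psample_sample_packet()) are the interface used by in-kernel samplers. The following is a minimal sketch of such a consumer, not part of psample itself: the module, the my_sampler_rx() hook and the hard-coded group number, truncation length and sampling rate are all illustrative assumptions.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical psample consumer: takes a reference on group 1 and
 * reports truncated copies of the packets it is handed.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/psample.h>

static struct psample_group *my_group;

/* Called from the (not shown) packet path of the hypothetical driver.
 * At most 128 bytes of the packet are copied into the notification;
 * the ingress ifindex and a nominal 1-in-100 rate are reported as
 * metadata alongside the data.
 */
static void my_sampler_rx(struct sk_buff *skb)
{
	psample_sample_packet(my_group, skb, 128,
			      skb->dev ? skb->dev->ifindex : 0, 0, 100);
}

static int __init my_sampler_init(void)
{
	/* Look up or create group 1 in the initial namespace; a
	 * PSAMPLE_CMD_NEW_GROUP notification is multicast on the config
	 * group if it did not exist yet.
	 */
	my_group = psample_group_get(&init_net, 1);
	return my_group ? 0 : -ENOMEM;
}

static void __exit my_sampler_exit(void)
{
	/* Drop the reference; the group is destroyed and a
	 * PSAMPLE_CMD_DEL_GROUP notification sent when it reaches zero.
	 */
	psample_group_put(my_group);
}

module_init(my_sampler_init);
module_exit(my_sampler_exit);
MODULE_LICENSE("GPL");

In-tree users of this interface include the tc act_sample action and drivers that offload it.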
v5.4 (net/psample/psample.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * net/psample/psample.c - Netlink channel for packet sampling
  4 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
  5 */
  6
  7#include <linux/types.h>
  8#include <linux/kernel.h>
  9#include <linux/skbuff.h>
 10#include <linux/module.h>
 11#include <net/net_namespace.h>
 12#include <net/sock.h>
 13#include <net/netlink.h>
 14#include <net/genetlink.h>
 15#include <net/psample.h>
 16#include <linux/spinlock.h>
 17
 18#define PSAMPLE_MAX_PACKET_SIZE 0xffff
 19
 20static LIST_HEAD(psample_groups_list);
 21static DEFINE_SPINLOCK(psample_groups_lock);
 22
 23/* multicast groups */
 24enum psample_nl_multicast_groups {
 25	PSAMPLE_NL_MCGRP_CONFIG,
 26	PSAMPLE_NL_MCGRP_SAMPLE,
 27};
 28
 29static const struct genl_multicast_group psample_nl_mcgrps[] = {
 30	[PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
 31	[PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
 32};
 33
 34static struct genl_family psample_nl_family __ro_after_init;
 35
 36static int psample_group_nl_fill(struct sk_buff *msg,
 37				 struct psample_group *group,
 38				 enum psample_command cmd, u32 portid, u32 seq,
 39				 int flags)
 40{
 41	void *hdr;
 42	int ret;
 43
 44	hdr = genlmsg_put(msg, portid, seq, &psample_nl_family, flags, cmd);
 45	if (!hdr)
 46		return -EMSGSIZE;
 47
 48	ret = nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
 49	if (ret < 0)
 50		goto error;
 51
 52	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount);
 53	if (ret < 0)
 54		goto error;
 55
 56	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq);
 57	if (ret < 0)
 58		goto error;
 59
 60	genlmsg_end(msg, hdr);
 61	return 0;
 62
 63error:
 64	genlmsg_cancel(msg, hdr);
 65	return -EMSGSIZE;
 66}
 67
 68static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
 69					   struct netlink_callback *cb)
 70{
 71	struct psample_group *group;
 72	int start = cb->args[0];
 73	int idx = 0;
 74	int err;
 75
 76	spin_lock_bh(&psample_groups_lock);
 77	list_for_each_entry(group, &psample_groups_list, list) {
 78		if (!net_eq(group->net, sock_net(msg->sk)))
 79			continue;
 80		if (idx < start) {
 81			idx++;
 82			continue;
 83		}
 84		err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP,
 85					    NETLINK_CB(cb->skb).portid,
 86					    cb->nlh->nlmsg_seq, NLM_F_MULTI);
 87		if (err)
 88			break;
 89		idx++;
 90	}
 91
 92	spin_unlock_bh(&psample_groups_lock);
 93	cb->args[0] = idx;
 94	return msg->len;
 95}
 96
 97static const struct genl_ops psample_nl_ops[] = {
 98	{
 99		.cmd = PSAMPLE_CMD_GET_GROUP,
100		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
101		.dumpit = psample_nl_cmd_get_group_dumpit,
102		/* can be retrieved by unprivileged users */
103	}
104};
105
106static struct genl_family psample_nl_family __ro_after_init = {
107	.name		= PSAMPLE_GENL_NAME,
108	.version	= PSAMPLE_GENL_VERSION,
109	.maxattr	= PSAMPLE_ATTR_MAX,
110	.netnsok	= true,
111	.module		= THIS_MODULE,
112	.mcgrps		= psample_nl_mcgrps,
113	.ops		= psample_nl_ops,
114	.n_ops		= ARRAY_SIZE(psample_nl_ops),
115	.n_mcgrps	= ARRAY_SIZE(psample_nl_mcgrps),
116};
117
118static void psample_group_notify(struct psample_group *group,
119				 enum psample_command cmd)
120{
121	struct sk_buff *msg;
122	int err;
123
124	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
125	if (!msg)
126		return;
127
128	err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI);
129	if (!err)
130		genlmsg_multicast_netns(&psample_nl_family, group->net, msg, 0,
131					PSAMPLE_NL_MCGRP_CONFIG, GFP_ATOMIC);
132	else
133		nlmsg_free(msg);
134}
135
136static struct psample_group *psample_group_create(struct net *net,
137						  u32 group_num)
138{
139	struct psample_group *group;
140
141	group = kzalloc(sizeof(*group), GFP_ATOMIC);
142	if (!group)
143		return NULL;
144
145	group->net = net;
146	group->group_num = group_num;
147	list_add_tail(&group->list, &psample_groups_list);
148
149	psample_group_notify(group, PSAMPLE_CMD_NEW_GROUP);
150	return group;
151}
152
153static void psample_group_destroy(struct psample_group *group)
154{
155	psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
156	list_del(&group->list);
157	kfree_rcu(group, rcu);
158}
159
160static struct psample_group *
161psample_group_lookup(struct net *net, u32 group_num)
162{
163	struct psample_group *group;
164
165	list_for_each_entry(group, &psample_groups_list, list)
166		if ((group->group_num == group_num) && (group->net == net))
167			return group;
168	return NULL;
169}
170
171struct psample_group *psample_group_get(struct net *net, u32 group_num)
172{
173	struct psample_group *group;
174
175	spin_lock_bh(&psample_groups_lock);
176
177	group = psample_group_lookup(net, group_num);
178	if (!group) {
179		group = psample_group_create(net, group_num);
180		if (!group)
181			goto out;
182	}
183	group->refcount++;
184
185out:
186	spin_unlock_bh(&psample_groups_lock);
187	return group;
188}
189EXPORT_SYMBOL_GPL(psample_group_get);
190
191void psample_group_take(struct psample_group *group)
192{
193	spin_lock_bh(&psample_groups_lock);
194	group->refcount++;
195	spin_unlock_bh(&psample_groups_lock);
196}
197EXPORT_SYMBOL_GPL(psample_group_take);
198
199void psample_group_put(struct psample_group *group)
200{
201	spin_lock_bh(&psample_groups_lock);
202
203	if (--group->refcount == 0)
204		psample_group_destroy(group);
205
206	spin_unlock_bh(&psample_groups_lock);
207}
208EXPORT_SYMBOL_GPL(psample_group_put);
209
210void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
211			   u32 trunc_size, int in_ifindex, int out_ifindex,
212			   u32 sample_rate)
213{
214	struct sk_buff *nl_skb;
215	int data_len;
216	int meta_len;
217	void *data;
218	int ret;
219
220	meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
221		   (out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
222		   nla_total_size(sizeof(u32)) +	/* sample_rate */
223		   nla_total_size(sizeof(u32)) +	/* orig_size */
224		   nla_total_size(sizeof(u32)) +	/* group_num */
225		   nla_total_size(sizeof(u32));		/* seq */
226
227	data_len = min(skb->len, trunc_size);
228	if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE)
229		data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
230			    - NLA_ALIGNTO;
231
232	nl_skb = genlmsg_new(meta_len + data_len, GFP_ATOMIC);
233	if (unlikely(!nl_skb))
234		return;
235
236	data = genlmsg_put(nl_skb, 0, 0, &psample_nl_family, 0,
237			   PSAMPLE_CMD_SAMPLE);
238	if (unlikely(!data))
239		goto error;
240
241	if (in_ifindex) {
242		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_IIFINDEX, in_ifindex);
243		if (unlikely(ret < 0))
244			goto error;
245	}
246
247	if (out_ifindex) {
248		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OIFINDEX, out_ifindex);
249		if (unlikely(ret < 0))
250			goto error;
251	}
252
253	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_RATE, sample_rate);
254	if (unlikely(ret < 0))
255		goto error;
256
257	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len);
258	if (unlikely(ret < 0))
259		goto error;
260
261	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
262	if (unlikely(ret < 0))
263		goto error;
264
265	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_GROUP_SEQ, group->seq++);
266	if (unlikely(ret < 0))
267		goto error;
268
269	if (data_len) {
270		int nla_len = nla_total_size(data_len);
271		struct nlattr *nla;
272
273		nla = skb_put(nl_skb, nla_len);
274		nla->nla_type = PSAMPLE_ATTR_DATA;
275		nla->nla_len = nla_attr_size(data_len);
276
277		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
278			goto error;
279	}
280
281	genlmsg_end(nl_skb, data);
282	genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
283				PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);
284
285	return;
286error:
287	pr_err_ratelimited("Could not create psample log message\n");
288	nlmsg_free(nl_skb);
289}
290EXPORT_SYMBOL_GPL(psample_sample_packet);
291
292static int __init psample_module_init(void)
293{
294	return genl_register_family(&psample_nl_family);
295}
296
297static void __exit psample_module_exit(void)
298{
299	genl_unregister_family(&psample_nl_family);
300}
301
302module_init(psample_module_init);
303module_exit(psample_module_exit);
304
305MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
306MODULE_DESCRIPTION("netlink channel for packet sampling");
307MODULE_LICENSE("GPL v2");
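
On the receiving end, sampled packets and their metadata are delivered on the "packets" multicast group of the "psample" generic netlink family, exactly as filled in by psample_sample_packet() above. The sketch below is a minimal userspace listener, assuming libnl-3 with libnl-genl and the uapi <linux/psample.h> header; the file name, build line and the absence of error handling are illustrative simplifications.

/* psample_listen.c: print one line per sampled packet (sketch).
 * Build (assumed): cc psample_listen.c $(pkg-config --cflags --libs libnl-genl-3.0)
 */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/psample.h>

static int sample_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[PSAMPLE_ATTR_MAX + 1];

	(void)arg;
	/* Parse the attributes written by psample_sample_packet(). */
	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, PSAMPLE_ATTR_MAX, NULL))
		return NL_SKIP;

	if (attrs[PSAMPLE_ATTR_SAMPLE_GROUP] && attrs[PSAMPLE_ATTR_ORIGSIZE])
		printf("group %u: orig size %u, %d bytes sampled\n",
		       nla_get_u32(attrs[PSAMPLE_ATTR_SAMPLE_GROUP]),
		       nla_get_u32(attrs[PSAMPLE_ATTR_ORIGSIZE]),
		       attrs[PSAMPLE_ATTR_DATA] ?
		       nla_len(attrs[PSAMPLE_ATTR_DATA]) : 0);
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	if (!sk)
		return 1;
	/* Notifications carry sequence number 0, so disable the check. */
	nl_socket_disable_seq_check(sk);
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, sample_cb, NULL);
	genl_connect(sk);

	/* PSAMPLE_GENL_NAME ("psample") and PSAMPLE_NL_MCGRP_SAMPLE_NAME
	 * ("packets") come from the uapi header.
	 */
	grp = genl_ctrl_resolve_grp(sk, PSAMPLE_GENL_NAME,
				    PSAMPLE_NL_MCGRP_SAMPLE_NAME);
	if (grp < 0)
		return 1;
	nl_socket_add_membership(sk, grp);

	for (;;)
		nl_recvmsgs_default(sk);
}

Group configuration changes (the PSAMPLE_CMD_NEW_GROUP and PSAMPLE_CMD_DEL_GROUP notifications sent by psample_group_notify() above) are delivered the same way on the "config" multicast group.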