net/psample/psample.c (v5.9)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * net/psample/psample.c - Netlink channel for packet sampling
  4 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
  5 */
  6
  7#include <linux/types.h>
  8#include <linux/kernel.h>
  9#include <linux/skbuff.h>
 10#include <linux/module.h>
 11#include <net/net_namespace.h>
 12#include <net/sock.h>
 13#include <net/netlink.h>
 14#include <net/genetlink.h>
 15#include <net/psample.h>
 16#include <linux/spinlock.h>
 17#include <net/ip_tunnels.h>
 18#include <net/dst_metadata.h>
 19
 20#define PSAMPLE_MAX_PACKET_SIZE 0xffff
 21
 22static LIST_HEAD(psample_groups_list);
 23static DEFINE_SPINLOCK(psample_groups_lock);
 24
 25/* multicast groups */
 26enum psample_nl_multicast_groups {
 27	PSAMPLE_NL_MCGRP_CONFIG,
 28	PSAMPLE_NL_MCGRP_SAMPLE,
 29};
 30
 31static const struct genl_multicast_group psample_nl_mcgrps[] = {
 32	[PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
 33	[PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
 34};
 35
 36static struct genl_family psample_nl_family __ro_after_init;
 37
 38static int psample_group_nl_fill(struct sk_buff *msg,
 39				 struct psample_group *group,
 40				 enum psample_command cmd, u32 portid, u32 seq,
 41				 int flags)
 42{
 43	void *hdr;
 44	int ret;
 45
 46	hdr = genlmsg_put(msg, portid, seq, &psample_nl_family, flags, cmd);
 47	if (!hdr)
 48		return -EMSGSIZE;
 49
 50	ret = nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
 51	if (ret < 0)
 52		goto error;
 53
 54	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount);
 55	if (ret < 0)
 56		goto error;
 57
 58	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq);
 59	if (ret < 0)
 60		goto error;
 61
 62	genlmsg_end(msg, hdr);
 63	return 0;
 64
 65error:
 66	genlmsg_cancel(msg, hdr);
 67	return -EMSGSIZE;
 68}
 69
 70static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
 71					   struct netlink_callback *cb)
 72{
 73	struct psample_group *group;
 74	int start = cb->args[0];
 75	int idx = 0;
 76	int err;
 77
 78	spin_lock_bh(&psample_groups_lock);
 79	list_for_each_entry(group, &psample_groups_list, list) {
 80		if (!net_eq(group->net, sock_net(msg->sk)))
 81			continue;
 82		if (idx < start) {
 83			idx++;
 84			continue;
 85		}
 86		err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP,
 87					    NETLINK_CB(cb->skb).portid,
 88					    cb->nlh->nlmsg_seq, NLM_F_MULTI);
 89		if (err)
 90			break;
 91		idx++;
 92	}
 93
 94	spin_unlock_bh(&psample_groups_lock);
 95	cb->args[0] = idx;
 96	return msg->len;
 97}
 98
 99static const struct genl_ops psample_nl_ops[] = {
100	{
101		.cmd = PSAMPLE_CMD_GET_GROUP,
102		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
103		.dumpit = psample_nl_cmd_get_group_dumpit,
104		/* can be retrieved by unprivileged users */
105	}
106};
107
108static struct genl_family psample_nl_family __ro_after_init = {
109	.name		= PSAMPLE_GENL_NAME,
110	.version	= PSAMPLE_GENL_VERSION,
111	.maxattr	= PSAMPLE_ATTR_MAX,
112	.netnsok	= true,
113	.module		= THIS_MODULE,
114	.mcgrps		= psample_nl_mcgrps,
115	.ops		= psample_nl_ops,
116	.n_ops		= ARRAY_SIZE(psample_nl_ops),
117	.n_mcgrps	= ARRAY_SIZE(psample_nl_mcgrps),
118};
119
120static void psample_group_notify(struct psample_group *group,
121				 enum psample_command cmd)
122{
123	struct sk_buff *msg;
124	int err;
125
126	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
127	if (!msg)
128		return;
129
130	err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI);
131	if (!err)
132		genlmsg_multicast_netns(&psample_nl_family, group->net, msg, 0,
133					PSAMPLE_NL_MCGRP_CONFIG, GFP_ATOMIC);
134	else
135		nlmsg_free(msg);
136}
137
138static struct psample_group *psample_group_create(struct net *net,
139						  u32 group_num)
140{
141	struct psample_group *group;
142
143	group = kzalloc(sizeof(*group), GFP_ATOMIC);
144	if (!group)
145		return NULL;
146
147	group->net = net;
148	group->group_num = group_num;
149	list_add_tail(&group->list, &psample_groups_list);
150
151	psample_group_notify(group, PSAMPLE_CMD_NEW_GROUP);
152	return group;
153}
154
155static void psample_group_destroy(struct psample_group *group)
156{
157	psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
158	list_del(&group->list);
159	kfree_rcu(group, rcu);
160}
161
162static struct psample_group *
163psample_group_lookup(struct net *net, u32 group_num)
164{
165	struct psample_group *group;
166
167	list_for_each_entry(group, &psample_groups_list, list)
168		if ((group->group_num == group_num) && (group->net == net))
169			return group;
170	return NULL;
171}
172
173struct psample_group *psample_group_get(struct net *net, u32 group_num)
174{
175	struct psample_group *group;
176
177	spin_lock_bh(&psample_groups_lock);
178
179	group = psample_group_lookup(net, group_num);
180	if (!group) {
181		group = psample_group_create(net, group_num);
182		if (!group)
183			goto out;
184	}
185	group->refcount++;
186
187out:
188	spin_unlock_bh(&psample_groups_lock);
189	return group;
190}
191EXPORT_SYMBOL_GPL(psample_group_get);
192
193void psample_group_take(struct psample_group *group)
194{
195	spin_lock_bh(&psample_groups_lock);
196	group->refcount++;
197	spin_unlock_bh(&psample_groups_lock);
198}
199EXPORT_SYMBOL_GPL(psample_group_take);
200
201void psample_group_put(struct psample_group *group)
202{
203	spin_lock_bh(&psample_groups_lock);
204
205	if (--group->refcount == 0)
206		psample_group_destroy(group);
207
208	spin_unlock_bh(&psample_groups_lock);
209}
210EXPORT_SYMBOL_GPL(psample_group_put);
211
212#ifdef CONFIG_INET
213static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
214			      struct ip_tunnel_info *tun_info)
215{
216	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
217	const void *tun_opts = ip_tunnel_info_opts(tun_info);
218	const struct ip_tunnel_key *tun_key = &tun_info->key;
219	int tun_opts_len = tun_info->options_len;
220
221	if (tun_key->tun_flags & TUNNEL_KEY &&
222	    nla_put_be64(skb, PSAMPLE_TUNNEL_KEY_ATTR_ID, tun_key->tun_id,
223			 PSAMPLE_TUNNEL_KEY_ATTR_PAD))
224		return -EMSGSIZE;
225
226	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE &&
227	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE))
228		return -EMSGSIZE;
229
230	switch (tun_proto) {
231	case AF_INET:
232		if (tun_key->u.ipv4.src &&
233		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_SRC,
234				    tun_key->u.ipv4.src))
235			return -EMSGSIZE;
236		if (tun_key->u.ipv4.dst &&
237		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_DST,
238				    tun_key->u.ipv4.dst))
239			return -EMSGSIZE;
240		break;
241	case AF_INET6:
242		if (!ipv6_addr_any(&tun_key->u.ipv6.src) &&
243		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_SRC,
244				     &tun_key->u.ipv6.src))
245			return -EMSGSIZE;
246		if (!ipv6_addr_any(&tun_key->u.ipv6.dst) &&
247		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_DST,
248				     &tun_key->u.ipv6.dst))
249			return -EMSGSIZE;
250		break;
251	}
252	if (tun_key->tos &&
253	    nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TOS, tun_key->tos))
254		return -EMSGSIZE;
255	if (nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TTL, tun_key->ttl))
256		return -EMSGSIZE;
257	if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
258	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
259		return -EMSGSIZE;
260	if ((tun_key->tun_flags & TUNNEL_CSUM) &&
261	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_CSUM))
262		return -EMSGSIZE;
263	if (tun_key->tp_src &&
264	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src))
265		return -EMSGSIZE;
266	if (tun_key->tp_dst &&
267	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst))
268		return -EMSGSIZE;
269	if ((tun_key->tun_flags & TUNNEL_OAM) &&
270	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_OAM))
271		return -EMSGSIZE;
272	if (tun_opts_len) {
273		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT &&
274		    nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_GENEVE_OPTS,
275			    tun_opts_len, tun_opts))
276			return -EMSGSIZE;
277		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT &&
278			 nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
279				 tun_opts_len, tun_opts))
280			return -EMSGSIZE;
281	}
282
283	return 0;
284}
285
286static int psample_ip_tun_to_nlattr(struct sk_buff *skb,
287			    struct ip_tunnel_info *tun_info)
288{
289	struct nlattr *nla;
290	int err;
291
292	nla = nla_nest_start_noflag(skb, PSAMPLE_ATTR_TUNNEL);
293	if (!nla)
294		return -EMSGSIZE;
295
296	err = __psample_ip_tun_to_nlattr(skb, tun_info);
297	if (err) {
298		nla_nest_cancel(skb, nla);
299		return err;
300	}
301
302	nla_nest_end(skb, nla);
303
304	return 0;
305}
306
307static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
308{
309	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
310	const struct ip_tunnel_key *tun_key = &tun_info->key;
311	int tun_opts_len = tun_info->options_len;
312	int sum = 0;
313
314	if (tun_key->tun_flags & TUNNEL_KEY)
315		sum += nla_total_size(sizeof(u64));
316
317	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE)
318		sum += nla_total_size(0);
319
320	switch (tun_proto) {
321	case AF_INET:
322		if (tun_key->u.ipv4.src)
323			sum += nla_total_size(sizeof(u32));
324		if (tun_key->u.ipv4.dst)
325			sum += nla_total_size(sizeof(u32));
326		break;
327	case AF_INET6:
328		if (!ipv6_addr_any(&tun_key->u.ipv6.src))
329			sum += nla_total_size(sizeof(struct in6_addr));
330		if (!ipv6_addr_any(&tun_key->u.ipv6.dst))
331			sum += nla_total_size(sizeof(struct in6_addr));
332		break;
333	}
334	if (tun_key->tos)
335		sum += nla_total_size(sizeof(u8));
336	sum += nla_total_size(sizeof(u8));	/* TTL */
337	if (tun_key->tun_flags & TUNNEL_DONT_FRAGMENT)
338		sum += nla_total_size(0);
339	if (tun_key->tun_flags & TUNNEL_CSUM)
340		sum += nla_total_size(0);
341	if (tun_key->tp_src)
342		sum += nla_total_size(sizeof(u16));
343	if (tun_key->tp_dst)
344		sum += nla_total_size(sizeof(u16));
345	if (tun_key->tun_flags & TUNNEL_OAM)
346		sum += nla_total_size(0);
347	if (tun_opts_len) {
348		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT)
349			sum += nla_total_size(tun_opts_len);
350		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT)
351			sum += nla_total_size(tun_opts_len);
352	}
353
354	return sum;
355}
356#endif
357
358void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
359			   u32 trunc_size, int in_ifindex, int out_ifindex,
360			   u32 sample_rate)
361{
362#ifdef CONFIG_INET
363	struct ip_tunnel_info *tun_info;
364#endif
365	struct sk_buff *nl_skb;
366	int data_len;
367	int meta_len;
368	void *data;
369	int ret;
370
371	meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
372		   (out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
373		   nla_total_size(sizeof(u32)) +	/* sample_rate */
374		   nla_total_size(sizeof(u32)) +	/* orig_size */
375		   nla_total_size(sizeof(u32)) +	/* group_num */
376		   nla_total_size(sizeof(u32));		/* seq */
377
378#ifdef CONFIG_INET
379	tun_info = skb_tunnel_info(skb);
380	if (tun_info)
381		meta_len += psample_tunnel_meta_len(tun_info);
382#endif
383
384	data_len = min(skb->len, trunc_size);
385	if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE)
386		data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
387			    - NLA_ALIGNTO;
388
389	nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);
390	if (unlikely(!nl_skb))
391		return;
392
393	data = genlmsg_put(nl_skb, 0, 0, &psample_nl_family, 0,
394			   PSAMPLE_CMD_SAMPLE);
395	if (unlikely(!data))
396		goto error;
397
398	if (in_ifindex) {
399		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_IIFINDEX, in_ifindex);
400		if (unlikely(ret < 0))
401			goto error;
402	}
403
404	if (out_ifindex) {
405		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OIFINDEX, out_ifindex);
406		if (unlikely(ret < 0))
407			goto error;
408	}
409
410	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_RATE, sample_rate);
411	if (unlikely(ret < 0))
412		goto error;
413
414	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len);
415	if (unlikely(ret < 0))
416		goto error;
417
418	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
419	if (unlikely(ret < 0))
420		goto error;
421
422	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_GROUP_SEQ, group->seq++);
423	if (unlikely(ret < 0))
424		goto error;
425
426	if (data_len) {
427		int nla_len = nla_total_size(data_len);
428		struct nlattr *nla;
429
430		nla = skb_put(nl_skb, nla_len);
431		nla->nla_type = PSAMPLE_ATTR_DATA;
432		nla->nla_len = nla_attr_size(data_len);
433
434		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
435			goto error;
436	}
437
438#ifdef CONFIG_INET
439	if (tun_info) {
440		ret = psample_ip_tun_to_nlattr(nl_skb, tun_info);
441		if (unlikely(ret < 0))
442			goto error;
443	}
444#endif
445
446	genlmsg_end(nl_skb, data);
447	genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
448				PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);
449
450	return;
451error:
452	pr_err_ratelimited("Could not create psample log message\n");
453	nlmsg_free(nl_skb);
454}
455EXPORT_SYMBOL_GPL(psample_sample_packet);
456
457static int __init psample_module_init(void)
458{
459	return genl_register_family(&psample_nl_family);
460}
461
462static void __exit psample_module_exit(void)
463{
464	genl_unregister_family(&psample_nl_family);
465}
466
467module_init(psample_module_init);
468module_exit(psample_module_exit);
469
470MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
471MODULE_DESCRIPTION("netlink channel for packet sampling");
472MODULE_LICENSE("GPL v2");
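
The functions exported above (psample_group_get(), psample_group_take(), psample_group_put() and psample_sample_packet()) are the interface in-kernel samplers call to report packets to userspace listeners on the PSAMPLE_NL_MCGRP_SAMPLE multicast group. The fragment below is a minimal caller sketch against this v5.9 signature, not part of psample.c; the names sampler_init/sampler_rx/sampler_exit and the group number, truncation size and rate are illustrative assumptions.

/* Minimal, hypothetical caller of the v5.9 psample API listed above. */
#include <linux/skbuff.h>
#include <net/psample.h>

static struct psample_group *grp;

static int sampler_init(struct net *net)
{
	/* Group number 1 is arbitrary; a userspace listener filters on the
	 * same group number in the sampled messages it receives.
	 */
	grp = psample_group_get(net, 1);
	return grp ? 0 : -ENOMEM;
}

static void sampler_rx(struct sk_buff *skb, int in_ifindex, u32 rate)
{
	/* v5.9 arguments: trunc_size, in_ifindex, out_ifindex, sample_rate.
	 * Report at most 128 bytes of payload; 0 means no egress ifindex.
	 */
	psample_sample_packet(grp, skb, 128, in_ifindex, 0, rate);
}

static void sampler_exit(void)
{
	psample_group_put(grp);
}
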
net/psample/psample.c (v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * net/psample/psample.c - Netlink channel for packet sampling
  4 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
  5 */
  6
  7#include <linux/types.h>
  8#include <linux/kernel.h>
  9#include <linux/skbuff.h>
 10#include <linux/module.h>
 11#include <linux/timekeeping.h>
 12#include <net/net_namespace.h>
 13#include <net/sock.h>
 14#include <net/netlink.h>
 15#include <net/genetlink.h>
 16#include <net/psample.h>
 17#include <linux/spinlock.h>
 18#include <net/ip_tunnels.h>
 19#include <net/dst_metadata.h>
 20
 21#define PSAMPLE_MAX_PACKET_SIZE 0xffff
 22
 23static LIST_HEAD(psample_groups_list);
 24static DEFINE_SPINLOCK(psample_groups_lock);
 25
 26/* multicast groups */
 27enum psample_nl_multicast_groups {
 28	PSAMPLE_NL_MCGRP_CONFIG,
 29	PSAMPLE_NL_MCGRP_SAMPLE,
 30};
 31
 32static const struct genl_multicast_group psample_nl_mcgrps[] = {
 33	[PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
 34	[PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
 35};
 36
 37static struct genl_family psample_nl_family __ro_after_init;
 38
 39static int psample_group_nl_fill(struct sk_buff *msg,
 40				 struct psample_group *group,
 41				 enum psample_command cmd, u32 portid, u32 seq,
 42				 int flags)
 43{
 44	void *hdr;
 45	int ret;
 46
 47	hdr = genlmsg_put(msg, portid, seq, &psample_nl_family, flags, cmd);
 48	if (!hdr)
 49		return -EMSGSIZE;
 50
 51	ret = nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
 52	if (ret < 0)
 53		goto error;
 54
 55	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount);
 56	if (ret < 0)
 57		goto error;
 58
 59	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq);
 60	if (ret < 0)
 61		goto error;
 62
 63	genlmsg_end(msg, hdr);
 64	return 0;
 65
 66error:
 67	genlmsg_cancel(msg, hdr);
 68	return -EMSGSIZE;
 69}
 70
 71static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
 72					   struct netlink_callback *cb)
 73{
 74	struct psample_group *group;
 75	int start = cb->args[0];
 76	int idx = 0;
 77	int err;
 78
 79	spin_lock_bh(&psample_groups_lock);
 80	list_for_each_entry(group, &psample_groups_list, list) {
 81		if (!net_eq(group->net, sock_net(msg->sk)))
 82			continue;
 83		if (idx < start) {
 84			idx++;
 85			continue;
 86		}
 87		err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP,
 88					    NETLINK_CB(cb->skb).portid,
 89					    cb->nlh->nlmsg_seq, NLM_F_MULTI);
 90		if (err)
 91			break;
 92		idx++;
 93	}
 94
 95	spin_unlock_bh(&psample_groups_lock);
 96	cb->args[0] = idx;
 97	return msg->len;
 98}
 99
100static const struct genl_small_ops psample_nl_ops[] = {
101	{
102		.cmd = PSAMPLE_CMD_GET_GROUP,
103		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
104		.dumpit = psample_nl_cmd_get_group_dumpit,
105		/* can be retrieved by unprivileged users */
106	}
107};
108
109static struct genl_family psample_nl_family __ro_after_init = {
110	.name		= PSAMPLE_GENL_NAME,
111	.version	= PSAMPLE_GENL_VERSION,
112	.maxattr	= PSAMPLE_ATTR_MAX,
113	.netnsok	= true,
114	.module		= THIS_MODULE,
115	.mcgrps		= psample_nl_mcgrps,
116	.small_ops	= psample_nl_ops,
117	.n_small_ops	= ARRAY_SIZE(psample_nl_ops),
118	.n_mcgrps	= ARRAY_SIZE(psample_nl_mcgrps),
119};
120
121static void psample_group_notify(struct psample_group *group,
122				 enum psample_command cmd)
123{
124	struct sk_buff *msg;
125	int err;
126
127	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
128	if (!msg)
129		return;
130
131	err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI);
132	if (!err)
133		genlmsg_multicast_netns(&psample_nl_family, group->net, msg, 0,
134					PSAMPLE_NL_MCGRP_CONFIG, GFP_ATOMIC);
135	else
136		nlmsg_free(msg);
137}
138
139static struct psample_group *psample_group_create(struct net *net,
140						  u32 group_num)
141{
142	struct psample_group *group;
143
144	group = kzalloc(sizeof(*group), GFP_ATOMIC);
145	if (!group)
146		return NULL;
147
148	group->net = net;
149	group->group_num = group_num;
150	list_add_tail(&group->list, &psample_groups_list);
151
152	psample_group_notify(group, PSAMPLE_CMD_NEW_GROUP);
153	return group;
154}
155
156static void psample_group_destroy(struct psample_group *group)
157{
158	psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
159	list_del(&group->list);
160	kfree_rcu(group, rcu);
161}
162
163static struct psample_group *
164psample_group_lookup(struct net *net, u32 group_num)
165{
166	struct psample_group *group;
167
168	list_for_each_entry(group, &psample_groups_list, list)
169		if ((group->group_num == group_num) && (group->net == net))
170			return group;
171	return NULL;
172}
173
174struct psample_group *psample_group_get(struct net *net, u32 group_num)
175{
176	struct psample_group *group;
177
178	spin_lock_bh(&psample_groups_lock);
179
180	group = psample_group_lookup(net, group_num);
181	if (!group) {
182		group = psample_group_create(net, group_num);
183		if (!group)
184			goto out;
185	}
186	group->refcount++;
187
188out:
189	spin_unlock_bh(&psample_groups_lock);
190	return group;
191}
192EXPORT_SYMBOL_GPL(psample_group_get);
193
194void psample_group_take(struct psample_group *group)
195{
196	spin_lock_bh(&psample_groups_lock);
197	group->refcount++;
198	spin_unlock_bh(&psample_groups_lock);
199}
200EXPORT_SYMBOL_GPL(psample_group_take);
201
202void psample_group_put(struct psample_group *group)
203{
204	spin_lock_bh(&psample_groups_lock);
205
206	if (--group->refcount == 0)
207		psample_group_destroy(group);
208
209	spin_unlock_bh(&psample_groups_lock);
210}
211EXPORT_SYMBOL_GPL(psample_group_put);
212
213#ifdef CONFIG_INET
214static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
215			      struct ip_tunnel_info *tun_info)
216{
217	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
218	const void *tun_opts = ip_tunnel_info_opts(tun_info);
219	const struct ip_tunnel_key *tun_key = &tun_info->key;
220	int tun_opts_len = tun_info->options_len;
221
222	if (tun_key->tun_flags & TUNNEL_KEY &&
223	    nla_put_be64(skb, PSAMPLE_TUNNEL_KEY_ATTR_ID, tun_key->tun_id,
224			 PSAMPLE_TUNNEL_KEY_ATTR_PAD))
225		return -EMSGSIZE;
226
227	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE &&
228	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE))
229		return -EMSGSIZE;
230
231	switch (tun_proto) {
232	case AF_INET:
233		if (tun_key->u.ipv4.src &&
234		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_SRC,
235				    tun_key->u.ipv4.src))
236			return -EMSGSIZE;
237		if (tun_key->u.ipv4.dst &&
238		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_DST,
239				    tun_key->u.ipv4.dst))
240			return -EMSGSIZE;
241		break;
242	case AF_INET6:
243		if (!ipv6_addr_any(&tun_key->u.ipv6.src) &&
244		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_SRC,
245				     &tun_key->u.ipv6.src))
246			return -EMSGSIZE;
247		if (!ipv6_addr_any(&tun_key->u.ipv6.dst) &&
248		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_DST,
249				     &tun_key->u.ipv6.dst))
250			return -EMSGSIZE;
251		break;
252	}
253	if (tun_key->tos &&
254	    nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TOS, tun_key->tos))
255		return -EMSGSIZE;
256	if (nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TTL, tun_key->ttl))
257		return -EMSGSIZE;
258	if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
259	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
260		return -EMSGSIZE;
261	if ((tun_key->tun_flags & TUNNEL_CSUM) &&
262	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_CSUM))
263		return -EMSGSIZE;
264	if (tun_key->tp_src &&
265	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src))
266		return -EMSGSIZE;
267	if (tun_key->tp_dst &&
268	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst))
269		return -EMSGSIZE;
270	if ((tun_key->tun_flags & TUNNEL_OAM) &&
271	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_OAM))
272		return -EMSGSIZE;
273	if (tun_opts_len) {
274		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT &&
275		    nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_GENEVE_OPTS,
276			    tun_opts_len, tun_opts))
277			return -EMSGSIZE;
278		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT &&
279			 nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
280				 tun_opts_len, tun_opts))
281			return -EMSGSIZE;
282	}
283
284	return 0;
285}
286
287static int psample_ip_tun_to_nlattr(struct sk_buff *skb,
288			    struct ip_tunnel_info *tun_info)
289{
290	struct nlattr *nla;
291	int err;
292
293	nla = nla_nest_start_noflag(skb, PSAMPLE_ATTR_TUNNEL);
294	if (!nla)
295		return -EMSGSIZE;
296
297	err = __psample_ip_tun_to_nlattr(skb, tun_info);
298	if (err) {
299		nla_nest_cancel(skb, nla);
300		return err;
301	}
302
303	nla_nest_end(skb, nla);
304
305	return 0;
306}
307
308static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
309{
310	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
311	const struct ip_tunnel_key *tun_key = &tun_info->key;
312	int tun_opts_len = tun_info->options_len;
313	int sum = nla_total_size(0);	/* PSAMPLE_ATTR_TUNNEL */
314
315	if (tun_key->tun_flags & TUNNEL_KEY)
316		sum += nla_total_size_64bit(sizeof(u64));
317
318	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE)
319		sum += nla_total_size(0);
320
321	switch (tun_proto) {
322	case AF_INET:
323		if (tun_key->u.ipv4.src)
324			sum += nla_total_size(sizeof(u32));
325		if (tun_key->u.ipv4.dst)
326			sum += nla_total_size(sizeof(u32));
327		break;
328	case AF_INET6:
329		if (!ipv6_addr_any(&tun_key->u.ipv6.src))
330			sum += nla_total_size(sizeof(struct in6_addr));
331		if (!ipv6_addr_any(&tun_key->u.ipv6.dst))
332			sum += nla_total_size(sizeof(struct in6_addr));
333		break;
334	}
335	if (tun_key->tos)
336		sum += nla_total_size(sizeof(u8));
337	sum += nla_total_size(sizeof(u8));	/* TTL */
338	if (tun_key->tun_flags & TUNNEL_DONT_FRAGMENT)
339		sum += nla_total_size(0);
340	if (tun_key->tun_flags & TUNNEL_CSUM)
341		sum += nla_total_size(0);
342	if (tun_key->tp_src)
343		sum += nla_total_size(sizeof(u16));
344	if (tun_key->tp_dst)
345		sum += nla_total_size(sizeof(u16));
346	if (tun_key->tun_flags & TUNNEL_OAM)
347		sum += nla_total_size(0);
348	if (tun_opts_len) {
349		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT)
350			sum += nla_total_size(tun_opts_len);
351		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT)
352			sum += nla_total_size(tun_opts_len);
353	}
354
355	return sum;
356}
357#endif
358
359void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
360			   u32 sample_rate, const struct psample_metadata *md)
361{
362	ktime_t tstamp = ktime_get_real();
363	int out_ifindex = md->out_ifindex;
364	int in_ifindex = md->in_ifindex;
365	u32 trunc_size = md->trunc_size;
366#ifdef CONFIG_INET
367	struct ip_tunnel_info *tun_info;
368#endif
369	struct sk_buff *nl_skb;
370	int data_len;
371	int meta_len;
372	void *data;
373	int ret;
374
375	meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
376		   (out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
377		   (md->out_tc_valid ? nla_total_size(sizeof(u16)) : 0) +
378		   (md->out_tc_occ_valid ? nla_total_size_64bit(sizeof(u64)) : 0) +
379		   (md->latency_valid ? nla_total_size_64bit(sizeof(u64)) : 0) +
380		   nla_total_size(sizeof(u32)) +	/* sample_rate */
381		   nla_total_size(sizeof(u32)) +	/* orig_size */
382		   nla_total_size(sizeof(u32)) +	/* group_num */
383		   nla_total_size(sizeof(u32)) +	/* seq */
384		   nla_total_size_64bit(sizeof(u64)) +	/* timestamp */
385		   nla_total_size(sizeof(u16));		/* protocol */
386
387#ifdef CONFIG_INET
388	tun_info = skb_tunnel_info(skb);
389	if (tun_info)
390		meta_len += psample_tunnel_meta_len(tun_info);
391#endif
392
393	data_len = min(skb->len, trunc_size);
394	if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE)
395		data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
396			    - NLA_ALIGNTO;
397
398	nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);
399	if (unlikely(!nl_skb))
400		return;
401
402	data = genlmsg_put(nl_skb, 0, 0, &psample_nl_family, 0,
403			   PSAMPLE_CMD_SAMPLE);
404	if (unlikely(!data))
405		goto error;
406
407	if (in_ifindex) {
408		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_IIFINDEX, in_ifindex);
409		if (unlikely(ret < 0))
410			goto error;
411	}
412
413	if (out_ifindex) {
414		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OIFINDEX, out_ifindex);
415		if (unlikely(ret < 0))
416			goto error;
417	}
418
419	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_RATE, sample_rate);
420	if (unlikely(ret < 0))
421		goto error;
422
423	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len);
424	if (unlikely(ret < 0))
425		goto error;
426
427	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
428	if (unlikely(ret < 0))
429		goto error;
430
431	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_GROUP_SEQ, group->seq++);
432	if (unlikely(ret < 0))
433		goto error;
434
435	if (md->out_tc_valid) {
436		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OUT_TC, md->out_tc);
437		if (unlikely(ret < 0))
438			goto error;
439	}
440
441	if (md->out_tc_occ_valid) {
442		ret = nla_put_u64_64bit(nl_skb, PSAMPLE_ATTR_OUT_TC_OCC,
443					md->out_tc_occ, PSAMPLE_ATTR_PAD);
444		if (unlikely(ret < 0))
445			goto error;
446	}
447
448	if (md->latency_valid) {
449		ret = nla_put_u64_64bit(nl_skb, PSAMPLE_ATTR_LATENCY,
450					md->latency, PSAMPLE_ATTR_PAD);
451		if (unlikely(ret < 0))
452			goto error;
453	}
454
455	ret = nla_put_u64_64bit(nl_skb, PSAMPLE_ATTR_TIMESTAMP,
456				ktime_to_ns(tstamp), PSAMPLE_ATTR_PAD);
457	if (unlikely(ret < 0))
458		goto error;
459
460	ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_PROTO,
461			  be16_to_cpu(skb->protocol));
462	if (unlikely(ret < 0))
463		goto error;
464
465	if (data_len) {
466		int nla_len = nla_total_size(data_len);
467		struct nlattr *nla;
468
469		nla = skb_put(nl_skb, nla_len);
470		nla->nla_type = PSAMPLE_ATTR_DATA;
471		nla->nla_len = nla_attr_size(data_len);
472
473		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
474			goto error;
475	}
476
477#ifdef CONFIG_INET
478	if (tun_info) {
479		ret = psample_ip_tun_to_nlattr(nl_skb, tun_info);
480		if (unlikely(ret < 0))
481			goto error;
482	}
483#endif
484
485	genlmsg_end(nl_skb, data);
486	genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
487				PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);
488
489	return;
490error:
491	pr_err_ratelimited("Could not create psample log message\n");
492	nlmsg_free(nl_skb);
493}
494EXPORT_SYMBOL_GPL(psample_sample_packet);
495
496static int __init psample_module_init(void)
497{
498	return genl_register_family(&psample_nl_family);
499}
500
501static void __exit psample_module_exit(void)
502{
503	genl_unregister_family(&psample_nl_family);
504}
505
506module_init(psample_module_init);
507module_exit(psample_module_exit);
508
509MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
510MODULE_DESCRIPTION("netlink channel for packet sampling");
511MODULE_LICENSE("GPL v2");
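
In this version psample_sample_packet() takes a struct psample_metadata rather than separate trunc_size/ifindex arguments, and the sampled message additionally carries a timestamp and the packet protocol. Below is a minimal caller sketch for that newer signature, not part of psample.c; only metadata fields that the function above actually reads are used, and the concrete values are illustrative assumptions.

/* Minimal, hypothetical caller of the v5.14.15 psample API listed above. */
#include <linux/skbuff.h>
#include <net/psample.h>

static void sampler_rx(struct psample_group *grp, struct sk_buff *skb,
		       int in_ifindex, u32 rate)
{
	struct psample_metadata md = {};

	md.trunc_size = 128;		/* report at most 128 bytes of payload */
	md.in_ifindex = in_ifindex;	/* 0 would mean "not reported" */
	/* out_tc, out_tc_occ and latency stay unset; psample_sample_packet()
	 * only emits them when the matching *_valid flag is set.
	 */
	psample_sample_packet(grp, skb, rate, &md);
}
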