/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
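
/*
 * Illustrative sketch only, not lifted from any one caller: a classifier
 * offloading a rule with two actions might do
 *
 *	struct flow_rule *rule;
 *
 *	rule = flow_rule_alloc(2);
 *	if (!rule)
 *		return -ENOMEM;
 *	rule->action.entries[0].id = FLOW_ACTION_VLAN_POP;
 *	rule->action.entries[1].id = FLOW_ACTION_DROP;
 *	...
 *	kfree(rule);
 */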

struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
{
	struct flow_offload_action *fl_action;
	int i;

	fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
			    GFP_KERNEL);
	if (!fl_action)
		return NULL;

	fl_action->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return fl_action;
}

#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

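/*
 * Each flow_rule_match_*() helper below expands FLOW_DISSECTOR_MATCH()
 * to resolve the dissector key of the given type into the (key, mask)
 * pointer pair of the output structure. Drivers normally test for the
 * key first; a minimal sketch:
 *
 *	struct flow_match_basic match;
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		flow_rule_match_basic(rule, &match);
 *		...
 *	}
 *
 * after which match.key->n_proto and match.mask->n_proto may be read.
 */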
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_arp(const struct flow_rule *rule,
			 struct flow_match_arp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ARP, out);
}
EXPORT_SYMBOL(flow_rule_match_arp);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_ports_range(const struct flow_rule *rule,
				 struct flow_match_ports_range *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out);
}
EXPORT_SYMBOL(flow_rule_match_ports_range);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);
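
/*
 * Usage sketch (hypothetical; "data" and "len" stand for whatever opaque
 * blob the caller wants attached to an action entry):
 *
 *	struct flow_action_cookie *cookie;
 *
 *	cookie = flow_action_cookie_create(data, len, GFP_KERNEL);
 *	if (!cookie)
 *		return -ENOMEM;
 *	...
 *	flow_action_cookie_destroy(cookie);
 */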

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

void flow_rule_match_pppoe(const struct flow_rule *rule,
			   struct flow_match_pppoe *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PPPOE, out);
}
EXPORT_SYMBOL(flow_rule_match_pppoe);

void flow_rule_match_l2tpv3(const struct flow_rule *rule,
			    struct flow_match_l2tpv3 *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_L2TPV3, out);
}
EXPORT_SYMBOL(flow_rule_match_l2tpv3);

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);
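
/*
 * Note the ERR_PTR() convention: callers check the return value with
 * IS_ERR(), not against NULL, e.g.
 *
 *	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
 *	if (IS_ERR(block_cb))
 *		return PTR_ERR(block_cb);
 */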

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);
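
/*
 * The refcount helpers above are plain, non-atomic counters; they rely
 * on the callers' own serialization. A driver sharing one block_cb
 * across several binds typically pairs them as in this sketch:
 *
 *	flow_block_cb_incref(block_cb);
 *	...
 *	if (!flow_block_cb_decref(block_cb))
 *		flow_block_cb_free(block_cb);
 */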

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
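
/*
 * Typical driver-side use, from a hypothetical ndo_setup_tc()
 * implementation (the "foo_*" names are placeholders):
 *
 *	static LIST_HEAD(foo_block_cb_list);
 *
 *	static int foo_setup_tc(struct net_device *dev,
 *				enum tc_setup_type type, void *type_data)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		switch (type) {
 *		case TC_SETUP_BLOCK:
 *			return flow_block_cb_setup_simple(type_data,
 *							  &foo_block_cb_list,
 *							  foo_setup_tc_block_cb,
 *							  priv, priv, true);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */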

static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);
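
/*
 * flow_block_indr_list tracks the flow_block_cbs installed through the
 * indirect path, flow_block_indr_dev_list the registered indirect
 * drivers (struct flow_indr_dev below), and flow_indir_dev_list the
 * bound blocks recorded so they can be replayed to drivers that
 * register later. flow_indr_block_lock serializes updates to them.
 */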

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb		= cb;
	indr_dev->cb_priv	= cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}

struct flow_indir_dev_info {
	void *data;
	struct net_device *dev;
	struct Qdisc *sch;
	enum tc_setup_type type;
	void (*cleanup)(struct flow_block_cb *block_cb);
	struct list_head list;
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	struct list_head *cb_list;
};

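/*
 * Replay the binds recorded on flow_indir_dev_list to a newly
 * registered driver callback, so blocks bound before the driver
 * appeared (e.g. on tunnel devices) still get offloaded. Runs with
 * flow_indr_block_lock held.
 */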
static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_block_offload bo;
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		memset(&bo, 0, sizeof(bo));
		bo.command = cur->command;
		bo.binder_type = cur->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
		list_splice(&bo.cb_list, cur->cb_list);
	}
}

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	existing_qdiscs_register(cb, cb_priv);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, true);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
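
/*
 * Registration sketch for a driver that offloads blocks of devices it
 * does not own, e.g. tunnel netdevs ("foo_*" names are placeholders):
 *
 *	err = flow_indr_dev_register(foo_indr_setup_cb, foo_priv);
 *	if (err)
 *		return err;
 *	...
 *	flow_indr_dev_unregister(foo_indr_setup_cb, foo_priv,
 *				 foo_indr_block_unbind);
 */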

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, false);
	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);
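
/*
 * Note the ordering in flow_indr_dev_unregister(): the matching
 * block_cbs are moved off flow_block_indr_list while the lock is held,
 * but their cleanup callbacks run afterwards, outside the lock, via
 * flow_block_indr_notify().
 */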

static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch, void *data,
				 void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}

struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
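
/*
 * Indirect-path counterpart of flow_block_cb_alloc(): a driver's
 * flow_indr_block_bind_cb_t uses this on FLOW_BLOCK_BIND so the new
 * block_cb is also linked on flow_block_indr_list and can be torn down
 * when either side goes away. Sketch from a hypothetical bind handler:
 *
 *	block_cb = flow_indr_block_cb_alloc(foo_setup_cb, cb_ident, cb_priv,
 *					    foo_release, bo, netdev, sch,
 *					    data, indr_cb_priv, cleanup);
 *	if (IS_ERR(block_cb))
 *		return PTR_ERR(block_cb);
 *	flow_block_cb_add(block_cb, bo);
 */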

static struct flow_indir_dev_info *find_indir_dev(void *data)
{
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		if (cur->data == data)
			return cur;
	}
	return NULL;
}

static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
			 struct flow_block_offload *bo)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (info)
		return -EEXIST;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->data = data;
	info->dev = dev;
	info->sch = sch;
	info->type = type;
	info->cleanup = cleanup;
	info->command = bo->command;
	info->binder_type = bo->binder_type;
	info->cb_list = bo->cb_list_head;

	list_add(&info->list, &flow_indir_dev_list);
	return 0;
}

static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (!info)
		return -ENOENT;

	list_del(&info->list);

	kfree(info);
	return 0;
}

int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;
	u32 count = 0;
	int err;

	mutex_lock(&flow_indr_block_lock);
	if (bo) {
		if (bo->command == FLOW_BLOCK_BIND)
			indir_dev_add(data, dev, sch, type, cleanup, bo);
		else if (bo->command == FLOW_BLOCK_UNBIND)
			indir_dev_remove(data);
	}

	list_for_each_entry(this, &flow_block_indr_dev_list, list) {
		err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
		if (!err)
			count++;
	}

	mutex_unlock(&flow_indr_block_lock);

	return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);
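
/*
 * Called by block infrastructures (e.g. tc) when the device itself has
 * no direct offload callback. Returns the number of indirect drivers
 * that accepted the request, or -EOPNOTSUPP when no callback claimed
 * it. Simplified caller-side sketch:
 *
 *	err = flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK,
 *					  block, &bo, cleanup_cb);
 *	if (err < 0)
 *		return err;
 */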

bool flow_indr_dev_exists(void)
{
	return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);