net/core/flow_offload.c @ v5.14.15
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
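As a usage note: a minimal sketch of a caller building a one-action rule with flow_rule_alloc(); the drop action and the hw_stats override are illustrative, not taken from this file.

static struct flow_rule *example_build_drop_rule(void)
{
	struct flow_rule *rule;

	rule = flow_rule_alloc(1);	/* room for one action entry */
	if (!rule)
		return NULL;

	/* override the DONT_CARE default pre-filled by flow_rule_alloc() */
	rule->action.entries[0].id = FLOW_ACTION_DROP;
	rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
	return rule;
}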
/* Note: expands to local variable declarations followed by the key/mask
 * lookups, so it can be used at most once per scope.
 */
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);
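Drivers normally reach these helpers from their rule-translation path, testing flow_rule_match_key() first so a helper is only invoked for keys the classifier actually dissected. A hedged sketch of that pattern; struct example_hw_rule and its fields are invented:

static int example_parse_rule(struct flow_rule *rule,
			      struct example_hw_rule *hw)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.mask->n_proto)
			hw->ethtype = ntohs(match.key->n_proto);	/* invented field */
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (match.mask->vlan_id)
			hw->vid = match.key->vlan_id;	/* invented field */
	}

	return 0;
}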
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);
void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);
void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);
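A brief sketch of the cookie lifecycle, assuming a caller that duplicates a user-supplied opaque blob for an offloaded rule; buf and buf_len are illustrative names, not from this file:

struct flow_action_cookie *cookie;

cookie = flow_action_cookie_create(buf, buf_len, GFP_KERNEL);
if (!cookie)
	return -ENOMEM;
/* ... keep the copy alongside the hardware rule ... */
flow_action_cookie_destroy(cookie);	/* on rule teardown */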
void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
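flow_block_cb_setup_simple() is the usual entry point for drivers that need one callback per block. A hedged sketch of a driver's ndo_setup_tc delegating to it; every "example_" name and struct example_priv are invented:

static LIST_HEAD(example_block_cb_list);

static int example_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				     void *cb_priv)
{
	/* would translate TC_SETUP_CLSFLOWER et al. for the hardware */
	return -EOPNOTSUPP;
}

static int example_setup_tc(struct net_device *dev, enum tc_setup_type type,
			    void *type_data)
{
	struct example_priv *priv = netdev_priv(dev);	/* invented priv type */

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &example_block_cb_list,
						  example_setup_tc_block_cb,
						  priv, priv, true);	/* ingress only */
	default:
		return -EOPNOTSUPP;
	}
}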
static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
	struct rcu_head			rcu;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb		= cb;
	indr_dev->cb_priv	= cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}

struct flow_indir_dev_info {
	void *data;
	struct net_device *dev;
	struct Qdisc *sch;
	enum tc_setup_type type;
	void (*cleanup)(struct flow_block_cb *block_cb);
	struct list_head list;
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	struct list_head *cb_list;
};

static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_block_offload bo;
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		memset(&bo, 0, sizeof(bo));
		bo.command = cur->command;
		bo.binder_type = cur->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
		list_splice(&bo.cb_list, cur->cb_list);
	}
}
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	existing_qdiscs_register(cb, cb_priv);
	mutex_unlock(&flow_indr_block_lock);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
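A hedged sketch of the driver-side lifecycle: register the indirect block callback at module init and drop it again, with a release hook, at exit. The callback, context, and release names are invented; the callback signature follows flow_indr_block_bind_cb_t:

static int example_indr_setup_cb(struct net_device *dev, struct Qdisc *sch,
				 void *cb_priv, enum tc_setup_type type,
				 void *type_data, void *data,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	/* would bind blocks on behalf of tunnel or foreign netdevs */
	return -EOPNOTSUPP;
}

static int __init example_init(void)
{
	return flow_indr_dev_register(example_indr_setup_cb, &example_ctx);	/* ctx invented */
}

static void __exit example_exit(void)
{
	flow_indr_dev_unregister(example_indr_setup_cb, &example_ctx,
				 example_indr_release);	/* release hook invented */
}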
static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);
static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch, void *data,
				 void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}

struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
static struct flow_indir_dev_info *find_indir_dev(void *data)
{
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		if (cur->data == data)
			return cur;
	}
	return NULL;
}

static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
			 struct flow_block_offload *bo)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (info)
		return -EEXIST;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->data = data;
	info->dev = dev;
	info->sch = sch;
	info->type = type;
	info->cleanup = cleanup;
	info->command = bo->command;
	info->binder_type = bo->binder_type;
	info->cb_list = bo->cb_list_head;

	list_add(&info->list, &flow_indir_dev_list);
	return 0;
}

static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (!info)
		return -ENOENT;

	list_del(&info->list);

	kfree(info);
	return 0;
}
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;

	mutex_lock(&flow_indr_block_lock);

	if (bo->command == FLOW_BLOCK_BIND)
		indir_dev_add(data, dev, sch, type, cleanup, bo);
	else if (bo->command == FLOW_BLOCK_UNBIND)
		indir_dev_remove(data);

	list_for_each_entry(this, &flow_block_indr_dev_list, list)
		this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);

	mutex_unlock(&flow_indr_block_lock);

	return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);
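On the consumer side, block owners (tc, netfilter) call flow_indr_dev_setup_offload() to offer a block to every registered indirect callback. A hedged sketch of such a caller; example_block_cleanup and the wrapper are invented:

static int example_offer_block(struct net_device *dev, struct Qdisc *sch,
			       void *block, struct flow_block_offload *bo)
{
	/* in this version, -EOPNOTSUPP means no callback bound to the block */
	return flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block,
					   bo, example_block_cleanup);
}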
net/core/flow_offload.c @ v6.13.7
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
{
	struct flow_offload_action *fl_action;
	int i;

	fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
			    GFP_KERNEL);
	if (!fl_action)
		return NULL;

	fl_action->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return fl_action;
}
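offload_action_alloc() carries no EXPORT_SYMBOL; it is declared in net/flow_offload.h and consumed by the tc action code when offloading standalone actions. A hedged sketch of that style of use; the command/index assignments assume enum offload_act_command from flow_offload.h, and num_actions/act_index are illustrative:

struct flow_offload_action *fl_act;

fl_act = offload_action_alloc(num_actions);
if (!fl_act)
	return -ENOMEM;

fl_act->command = FLOW_ACT_REPLACE;	/* assumed field/enum, per flow_offload.h */
fl_act->index = act_index;		/* illustrative */
/* ... fill fl_act->action.entries[], then dispatch to the drivers ... */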
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_arp(const struct flow_rule *rule,
			 struct flow_match_arp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ARP, out);
}
EXPORT_SYMBOL(flow_rule_match_arp);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_ports_range(const struct flow_rule *rule,
				 struct flow_match_ports_range *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out);
}
EXPORT_SYMBOL(flow_rule_match_ports_range);
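flow_rule_match_ports_range() pairs with FLOW_DISSECTOR_KEY_PORTS_RANGE, whose key is assumed to carry tp_min/tp_max boundaries rather than exact ports. A hedged driver-side sketch; the hw fields are invented:

if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE)) {
	struct flow_match_ports_range match;

	flow_rule_match_ports_range(rule, &match);
	if (match.mask->tp_min.dst) {	/* assumed key layout */
		hw->dport_min = ntohs(match.key->tp_min.dst);
		hw->dport_max = ntohs(match.key->tp_max.dst);
	}
}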
void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_ipsec(const struct flow_rule *rule,
			   struct flow_match_ipsec *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPSEC, out);
}
EXPORT_SYMBOL(flow_rule_match_ipsec);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

void flow_rule_match_pppoe(const struct flow_rule *rule,
			   struct flow_match_pppoe *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PPPOE, out);
}
EXPORT_SYMBOL(flow_rule_match_pppoe);

void flow_rule_match_l2tpv3(const struct flow_rule *rule,
			    struct flow_match_l2tpv3 *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_L2TPV3, out);
}
EXPORT_SYMBOL(flow_rule_match_l2tpv3);
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb		= cb;
	indr_dev->cb_priv	= cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}
struct flow_indir_dev_info {
	void *data;
	struct net_device *dev;
	struct Qdisc *sch;
	enum tc_setup_type type;
	void (*cleanup)(struct flow_block_cb *block_cb);
	struct list_head list;
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	struct list_head *cb_list;
};

static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_block_offload bo;
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		memset(&bo, 0, sizeof(bo));
		bo.command = cur->command;
		bo.binder_type = cur->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
		list_splice(&bo.cb_list, cur->cb_list);
	}
}

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	existing_qdiscs_register(cb, cb_priv);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, true);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, false);
	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);
static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch, void *data,
				 void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}

struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
static struct flow_indir_dev_info *find_indir_dev(void *data)
{
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		if (cur->data == data)
			return cur;
	}
	return NULL;
}

static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
			 struct flow_block_offload *bo)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (info)
		return -EEXIST;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->data = data;
	info->dev = dev;
	info->sch = sch;
	info->type = type;
	info->cleanup = cleanup;
	info->command = bo->command;
	info->binder_type = bo->binder_type;
	info->cb_list = bo->cb_list_head;

	list_add(&info->list, &flow_indir_dev_list);
	return 0;
}

static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (!info)
		return -ENOENT;

	list_del(&info->list);

	kfree(info);
	return 0;
}
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;
	u32 count = 0;
	int err;

	mutex_lock(&flow_indr_block_lock);
	if (bo) {
		if (bo->command == FLOW_BLOCK_BIND)
			indir_dev_add(data, dev, sch, type, cleanup, bo);
		else if (bo->command == FLOW_BLOCK_UNBIND)
			indir_dev_remove(data);
	}

	list_for_each_entry(this, &flow_block_indr_dev_list, list) {
		err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
		if (!err)
			count++;
	}

	mutex_unlock(&flow_indr_block_lock);

	return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);
bool flow_indr_dev_exists(void)
{
	return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);
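flow_indr_dev_exists() gives block owners a cheap pre-check before building offload state. A minimal sketch of such a guard, assuming an offload-setup path that should bail out early when no indirect driver callback is registered:

/* skip offload setup when no indirect driver callback is registered */
if (!flow_indr_dev_exists())
	return -EOPNOTSUPP;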