v5.4
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}
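
/* Illustrative sketch (not part of this header): classifiers typically pair
 * tcf_exts_get_net() with tcf_queue_work() so that a filter's actions are
 * released from a workqueue, falling back to synchronous cleanup when the
 * netns is already being destroyed, as cls_flower does. The my_* names are
 * hypothetical; the work function is expected to call tcf_exts_put_net()
 * once it is done.
 */
#if 0	/* example only */
static void my_filter_delete(struct my_filter *f)
{
	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, my_filter_destroy_work);
	else
		my_filter_destroy(f);
}
#endif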

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
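
/* Illustrative sketch (not part of this header): walking a filter's actions
 * with tcf_exts_for_each_action(). is_tcf_gact_shot() is a real helper from
 * <net/tc_act/tc_gact.h>; my_exts_has_drop() itself is hypothetical. With
 * CONFIG_NET_CLS_ACT disabled the loop compiles away to nothing.
 */
#if 0	/* example only */
static bool my_exts_has_drop(struct tcf_exts *exts)
{
	struct tc_action *a;
	int i;

	tcf_exts_for_each_action(i, a, exts)
		if (is_tcf_gact_shot(a))
			return true;
	return false;
}
#endif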

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}
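
/* Illustrative sketch (not part of this header): how counters flow back from
 * hardware. A classifier issues a *_STATS offload command through
 * tc_setup_cb_call() and then feeds the returned flow_stats into
 * tcf_exts_stats_update(), roughly as cls_matchall does. my_filter is
 * hypothetical.
 */
#if 0	/* example only */
static void my_hw_update_stats(struct tcf_proto *tp, struct tcf_block *block,
			       struct my_filter *f, u32 flags)
{
	struct tc_cls_matchall_offload cls_mall = {};

	tc_cls_common_offload_init(&cls_mall.common, tp, flags, NULL);
	cls_mall.command = TC_CLSMATCHALL_STATS;
	cls_mall.cookie = (unsigned long)f;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);

	tcf_exts_stats_update(&f->exts, cls_mall.stats.bytes,
			      cls_mall.stats.pkts, cls_mall.stats.lastused);
}
#endif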

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
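
/* Illustrative sketch (not part of this header): a classify() callback in the
 * style of cls_basic. A negative return from tcf_exts_exec() means "treat the
 * filter as unmatched and keep looking"; anything else is propagated to the
 * caller. my_head/my_filter and their members are hypothetical.
 */
#if 0	/* example only */
static int my_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct my_head *head = rcu_dereference_bh(tp->root);
	struct my_filter *f;
	int r;

	list_for_each_entry_rcu(f, &head->filters, link) {
		if (!my_filter_match(f, skb))
			continue;
		*res = f->res;
		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}
#endif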

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the packet data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
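
/* Illustrative sketch (not part of this header): the configuration side of an
 * ematch-capable classifier. The tree is parsed straight into the filter with
 * tcf_em_tree_validate() and torn down with tcf_em_tree_destroy() when the
 * filter dies; the fast path then uses tcf_em_tree_match() as documented
 * above. TCA_MY_EMATCHES and my_filter are hypothetical.
 */
#if 0	/* example only */
static int my_set_ematches(struct tcf_proto *tp, struct nlattr **tb,
			   struct my_filter *f)
{
	return tcf_em_tree_validate(tp, tb[TCA_MY_EMATCHES], &f->ematches);
}
#endif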

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while (0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
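
/* Illustrative sketch (not part of this header): guarding a packet access with
 * tcf_valid_offset(), the way em_cmp does before dereferencing its pointer.
 * Alignment handling is omitted here; my_match_u16() is hypothetical.
 */
#if 0	/* example only */
static int my_match_u16(struct sk_buff *skb, int layer, int off, u16 want)
{
	unsigned char *ptr = tcf_get_base_ptr(skb, layer) + off;

	if (!tcf_valid_offset(skb, ptr, sizeof(u16)))
		return 0;
	return *(u16 *)ptr == want;
}
#endif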

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
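
/* Illustrative sketch (not part of this header): the TCA_*_INDEV attribute is
 * resolved to an ifindex once at configuration time via tcf_change_indev();
 * the fast path then only compares integers with
 * tcf_match_indev(skb, f->ifindex). TCA_MY_INDEV and my_filter are
 * hypothetical.
 */
#if 0	/* example only */
static int my_parse_indev(struct net *net, struct nlattr **tb,
			  struct my_filter *f, struct netlink_ext_ack *extack)
{
	int ifindex;

	if (!tb[TCA_MY_INDEV])
		return 0;
	ifindex = tcf_change_indev(net, tb[TCA_MY_INDEV], extack);
	if (ifindex < 0)
		return ifindex;
	f->ifindex = ifindex;
	return 0;
}
#endif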

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts, bool rtnl_held);
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
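
/* Illustrative sketch (not part of this header): a driver's ndo_setup_tc()
 * path gates offload requests on tc_cls_can_offload_and_chain0() so that
 * rules on chains other than 0 are rejected with an extack message. The
 * my_* helpers are hypothetical.
 */
#if 0	/* example only */
static int my_setup_tc_cls_u32(struct net_device *dev,
			       struct tc_cls_u32_offload *cls_u32)
{
	if (!tc_cls_can_offload_and_chain0(dev, &cls_u32->common))
		return -EOPNOTSUPP;

	switch (cls_u32->command) {
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		return my_add_knode(dev, &cls_u32->knode);
	case TC_CLSU32_DELETE_KNODE:
		return my_del_knode(dev, cls_u32->knode.handle);
	default:
		return -EOPNOTSUPP;
	}
}
#endif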

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
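
/* Illustrative sketch (not part of this header): how a classifier combines the
 * flag helpers. tc_flags_valid() rejects malformed userspace input up front;
 * the tc_skip_*() tests then decide whether the filter is installed in the
 * software datapath, in hardware, or both. my_install_*() are hypothetical.
 */
#if 0	/* example only */
static int my_filter_install(struct my_filter *f, u32 flags)
{
	int err = 0;

	if (!tc_flags_valid(flags))
		return -EINVAL;

	if (!tc_skip_sw(flags))
		err = my_install_sw(f);
	if (!err && !tc_skip_hw(flags))
		err = my_install_hw(f);	/* e.g. via tc_setup_cb_*() */
	return err;
}
#endif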

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
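
/* Illustrative sketch (not part of this header): because struct tc_mqprio_qopt
 * is guaranteed to be the first member, a driver that only understands the
 * legacy qopt may treat the type_data pointer as either type. my_set_tcs() is
 * hypothetical.
 */
#if 0	/* example only */
static int my_setup_mqprio(struct net_device *dev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct tc_mqprio_qopt *qopt = type_data;	/* same address */

	return my_set_tcs(dev, qopt->num_tc, mqprio->mode);
}
#endif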

/* This structure holds a cookie that is passed from user space to the kernel
 * for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case that a prio qdisc is offloaded and now is changed to a
	 * non-offloadable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

#endif

v5.9
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp, struct tcf_result *res,
			 bool compat_mode);

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

static inline int tcf_classify_ingress(struct sk_buff *skb,
				       const struct tcf_block *ingress_block,
				       const struct tcf_proto *tp,
				       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 drops, u64 lastuse,
		      u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, drops,
					lastuse, true);
		a->used_hw_stats = used_hw_stats;
		a->used_hw_stats_valid = used_hw_stats_valid;
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the packet data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while (0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif
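
/* Illustrative sketch (not part of this header): a qdisc wires a qevent up at
 * init time and invokes it where the event occurs, the way sch_red hooks its
 * early_drop qevent in v5.9. my_sched_data and qe_early_drop are
 * hypothetical; the binder type and the NET_XMIT_CN | ret convention follow
 * sch_red.
 */
#if 0	/* example only */
static int my_init_qevent(struct my_sched_data *q, struct Qdisc *sch,
			  struct nlattr *block_attr,
			  struct netlink_ext_ack *extack)
{
	return tcf_qevent_init(&q->qe_early_drop, sch,
			       FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			       block_attr, extack);
}

static int my_early_drop(struct my_sched_data *q, struct Qdisc *sch,
			 struct sk_buff *skb, struct sk_buff **to_free)
{
	int ret;

	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;	/* consumed by a filter action */
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}
#endif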
604
605struct tc_cls_u32_knode {
606	struct tcf_exts *exts;
607	struct tcf_result *res;
608	struct tc_u32_sel *sel;
609	u32 handle;
610	u32 val;
611	u32 mask;
612	u32 link_handle;
613	u8 fshift;
614};
615
616struct tc_cls_u32_hnode {
617	u32 handle;
618	u32 prio;
619	unsigned int divisor;
620};
621
622enum tc_clsu32_command {
623	TC_CLSU32_NEW_KNODE,
624	TC_CLSU32_REPLACE_KNODE,
625	TC_CLSU32_DELETE_KNODE,
626	TC_CLSU32_NEW_HNODE,
627	TC_CLSU32_REPLACE_HNODE,
628	TC_CLSU32_DELETE_HNODE,
629};
630
631struct tc_cls_u32_offload {
632	struct flow_cls_common_offload common;
633	/* knode values */
634	enum tc_clsu32_command command;
635	union {
636		struct tc_cls_u32_knode knode;
637		struct tc_cls_u32_hnode hnode;
638	};
639};
640
641static inline bool tc_can_offload(const struct net_device *dev)
642{
643	return dev->features & NETIF_F_HW_TC;
644}
645
646static inline bool tc_can_offload_extack(const struct net_device *dev,
647					 struct netlink_ext_ack *extack)
648{
649	bool can = tc_can_offload(dev);
650
651	if (!can)
652		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
653
654	return can;
655}
656
657static inline bool
658tc_cls_can_offload_and_chain0(const struct net_device *dev,
659			      struct flow_cls_common_offload *common)
660{
661	if (!tc_can_offload_extack(dev, common->extack))
662		return false;
663	if (common->chain_index) {
664		NL_SET_ERR_MSG(common->extack,
665			       "Driver supports only offload of chain 0");
666		return false;
667	}
668	return true;
669}
670
671static inline bool tc_skip_hw(u32 flags)
672{
673	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
674}
675
676static inline bool tc_skip_sw(u32 flags)
677{
678	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
679}
680
681/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
682static inline bool tc_flags_valid(u32 flags)
683{
684	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
685		      TCA_CLS_FLAGS_VERBOSE))
686		return false;
687
688	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
689	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
690		return false;
691
692	return true;
693}
694
695static inline bool tc_in_hw(u32 flags)
696{
697	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
698}
699
700static inline void
701tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
702			   const struct tcf_proto *tp, u32 flags,
703			   struct netlink_ext_ack *extack)
704{
705	cls_common->chain_index = tp->chain->index;
706	cls_common->protocol = tp->protocol;
707	cls_common->prio = tp->prio >> 16;
708	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
709		cls_common->extack = extack;
710}
711
712enum tc_matchall_command {
713	TC_CLSMATCHALL_REPLACE,
714	TC_CLSMATCHALL_DESTROY,
715	TC_CLSMATCHALL_STATS,
716};
717
718struct tc_cls_matchall_offload {
719	struct flow_cls_common_offload common;
720	enum tc_matchall_command command;
721	struct flow_rule *rule;
722	struct flow_stats stats;
723	unsigned long cookie;
724};
725
726enum tc_clsbpf_command {
727	TC_CLSBPF_OFFLOAD,
728	TC_CLSBPF_STATS,
729};
730
731struct tc_cls_bpf_offload {
732	struct flow_cls_common_offload common;
733	enum tc_clsbpf_command command;
734	struct tcf_exts *exts;
735	struct bpf_prog *prog;
736	struct bpf_prog *oldprog;
737	const char *name;
738	bool exts_integrated;
739};
740
741struct tc_mqprio_qopt_offload {
742	/* struct tc_mqprio_qopt must always be the first element */
743	struct tc_mqprio_qopt qopt;
744	u16 mode;
745	u16 shaper;
746	u32 flags;
747	u64 min_rate[TC_QOPT_MAX_QUEUE];
748	u64 max_rate[TC_QOPT_MAX_QUEUE];
749};
750
751/* This structure holds cookie structure that is passed from user
752 * to the kernel for actions and classifiers
753 */
754struct tc_cookie {
755	u8  *data;
756	u32 len;
757	struct rcu_head rcu;
758};
759
760struct tc_qopt_offload_stats {
761	struct gnet_stats_basic_packed *bstats;
762	struct gnet_stats_queue *qstats;
763};
764
765enum tc_mq_command {
766	TC_MQ_CREATE,
767	TC_MQ_DESTROY,
768	TC_MQ_STATS,
769	TC_MQ_GRAFT,
770};
771
772struct tc_mq_opt_offload_graft_params {
773	unsigned long queue;
774	u32 child_handle;
775};
776
777struct tc_mq_qopt_offload {
778	enum tc_mq_command command;
779	u32 handle;
780	union {
781		struct tc_qopt_offload_stats stats;
782		struct tc_mq_opt_offload_graft_params graft_params;
783	};
784};
785
786enum tc_red_command {
787	TC_RED_REPLACE,
788	TC_RED_DESTROY,
789	TC_RED_STATS,
790	TC_RED_XSTATS,
791	TC_RED_GRAFT,
792};
793
794struct tc_red_qopt_offload_params {
795	u32 min;
796	u32 max;
797	u32 probability;
798	u32 limit;
799	bool is_ecn;
800	bool is_harddrop;
801	bool is_nodrop;
802	struct gnet_stats_queue *qstats;
803};
804
805struct tc_red_qopt_offload {
806	enum tc_red_command command;
807	u32 handle;
808	u32 parent;
809	union {
810		struct tc_red_qopt_offload_params set;
811		struct tc_qopt_offload_stats stats;
812		struct red_stats *xstats;
813		u32 child_handle;
814	};
815};
816
817enum tc_gred_command {
818	TC_GRED_REPLACE,
819	TC_GRED_DESTROY,
820	TC_GRED_STATS,
821};
822
823struct tc_gred_vq_qopt_offload_params {
824	bool present;
825	u32 limit;
826	u32 prio;
827	u32 min;
828	u32 max;
829	bool is_ecn;
830	bool is_harddrop;
831	u32 probability;
832	/* Only need backlog, see struct tc_prio_qopt_offload_params */
833	u32 *backlog;
834};
835
836struct tc_gred_qopt_offload_params {
837	bool grio_on;
838	bool wred_on;
839	unsigned int dp_cnt;
840	unsigned int dp_def;
841	struct gnet_stats_queue *qstats;
842	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
843};
844
845struct tc_gred_qopt_offload_stats {
846	struct gnet_stats_basic_packed bstats[MAX_DPs];
847	struct gnet_stats_queue qstats[MAX_DPs];
848	struct red_stats *xstats[MAX_DPs];
849};
850
851struct tc_gred_qopt_offload {
852	enum tc_gred_command command;
853	u32 handle;
854	u32 parent;
855	union {
856		struct tc_gred_qopt_offload_params set;
857		struct tc_gred_qopt_offload_stats stats;
858	};
859};
860
861enum tc_prio_command {
862	TC_PRIO_REPLACE,
863	TC_PRIO_DESTROY,
864	TC_PRIO_STATS,
865	TC_PRIO_GRAFT,
866};
867
868struct tc_prio_qopt_offload_params {
869	int bands;
870	u8 priomap[TC_PRIO_MAX + 1];
871	/* At the point of un-offloading the Qdisc, the reported backlog and
872	 * qlen need to be reduced by the portion that is in HW.
 
873	 */
874	struct gnet_stats_queue *qstats;
875};
876
877struct tc_prio_qopt_offload_graft_params {
878	u8 band;
879	u32 child_handle;
880};
881
882struct tc_prio_qopt_offload {
883	enum tc_prio_command command;
884	u32 handle;
885	u32 parent;
886	union {
887		struct tc_prio_qopt_offload_params replace_params;
888		struct tc_qopt_offload_stats stats;
889		struct tc_prio_qopt_offload_graft_params graft_params;
890	};
891};
892
893enum tc_root_command {
894	TC_ROOT_GRAFT,
895};
896
897struct tc_root_qopt_offload {
898	enum tc_root_command command;
899	u32 handle;
900	bool ingress;
901};
902
903enum tc_ets_command {
904	TC_ETS_REPLACE,
905	TC_ETS_DESTROY,
906	TC_ETS_STATS,
907	TC_ETS_GRAFT,
908};
909
910struct tc_ets_qopt_offload_replace_params {
911	unsigned int bands;
912	u8 priomap[TC_PRIO_MAX + 1];
913	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
914	unsigned int weights[TCQ_ETS_MAX_BANDS];
915	struct gnet_stats_queue *qstats;
916};
917
918struct tc_ets_qopt_offload_graft_params {
919	u8 band;
920	u32 child_handle;
921};
922
923struct tc_ets_qopt_offload {
924	enum tc_ets_command command;
925	u32 handle;
926	u32 parent;
927	union {
928		struct tc_ets_qopt_offload_replace_params replace_params;
929		struct tc_qopt_offload_stats stats;
930		struct tc_ets_qopt_offload_graft_params graft_params;
931	};
932};
933
934enum tc_tbf_command {
935	TC_TBF_REPLACE,
936	TC_TBF_DESTROY,
937	TC_TBF_STATS,
938};
939
940struct tc_tbf_qopt_offload_replace_params {
941	struct psched_ratecfg rate;
942	u32 max_size;
943	struct gnet_stats_queue *qstats;
944};
945
946struct tc_tbf_qopt_offload {
947	enum tc_tbf_command command;
948	u32 handle;
949	u32 parent;
950	union {
951		struct tc_tbf_qopt_offload_replace_params replace_params;
952		struct tc_qopt_offload_stats stats;
953	};
954};
955
956enum tc_fifo_command {
957	TC_FIFO_REPLACE,
958	TC_FIFO_DESTROY,
959	TC_FIFO_STATS,
960};
961
962struct tc_fifo_qopt_offload {
963	enum tc_fifo_command command;
964	u32 handle;
965	u32 parent;
966	union {
967		struct tc_qopt_offload_stats stats;
968	};
969};
970
971#endif