v5.14.15
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef __NET_PKT_CLS_H
   3#define __NET_PKT_CLS_H
   4
   5#include <linux/pkt_cls.h>
   6#include <linux/workqueue.h>
   7#include <net/sch_generic.h>
   8#include <net/act_api.h>
   9#include <net/net_namespace.h>
  10
  11/* TC action not accessible from user space */
  12#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)
  13
  14/* Basic packet classifier frontend definitions. */
  15
  16struct tcf_walker {
  17	int	stop;
  18	int	skip;
  19	int	count;
  20	bool	nonempty;
  21	unsigned long cookie;
  22	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
  23};
  24
  25int register_tcf_proto_ops(struct tcf_proto_ops *ops);
  26int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
  27
  28struct tcf_block_ext_info {
  29	enum flow_block_binder_type binder_type;
  30	tcf_chain_head_change_t *chain_head_change;
  31	void *chain_head_change_priv;
  32	u32 block_index;
  33};
  34
  35struct tcf_qevent {
  36	struct tcf_block	*block;
  37	struct tcf_block_ext_info info;
  38	struct tcf_proto __rcu *filter_chain;
  39};
  40
  41struct tcf_block_cb;
  42bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
  43
  44#ifdef CONFIG_NET_CLS
  45struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
  46				       u32 chain_index);
  47void tcf_chain_put_by_act(struct tcf_chain *chain);
  48struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
  49				     struct tcf_chain *chain);
  50struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
  51				     struct tcf_proto *tp);
  52void tcf_block_netif_keep_dst(struct tcf_block *block);
  53int tcf_block_get(struct tcf_block **p_block,
  54		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
  55		  struct netlink_ext_ack *extack);
  56int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
  57		      struct tcf_block_ext_info *ei,
  58		      struct netlink_ext_ack *extack);
  59void tcf_block_put(struct tcf_block *block);
  60void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
  61		       struct tcf_block_ext_info *ei);
  62
  63static inline bool tcf_block_shared(struct tcf_block *block)
  64{
  65	return block->index;
  66}
  67
  68static inline bool tcf_block_non_null_shared(struct tcf_block *block)
  69{
  70	return block && block->index;
  71}
  72
  73static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
  74{
  75	WARN_ON(tcf_block_shared(block));
  76	return block->q;
  77}
  78
  79int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
  80		 struct tcf_result *res, bool compat_mode);
  81int tcf_classify_ingress(struct sk_buff *skb,
  82			 const struct tcf_block *ingress_block,
  83			 const struct tcf_proto *tp, struct tcf_result *res,
  84			 bool compat_mode);
  85
  86#else
  87static inline bool tcf_block_shared(struct tcf_block *block)
  88{
  89	return false;
  90}
  91
  92static inline bool tcf_block_non_null_shared(struct tcf_block *block)
  93{
  94	return false;
  95}
  96
  97static inline
  98int tcf_block_get(struct tcf_block **p_block,
  99		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
 100		  struct netlink_ext_ack *extack)
 101{
 102	return 0;
 103}
 104
 105static inline
 106int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
 107		      struct tcf_block_ext_info *ei,
 108		      struct netlink_ext_ack *extack)
 109{
 110	return 0;
 111}
 112
 113static inline void tcf_block_put(struct tcf_block *block)
 114{
 115}
 116
 117static inline
 118void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 119		       struct tcf_block_ext_info *ei)
 120{
 121}
 122
 123static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 124{
 125	return NULL;
 126}
 127
 128static inline
 129int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
 130			       void *cb_priv)
 131{
 132	return 0;
 133}
 134
 135static inline
 136void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
 137				  void *cb_priv)
 138{
 139}
 140
 141static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 142			       struct tcf_result *res, bool compat_mode)
 143{
 144	return TC_ACT_UNSPEC;
 145}
 146
 147static inline int tcf_classify_ingress(struct sk_buff *skb,
 148				       const struct tcf_block *ingress_block,
 149				       const struct tcf_proto *tp,
 150				       struct tcf_result *res, bool compat_mode)
 151{
 152	return TC_ACT_UNSPEC;
 153}
 154
 155#endif
 156
 157static inline unsigned long
 158__cls_set_class(unsigned long *clp, unsigned long cl)
 159{
 160	return xchg(clp, cl);
 161}
 162
 163static inline void
 164__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
 165{
 166	unsigned long cl;
 167
 168	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
 169	cl = __cls_set_class(&r->class, cl);
 170	if (cl)
 171		q->ops->cl_ops->unbind_tcf(q, cl);
 172}
 173
 174static inline void
 175tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
 176{
 177	struct Qdisc *q = tp->chain->block->q;
 178
 179	/* Check q as it is not set for shared blocks. In that case,
 180	 * setting class is not supported.
 181	 */
 182	if (!q)
 183		return;
 184	sch_tree_lock(q);
 185	__tcf_bind_filter(q, r, base);
 186	sch_tree_unlock(q);
 187}
 188
 189static inline void
 190__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
 191{
 192	unsigned long cl;
 193
 194	if ((cl = __cls_set_class(&r->class, 0)) != 0)
 195		q->ops->cl_ops->unbind_tcf(q, cl);
 196}
 197
 198static inline void
 199tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
 200{
 201	struct Qdisc *q = tp->chain->block->q;
 202
 203	if (!q)
 204		return;
 205	__tcf_unbind_filter(q, r);
 206}
 207
 208struct tcf_exts {
 209#ifdef CONFIG_NET_CLS_ACT
  210	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
 211	int nr_actions;
 212	struct tc_action **actions;
 213	struct net *net;
 214#endif
 215	/* Map to export classifier specific extension TLV types to the
 216	 * generic extensions API. Unsupported extensions must be set to 0.
 217	 */
 218	int action;
 219	int police;
 220};
 221
 222static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
 223				int action, int police)
 224{
 225#ifdef CONFIG_NET_CLS_ACT
 226	exts->type = 0;
 227	exts->nr_actions = 0;
 228	exts->net = net;
 229	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
 230				GFP_KERNEL);
 231	if (!exts->actions)
 232		return -ENOMEM;
 233#endif
 234	exts->action = action;
 235	exts->police = police;
 236	return 0;
 237}
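/* Editorial example (not part of the original header): a minimal sketch of
 * how a classifier's ->change() pairs tcf_exts_init() with
 * tcf_exts_validate() (declared further below) and tcf_exts_destroy() on the
 * error path. TCA_FOO_ACT and TCA_FOO_POLICE are hypothetical per-classifier
 * TLV types; tb, est, ovr and extack stand for the caller's usual state.
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&e);
 *		return err;
 *	}
 */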
 238
 239/* Return false if the netns is being destroyed in cleanup_net(). Callers
 240 * need to do cleanup synchronously in this case, otherwise may race with
 241 * tc_action_net_exit(). Return true for other cases.
 242 */
 243static inline bool tcf_exts_get_net(struct tcf_exts *exts)
 244{
 245#ifdef CONFIG_NET_CLS_ACT
 246	exts->net = maybe_get_net(exts->net);
 247	return exts->net != NULL;
 248#else
 249	return true;
 250#endif
 251}
 252
 253static inline void tcf_exts_put_net(struct tcf_exts *exts)
 254{
 255#ifdef CONFIG_NET_CLS_ACT
 256	if (exts->net)
 257		put_net(exts->net);
 258#endif
 259}
 260
 261#ifdef CONFIG_NET_CLS_ACT
 262#define tcf_exts_for_each_action(i, a, exts) \
 263	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
 264#else
 265#define tcf_exts_for_each_action(i, a, exts) \
 266	for (; 0; (void)(i), (void)(a), (void)(exts))
 267#endif
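/* Editorial example (not part of the original header): a minimal sketch of
 * walking a filter's actions with the iterator above; with CONFIG_NET_CLS_ACT
 * disabled the loop simply runs zero times. It mirrors what
 * tcf_exts_num_actions() (declared below) computes; the name is hypothetical.
 */
static inline int foo_count_actions_example(struct tcf_exts *exts)
{
	struct tc_action *a;
	int i, n = 0;

	tcf_exts_for_each_action(i, a, exts)
		n++;
	return n;
}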
 268
 269static inline void
 270tcf_exts_stats_update(const struct tcf_exts *exts,
 271		      u64 bytes, u64 packets, u64 drops, u64 lastuse,
 272		      u8 used_hw_stats, bool used_hw_stats_valid)
 273{
 274#ifdef CONFIG_NET_CLS_ACT
 275	int i;
 276
 277	preempt_disable();
 278
 279	for (i = 0; i < exts->nr_actions; i++) {
 280		struct tc_action *a = exts->actions[i];
 281
 282		tcf_action_stats_update(a, bytes, packets, drops,
 283					lastuse, true);
 284		a->used_hw_stats = used_hw_stats;
 285		a->used_hw_stats_valid = used_hw_stats_valid;
 286	}
 287
 288	preempt_enable();
 289#endif
 290}
 291
 292/**
 293 * tcf_exts_has_actions - check if at least one action is present
 294 * @exts: tc filter extensions handle
 295 *
 296 * Returns true if at least one action is present.
 297 */
 298static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
 299{
 300#ifdef CONFIG_NET_CLS_ACT
 301	return exts->nr_actions;
 302#else
 303	return false;
 304#endif
 305}
 306
 307/**
 308 * tcf_exts_exec - execute tc filter extensions
 309 * @skb: socket buffer
 310 * @exts: tc filter extensions handle
 311 * @res: desired result
 312 *
 313 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 314 * a negative number if the filter must be considered unmatched or
 315 * a positive action code (TC_ACT_*) which must be returned to the
 316 * underlying layer.
 317 */
 318static inline int
 319tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
 320	      struct tcf_result *res)
 321{
 322#ifdef CONFIG_NET_CLS_ACT
 323	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
 324#endif
 325	return TC_ACT_OK;
 326}
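/* Editorial example (not part of the original header): the tail of a
 * hypothetical classifier's ->classify() callback once a filter has matched.
 * Negative returns mean "treat the filter as unmatched"; positive TC_ACT_*
 * codes propagate to the caller.
 */
static inline int foo_classify_tail_example(struct sk_buff *skb,
					    struct tcf_exts *exts,
					    struct tcf_result *res)
{
	/* res->classid etc. were filled in by the matching filter. */
	return tcf_exts_exec(skb, exts, res);
}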
 327
 328int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
 329		      struct nlattr **tb, struct nlattr *rate_tlv,
 330		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
 331		      struct netlink_ext_ack *extack);
 332void tcf_exts_destroy(struct tcf_exts *exts);
 333void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
 334int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
 335int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
 336int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
 337
 338/**
 339 * struct tcf_pkt_info - packet information
 340 *
 341 * @ptr: start of the pkt data
 342 * @nexthdr: offset of the next header
 343 */
 344struct tcf_pkt_info {
 345	unsigned char *		ptr;
 346	int			nexthdr;
 347};
 348
 349#ifdef CONFIG_NET_EMATCH
 350
 351struct tcf_ematch_ops;
 352
 353/**
 354 * struct tcf_ematch - extended match (ematch)
 355 * 
 356 * @matchid: identifier to allow userspace to reidentify a match
 357 * @flags: flags specifying attributes and the relation to other matches
 358 * @ops: the operations lookup table of the corresponding ematch module
 359 * @datalen: length of the ematch specific configuration data
 360 * @data: ematch specific data
 361 * @net: the network namespace
 362 */
 363struct tcf_ematch {
 364	struct tcf_ematch_ops * ops;
 365	unsigned long		data;
 366	unsigned int		datalen;
 367	u16			matchid;
 368	u16			flags;
 369	struct net		*net;
 370};
 371
 372static inline int tcf_em_is_container(struct tcf_ematch *em)
 373{
 374	return !em->ops;
 375}
 376
 377static inline int tcf_em_is_simple(struct tcf_ematch *em)
 378{
 379	return em->flags & TCF_EM_SIMPLE;
 380}
 381
 382static inline int tcf_em_is_inverted(struct tcf_ematch *em)
 383{
 384	return em->flags & TCF_EM_INVERT;
 385}
 386
 387static inline int tcf_em_last_match(struct tcf_ematch *em)
 388{
 389	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
 390}
 391
 392static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
 393{
 394	if (tcf_em_last_match(em))
 395		return 1;
 396
 397	if (result == 0 && em->flags & TCF_EM_REL_AND)
 398		return 1;
 399
 400	if (result != 0 && em->flags & TCF_EM_REL_OR)
 401		return 1;
 402
 403	return 0;
 404}
 405	
 406/**
 407 * struct tcf_ematch_tree - ematch tree handle
 408 *
 409 * @hdr: ematch tree header supplied by userspace
 410 * @matches: array of ematches
 411 */
 412struct tcf_ematch_tree {
 413	struct tcf_ematch_tree_hdr hdr;
 414	struct tcf_ematch *	matches;
 415	
 416};
 417
 418/**
 419 * struct tcf_ematch_ops - ematch module operations
 420 * 
 421 * @kind: identifier (kind) of this ematch module
 422 * @datalen: length of expected configuration data (optional)
 423 * @change: called during validation (optional)
 424 * @match: called during ematch tree evaluation, must return 1/0
  425 * @destroy: called during destruction (optional)
 426 * @dump: called during dumping process (optional)
 427 * @owner: owner, must be set to THIS_MODULE
 428 * @link: link to previous/next ematch module (internal use)
 429 */
 430struct tcf_ematch_ops {
 431	int			kind;
 432	int			datalen;
 433	int			(*change)(struct net *net, void *,
 434					  int, struct tcf_ematch *);
 435	int			(*match)(struct sk_buff *, struct tcf_ematch *,
 436					 struct tcf_pkt_info *);
 437	void			(*destroy)(struct tcf_ematch *);
 438	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
 439	struct module		*owner;
 440	struct list_head	link;
 441};
 442
 443int tcf_em_register(struct tcf_ematch_ops *);
 444void tcf_em_unregister(struct tcf_ematch_ops *);
 445int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
 446			 struct tcf_ematch_tree *);
 447void tcf_em_tree_destroy(struct tcf_ematch_tree *);
 448int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
 449int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
 450			struct tcf_pkt_info *);
 451
 452/**
  453 * tcf_em_tree_match - evaluate an ematch tree
 454 *
 455 * @skb: socket buffer of the packet in question
 456 * @tree: ematch tree to be used for evaluation
 457 * @info: packet information examined by classifier
 458 *
 459 * This function matches @skb against the ematch tree in @tree by going
 460 * through all ematches respecting their logic relations returning
 461 * as soon as the result is obvious.
 462 *
  463 * Returns 1 if the ematch tree as a whole matches, if no ematches are configured,
  464 * or if ematch support is not enabled in the kernel; otherwise returns 0.
 465 */
 466static inline int tcf_em_tree_match(struct sk_buff *skb,
 467				    struct tcf_ematch_tree *tree,
 468				    struct tcf_pkt_info *info)
 469{
 470	if (tree->hdr.nmatches)
 471		return __tcf_em_tree_match(skb, tree, info);
 472	else
 473		return 1;
 474}
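/* Editorial example (not part of the original header): a minimal sketch of
 * how a classifier such as cls_basic evaluates its ematch tree per packet.
 * foo_ematch_example is a hypothetical name.
 */
static inline bool foo_ematch_example(struct sk_buff *skb,
				      struct tcf_ematch_tree *tree)
{
	struct tcf_pkt_info info = {
		.ptr	 = skb_network_header(skb),
		.nexthdr = 0,
	};

	return tcf_em_tree_match(skb, tree, &info) == 1;
}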
 475
 476#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))
 477
 478#else /* CONFIG_NET_EMATCH */
 479
 480struct tcf_ematch_tree {
 481};
 482
 483#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
 484#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
 485#define tcf_em_tree_dump(skb, t, tlv) (0)
 486#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
 487
 488#endif /* CONFIG_NET_EMATCH */
 489
 490static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
 491{
 492	switch (layer) {
 493		case TCF_LAYER_LINK:
 494			return skb_mac_header(skb);
 495		case TCF_LAYER_NETWORK:
 496			return skb_network_header(skb);
 497		case TCF_LAYER_TRANSPORT:
 498			return skb_transport_header(skb);
 499	}
 500
 501	return NULL;
 502}
 503
 504static inline int tcf_valid_offset(const struct sk_buff *skb,
 505				   const unsigned char *ptr, const int len)
 506{
 507	return likely((ptr + len) <= skb_tail_pointer(skb) &&
 508		      ptr >= skb->head &&
 509		      (ptr <= (ptr + len)));
 510}
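/* Editorial example (not part of the original header): a minimal sketch of
 * bounds-checking a header pointer with tcf_get_base_ptr() and
 * tcf_valid_offset() before reading through it. foo_read_u16_example is a
 * hypothetical helper name.
 */
static inline bool foo_read_u16_example(struct sk_buff *skb, int layer,
					int offset, u16 *val)
{
	unsigned char *base = tcf_get_base_ptr(skb, layer);

	if (!base || !tcf_valid_offset(skb, base + offset, sizeof(*val)))
		return false;
	memcpy(val, base + offset, sizeof(*val));
	return true;
}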
 511
 512static inline int
 513tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
 514		 struct netlink_ext_ack *extack)
 515{
 516	char indev[IFNAMSIZ];
 517	struct net_device *dev;
 518
 519	if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
 520		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
 521				    "Interface name too long");
 522		return -EINVAL;
 523	}
 524	dev = __dev_get_by_name(net, indev);
 525	if (!dev) {
 526		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
 527				    "Network device not found");
 528		return -ENODEV;
 529	}
 530	return dev->ifindex;
 531}
 532
 533static inline bool
 534tcf_match_indev(struct sk_buff *skb, int ifindex)
 535{
 536	if (!ifindex)
 537		return true;
 538	if  (!skb->skb_iif)
 539		return false;
 540	return ifindex == skb->skb_iif;
 541}
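/* Editorial example (hypothetical TCA_FOO_INDEV attribute): the usual pairing
 * of the two helpers above; tcf_change_indev() resolves the device name to an
 * ifindex at configuration time, tcf_match_indev() tests it per packet.
 *
 *	config:    ret = tcf_change_indev(net, tb[TCA_FOO_INDEV], extack);
 *	           if (ret < 0)
 *	                   return ret;
 *	           f->ifindex = ret;
 *	datapath:  if (!tcf_match_indev(skb, f->ifindex))
 *	                   continue;  (skip this filter)
 */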
 542
 543int tc_setup_flow_action(struct flow_action *flow_action,
 544			 const struct tcf_exts *exts);
 545void tc_cleanup_flow_action(struct flow_action *flow_action);
 546
 547int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
 548		     void *type_data, bool err_stop, bool rtnl_held);
 549int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
 550		    enum tc_setup_type type, void *type_data, bool err_stop,
 551		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
 552int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
 553			enum tc_setup_type type, void *type_data, bool err_stop,
 554			u32 *old_flags, unsigned int *old_in_hw_count,
 555			u32 *new_flags, unsigned int *new_in_hw_count,
 556			bool rtnl_held);
 557int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
 558			enum tc_setup_type type, void *type_data, bool err_stop,
 559			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
 560int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
 561			  bool add, flow_setup_cb_t *cb,
 562			  enum tc_setup_type type, void *type_data,
 563			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
 564unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
 565
 566#ifdef CONFIG_NET_CLS_ACT
 567int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
 568		    enum flow_block_binder_type binder_type,
 569		    struct nlattr *block_index_attr,
 570		    struct netlink_ext_ack *extack);
 571void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
 572int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
 573			       struct netlink_ext_ack *extack);
 574struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
 575				  struct sk_buff **to_free, int *ret);
 576int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
 577#else
 578static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
 579				  enum flow_block_binder_type binder_type,
 580				  struct nlattr *block_index_attr,
 581				  struct netlink_ext_ack *extack)
 582{
 583	return 0;
 584}
 585
 586static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
 587{
 588}
 589
 590static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
 591					     struct netlink_ext_ack *extack)
 592{
 593	return 0;
 594}
 595
 596static inline struct sk_buff *
 597tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
 598		  struct sk_buff **to_free, int *ret)
 599{
 600	return skb;
 601}
 602
 603static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
 604{
 605	return 0;
 606}
 607#endif
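/* Editorial example, modelled on how sch_red wires up qevents; names with a
 * foo_/FOO_ prefix are hypothetical:
 *
 *	init:     err = tcf_qevent_init(&q->qe_early_drop, sch,
 *	                                FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *	                                tb[TCA_FOO_EARLY_DROP_BLOCK], extack);
 *	enqueue:  skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb,
 *	                                  to_free, &ret);
 *	          if (!skb)
 *	                  return ret;
 *	destroy:  tcf_qevent_destroy(&q->qe_early_drop, sch);
 */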
 608
 609struct tc_cls_u32_knode {
 610	struct tcf_exts *exts;
 611	struct tcf_result *res;
 612	struct tc_u32_sel *sel;
 613	u32 handle;
 614	u32 val;
 615	u32 mask;
 616	u32 link_handle;
 617	u8 fshift;
 618};
 619
 620struct tc_cls_u32_hnode {
 621	u32 handle;
 622	u32 prio;
 623	unsigned int divisor;
 624};
 625
 626enum tc_clsu32_command {
 627	TC_CLSU32_NEW_KNODE,
 628	TC_CLSU32_REPLACE_KNODE,
 629	TC_CLSU32_DELETE_KNODE,
 630	TC_CLSU32_NEW_HNODE,
 631	TC_CLSU32_REPLACE_HNODE,
 632	TC_CLSU32_DELETE_HNODE,
 633};
 634
 635struct tc_cls_u32_offload {
 636	struct flow_cls_common_offload common;
 637	/* knode values */
 638	enum tc_clsu32_command command;
 639	union {
 640		struct tc_cls_u32_knode knode;
 641		struct tc_cls_u32_hnode hnode;
 642	};
 643};
 644
 645static inline bool tc_can_offload(const struct net_device *dev)
 646{
 647	return dev->features & NETIF_F_HW_TC;
 648}
 649
 650static inline bool tc_can_offload_extack(const struct net_device *dev,
 651					 struct netlink_ext_ack *extack)
 652{
 653	bool can = tc_can_offload(dev);
 654
 655	if (!can)
 656		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
 657
 658	return can;
 659}
 660
 661static inline bool
 662tc_cls_can_offload_and_chain0(const struct net_device *dev,
 663			      struct flow_cls_common_offload *common)
 664{
 665	if (!tc_can_offload_extack(dev, common->extack))
 666		return false;
 667	if (common->chain_index) {
 668		NL_SET_ERR_MSG(common->extack,
 669			       "Driver supports only offload of chain 0");
 670		return false;
 671	}
 672	return true;
 673}
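/* Editorial example (not part of the original header): the guard a driver
 * typically places at the top of its classifier offload handler.
 * foo_setup_clsflower_example is a hypothetical name.
 */
static inline int foo_setup_clsflower_example(struct net_device *dev,
					      struct flow_cls_offload *f)
{
	if (!tc_cls_can_offload_and_chain0(dev, &f->common))
		return -EOPNOTSUPP;
	/* ... translate f->rule and program the hardware ... */
	return 0;
}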
 674
 675static inline bool tc_skip_hw(u32 flags)
 676{
 677	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
 678}
 679
 680static inline bool tc_skip_sw(u32 flags)
 681{
 682	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
 683}
 684
 685/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
 686static inline bool tc_flags_valid(u32 flags)
 687{
 688	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
 689		      TCA_CLS_FLAGS_VERBOSE))
 690		return false;
 691
 692	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
 693	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
 694		return false;
 695
 696	return true;
 697}
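/* Editorial examples of the rule above (not in the original header):
 *
 *	tc_flags_valid(0)                                       -> true
 *	tc_flags_valid(TCA_CLS_FLAGS_SKIP_HW)                   -> true
 *	tc_flags_valid(TCA_CLS_FLAGS_SKIP_SW)                   -> true
 *	tc_flags_valid(TCA_CLS_FLAGS_SKIP_HW |
 *	               TCA_CLS_FLAGS_SKIP_SW)                   -> false
 */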
 698
 699static inline bool tc_in_hw(u32 flags)
 700{
 701	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
 702}
 703
 704static inline void
 705tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
 706			   const struct tcf_proto *tp, u32 flags,
 707			   struct netlink_ext_ack *extack)
 708{
 709	cls_common->chain_index = tp->chain->index;
 710	cls_common->protocol = tp->protocol;
 711	cls_common->prio = tp->prio >> 16;
 712	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
 713		cls_common->extack = extack;
 714}
 715
 716#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
 717static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
 718{
 719	struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
 720
 721	if (tc_skb_ext)
 722		memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
 723	return tc_skb_ext;
 724}
 725#endif
 726
 727enum tc_matchall_command {
 728	TC_CLSMATCHALL_REPLACE,
 729	TC_CLSMATCHALL_DESTROY,
 730	TC_CLSMATCHALL_STATS,
 731};
 732
 733struct tc_cls_matchall_offload {
 734	struct flow_cls_common_offload common;
 735	enum tc_matchall_command command;
 736	struct flow_rule *rule;
 737	struct flow_stats stats;
 738	unsigned long cookie;
 739};
 740
 741enum tc_clsbpf_command {
 742	TC_CLSBPF_OFFLOAD,
 743	TC_CLSBPF_STATS,
 744};
 745
 746struct tc_cls_bpf_offload {
 747	struct flow_cls_common_offload common;
 748	enum tc_clsbpf_command command;
 749	struct tcf_exts *exts;
 750	struct bpf_prog *prog;
 751	struct bpf_prog *oldprog;
 752	const char *name;
 753	bool exts_integrated;
 754};
 755
 756struct tc_mqprio_qopt_offload {
 757	/* struct tc_mqprio_qopt must always be the first element */
 758	struct tc_mqprio_qopt qopt;
 759	u16 mode;
 760	u16 shaper;
 761	u32 flags;
 762	u64 min_rate[TC_QOPT_MAX_QUEUE];
 763	u64 max_rate[TC_QOPT_MAX_QUEUE];
 764};
 765
  766/* This structure holds the cookie that is passed from user space
  767 * to the kernel for actions and classifiers.
  768 */
 769struct tc_cookie {
 770	u8  *data;
 771	u32 len;
 772	struct rcu_head rcu;
 773};
 774
 775struct tc_qopt_offload_stats {
 776	struct gnet_stats_basic_packed *bstats;
 777	struct gnet_stats_queue *qstats;
 778};
 779
 780enum tc_mq_command {
 781	TC_MQ_CREATE,
 782	TC_MQ_DESTROY,
 783	TC_MQ_STATS,
 784	TC_MQ_GRAFT,
 785};
 786
 787struct tc_mq_opt_offload_graft_params {
 788	unsigned long queue;
 789	u32 child_handle;
 790};
 791
 792struct tc_mq_qopt_offload {
 793	enum tc_mq_command command;
 794	u32 handle;
 795	union {
 796		struct tc_qopt_offload_stats stats;
 797		struct tc_mq_opt_offload_graft_params graft_params;
 798	};
 799};
 800
 801enum tc_htb_command {
 802	/* Root */
 803	TC_HTB_CREATE, /* Initialize HTB offload. */
 804	TC_HTB_DESTROY, /* Destroy HTB offload. */
 805
 806	/* Classes */
 807	/* Allocate qid and create leaf. */
 808	TC_HTB_LEAF_ALLOC_QUEUE,
 809	/* Convert leaf to inner, preserve and return qid, create new leaf. */
 810	TC_HTB_LEAF_TO_INNER,
 811	/* Delete leaf, while siblings remain. */
 812	TC_HTB_LEAF_DEL,
 813	/* Delete leaf, convert parent to leaf, preserving qid. */
 814	TC_HTB_LEAF_DEL_LAST,
 815	/* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
 816	TC_HTB_LEAF_DEL_LAST_FORCE,
 817	/* Modify parameters of a node. */
 818	TC_HTB_NODE_MODIFY,
 819
 820	/* Class qdisc */
 821	TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
 822};
 823
 824struct tc_htb_qopt_offload {
 825	struct netlink_ext_ack *extack;
 826	enum tc_htb_command command;
 827	u32 parent_classid;
 828	u16 classid;
 829	u16 qid;
 830	u64 rate;
 831	u64 ceil;
 832};
 833
 834#define TC_HTB_CLASSID_ROOT U32_MAX
 835
 836enum tc_red_command {
 837	TC_RED_REPLACE,
 838	TC_RED_DESTROY,
 839	TC_RED_STATS,
 840	TC_RED_XSTATS,
 841	TC_RED_GRAFT,
 842};
 843
 844struct tc_red_qopt_offload_params {
 845	u32 min;
 846	u32 max;
 847	u32 probability;
 848	u32 limit;
 849	bool is_ecn;
 850	bool is_harddrop;
 851	bool is_nodrop;
 852	struct gnet_stats_queue *qstats;
 853};
 854
 855struct tc_red_qopt_offload {
 856	enum tc_red_command command;
 857	u32 handle;
 858	u32 parent;
 859	union {
 860		struct tc_red_qopt_offload_params set;
 861		struct tc_qopt_offload_stats stats;
 862		struct red_stats *xstats;
 863		u32 child_handle;
 864	};
 865};
 866
 867enum tc_gred_command {
 868	TC_GRED_REPLACE,
 869	TC_GRED_DESTROY,
 870	TC_GRED_STATS,
 871};
 872
 873struct tc_gred_vq_qopt_offload_params {
 874	bool present;
 875	u32 limit;
 876	u32 prio;
 877	u32 min;
 878	u32 max;
 879	bool is_ecn;
 880	bool is_harddrop;
 881	u32 probability;
 882	/* Only need backlog, see struct tc_prio_qopt_offload_params */
 883	u32 *backlog;
 884};
 885
 886struct tc_gred_qopt_offload_params {
 887	bool grio_on;
 888	bool wred_on;
 889	unsigned int dp_cnt;
 890	unsigned int dp_def;
 891	struct gnet_stats_queue *qstats;
 892	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
 893};
 894
 895struct tc_gred_qopt_offload_stats {
 896	struct gnet_stats_basic_packed bstats[MAX_DPs];
 897	struct gnet_stats_queue qstats[MAX_DPs];
 898	struct red_stats *xstats[MAX_DPs];
 899};
 900
 901struct tc_gred_qopt_offload {
 902	enum tc_gred_command command;
 903	u32 handle;
 904	u32 parent;
 905	union {
 906		struct tc_gred_qopt_offload_params set;
 907		struct tc_gred_qopt_offload_stats stats;
 908	};
 909};
 910
 911enum tc_prio_command {
 912	TC_PRIO_REPLACE,
 913	TC_PRIO_DESTROY,
 914	TC_PRIO_STATS,
 915	TC_PRIO_GRAFT,
 916};
 917
 918struct tc_prio_qopt_offload_params {
 919	int bands;
 920	u8 priomap[TC_PRIO_MAX + 1];
 921	/* At the point of un-offloading the Qdisc, the reported backlog and
 922	 * qlen need to be reduced by the portion that is in HW.
 923	 */
 924	struct gnet_stats_queue *qstats;
 925};
 926
 927struct tc_prio_qopt_offload_graft_params {
 928	u8 band;
 929	u32 child_handle;
 930};
 931
 932struct tc_prio_qopt_offload {
 933	enum tc_prio_command command;
 934	u32 handle;
 935	u32 parent;
 936	union {
 937		struct tc_prio_qopt_offload_params replace_params;
 938		struct tc_qopt_offload_stats stats;
 939		struct tc_prio_qopt_offload_graft_params graft_params;
 940	};
 941};
 942
 943enum tc_root_command {
 944	TC_ROOT_GRAFT,
 945};
 946
 947struct tc_root_qopt_offload {
 948	enum tc_root_command command;
 949	u32 handle;
 950	bool ingress;
 951};
 952
 953enum tc_ets_command {
 954	TC_ETS_REPLACE,
 955	TC_ETS_DESTROY,
 956	TC_ETS_STATS,
 957	TC_ETS_GRAFT,
 958};
 959
 960struct tc_ets_qopt_offload_replace_params {
 961	unsigned int bands;
 962	u8 priomap[TC_PRIO_MAX + 1];
 963	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
 964	unsigned int weights[TCQ_ETS_MAX_BANDS];
 965	struct gnet_stats_queue *qstats;
 966};
 967
 968struct tc_ets_qopt_offload_graft_params {
 969	u8 band;
 970	u32 child_handle;
 971};
 972
 973struct tc_ets_qopt_offload {
 974	enum tc_ets_command command;
 975	u32 handle;
 976	u32 parent;
 977	union {
 978		struct tc_ets_qopt_offload_replace_params replace_params;
 979		struct tc_qopt_offload_stats stats;
 980		struct tc_ets_qopt_offload_graft_params graft_params;
 981	};
 982};
 983
 984enum tc_tbf_command {
 985	TC_TBF_REPLACE,
 986	TC_TBF_DESTROY,
 987	TC_TBF_STATS,
 988};
 989
 990struct tc_tbf_qopt_offload_replace_params {
 991	struct psched_ratecfg rate;
 992	u32 max_size;
 993	struct gnet_stats_queue *qstats;
 994};
 995
 996struct tc_tbf_qopt_offload {
 997	enum tc_tbf_command command;
 998	u32 handle;
 999	u32 parent;
1000	union {
1001		struct tc_tbf_qopt_offload_replace_params replace_params;
1002		struct tc_qopt_offload_stats stats;
1003	};
1004};
1005
1006enum tc_fifo_command {
1007	TC_FIFO_REPLACE,
1008	TC_FIFO_DESTROY,
1009	TC_FIFO_STATS,
1010};
1011
1012struct tc_fifo_qopt_offload {
1013	enum tc_fifo_command command;
1014	u32 handle;
1015	u32 parent;
1016	union {
1017		struct tc_qopt_offload_stats stats;
1018	};
1019};
1020
1021#endif
v4.17
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef __NET_PKT_CLS_H
  3#define __NET_PKT_CLS_H
  4
  5#include <linux/pkt_cls.h>
  6#include <linux/workqueue.h>
  7#include <net/sch_generic.h>
  8#include <net/act_api.h>
  9
 10/* Basic packet classifier frontend definitions. */
 11
 12struct tcf_walker {
 13	int	stop;
 14	int	skip;
 15	int	count;
 16	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
 17};
 18
 19int register_tcf_proto_ops(struct tcf_proto_ops *ops);
 20int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
 21
 22enum tcf_block_binder_type {
 23	TCF_BLOCK_BINDER_TYPE_UNSPEC,
 24	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
 25	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
 26};
 27
 28struct tcf_block_ext_info {
 29	enum tcf_block_binder_type binder_type;
 30	tcf_chain_head_change_t *chain_head_change;
 31	void *chain_head_change_priv;
 32	u32 block_index;
 33};
 34
 35struct tcf_block_cb;
 36bool tcf_queue_work(struct work_struct *work);
 37
 38#ifdef CONFIG_NET_CLS
 39struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
 40				bool create);
 41void tcf_chain_put(struct tcf_chain *chain);
 42void tcf_block_netif_keep_dst(struct tcf_block *block);
 43int tcf_block_get(struct tcf_block **p_block,
 44		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
 45		  struct netlink_ext_ack *extack);
 46int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
 47		      struct tcf_block_ext_info *ei,
 48		      struct netlink_ext_ack *extack);
 49void tcf_block_put(struct tcf_block *block);
 50void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 51		       struct tcf_block_ext_info *ei);
 52
 53static inline bool tcf_block_shared(struct tcf_block *block)
 54{
 55	return block->index;
 56}
 57
 58static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 59{
 60	WARN_ON(tcf_block_shared(block));
 61	return block->q;
 62}
 63
 64static inline struct net_device *tcf_block_dev(struct tcf_block *block)
 65{
 66	return tcf_block_q(block)->dev_queue->dev;
 67}
 68
 69void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
 70struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
 71					 tc_setup_cb_t *cb, void *cb_ident);
 72void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
 73unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
 74struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
 75					     tc_setup_cb_t *cb, void *cb_ident,
 76					     void *cb_priv);
 77int tcf_block_cb_register(struct tcf_block *block,
 78			  tc_setup_cb_t *cb, void *cb_ident,
 79			  void *cb_priv);
 80void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
 81void tcf_block_cb_unregister(struct tcf_block *block,
 82			     tc_setup_cb_t *cb, void *cb_ident);
 83
 84int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 85		 struct tcf_result *res, bool compat_mode);
 86
 87#else
 88static inline
 89int tcf_block_get(struct tcf_block **p_block,
 90		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
 91		  struct netlink_ext_ack *extack)
 92{
 93	return 0;
 94}
 95
 96static inline
 97int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
 98		      struct tcf_block_ext_info *ei,
 99		      struct netlink_ext_ack *extack)
100{
101	return 0;
102}
103
104static inline void tcf_block_put(struct tcf_block *block)
105{
106}
107
108static inline
109void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
110		       struct tcf_block_ext_info *ei)
111{
112}
113
114static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
115{
116	return NULL;
117}
118
119static inline struct net_device *tcf_block_dev(struct tcf_block *block)
120{
121	return NULL;
122}
123
124static inline
125int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
126			       void *cb_priv)
127{
128	return 0;
129}
130
131static inline
132void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
133				  void *cb_priv)
134{
135}
136
137static inline
138void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
139{
140	return NULL;
141}
142
143static inline
144struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
145					 tc_setup_cb_t *cb, void *cb_ident)
146{
147	return NULL;
148}
149
150static inline
151void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
152{
153}
154
155static inline
156unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
157{
158	return 0;
159}
160
161static inline
162struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
163					     tc_setup_cb_t *cb, void *cb_ident,
164					     void *cb_priv)
165{
166	return NULL;
167}
168
169static inline
170int tcf_block_cb_register(struct tcf_block *block,
171			  tc_setup_cb_t *cb, void *cb_ident,
172			  void *cb_priv)
173{
174	return 0;
175}
176
177static inline
178void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
179{
180}
181
182static inline
183void tcf_block_cb_unregister(struct tcf_block *block,
184			     tc_setup_cb_t *cb, void *cb_ident)
185{
186}
187
188static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
189			       struct tcf_result *res, bool compat_mode)
190{
191	return TC_ACT_UNSPEC;
192}
193#endif
194
195static inline unsigned long
196__cls_set_class(unsigned long *clp, unsigned long cl)
197{
198	return xchg(clp, cl);
199}
200
201static inline unsigned long
202cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
203{
204	unsigned long old_cl;
205
206	sch_tree_lock(q);
207	old_cl = __cls_set_class(clp, cl);
208	sch_tree_unlock(q);
209	return old_cl;
210}
211
212static inline void
213tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
214{
215	struct Qdisc *q = tp->chain->block->q;
216	unsigned long cl;
217
218	/* Check q as it is not set for shared blocks. In that case,
219	 * setting class is not supported.
220	 */
221	if (!q)
222		return;
223	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
224	cl = cls_set_class(q, &r->class, cl);
225	if (cl)
226		q->ops->cl_ops->unbind_tcf(q, cl);
227}
228
229static inline void
230tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
231{
232	struct Qdisc *q = tp->chain->block->q;
233	unsigned long cl;
234
235	if (!q)
236		return;
237	if ((cl = __cls_set_class(&r->class, 0)) != 0)
238		q->ops->cl_ops->unbind_tcf(q, cl);
239}
240
241struct tcf_exts {
242#ifdef CONFIG_NET_CLS_ACT
 243	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
244	int nr_actions;
245	struct tc_action **actions;
246	struct net *net;
247#endif
248	/* Map to export classifier specific extension TLV types to the
249	 * generic extensions API. Unsupported extensions must be set to 0.
250	 */
251	int action;
252	int police;
253};
254
255static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
256{
257#ifdef CONFIG_NET_CLS_ACT
258	exts->type = 0;
259	exts->nr_actions = 0;
260	exts->net = NULL;
261	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
262				GFP_KERNEL);
263	if (!exts->actions)
264		return -ENOMEM;
265#endif
266	exts->action = action;
267	exts->police = police;
268	return 0;
269}
270
271/* Return false if the netns is being destroyed in cleanup_net(). Callers
272 * need to do cleanup synchronously in this case, otherwise may race with
273 * tc_action_net_exit(). Return true for other cases.
274 */
275static inline bool tcf_exts_get_net(struct tcf_exts *exts)
276{
277#ifdef CONFIG_NET_CLS_ACT
278	exts->net = maybe_get_net(exts->net);
279	return exts->net != NULL;
280#else
281	return true;
282#endif
283}
284
285static inline void tcf_exts_put_net(struct tcf_exts *exts)
286{
287#ifdef CONFIG_NET_CLS_ACT
288	if (exts->net)
289		put_net(exts->net);
290#endif
291}
292
293static inline void tcf_exts_to_list(const struct tcf_exts *exts,
294				    struct list_head *actions)
295{
296#ifdef CONFIG_NET_CLS_ACT
297	int i;
298
299	for (i = 0; i < exts->nr_actions; i++) {
300		struct tc_action *a = exts->actions[i];
301
302		list_add_tail(&a->list, actions);
303	}
304#endif
305}
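/* Editorial example (not part of the original header): a minimal sketch of
 * consuming the action array as a list, as offload code of this era did
 * (later kernels iterate the array directly); it relies on the
 * struct tc_action ->list member that tcf_exts_to_list() chains onto the
 * caller's head. foo_list_actions_example is a hypothetical name.
 */
static inline int foo_list_actions_example(const struct tcf_exts *exts)
{
	struct tc_action *a;
	LIST_HEAD(actions);
	int n = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list)
		n++;
	return n;
}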
306
307static inline void
308tcf_exts_stats_update(const struct tcf_exts *exts,
309		      u64 bytes, u64 packets, u64 lastuse)
310{
311#ifdef CONFIG_NET_CLS_ACT
312	int i;
313
314	preempt_disable();
315
316	for (i = 0; i < exts->nr_actions; i++) {
317		struct tc_action *a = exts->actions[i];
318
319		tcf_action_stats_update(a, bytes, packets, lastuse);
320	}
321
322	preempt_enable();
323#endif
324}
325
326/**
327 * tcf_exts_has_actions - check if at least one action is present
328 * @exts: tc filter extensions handle
329 *
330 * Returns true if at least one action is present.
331 */
332static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
333{
334#ifdef CONFIG_NET_CLS_ACT
335	return exts->nr_actions;
336#else
337	return false;
338#endif
339}
340
341/**
342 * tcf_exts_has_one_action - check if exactly one action is present
343 * @exts: tc filter extensions handle
344 *
345 * Returns true if exactly one action is present.
346 */
347static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
348{
349#ifdef CONFIG_NET_CLS_ACT
350	return exts->nr_actions == 1;
351#else
352	return false;
353#endif
354}
355
356/**
357 * tcf_exts_exec - execute tc filter extensions
358 * @skb: socket buffer
359 * @exts: tc filter extensions handle
360 * @res: desired result
361 *
362 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
363 * a negative number if the filter must be considered unmatched or
364 * a positive action code (TC_ACT_*) which must be returned to the
365 * underlying layer.
366 */
367static inline int
368tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
369	      struct tcf_result *res)
370{
371#ifdef CONFIG_NET_CLS_ACT
372	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
373#endif
374	return TC_ACT_OK;
375}
376
377int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
378		      struct nlattr **tb, struct nlattr *rate_tlv,
379		      struct tcf_exts *exts, bool ovr,
380		      struct netlink_ext_ack *extack);
381void tcf_exts_destroy(struct tcf_exts *exts);
382void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
383int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
384int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
385
386/**
387 * struct tcf_pkt_info - packet information
388 */
389struct tcf_pkt_info {
390	unsigned char *		ptr;
391	int			nexthdr;
392};
393
394#ifdef CONFIG_NET_EMATCH
395
396struct tcf_ematch_ops;
397
398/**
399 * struct tcf_ematch - extended match (ematch)
400 * 
401 * @matchid: identifier to allow userspace to reidentify a match
402 * @flags: flags specifying attributes and the relation to other matches
403 * @ops: the operations lookup table of the corresponding ematch module
404 * @datalen: length of the ematch specific configuration data
405 * @data: ematch specific data
406 */
407struct tcf_ematch {
408	struct tcf_ematch_ops * ops;
409	unsigned long		data;
410	unsigned int		datalen;
411	u16			matchid;
412	u16			flags;
413	struct net		*net;
414};
415
416static inline int tcf_em_is_container(struct tcf_ematch *em)
417{
418	return !em->ops;
419}
420
421static inline int tcf_em_is_simple(struct tcf_ematch *em)
422{
423	return em->flags & TCF_EM_SIMPLE;
424}
425
426static inline int tcf_em_is_inverted(struct tcf_ematch *em)
427{
428	return em->flags & TCF_EM_INVERT;
429}
430
431static inline int tcf_em_last_match(struct tcf_ematch *em)
432{
433	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
434}
435
436static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
437{
438	if (tcf_em_last_match(em))
439		return 1;
440
441	if (result == 0 && em->flags & TCF_EM_REL_AND)
442		return 1;
443
444	if (result != 0 && em->flags & TCF_EM_REL_OR)
445		return 1;
446
447	return 0;
448}
449	
450/**
451 * struct tcf_ematch_tree - ematch tree handle
452 *
453 * @hdr: ematch tree header supplied by userspace
454 * @matches: array of ematches
455 */
456struct tcf_ematch_tree {
457	struct tcf_ematch_tree_hdr hdr;
458	struct tcf_ematch *	matches;
459	
460};
461
462/**
463 * struct tcf_ematch_ops - ematch module operations
464 * 
465 * @kind: identifier (kind) of this ematch module
466 * @datalen: length of expected configuration data (optional)
467 * @change: called during validation (optional)
468 * @match: called during ematch tree evaluation, must return 1/0
 469 * @destroy: called during destruction (optional)
470 * @dump: called during dumping process (optional)
471 * @owner: owner, must be set to THIS_MODULE
472 * @link: link to previous/next ematch module (internal use)
473 */
474struct tcf_ematch_ops {
475	int			kind;
476	int			datalen;
477	int			(*change)(struct net *net, void *,
478					  int, struct tcf_ematch *);
479	int			(*match)(struct sk_buff *, struct tcf_ematch *,
480					 struct tcf_pkt_info *);
481	void			(*destroy)(struct tcf_ematch *);
482	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
483	struct module		*owner;
484	struct list_head	link;
485};
486
487int tcf_em_register(struct tcf_ematch_ops *);
488void tcf_em_unregister(struct tcf_ematch_ops *);
489int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
490			 struct tcf_ematch_tree *);
491void tcf_em_tree_destroy(struct tcf_ematch_tree *);
492int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
493int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
494			struct tcf_pkt_info *);
495
496/**
 497 * tcf_em_tree_match - evaluate an ematch tree
498 *
499 * @skb: socket buffer of the packet in question
500 * @tree: ematch tree to be used for evaluation
501 * @info: packet information examined by classifier
502 *
503 * This function matches @skb against the ematch tree in @tree by going
504 * through all ematches respecting their logic relations returning
505 * as soon as the result is obvious.
506 *
 507 * Returns 1 if the ematch tree as a whole matches, if no ematches are configured,
 508 * or if ematch support is not enabled in the kernel; otherwise returns 0.
509 */
510static inline int tcf_em_tree_match(struct sk_buff *skb,
511				    struct tcf_ematch_tree *tree,
512				    struct tcf_pkt_info *info)
513{
514	if (tree->hdr.nmatches)
515		return __tcf_em_tree_match(skb, tree, info);
516	else
517		return 1;
518}
519
520#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))
521
522#else /* CONFIG_NET_EMATCH */
523
524struct tcf_ematch_tree {
525};
526
527#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
528#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
529#define tcf_em_tree_dump(skb, t, tlv) (0)
530#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
531
532#endif /* CONFIG_NET_EMATCH */
533
534static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
535{
536	switch (layer) {
537		case TCF_LAYER_LINK:
538			return skb_mac_header(skb);
539		case TCF_LAYER_NETWORK:
540			return skb_network_header(skb);
541		case TCF_LAYER_TRANSPORT:
542			return skb_transport_header(skb);
543	}
544
545	return NULL;
546}
547
548static inline int tcf_valid_offset(const struct sk_buff *skb,
549				   const unsigned char *ptr, const int len)
550{
551	return likely((ptr + len) <= skb_tail_pointer(skb) &&
552		      ptr >= skb->head &&
553		      (ptr <= (ptr + len)));
554}
555
556#ifdef CONFIG_NET_CLS_IND
557#include <net/net_namespace.h>
558
559static inline int
560tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
561		 struct netlink_ext_ack *extack)
562{
563	char indev[IFNAMSIZ];
564	struct net_device *dev;
565
566	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
567		NL_SET_ERR_MSG(extack, "Interface name too long");
568		return -EINVAL;
569	}
570	dev = __dev_get_by_name(net, indev);
571	if (!dev)
572		return -ENODEV;
573	return dev->ifindex;
574}
575
576static inline bool
577tcf_match_indev(struct sk_buff *skb, int ifindex)
578{
579	if (!ifindex)
580		return true;
581	if  (!skb->skb_iif)
582		return false;
583	return ifindex == skb->skb_iif;
584}
585#endif /* CONFIG_NET_CLS_IND */
586
587int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
588		     enum tc_setup_type type, void *type_data, bool err_stop);
589
590enum tc_block_command {
591	TC_BLOCK_BIND,
592	TC_BLOCK_UNBIND,
593};
594
595struct tc_block_offload {
596	enum tc_block_command command;
597	enum tcf_block_binder_type binder_type;
598	struct tcf_block *block;
599};
600
601struct tc_cls_common_offload {
602	u32 chain_index;
603	__be16 protocol;
604	u32 prio;
605	struct netlink_ext_ack *extack;
606};
607
608struct tc_cls_u32_knode {
609	struct tcf_exts *exts;
610	struct tc_u32_sel *sel;
611	u32 handle;
612	u32 val;
613	u32 mask;
614	u32 link_handle;
615	u8 fshift;
616};
617
618struct tc_cls_u32_hnode {
619	u32 handle;
620	u32 prio;
621	unsigned int divisor;
622};
623
624enum tc_clsu32_command {
625	TC_CLSU32_NEW_KNODE,
626	TC_CLSU32_REPLACE_KNODE,
627	TC_CLSU32_DELETE_KNODE,
628	TC_CLSU32_NEW_HNODE,
629	TC_CLSU32_REPLACE_HNODE,
630	TC_CLSU32_DELETE_HNODE,
631};
632
633struct tc_cls_u32_offload {
634	struct tc_cls_common_offload common;
635	/* knode values */
636	enum tc_clsu32_command command;
637	union {
638		struct tc_cls_u32_knode knode;
639		struct tc_cls_u32_hnode hnode;
640	};
641};
642
643static inline bool tc_can_offload(const struct net_device *dev)
644{
645	return dev->features & NETIF_F_HW_TC;
646}
647
648static inline bool tc_can_offload_extack(const struct net_device *dev,
649					 struct netlink_ext_ack *extack)
650{
651	bool can = tc_can_offload(dev);
652
653	if (!can)
654		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
655
656	return can;
657}
658
659static inline bool
660tc_cls_can_offload_and_chain0(const struct net_device *dev,
661			      struct tc_cls_common_offload *common)
662{
663	if (!tc_can_offload_extack(dev, common->extack))
664		return false;
665	if (common->chain_index) {
666		NL_SET_ERR_MSG(common->extack,
667			       "Driver supports only offload of chain 0");
668		return false;
669	}
670	return true;
671}
672
673static inline bool tc_skip_hw(u32 flags)
674{
675	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
676}
677
678static inline bool tc_skip_sw(u32 flags)
679{
680	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
681}
682
683/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
684static inline bool tc_flags_valid(u32 flags)
685{
686	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
687		return false;
688
689	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
690		return false;
691
692	return true;
693}
694
695static inline bool tc_in_hw(u32 flags)
696{
697	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
698}
699
700static inline void
701tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
702			   const struct tcf_proto *tp, u32 flags,
703			   struct netlink_ext_ack *extack)
704{
705	cls_common->chain_index = tp->chain->index;
706	cls_common->protocol = tp->protocol;
707	cls_common->prio = tp->prio;
708	if (tc_skip_sw(flags))
709		cls_common->extack = extack;
710}
711
712enum tc_fl_command {
713	TC_CLSFLOWER_REPLACE,
714	TC_CLSFLOWER_DESTROY,
715	TC_CLSFLOWER_STATS,
716};
717
718struct tc_cls_flower_offload {
719	struct tc_cls_common_offload common;
720	enum tc_fl_command command;
721	unsigned long cookie;
722	struct flow_dissector *dissector;
723	struct fl_flow_key *mask;
724	struct fl_flow_key *key;
725	struct tcf_exts *exts;
726	u32 classid;
727};
728
729enum tc_matchall_command {
730	TC_CLSMATCHALL_REPLACE,
731	TC_CLSMATCHALL_DESTROY,
732};
733
734struct tc_cls_matchall_offload {
735	struct tc_cls_common_offload common;
736	enum tc_matchall_command command;
737	struct tcf_exts *exts;
738	unsigned long cookie;
739};
740
741enum tc_clsbpf_command {
742	TC_CLSBPF_OFFLOAD,
743	TC_CLSBPF_STATS,
744};
745
746struct tc_cls_bpf_offload {
747	struct tc_cls_common_offload common;
748	enum tc_clsbpf_command command;
749	struct tcf_exts *exts;
750	struct bpf_prog *prog;
751	struct bpf_prog *oldprog;
752	const char *name;
753	bool exts_integrated;
754};
755
756struct tc_mqprio_qopt_offload {
757	/* struct tc_mqprio_qopt must always be the first element */
758	struct tc_mqprio_qopt qopt;
759	u16 mode;
760	u16 shaper;
761	u32 flags;
762	u64 min_rate[TC_QOPT_MAX_QUEUE];
763	u64 max_rate[TC_QOPT_MAX_QUEUE];
764};
765
 766/* This structure holds the cookie that is passed from user space
 767 * to the kernel for actions and classifiers.
 768 */
769struct tc_cookie {
770	u8  *data;
771	u32 len;
772};
773
774struct tc_qopt_offload_stats {
775	struct gnet_stats_basic_packed *bstats;
776	struct gnet_stats_queue *qstats;
777};
778
779enum tc_red_command {
780	TC_RED_REPLACE,
781	TC_RED_DESTROY,
782	TC_RED_STATS,
783	TC_RED_XSTATS,
784};
785
786struct tc_red_qopt_offload_params {
787	u32 min;
788	u32 max;
789	u32 probability;
790	bool is_ecn;
791	struct gnet_stats_queue *qstats;
792};
793
794struct tc_red_qopt_offload {
795	enum tc_red_command command;
796	u32 handle;
797	u32 parent;
798	union {
799		struct tc_red_qopt_offload_params set;
800		struct tc_qopt_offload_stats stats;
801		struct red_stats *xstats;
802	};
803};
804
805enum tc_prio_command {
806	TC_PRIO_REPLACE,
807	TC_PRIO_DESTROY,
808	TC_PRIO_STATS,
809	TC_PRIO_GRAFT,
810};
811
812struct tc_prio_qopt_offload_params {
813	int bands;
814	u8 priomap[TC_PRIO_MAX + 1];
 815	/* If an offloaded prio qdisc is changed to a non-offloadable
 816	 * config, it needs to update the backlog & qlen values to negate
 817	 * the HW backlog & qlen values (and only those).
818	 */
819	struct gnet_stats_queue *qstats;
820};
821
822struct tc_prio_qopt_offload_graft_params {
823	u8 band;
824	u32 child_handle;
825};
826
827struct tc_prio_qopt_offload {
828	enum tc_prio_command command;
829	u32 handle;
830	u32 parent;
831	union {
832		struct tc_prio_qopt_offload_params replace_params;
833		struct tc_qopt_offload_stats stats;
834		struct tc_prio_qopt_offload_graft_params graft_params;
835	};
836};
837
838#endif