Linux kernel v6.2: net/sched/act_api.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/act_api.c	Packet action API.
   4 *
   5 * Author:	Jamal Hadi Salim
   6 */
   7
   8#include <linux/types.h>
   9#include <linux/kernel.h>
  10#include <linux/string.h>
  11#include <linux/errno.h>
  12#include <linux/slab.h>
  13#include <linux/skbuff.h>
  14#include <linux/init.h>
  15#include <linux/kmod.h>
  16#include <linux/err.h>
  17#include <linux/module.h>
  18#include <net/net_namespace.h>
  19#include <net/sock.h>
  20#include <net/sch_generic.h>
  21#include <net/pkt_cls.h>
  22#include <net/tc_act/tc_pedit.h>
  23#include <net/act_api.h>
  24#include <net/netlink.h>
  25#include <net/flow_offload.h>
  26#include <net/tc_wrapper.h>
  27
  28#ifdef CONFIG_INET
  29DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
  30EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
  31#endif
  32
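/* Transmit helper for actions that send packets out a device: when the
 * tcf_frag_xmit_count static key is enabled, the skb is diverted through
 * sch_frag_xmit_hook() (which may re-fragment it) instead of being passed
 * straight to xmit().
 */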
  33int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
  34{
  35#ifdef CONFIG_INET
  36	if (static_branch_unlikely(&tcf_frag_xmit_count))
  37		return sch_frag_xmit_hook(skb, xmit);
  38#endif
  39
  40	return xmit(skb);
  41}
  42EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);
  43
  44static void tcf_action_goto_chain_exec(const struct tc_action *a,
  45				       struct tcf_result *res)
  46{
  47	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
  48
  49	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
  50}
  51
  52static void tcf_free_cookie_rcu(struct rcu_head *p)
  53{
  54	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
  55
  56	kfree(cookie->data);
  57	kfree(cookie);
  58}
  59
  60static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
  61				  struct tc_cookie *new_cookie)
  62{
  63	struct tc_cookie *old;
  64
  65	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
  66	if (old)
  67		call_rcu(&old->rcu, tcf_free_cookie_rcu);
  68}
  69
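/* Validate the control part of an action's verdict: plain values must not
 * exceed TC_ACT_VALUE_MAX and extended opcodes must be within
 * TC_ACT_EXT_OPCODE_MAX. For TC_ACT_GOTO_CHAIN the destination chain is
 * looked up (or created) on the caller's block and returned via newchain.
 */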
  70int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
  71			     struct tcf_chain **newchain,
  72			     struct netlink_ext_ack *extack)
  73{
  74	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
  75	u32 chain_index;
  76
  77	if (!opcode)
  78		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
  79	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
  80		ret = 0;
  81	if (ret) {
  82		NL_SET_ERR_MSG(extack, "invalid control action");
  83		goto end;
  84	}
  85
  86	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
  87		chain_index = action & TC_ACT_EXT_VAL_MASK;
  88		if (!tp || !newchain) {
  89			ret = -EINVAL;
  90			NL_SET_ERR_MSG(extack,
  91				       "can't goto NULL proto/chain");
  92			goto end;
  93		}
  94		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
  95		if (!*newchain) {
  96			ret = -ENOMEM;
  97			NL_SET_ERR_MSG(extack,
  98				       "can't allocate goto_chain");
  99		}
 100	}
 101end:
 102	return ret;
 103}
 104EXPORT_SYMBOL(tcf_action_check_ctrlact);
 105
 106struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
 107					 struct tcf_chain *goto_chain)
 108{
 109	a->tcfa_action = action;
 110	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
 111	return goto_chain;
 112}
 113EXPORT_SYMBOL(tcf_action_set_ctrlact);
 114
  115/* XXX: For standalone actions, we don't need an RCU grace period either, because
  116 * actions are always connected to filters and filters are already destroyed in
  117 * RCU callbacks, so after an RCU grace period the actions are already
  118 * disconnected from filters. Later readers cannot find them.
 119 */
 120static void free_tcf(struct tc_action *p)
 121{
 122	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
 123
 124	free_percpu(p->cpu_bstats);
 125	free_percpu(p->cpu_bstats_hw);
 126	free_percpu(p->cpu_qstats);
 127
 128	tcf_set_action_cookie(&p->act_cookie, NULL);
 129	if (chain)
 130		tcf_chain_put_by_act(chain);
 131
 132	kfree(p);
 133}
 134
 135static void offload_action_hw_count_set(struct tc_action *act,
 136					u32 hw_count)
 137{
 138	act->in_hw_count = hw_count;
 139}
 140
 141static void offload_action_hw_count_inc(struct tc_action *act,
 142					u32 hw_count)
 143{
 144	act->in_hw_count += hw_count;
 145}
 146
 147static void offload_action_hw_count_dec(struct tc_action *act,
 148					u32 hw_count)
 149{
 150	act->in_hw_count = act->in_hw_count > hw_count ?
 151			   act->in_hw_count - hw_count : 0;
 152}
 153
 154static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
 155{
 156	if (is_tcf_pedit(act))
 157		return tcf_pedit_nkeys(act);
 158	else
 159		return 1;
 160}
 161
 162static bool tc_act_skip_hw(u32 flags)
 163{
 164	return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false;
 165}
 166
 167static bool tc_act_skip_sw(u32 flags)
 168{
 169	return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
 170}
 171
 172static bool tc_act_in_hw(struct tc_action *act)
 173{
 174	return !!act->in_hw_count;
 175}
 176
 177/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
 178static bool tc_act_flags_valid(u32 flags)
 179{
 180	flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;
 181
 182	return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW);
 183}
 184
 185static int offload_action_init(struct flow_offload_action *fl_action,
 186			       struct tc_action *act,
 187			       enum offload_act_command  cmd,
 188			       struct netlink_ext_ack *extack)
 189{
 190	int err;
 191
 192	fl_action->extack = extack;
 193	fl_action->command = cmd;
 194	fl_action->index = act->tcfa_index;
 195
 196	if (act->ops->offload_act_setup) {
 197		spin_lock_bh(&act->tcfa_lock);
 198		err = act->ops->offload_act_setup(act, fl_action, NULL,
 199						  false, extack);
 200		spin_unlock_bh(&act->tcfa_lock);
 201		return err;
 202	}
 203
 204	return -EOPNOTSUPP;
 205}
 206
 207static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act,
 208				     u32 *hw_count)
 209{
 210	int err;
 211
 212	err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT,
 213					  fl_act, NULL, NULL);
 214	if (err < 0)
 215		return err;
 216
 217	if (hw_count)
 218		*hw_count = err;
 219
 220	return 0;
 221}
 222
 223static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act,
 224					u32 *hw_count,
 225					flow_indr_block_bind_cb_t *cb,
 226					void *cb_priv)
 227{
 228	int err;
 229
 230	err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);
 231	if (err < 0)
 232		return err;
 233
 234	if (hw_count)
 235		*hw_count = 1;
 236
 237	return 0;
 238}
 239
 240static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
 241				  u32 *hw_count,
 242				  flow_indr_block_bind_cb_t *cb,
 243				  void *cb_priv)
 244{
 245	return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
 246						 cb, cb_priv) :
 247		    tcf_action_offload_cmd_ex(fl_act, hw_count);
 248}
 249
 250static int tcf_action_offload_add_ex(struct tc_action *action,
 251				     struct netlink_ext_ack *extack,
 252				     flow_indr_block_bind_cb_t *cb,
 253				     void *cb_priv)
 254{
 255	bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
 256	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
 257		[0] = action,
 258	};
 259	struct flow_offload_action *fl_action;
 260	u32 in_hw_count = 0;
 261	int num, err = 0;
 262
 263	if (tc_act_skip_hw(action->tcfa_flags))
 264		return 0;
 265
 266	num = tcf_offload_act_num_actions_single(action);
 267	fl_action = offload_action_alloc(num);
 268	if (!fl_action)
 269		return -ENOMEM;
 270
 271	err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack);
 272	if (err)
 273		goto fl_err;
 274
 275	err = tc_setup_action(&fl_action->action, actions, extack);
 276	if (err) {
 277		NL_SET_ERR_MSG_MOD(extack,
 278				   "Failed to setup tc actions for offload");
 279		goto fl_err;
 280	}
 281
 282	err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
 283	if (!err)
 284		cb ? offload_action_hw_count_inc(action, in_hw_count) :
 285		     offload_action_hw_count_set(action, in_hw_count);
 286
 287	if (skip_sw && !tc_act_in_hw(action))
 288		err = -EINVAL;
 289
 290	tc_cleanup_offload_action(&fl_action->action);
 291
 292fl_err:
 293	kfree(fl_action);
 294
 295	return err;
 296}
 297
 298/* offload the tc action after it is inserted */
 299static int tcf_action_offload_add(struct tc_action *action,
 300				  struct netlink_ext_ack *extack)
 301{
 302	return tcf_action_offload_add_ex(action, extack, NULL, NULL);
 303}
 304
 305int tcf_action_update_hw_stats(struct tc_action *action)
 306{
 307	struct flow_offload_action fl_act = {};
 308	int err;
 309
 310	if (!tc_act_in_hw(action))
 311		return -EOPNOTSUPP;
 312
 313	err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL);
 314	if (err)
 315		return err;
 316
 317	err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL);
 318	if (!err) {
 319		preempt_disable();
 320		tcf_action_stats_update(action, fl_act.stats.bytes,
 321					fl_act.stats.pkts,
 322					fl_act.stats.drops,
 323					fl_act.stats.lastused,
 324					true);
 325		preempt_enable();
 326		action->used_hw_stats = fl_act.stats.used_hw_stats;
 327		action->used_hw_stats_valid = true;
 328	} else {
 329		return -EOPNOTSUPP;
 330	}
 331
 332	return 0;
 333}
 334EXPORT_SYMBOL(tcf_action_update_hw_stats);
 335
 336static int tcf_action_offload_del_ex(struct tc_action *action,
 337				     flow_indr_block_bind_cb_t *cb,
 338				     void *cb_priv)
 339{
 340	struct flow_offload_action fl_act = {};
 341	u32 in_hw_count = 0;
 342	int err = 0;
 343
 344	if (!tc_act_in_hw(action))
 345		return 0;
 346
 347	err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
 348	if (err)
 349		return err;
 350
 351	err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
 352	if (err < 0)
 353		return err;
 354
 355	if (!cb && action->in_hw_count != in_hw_count)
 356		return -EINVAL;
 357
 358	/* do not need to update hw state when deleting action */
 359	if (cb && in_hw_count)
 360		offload_action_hw_count_dec(action, in_hw_count);
 361
 362	return 0;
 363}
 364
 365static int tcf_action_offload_del(struct tc_action *action)
 366{
 367	return tcf_action_offload_del_ex(action, NULL, NULL);
 368}
 369
 370static void tcf_action_cleanup(struct tc_action *p)
 371{
 372	tcf_action_offload_del(p);
 373	if (p->ops->cleanup)
 374		p->ops->cleanup(p);
 375
 376	gen_kill_estimator(&p->tcfa_rate_est);
 377	free_tcf(p);
 378}
 379
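/* Drop a reference (and a bind reference when @bind is true). If this was the
 * last reference, the action is removed from the owning IDR under
 * idrinfo->lock and fully released; returns 1 in that case, 0 otherwise.
 */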
 380static int __tcf_action_put(struct tc_action *p, bool bind)
 381{
 382	struct tcf_idrinfo *idrinfo = p->idrinfo;
 383
 384	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
 385		if (bind)
 386			atomic_dec(&p->tcfa_bindcnt);
 387		idr_remove(&idrinfo->action_idr, p->tcfa_index);
 388		mutex_unlock(&idrinfo->lock);
 389
 390		tcf_action_cleanup(p);
 391		return 1;
 392	}
 393
 394	if (bind)
 395		atomic_dec(&p->tcfa_bindcnt);
 396
 397	return 0;
 398}
 399
 400static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
 401{
 402	int ret = 0;
 403
  404	/* Release with strict==1 and bind==0 is only called through the act API
  405	 * interface (classifiers always bind). The only case where an action with
  406	 * a positive reference count and zero bind count can exist is when it was
  407	 * also created via the act API (unbinding the last classifier destroys the
  408	 * action if it was created by a classifier). So the only way the bind count
  409	 * can change after the initial check is when an unbound action is being
  410	 * destroyed by the act API while a classifier concurrently binds to an
  411	 * action with the same id. This results either in creation of a new action
  412	 * (same behavior as before) or in reuse of the existing action if the
  413	 * concurrent process increments the reference count before the action is
  414	 * deleted. Both scenarios are acceptable.
  415	 */
 416	if (p) {
 417		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
 418			return -EPERM;
 419
 420		if (__tcf_action_put(p, bind))
 421			ret = ACT_P_DELETED;
 422	}
 423
 424	return ret;
 425}
 426
 427int tcf_idr_release(struct tc_action *a, bool bind)
 428{
 429	const struct tc_action_ops *ops = a->ops;
 430	int ret;
 431
 432	ret = __tcf_idr_release(a, bind, false);
 433	if (ret == ACT_P_DELETED)
 434		module_put(ops->owner);
 435	return ret;
 436}
 437EXPORT_SYMBOL(tcf_idr_release);
 438
 439static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
 440{
 441	struct tc_cookie *act_cookie;
 442	u32 cookie_len = 0;
 443
 444	rcu_read_lock();
 445	act_cookie = rcu_dereference(act->act_cookie);
 446
 447	if (act_cookie)
 448		cookie_len = nla_total_size(act_cookie->len);
 449	rcu_read_unlock();
 450
 451	return  nla_total_size(0) /* action number nested */
 452		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
 453		+ cookie_len /* TCA_ACT_COOKIE */
 454		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
 455		+ nla_total_size(0) /* TCA_ACT_STATS nested */
 456		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
 457		/* TCA_STATS_BASIC */
 458		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
 459		/* TCA_STATS_PKT64 */
 460		+ nla_total_size_64bit(sizeof(u64))
 461		/* TCA_STATS_QUEUE */
 462		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
 463		+ nla_total_size(0) /* TCA_OPTIONS nested */
 464		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
 465}
 466
 467static size_t tcf_action_full_attrs_size(size_t sz)
 468{
 469	return NLMSG_HDRLEN                     /* struct nlmsghdr */
 470		+ sizeof(struct tcamsg)
 471		+ nla_total_size(0)             /* TCA_ACT_TAB nested */
 472		+ sz;
 473}
 474
 475static size_t tcf_action_fill_size(const struct tc_action *act)
 476{
 477	size_t sz = tcf_action_shared_attrs_size(act);
 478
 479	if (act->ops->get_fill_size)
 480		return act->ops->get_fill_size(act) + sz;
 481	return sz;
 482}
 483
 484static int
 485tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
 486{
 487	unsigned char *b = skb_tail_pointer(skb);
 488	struct tc_cookie *cookie;
 489
 490	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
 491		goto nla_put_failure;
 492	if (tcf_action_copy_stats(skb, a, 0))
 493		goto nla_put_failure;
 494	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
 495		goto nla_put_failure;
 496
 497	rcu_read_lock();
 498	cookie = rcu_dereference(a->act_cookie);
 499	if (cookie) {
 500		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
 501			rcu_read_unlock();
 502			goto nla_put_failure;
 503		}
 504	}
 505	rcu_read_unlock();
 506
 507	return 0;
 508
 509nla_put_failure:
 510	nlmsg_trim(skb, b);
 511	return -1;
 512}
 513
 514static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 515			   struct netlink_callback *cb)
 516{
 517	int err = 0, index = -1, s_i = 0, n_i = 0;
 518	u32 act_flags = cb->args[2];
 519	unsigned long jiffy_since = cb->args[3];
 520	struct nlattr *nest;
 521	struct idr *idr = &idrinfo->action_idr;
 522	struct tc_action *p;
 523	unsigned long id = 1;
 524	unsigned long tmp;
 525
 526	mutex_lock(&idrinfo->lock);
 527
 528	s_i = cb->args[0];
 529
 530	idr_for_each_entry_ul(idr, p, tmp, id) {
 531		index++;
 532		if (index < s_i)
 533			continue;
 534		if (IS_ERR(p))
 535			continue;
 536
 537		if (jiffy_since &&
 538		    time_after(jiffy_since,
 539			       (unsigned long)p->tcfa_tm.lastuse))
 540			continue;
 541
 542		nest = nla_nest_start_noflag(skb, n_i);
 543		if (!nest) {
 544			index--;
 545			goto nla_put_failure;
 546		}
 547		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
 548			tcf_action_dump_terse(skb, p, true) :
 549			tcf_action_dump_1(skb, p, 0, 0);
 550		if (err < 0) {
 551			index--;
 552			nlmsg_trim(skb, nest);
 553			goto done;
 554		}
 555		nla_nest_end(skb, nest);
 556		n_i++;
 557		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
 558		    n_i >= TCA_ACT_MAX_PRIO)
 559			goto done;
 560	}
 561done:
 562	if (index >= 0)
 563		cb->args[0] = index + 1;
 564
 565	mutex_unlock(&idrinfo->lock);
 566	if (n_i) {
 567		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
 568			cb->args[1] = n_i;
 569	}
 570	return n_i;
 571
 572nla_put_failure:
 573	nla_nest_cancel(skb, nest);
 574	goto done;
 575}
 576
 577static int tcf_idr_release_unsafe(struct tc_action *p)
 578{
 579	if (atomic_read(&p->tcfa_bindcnt) > 0)
 580		return -EPERM;
 581
 582	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
 583		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
 584		tcf_action_cleanup(p);
 585		return ACT_P_DELETED;
 586	}
 587
 588	return 0;
 589}
 590
 591static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 592			  const struct tc_action_ops *ops,
 593			  struct netlink_ext_ack *extack)
 594{
 595	struct nlattr *nest;
 596	int n_i = 0;
 597	int ret = -EINVAL;
 598	struct idr *idr = &idrinfo->action_idr;
 599	struct tc_action *p;
 600	unsigned long id = 1;
 601	unsigned long tmp;
 602
 603	nest = nla_nest_start_noflag(skb, 0);
 604	if (nest == NULL)
 605		goto nla_put_failure;
 606	if (nla_put_string(skb, TCA_KIND, ops->kind))
 607		goto nla_put_failure;
 608
 609	ret = 0;
 610	mutex_lock(&idrinfo->lock);
 611	idr_for_each_entry_ul(idr, p, tmp, id) {
 612		if (IS_ERR(p))
 613			continue;
 614		ret = tcf_idr_release_unsafe(p);
 615		if (ret == ACT_P_DELETED)
 616			module_put(ops->owner);
 617		else if (ret < 0)
 618			break;
 619		n_i++;
 620	}
 621	mutex_unlock(&idrinfo->lock);
 622	if (ret < 0) {
 623		if (n_i)
 624			NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
 625		else
 626			goto nla_put_failure;
 627	}
 628
 629	ret = nla_put_u32(skb, TCA_FCNT, n_i);
 630	if (ret)
 631		goto nla_put_failure;
 632	nla_nest_end(skb, nest);
 633
 634	return n_i;
 635nla_put_failure:
 636	nla_nest_cancel(skb, nest);
 637	return ret;
 638}
 639
 640int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
 641		       struct netlink_callback *cb, int type,
 642		       const struct tc_action_ops *ops,
 643		       struct netlink_ext_ack *extack)
 644{
 645	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 646
 647	if (type == RTM_DELACTION) {
 648		return tcf_del_walker(idrinfo, skb, ops, extack);
 649	} else if (type == RTM_GETACTION) {
 650		return tcf_dump_walker(idrinfo, skb, cb);
 651	} else {
 652		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
 653		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
 654		return -EINVAL;
 655	}
 656}
 657EXPORT_SYMBOL(tcf_generic_walker);
 658
 659int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
 660{
 661	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 662	struct tc_action *p;
 663
 664	mutex_lock(&idrinfo->lock);
 665	p = idr_find(&idrinfo->action_idr, index);
 666	if (IS_ERR(p))
 667		p = NULL;
 668	else if (p)
 669		refcount_inc(&p->tcfa_refcnt);
 670	mutex_unlock(&idrinfo->lock);
 671
 672	if (p) {
 673		*a = p;
 674		return true;
 675	}
 676	return false;
 677}
 678EXPORT_SYMBOL(tcf_idr_search);
 679
 680static int __tcf_generic_walker(struct net *net, struct sk_buff *skb,
 681				struct netlink_callback *cb, int type,
 682				const struct tc_action_ops *ops,
 683				struct netlink_ext_ack *extack)
 684{
 685	struct tc_action_net *tn = net_generic(net, ops->net_id);
 686
 687	if (unlikely(ops->walk))
 688		return ops->walk(net, skb, cb, type, ops, extack);
 689
 690	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
 691}
 692
 693static int __tcf_idr_search(struct net *net,
 694			    const struct tc_action_ops *ops,
 695			    struct tc_action **a, u32 index)
 696{
 697	struct tc_action_net *tn = net_generic(net, ops->net_id);
 698
 699	if (unlikely(ops->lookup))
 700		return ops->lookup(net, a, index);
 701
 702	return tcf_idr_search(tn, a, index);
 703}
 704
 705static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
 706{
 707	struct tc_action *p;
 708	int ret = 0;
 709
 710	mutex_lock(&idrinfo->lock);
 711	p = idr_find(&idrinfo->action_idr, index);
 712	if (!p) {
 713		mutex_unlock(&idrinfo->lock);
 714		return -ENOENT;
 715	}
 716
 717	if (!atomic_read(&p->tcfa_bindcnt)) {
 718		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
 719			struct module *owner = p->ops->owner;
 720
 721			WARN_ON(p != idr_remove(&idrinfo->action_idr,
 722						p->tcfa_index));
 723			mutex_unlock(&idrinfo->lock);
 724
 725			tcf_action_cleanup(p);
 726			module_put(owner);
 727			return 0;
 728		}
 729		ret = 0;
 730	} else {
 731		ret = -EPERM;
 732	}
 733
 734	mutex_unlock(&idrinfo->lock);
 735	return ret;
 736}
 737
 738int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 739		   struct tc_action **a, const struct tc_action_ops *ops,
 740		   int bind, bool cpustats, u32 flags)
 741{
 742	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
 743	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 744	int err = -ENOMEM;
 745
 746	if (unlikely(!p))
 747		return -ENOMEM;
 748	refcount_set(&p->tcfa_refcnt, 1);
 749	if (bind)
 750		atomic_set(&p->tcfa_bindcnt, 1);
 751
 752	if (cpustats) {
 753		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
 754		if (!p->cpu_bstats)
 755			goto err1;
 756		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
 757		if (!p->cpu_bstats_hw)
 758			goto err2;
 759		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
 760		if (!p->cpu_qstats)
 761			goto err3;
 762	}
 763	gnet_stats_basic_sync_init(&p->tcfa_bstats);
 764	gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
 765	spin_lock_init(&p->tcfa_lock);
 766	p->tcfa_index = index;
 767	p->tcfa_tm.install = jiffies;
 768	p->tcfa_tm.lastuse = jiffies;
 769	p->tcfa_tm.firstuse = 0;
 770	p->tcfa_flags = flags;
 771	if (est) {
 772		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
 773					&p->tcfa_rate_est,
 774					&p->tcfa_lock, false, est);
 775		if (err)
 776			goto err4;
 777	}
 778
 779	p->idrinfo = idrinfo;
 780	__module_get(ops->owner);
 781	p->ops = ops;
 782	*a = p;
 783	return 0;
 784err4:
 785	free_percpu(p->cpu_qstats);
 786err3:
 787	free_percpu(p->cpu_bstats_hw);
 788err2:
 789	free_percpu(p->cpu_bstats);
 790err1:
 791	kfree(p);
 792	return err;
 793}
 794EXPORT_SYMBOL(tcf_idr_create);
 795
 796int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
 797			      struct nlattr *est, struct tc_action **a,
 798			      const struct tc_action_ops *ops, int bind,
 799			      u32 flags)
 800{
  801	/* Set cpustats according to the action's flags. */
 802	return tcf_idr_create(tn, index, est, a, ops, bind,
 803			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
 804}
 805EXPORT_SYMBOL(tcf_idr_create_from_flags);
 806
 807/* Cleanup idr index that was allocated but not initialized. */
 808
 809void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
 810{
 811	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 812
 813	mutex_lock(&idrinfo->lock);
 814	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
 815	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
 816	mutex_unlock(&idrinfo->lock);
 817}
 818EXPORT_SYMBOL(tcf_idr_cleanup);
 819
  820/* Check if an action with the specified index exists. If the action is found,
  821 * increment its reference and bind counters and return 1. Otherwise insert a
  822 * temporary error pointer (to prevent concurrent users from inserting actions
  823 * with the same index) and return 0.
  824 */
 825
 826int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
 827			struct tc_action **a, int bind)
 828{
 829	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 830	struct tc_action *p;
 831	int ret;
 832
 833again:
 834	mutex_lock(&idrinfo->lock);
 835	if (*index) {
 836		p = idr_find(&idrinfo->action_idr, *index);
 837		if (IS_ERR(p)) {
 838			/* This means that another process allocated
 839			 * index but did not assign the pointer yet.
 840			 */
 841			mutex_unlock(&idrinfo->lock);
 842			goto again;
 843		}
 844
 845		if (p) {
 846			refcount_inc(&p->tcfa_refcnt);
 847			if (bind)
 848				atomic_inc(&p->tcfa_bindcnt);
 849			*a = p;
 850			ret = 1;
 851		} else {
 852			*a = NULL;
 853			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
 854					    *index, GFP_KERNEL);
 855			if (!ret)
 856				idr_replace(&idrinfo->action_idr,
 857					    ERR_PTR(-EBUSY), *index);
 858		}
 859	} else {
 860		*index = 1;
 861		*a = NULL;
 862		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
 863				    UINT_MAX, GFP_KERNEL);
 864		if (!ret)
 865			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
 866				    *index);
 867	}
 868	mutex_unlock(&idrinfo->lock);
 869	return ret;
 870}
 871EXPORT_SYMBOL(tcf_idr_check_alloc);
 872
 873void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
 874			 struct tcf_idrinfo *idrinfo)
 875{
 876	struct idr *idr = &idrinfo->action_idr;
 877	struct tc_action *p;
 878	int ret;
 879	unsigned long id = 1;
 880	unsigned long tmp;
 881
 882	idr_for_each_entry_ul(idr, p, tmp, id) {
 883		ret = __tcf_idr_release(p, false, true);
 884		if (ret == ACT_P_DELETED)
 885			module_put(ops->owner);
 886		else if (ret < 0)
 887			return;
 888	}
 889	idr_destroy(&idrinfo->action_idr);
 890}
 891EXPORT_SYMBOL(tcf_idrinfo_destroy);
 892
 893static LIST_HEAD(act_base);
 894static DEFINE_RWLOCK(act_mod_lock);
  895/* Since the act ops id is stored in the pernet subsystem list,
  896 * there is no way to walk through only the action
  897 * subsystems, so we keep the tc action pernet ops ids in a
  898 * separate list for reoffload to walk through.
  899 */
 900static LIST_HEAD(act_pernet_id_list);
 901static DEFINE_MUTEX(act_id_mutex);
 902struct tc_act_pernet_id {
 903	struct list_head list;
 904	unsigned int id;
 905};
 906
 907static int tcf_pernet_add_id_list(unsigned int id)
 908{
 909	struct tc_act_pernet_id *id_ptr;
 910	int ret = 0;
 911
 912	mutex_lock(&act_id_mutex);
 913	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
 914		if (id_ptr->id == id) {
 915			ret = -EEXIST;
 916			goto err_out;
 917		}
 918	}
 919
 920	id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL);
 921	if (!id_ptr) {
 922		ret = -ENOMEM;
 923		goto err_out;
 924	}
 925	id_ptr->id = id;
 926
 927	list_add_tail(&id_ptr->list, &act_pernet_id_list);
 928
 929err_out:
 930	mutex_unlock(&act_id_mutex);
 931	return ret;
 932}
 933
 934static void tcf_pernet_del_id_list(unsigned int id)
 935{
 936	struct tc_act_pernet_id *id_ptr;
 937
 938	mutex_lock(&act_id_mutex);
 939	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
 940		if (id_ptr->id == id) {
 941			list_del(&id_ptr->list);
 942			kfree(id_ptr);
 943			break;
 944		}
 945	}
 946	mutex_unlock(&act_id_mutex);
 947}
 948
 949int tcf_register_action(struct tc_action_ops *act,
 950			struct pernet_operations *ops)
 951{
 952	struct tc_action_ops *a;
 953	int ret;
 954
 955	if (!act->act || !act->dump || !act->init)
 956		return -EINVAL;
 957
 958	/* We have to register pernet ops before making the action ops visible,
 959	 * otherwise tcf_action_init_1() could get a partially initialized
 960	 * netns.
 961	 */
 962	ret = register_pernet_subsys(ops);
 963	if (ret)
 964		return ret;
 965
 966	if (ops->id) {
 967		ret = tcf_pernet_add_id_list(*ops->id);
 968		if (ret)
 969			goto err_id;
 970	}
 971
 972	write_lock(&act_mod_lock);
 973	list_for_each_entry(a, &act_base, head) {
 974		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
 975			ret = -EEXIST;
 976			goto err_out;
 977		}
 978	}
 979	list_add_tail(&act->head, &act_base);
 980	write_unlock(&act_mod_lock);
 981
 982	return 0;
 983
 984err_out:
 985	write_unlock(&act_mod_lock);
 986	if (ops->id)
 987		tcf_pernet_del_id_list(*ops->id);
 988err_id:
 989	unregister_pernet_subsys(ops);
 990	return ret;
 991}
 992EXPORT_SYMBOL(tcf_register_action);
 993
 994int tcf_unregister_action(struct tc_action_ops *act,
 995			  struct pernet_operations *ops)
 996{
 997	struct tc_action_ops *a;
 998	int err = -ENOENT;
 999
1000	write_lock(&act_mod_lock);
1001	list_for_each_entry(a, &act_base, head) {
1002		if (a == act) {
1003			list_del(&act->head);
1004			err = 0;
1005			break;
1006		}
1007	}
1008	write_unlock(&act_mod_lock);
1009	if (!err) {
1010		unregister_pernet_subsys(ops);
1011		if (ops->id)
1012			tcf_pernet_del_id_list(*ops->id);
1013	}
1014	return err;
1015}
1016EXPORT_SYMBOL(tcf_unregister_action);
1017
1018/* lookup by name */
1019static struct tc_action_ops *tc_lookup_action_n(char *kind)
1020{
1021	struct tc_action_ops *a, *res = NULL;
1022
1023	if (kind) {
1024		read_lock(&act_mod_lock);
1025		list_for_each_entry(a, &act_base, head) {
1026			if (strcmp(kind, a->kind) == 0) {
1027				if (try_module_get(a->owner))
1028					res = a;
1029				break;
1030			}
1031		}
1032		read_unlock(&act_mod_lock);
1033	}
1034	return res;
1035}
1036
1037/* lookup by nlattr */
1038static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
1039{
1040	struct tc_action_ops *a, *res = NULL;
1041
1042	if (kind) {
1043		read_lock(&act_mod_lock);
1044		list_for_each_entry(a, &act_base, head) {
1045			if (nla_strcmp(kind, a->kind) == 0) {
1046				if (try_module_get(a->owner))
1047					res = a;
1048				break;
1049			}
1050		}
1051		read_unlock(&act_mod_lock);
1052	}
1053	return res;
1054}
1055
 1056/* TCA_ACT_MAX_PRIO is 32, so there can be at most 32 actions */
1057#define TCA_ACT_MAX_PRIO_MASK 0x1FF
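/* Run the actions in order, skipping any flagged skip_sw. TC_ACT_REPEAT
 * re-runs the same action (bounded by a repeat TTL of 32), TC_ACT_JUMP
 * restarts the walk and skips the number of actions encoded in the verdict
 * (bounded by jmp_ttl restarts), and TC_ACT_GOTO_CHAIN redirects
 * classification via res->goto_tp. Any verdict other than TC_ACT_PIPE stops
 * the walk and is returned.
 */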
1058int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
1059		    int nr_actions, struct tcf_result *res)
1060{
1061	u32 jmp_prgcnt = 0;
1062	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /*matches actions per filter */
1063	int i;
1064	int ret = TC_ACT_OK;
1065
1066	if (skb_skip_tc_classify(skb))
1067		return TC_ACT_OK;
1068
1069restart_act_graph:
1070	for (i = 0; i < nr_actions; i++) {
1071		const struct tc_action *a = actions[i];
1072		int repeat_ttl;
1073
1074		if (jmp_prgcnt > 0) {
1075			jmp_prgcnt -= 1;
1076			continue;
1077		}
1078
1079		if (tc_act_skip_sw(a->tcfa_flags))
1080			continue;
1081
1082		repeat_ttl = 32;
1083repeat:
1084		ret = tc_act(skb, a, res);
1085		if (unlikely(ret == TC_ACT_REPEAT)) {
1086			if (--repeat_ttl != 0)
1087				goto repeat;
1088			/* suspicious opcode, stop pipeline */
1089			net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
1090			return TC_ACT_OK;
1091		}
1092		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
1093			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
1094			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
1095				/* faulty opcode, stop pipeline */
1096				return TC_ACT_OK;
1097			} else {
1098				jmp_ttl -= 1;
1099				if (jmp_ttl > 0)
1100					goto restart_act_graph;
1101				else /* faulty graph, stop pipeline */
1102					return TC_ACT_OK;
1103			}
1104		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
1105			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
1106				net_warn_ratelimited("can't go to NULL chain!\n");
1107				return TC_ACT_SHOT;
1108			}
1109			tcf_action_goto_chain_exec(a, res);
1110		}
1111
1112		if (ret != TC_ACT_PIPE)
1113			break;
1114	}
1115
1116	return ret;
1117}
1118EXPORT_SYMBOL(tcf_action_exec);
1119
1120int tcf_action_destroy(struct tc_action *actions[], int bind)
1121{
1122	const struct tc_action_ops *ops;
1123	struct tc_action *a;
1124	int ret = 0, i;
1125
1126	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
1127		a = actions[i];
1128		actions[i] = NULL;
1129		ops = a->ops;
1130		ret = __tcf_idr_release(a, bind, true);
1131		if (ret == ACT_P_DELETED)
1132			module_put(ops->owner);
1133		else if (ret < 0)
1134			return ret;
1135	}
1136	return ret;
1137}
1138
1139static int tcf_action_put(struct tc_action *p)
1140{
1141	return __tcf_action_put(p, false);
1142}
1143
1144/* Put all actions in this array, skip those NULL's. */
1145static void tcf_action_put_many(struct tc_action *actions[])
1146{
1147	int i;
1148
1149	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
1150		struct tc_action *a = actions[i];
1151		const struct tc_action_ops *ops;
1152
1153		if (!a)
1154			continue;
1155		ops = a->ops;
1156		if (tcf_action_put(a))
1157			module_put(ops->owner);
1158	}
1159}
1160
1161int
1162tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
1163{
1164	return a->ops->dump(skb, a, bind, ref);
1165}
1166
1167int
1168tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
1169{
1170	int err = -EINVAL;
1171	unsigned char *b = skb_tail_pointer(skb);
1172	struct nlattr *nest;
1173	u32 flags;
1174
1175	if (tcf_action_dump_terse(skb, a, false))
1176		goto nla_put_failure;
1177
1178	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
1179	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
1180			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
1181		goto nla_put_failure;
1182
1183	if (a->used_hw_stats_valid &&
1184	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
1185			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
1186		goto nla_put_failure;
1187
1188	flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
1189	if (flags &&
1190	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
1191			       flags, flags))
1192		goto nla_put_failure;
1193
1194	if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
1195		goto nla_put_failure;
1196
1197	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
1198	if (nest == NULL)
1199		goto nla_put_failure;
1200	err = tcf_action_dump_old(skb, a, bind, ref);
1201	if (err > 0) {
1202		nla_nest_end(skb, nest);
1203		return err;
1204	}
1205
1206nla_put_failure:
1207	nlmsg_trim(skb, b);
1208	return -1;
1209}
1210EXPORT_SYMBOL(tcf_action_dump_1);
1211
1212int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
1213		    int bind, int ref, bool terse)
1214{
1215	struct tc_action *a;
1216	int err = -EINVAL, i;
1217	struct nlattr *nest;
1218
1219	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
1220		a = actions[i];
1221		nest = nla_nest_start_noflag(skb, i + 1);
1222		if (nest == NULL)
1223			goto nla_put_failure;
1224		err = terse ? tcf_action_dump_terse(skb, a, false) :
1225			tcf_action_dump_1(skb, a, bind, ref);
1226		if (err < 0)
1227			goto errout;
1228		nla_nest_end(skb, nest);
1229	}
1230
1231	return 0;
1232
1233nla_put_failure:
1234	err = -EINVAL;
1235errout:
1236	nla_nest_cancel(skb, nest);
1237	return err;
1238}
1239
1240static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
1241{
1242	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
1243	if (!c)
1244		return NULL;
1245
1246	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
1247	if (!c->data) {
1248		kfree(c);
1249		return NULL;
1250	}
1251	c->len = nla_len(tb[TCA_ACT_COOKIE]);
1252
1253	return c;
1254}
1255
1256static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
1257{
1258	struct nla_bitfield32 hw_stats_bf;
1259
 1260	/* If the user did not pass the attr, they do not care
 1261	 * about the type. Return "any" in that case,
 1262	 * which selects all supported types.
 1263	 */
1264	if (!hw_stats_attr)
1265		return TCA_ACT_HW_STATS_ANY;
1266	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
1267	return hw_stats_bf.value;
1268}
1269
1270static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
1271	[TCA_ACT_KIND]		= { .type = NLA_STRING },
1272	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
1273	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
1274				    .len = TC_COOKIE_MAX_SIZE },
1275	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
1276	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
1277							TCA_ACT_FLAGS_SKIP_HW |
1278							TCA_ACT_FLAGS_SKIP_SW),
1279	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
1280};
1281
1282void tcf_idr_insert_many(struct tc_action *actions[])
1283{
1284	int i;
1285
1286	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
1287		struct tc_action *a = actions[i];
1288		struct tcf_idrinfo *idrinfo;
1289
1290		if (!a)
1291			continue;
1292		idrinfo = a->idrinfo;
1293		mutex_lock(&idrinfo->lock);
1294		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
1295		 * it is just created, otherwise this is just a nop.
1296		 */
1297		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
1298		mutex_unlock(&idrinfo->lock);
1299	}
1300}
1301
1302struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
1303					 bool rtnl_held,
1304					 struct netlink_ext_ack *extack)
1305{
1306	struct nlattr *tb[TCA_ACT_MAX + 1];
1307	struct tc_action_ops *a_o;
1308	char act_name[IFNAMSIZ];
1309	struct nlattr *kind;
1310	int err;
1311
1312	if (!police) {
1313		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1314						  tcf_action_policy, extack);
1315		if (err < 0)
1316			return ERR_PTR(err);
1317		err = -EINVAL;
1318		kind = tb[TCA_ACT_KIND];
1319		if (!kind) {
1320			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
1321			return ERR_PTR(err);
1322		}
1323		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
1324			NL_SET_ERR_MSG(extack, "TC action name too long");
1325			return ERR_PTR(err);
1326		}
1327	} else {
1328		if (strlcpy(act_name, "police", IFNAMSIZ) >= IFNAMSIZ) {
1329			NL_SET_ERR_MSG(extack, "TC action name too long");
1330			return ERR_PTR(-EINVAL);
1331		}
1332	}
1333
1334	a_o = tc_lookup_action_n(act_name);
1335	if (a_o == NULL) {
1336#ifdef CONFIG_MODULES
1337		if (rtnl_held)
1338			rtnl_unlock();
1339		request_module("act_%s", act_name);
1340		if (rtnl_held)
1341			rtnl_lock();
1342
1343		a_o = tc_lookup_action_n(act_name);
1344
1345		/* We dropped the RTNL semaphore in order to
1346		 * perform the module load.  So, even if we
1347		 * succeeded in loading the module we have to
1348		 * tell the caller to replay the request.  We
1349		 * indicate this using -EAGAIN.
1350		 */
1351		if (a_o != NULL) {
1352			module_put(a_o->owner);
1353			return ERR_PTR(-EAGAIN);
1354		}
1355#endif
1356		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
1357		return ERR_PTR(-ENOENT);
1358	}
1359
1360	return a_o;
1361}
1362
1363struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
1364				    struct nlattr *nla, struct nlattr *est,
1365				    struct tc_action_ops *a_o, int *init_res,
1366				    u32 flags, struct netlink_ext_ack *extack)
1367{
1368	bool police = flags & TCA_ACT_FLAGS_POLICE;
1369	struct nla_bitfield32 userflags = { 0, 0 };
1370	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
1371	struct nlattr *tb[TCA_ACT_MAX + 1];
1372	struct tc_cookie *cookie = NULL;
1373	struct tc_action *a;
1374	int err;
1375
1376	/* backward compatibility for policer */
1377	if (!police) {
1378		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1379						  tcf_action_policy, extack);
1380		if (err < 0)
1381			return ERR_PTR(err);
1382		if (tb[TCA_ACT_COOKIE]) {
1383			cookie = nla_memdup_cookie(tb);
1384			if (!cookie) {
1385				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
1386				err = -ENOMEM;
1387				goto err_out;
1388			}
1389		}
1390		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
1391		if (tb[TCA_ACT_FLAGS]) {
1392			userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
1393			if (!tc_act_flags_valid(userflags.value)) {
1394				err = -EINVAL;
1395				goto err_out;
1396			}
1397		}
1398
1399		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
1400				userflags.value | flags, extack);
1401	} else {
1402		err = a_o->init(net, nla, est, &a, tp, userflags.value | flags,
1403				extack);
1404	}
1405	if (err < 0)
1406		goto err_out;
1407	*init_res = err;
1408
1409	if (!police && tb[TCA_ACT_COOKIE])
1410		tcf_set_action_cookie(&a->act_cookie, cookie);
1411
1412	if (!police)
1413		a->hw_stats = hw_stats;
1414
1415	return a;
1416
1417err_out:
1418	if (cookie) {
1419		kfree(cookie->data);
1420		kfree(cookie);
1421	}
1422	return ERR_PTR(err);
1423}
1424
1425static bool tc_act_bind(u32 flags)
1426{
1427	return !!(flags & TCA_ACT_FLAGS_BIND);
1428}
1429
1430/* Returns numbers of initialized actions or negative error. */
1431
1432int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
1433		    struct nlattr *est, struct tc_action *actions[],
1434		    int init_res[], size_t *attr_size,
1435		    u32 flags, u32 fl_flags,
1436		    struct netlink_ext_ack *extack)
1437{
1438	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
1439	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1440	struct tc_action *act;
1441	size_t sz = 0;
1442	int err;
1443	int i;
1444
1445	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
1446					  extack);
1447	if (err < 0)
1448		return err;
1449
1450	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1451		struct tc_action_ops *a_o;
1452
1453		a_o = tc_action_load_ops(tb[i], flags & TCA_ACT_FLAGS_POLICE,
1454					 !(flags & TCA_ACT_FLAGS_NO_RTNL),
1455					 extack);
1456		if (IS_ERR(a_o)) {
1457			err = PTR_ERR(a_o);
1458			goto err_mod;
1459		}
1460		ops[i - 1] = a_o;
1461	}
1462
1463	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1464		act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
1465					&init_res[i - 1], flags, extack);
1466		if (IS_ERR(act)) {
1467			err = PTR_ERR(act);
1468			goto err;
1469		}
1470		sz += tcf_action_fill_size(act);
1471		/* Start from index 0 */
1472		actions[i - 1] = act;
1473		if (tc_act_bind(flags)) {
1474			bool skip_sw = tc_skip_sw(fl_flags);
1475			bool skip_hw = tc_skip_hw(fl_flags);
1476
1477			if (tc_act_bind(act->tcfa_flags))
1478				continue;
1479			if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
1480			    skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
1481				NL_SET_ERR_MSG(extack,
1482					       "Mismatch between action and filter offload flags");
1483				err = -EINVAL;
1484				goto err;
1485			}
1486		} else {
1487			err = tcf_action_offload_add(act, extack);
1488			if (tc_act_skip_sw(act->tcfa_flags) && err)
1489				goto err;
1490		}
1491	}
1492
1493	/* We have to commit them all together, because if any error happened in
1494	 * between, we could not handle the failure gracefully.
1495	 */
1496	tcf_idr_insert_many(actions);
1497
1498	*attr_size = tcf_action_full_attrs_size(sz);
1499	err = i - 1;
1500	goto err_mod;
1501
1502err:
1503	tcf_action_destroy(actions, flags & TCA_ACT_FLAGS_BIND);
1504err_mod:
1505	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
1506		if (ops[i])
1507			module_put(ops[i]->owner);
1508	}
1509	return err;
1510}
1511
1512void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
1513			     u64 drops, bool hw)
1514{
1515	if (a->cpu_bstats) {
1516		_bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
1517
1518		this_cpu_ptr(a->cpu_qstats)->drops += drops;
1519
1520		if (hw)
1521			_bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
1522				       bytes, packets);
1523		return;
1524	}
1525
1526	_bstats_update(&a->tcfa_bstats, bytes, packets);
1527	a->tcfa_qstats.drops += drops;
1528	if (hw)
1529		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
1530}
1531EXPORT_SYMBOL(tcf_action_update_stats);
1532
1533int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
1534			  int compat_mode)
1535{
1536	int err = 0;
1537	struct gnet_dump d;
1538
1539	if (p == NULL)
1540		goto errout;
1541
1542	/* update hw stats for this action */
1543	tcf_action_update_hw_stats(p);
1544
1545	/* compat_mode being true specifies a call that is supposed
1546	 * to add additional backward compatibility statistic TLVs.
1547	 */
1548	if (compat_mode) {
1549		if (p->type == TCA_OLD_COMPAT)
1550			err = gnet_stats_start_copy_compat(skb, 0,
1551							   TCA_STATS,
1552							   TCA_XSTATS,
1553							   &p->tcfa_lock, &d,
1554							   TCA_PAD);
1555		else
1556			return 0;
1557	} else
1558		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
1559					    &p->tcfa_lock, &d, TCA_ACT_PAD);
1560
1561	if (err < 0)
1562		goto errout;
1563
1564	if (gnet_stats_copy_basic(&d, p->cpu_bstats,
1565				  &p->tcfa_bstats, false) < 0 ||
1566	    gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
1567				     &p->tcfa_bstats_hw, false) < 0 ||
1568	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
1569	    gnet_stats_copy_queue(&d, p->cpu_qstats,
1570				  &p->tcfa_qstats,
1571				  p->tcfa_qstats.qlen) < 0)
1572		goto errout;
1573
1574	if (gnet_stats_finish_copy(&d) < 0)
1575		goto errout;
1576
1577	return 0;
1578
1579errout:
1580	return -1;
1581}
1582
1583static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
1584			u32 portid, u32 seq, u16 flags, int event, int bind,
1585			int ref)
1586{
1587	struct tcamsg *t;
1588	struct nlmsghdr *nlh;
1589	unsigned char *b = skb_tail_pointer(skb);
1590	struct nlattr *nest;
1591
1592	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
1593	if (!nlh)
1594		goto out_nlmsg_trim;
1595	t = nlmsg_data(nlh);
1596	t->tca_family = AF_UNSPEC;
1597	t->tca__pad1 = 0;
1598	t->tca__pad2 = 0;
1599
1600	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1601	if (!nest)
1602		goto out_nlmsg_trim;
1603
1604	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
1605		goto out_nlmsg_trim;
1606
1607	nla_nest_end(skb, nest);
1608
1609	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1610	return skb->len;
1611
1612out_nlmsg_trim:
1613	nlmsg_trim(skb, b);
1614	return -1;
1615}
1616
1617static int
1618tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
1619	       struct tc_action *actions[], int event,
1620	       struct netlink_ext_ack *extack)
1621{
1622	struct sk_buff *skb;
1623
1624	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1625	if (!skb)
1626		return -ENOBUFS;
1627	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
1628			 0, 1) <= 0) {
1629		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
1630		kfree_skb(skb);
1631		return -EINVAL;
1632	}
1633
1634	return rtnl_unicast(skb, net, portid);
1635}
1636
1637static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
1638					  struct nlmsghdr *n, u32 portid,
1639					  struct netlink_ext_ack *extack)
1640{
1641	struct nlattr *tb[TCA_ACT_MAX + 1];
1642	const struct tc_action_ops *ops;
1643	struct tc_action *a;
1644	int index;
1645	int err;
1646
1647	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1648					  tcf_action_policy, extack);
1649	if (err < 0)
1650		goto err_out;
1651
1652	err = -EINVAL;
1653	if (tb[TCA_ACT_INDEX] == NULL ||
1654	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
1655		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
1656		goto err_out;
1657	}
1658	index = nla_get_u32(tb[TCA_ACT_INDEX]);
1659
1660	err = -EINVAL;
1661	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
1662	if (!ops) { /* could happen in batch of actions */
1663		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
1664		goto err_out;
1665	}
1666	err = -ENOENT;
1667	if (__tcf_idr_search(net, ops, &a, index) == 0) {
1668		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
1669		goto err_mod;
1670	}
1671
1672	module_put(ops->owner);
1673	return a;
1674
1675err_mod:
1676	module_put(ops->owner);
1677err_out:
1678	return ERR_PTR(err);
1679}
1680
1681static int tca_action_flush(struct net *net, struct nlattr *nla,
1682			    struct nlmsghdr *n, u32 portid,
1683			    struct netlink_ext_ack *extack)
1684{
1685	struct sk_buff *skb;
1686	unsigned char *b;
1687	struct nlmsghdr *nlh;
1688	struct tcamsg *t;
1689	struct netlink_callback dcb;
1690	struct nlattr *nest;
1691	struct nlattr *tb[TCA_ACT_MAX + 1];
1692	const struct tc_action_ops *ops;
1693	struct nlattr *kind;
1694	int err = -ENOMEM;
1695
1696	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1697	if (!skb)
1698		return err;
1699
1700	b = skb_tail_pointer(skb);
1701
1702	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1703					  tcf_action_policy, extack);
1704	if (err < 0)
1705		goto err_out;
1706
1707	err = -EINVAL;
1708	kind = tb[TCA_ACT_KIND];
1709	ops = tc_lookup_action(kind);
1710	if (!ops) { /*some idjot trying to flush unknown action */
1711		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
1712		goto err_out;
1713	}
1714
1715	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
1716			sizeof(*t), 0);
1717	if (!nlh) {
1718		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
1719		goto out_module_put;
1720	}
1721	t = nlmsg_data(nlh);
1722	t->tca_family = AF_UNSPEC;
1723	t->tca__pad1 = 0;
1724	t->tca__pad2 = 0;
1725
1726	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1727	if (!nest) {
1728		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
1729		goto out_module_put;
1730	}
1731
1732	err = __tcf_generic_walker(net, skb, &dcb, RTM_DELACTION, ops, extack);
1733	if (err <= 0) {
1734		nla_nest_cancel(skb, nest);
1735		goto out_module_put;
1736	}
1737
1738	nla_nest_end(skb, nest);
1739
1740	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1741	nlh->nlmsg_flags |= NLM_F_ROOT;
1742	module_put(ops->owner);
1743	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1744			     n->nlmsg_flags & NLM_F_ECHO);
1745	if (err < 0)
1746		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");
1747
1748	return err;
1749
1750out_module_put:
1751	module_put(ops->owner);
1752err_out:
1753	kfree_skb(skb);
1754	return err;
1755}
1756
1757static int tcf_action_delete(struct net *net, struct tc_action *actions[])
1758{
1759	int i;
1760
1761	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
1762		struct tc_action *a = actions[i];
1763		const struct tc_action_ops *ops = a->ops;
1764		/* Actions can be deleted concurrently so we must save their
1765		 * type and id to search again after reference is released.
1766		 */
1767		struct tcf_idrinfo *idrinfo = a->idrinfo;
1768		u32 act_index = a->tcfa_index;
1769
1770		actions[i] = NULL;
1771		if (tcf_action_put(a)) {
1772			/* last reference, action was deleted concurrently */
1773			module_put(ops->owner);
1774		} else  {
1775			int ret;
1776
1777			/* now do the delete */
1778			ret = tcf_idr_delete_index(idrinfo, act_index);
1779			if (ret < 0)
1780				return ret;
1781		}
1782	}
1783	return 0;
1784}
1785
1786static int
1787tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
1788{
1789	size_t attr_size = tcf_action_fill_size(action);
1790	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
1791		[0] = action,
1792	};
1793	const struct tc_action_ops *ops = action->ops;
1794	struct sk_buff *skb;
1795	int ret;
1796
1797	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1798			GFP_KERNEL);
1799	if (!skb)
1800		return -ENOBUFS;
1801
1802	if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1) <= 0) {
1803		kfree_skb(skb);
1804		return -EINVAL;
1805	}
1806
1807	ret = tcf_idr_release_unsafe(action);
1808	if (ret == ACT_P_DELETED) {
1809		module_put(ops->owner);
1810		ret = rtnetlink_send(skb, net, 0, RTNLGRP_TC, 0);
1811	} else {
1812		kfree_skb(skb);
1813	}
1814
1815	return ret;
1816}
1817
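/* Replay hardware offload state for every action in every network namespace
 * against the indirect block callback @cb: on add, each eligible (non-bound)
 * action is offered to the new callback; on removal, its hardware count is
 * decremented and skip_sw actions that are no longer in hardware are deleted
 * and a delete notification is sent.
 */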
1818int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
1819			    void *cb_priv, bool add)
1820{
1821	struct tc_act_pernet_id *id_ptr;
1822	struct tcf_idrinfo *idrinfo;
1823	struct tc_action_net *tn;
1824	struct tc_action *p;
1825	unsigned int act_id;
1826	unsigned long tmp;
1827	unsigned long id;
1828	struct idr *idr;
1829	struct net *net;
1830	int ret;
1831
1832	if (!cb)
1833		return -EINVAL;
1834
1835	down_read(&net_rwsem);
1836	mutex_lock(&act_id_mutex);
1837
1838	for_each_net(net) {
1839		list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
1840			act_id = id_ptr->id;
1841			tn = net_generic(net, act_id);
1842			if (!tn)
1843				continue;
1844			idrinfo = tn->idrinfo;
1845			if (!idrinfo)
1846				continue;
1847
1848			mutex_lock(&idrinfo->lock);
1849			idr = &idrinfo->action_idr;
1850			idr_for_each_entry_ul(idr, p, tmp, id) {
1851				if (IS_ERR(p) || tc_act_bind(p->tcfa_flags))
1852					continue;
1853				if (add) {
1854					tcf_action_offload_add_ex(p, NULL, cb,
1855								  cb_priv);
1856					continue;
1857				}
1858
1859				/* cb unregister to update hw count */
1860				ret = tcf_action_offload_del_ex(p, cb, cb_priv);
1861				if (ret < 0)
1862					continue;
1863				if (tc_act_skip_sw(p->tcfa_flags) &&
1864				    !tc_act_in_hw(p))
1865					tcf_reoffload_del_notify(net, p);
1866			}
1867			mutex_unlock(&idrinfo->lock);
1868		}
1869	}
1870	mutex_unlock(&act_id_mutex);
1871	up_read(&net_rwsem);
1872
1873	return 0;
1874}
1875
1876static int
1877tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
1878	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
1879{
1880	int ret;
1881	struct sk_buff *skb;
1882
1883	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1884			GFP_KERNEL);
1885	if (!skb)
1886		return -ENOBUFS;
1887
1888	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
1889			 0, 2) <= 0) {
1890		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
1891		kfree_skb(skb);
1892		return -EINVAL;
1893	}
1894
1895	/* now do the delete */
1896	ret = tcf_action_delete(net, actions);
1897	if (ret < 0) {
1898		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
1899		kfree_skb(skb);
1900		return ret;
1901	}
1902
1903	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1904			     n->nlmsg_flags & NLM_F_ECHO);
1905	return ret;
1906}
1907
1908static int
1909tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
1910	      u32 portid, int event, struct netlink_ext_ack *extack)
1911{
1912	int i, ret;
1913	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1914	struct tc_action *act;
1915	size_t attr_size = 0;
1916	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
1917
1918	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
1919					  extack);
1920	if (ret < 0)
1921		return ret;
1922
1923	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
1924		if (tb[1])
1925			return tca_action_flush(net, tb[1], n, portid, extack);
1926
1927		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
1928		return -EINVAL;
1929	}
1930
1931	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1932		act = tcf_action_get_1(net, tb[i], n, portid, extack);
1933		if (IS_ERR(act)) {
1934			ret = PTR_ERR(act);
1935			goto err;
1936		}
1937		attr_size += tcf_action_fill_size(act);
1938		actions[i - 1] = act;
1939	}
1940
1941	attr_size = tcf_action_full_attrs_size(attr_size);
1942
1943	if (event == RTM_GETACTION)
1944		ret = tcf_get_notify(net, portid, n, actions, event, extack);
1945	else { /* delete */
1946		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
1947		if (ret)
1948			goto err;
1949		return 0;
1950	}
1951err:
1952	tcf_action_put_many(actions);
1953	return ret;
1954}
1955
1956static int
1957tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
1958	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
1959{
1960	struct sk_buff *skb;
1961
1962	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1963			GFP_KERNEL);
1964	if (!skb)
1965		return -ENOBUFS;
1966
1967	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
1968			 RTM_NEWACTION, 0, 0) <= 0) {
1969		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
1970		kfree_skb(skb);
1971		return -EINVAL;
1972	}
1973
1974	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1975			      n->nlmsg_flags & NLM_F_ECHO);
1976}
1977
1978static int tcf_action_add(struct net *net, struct nlattr *nla,
1979			  struct nlmsghdr *n, u32 portid, u32 flags,
1980			  struct netlink_ext_ack *extack)
1981{
1982	size_t attr_size = 0;
1983	int loop, ret, i;
1984	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
1985	int init_res[TCA_ACT_MAX_PRIO] = {};
1986
1987	for (loop = 0; loop < 10; loop++) {
1988		ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
1989				      &attr_size, flags, 0, extack);
1990		if (ret != -EAGAIN)
1991			break;
1992	}
1993
1994	if (ret < 0)
1995		return ret;
1996	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
1997
1998	/* only put existing actions */
1999	for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
2000		if (init_res[i] == ACT_P_CREATED)
2001			actions[i] = NULL;
2002	tcf_action_put_many(actions);
2003
2004	return ret;
2005}
2006
2007static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
2008	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
2009						 TCA_ACT_FLAG_TERSE_DUMP),
2010	[TCA_ROOT_TIME_DELTA]      = { .type = NLA_U32 },
2011};
2012
2013static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
2014			 struct netlink_ext_ack *extack)
2015{
2016	struct net *net = sock_net(skb->sk);
2017	struct nlattr *tca[TCA_ROOT_MAX + 1];
2018	u32 portid = NETLINK_CB(skb).portid;
2019	u32 flags = 0;
2020	int ret = 0;
2021
2022	if ((n->nlmsg_type != RTM_GETACTION) &&
2023	    !netlink_capable(skb, CAP_NET_ADMIN))
2024		return -EPERM;
2025
2026	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
2027				     TCA_ROOT_MAX, NULL, extack);
2028	if (ret < 0)
2029		return ret;
2030
2031	if (tca[TCA_ACT_TAB] == NULL) {
2032		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
2033		return -EINVAL;
2034	}
2035
2036	/* n->nlmsg_flags & NLM_F_CREATE */
2037	switch (n->nlmsg_type) {
2038	case RTM_NEWACTION:
2039		/* We are going to assume all other flags
2040		 * imply "create only if it doesn't exist".
2041		 * Note that CREATE | EXCL implies that,
2042		 * but since we want to avoid ambiguity (e.g. when
2043		 * flags is zero) we just set this explicitly.
2044		 */
2045		if (n->nlmsg_flags & NLM_F_REPLACE)
2046			flags = TCA_ACT_FLAGS_REPLACE;
2047		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, flags,
2048				     extack);
2049		break;
2050	case RTM_DELACTION:
2051		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
2052				    portid, RTM_DELACTION, extack);
2053		break;
2054	case RTM_GETACTION:
2055		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
2056				    portid, RTM_GETACTION, extack);
2057		break;
2058	default:
2059		BUG();
2060	}
2061
2062	return ret;
2063}
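/*
 * A rough sketch of the userspace request header that lands in the
 * RTM_NEWACTION case above (the TCA_ACT_TAB payload is omitted and the
 * exact flag combination is up to the caller); tc_ctl_action() itself
 * only acts on NLM_F_REPLACE among the change flags:
 *
 *	struct {
 *		struct nlmsghdr n;
 *		struct tcamsg t;
 *	} req = {
 *		.n.nlmsg_len   = NLMSG_LENGTH(sizeof(struct tcamsg)),
 *		.n.nlmsg_type  = RTM_NEWACTION,
 *		.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL,
 *		.t.tca_family  = AF_UNSPEC,
 *	};
 *
 * Setting NLM_F_REPLACE instead is what the switch above translates into
 * TCA_ACT_FLAGS_REPLACE before calling tcf_action_add().
 */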
2064
2065static struct nlattr *find_dump_kind(struct nlattr **nla)
2066{
2067	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
2068	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
2069	struct nlattr *kind;
2070
2071	tb1 = nla[TCA_ACT_TAB];
2072	if (tb1 == NULL)
2073		return NULL;
2074
2075	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
2076		return NULL;
2077
2078	if (tb[1] == NULL)
2079		return NULL;
2080	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0)
2081		return NULL;
2082	kind = tb2[TCA_ACT_KIND];
2083
2084	return kind;
2085}
2086
2087static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
2088{
2089	struct net *net = sock_net(skb->sk);
2090	struct nlmsghdr *nlh;
2091	unsigned char *b = skb_tail_pointer(skb);
2092	struct nlattr *nest;
2093	struct tc_action_ops *a_o;
2094	int ret = 0;
2095	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
2096	struct nlattr *tb[TCA_ROOT_MAX + 1];
2097	struct nlattr *count_attr = NULL;
2098	unsigned long jiffy_since = 0;
2099	struct nlattr *kind = NULL;
2100	struct nla_bitfield32 bf;
2101	u32 msecs_since = 0;
2102	u32 act_count = 0;
2103
2104	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
2105				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
2106	if (ret < 0)
2107		return ret;
2108
2109	kind = find_dump_kind(tb);
2110	if (kind == NULL) {
2111		pr_info("tc_dump_action: action bad kind\n");
2112		return 0;
2113	}
2114
2115	a_o = tc_lookup_action(kind);
2116	if (a_o == NULL)
2117		return 0;
2118
2119	cb->args[2] = 0;
2120	if (tb[TCA_ROOT_FLAGS]) {
2121		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
2122		cb->args[2] = bf.value;
2123	}
2124
2125	if (tb[TCA_ROOT_TIME_DELTA]) {
2126		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
2127	}
2128
2129	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2130			cb->nlh->nlmsg_type, sizeof(*t), 0);
2131	if (!nlh)
2132		goto out_module_put;
2133
2134	if (msecs_since)
2135		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);
2136
2137	t = nlmsg_data(nlh);
2138	t->tca_family = AF_UNSPEC;
2139	t->tca__pad1 = 0;
2140	t->tca__pad2 = 0;
2141	cb->args[3] = jiffy_since;
2142	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
2143	if (!count_attr)
2144		goto out_module_put;
2145
2146	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
2147	if (nest == NULL)
2148		goto out_module_put;
2149
2150	ret = __tcf_generic_walker(net, skb, cb, RTM_GETACTION, a_o, NULL);
2151	if (ret < 0)
2152		goto out_module_put;
2153
2154	if (ret > 0) {
2155		nla_nest_end(skb, nest);
2156		ret = skb->len;
2157		act_count = cb->args[1];
2158		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
2159		cb->args[1] = 0;
2160	} else
2161		nlmsg_trim(skb, b);
2162
2163	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2164	if (NETLINK_CB(cb->skb).portid && ret)
2165		nlh->nlmsg_flags |= NLM_F_MULTI;
2166	module_put(a_o->owner);
2167	return skb->len;
2168
2169out_module_put:
2170	module_put(a_o->owner);
2171	nlmsg_trim(skb, b);
2172	return skb->len;
2173}
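/*
 * Sketch of the root attributes a dump request may carry (names taken from
 * tcaa_policy above; the exact userspace encoding is up to the caller):
 *
 *	TCA_ACT_TAB          nest with one entry naming TCA_ACT_KIND,
 *	                     e.g. "gact", so find_dump_kind() can pick the
 *	                     action ops to walk
 *	TCA_ROOT_FLAGS       bitfield32, e.g. value = selector =
 *	                     TCA_ACT_FLAG_LARGE_DUMP_ON to lift the
 *	                     32-actions-per-message limit
 *	TCA_ROOT_TIME_DELTA  u32 milliseconds; only actions used within the
 *	                     last N ms are dumped (see jiffy_since above)
 */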
2174
2175static int __init tc_action_init(void)
2176{
2177	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
2178	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
2179	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
2180		      0);
2181
2182	return 0;
2183}
2184
2185subsys_initcall(tc_action_init);
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/act_api.c	Packet action API.
   4 *
   5 * Author:	Jamal Hadi Salim
   6 */
   7
   8#include <linux/types.h>
   9#include <linux/kernel.h>
  10#include <linux/string.h>
  11#include <linux/errno.h>
  12#include <linux/slab.h>
  13#include <linux/skbuff.h>
  14#include <linux/init.h>
  15#include <linux/kmod.h>
  16#include <linux/err.h>
  17#include <linux/module.h>
  18#include <net/net_namespace.h>
  19#include <net/sock.h>
  20#include <net/sch_generic.h>
  21#include <net/pkt_cls.h>
  22#include <net/tc_act/tc_pedit.h>
  23#include <net/act_api.h>
  24#include <net/netlink.h>
  25#include <net/flow_offload.h>
  26#include <net/tc_wrapper.h>
  27
  28#ifdef CONFIG_INET
  29DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
  30EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
  31#endif
  32
  33int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
  34{
  35#ifdef CONFIG_INET
  36	if (static_branch_unlikely(&tcf_frag_xmit_count))
  37		return sch_frag_xmit_hook(skb, xmit);
  38#endif
  39
  40	return xmit(skb);
  41}
  42EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);
  43
  44static void tcf_action_goto_chain_exec(const struct tc_action *a,
  45				       struct tcf_result *res)
  46{
  47	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
  48
  49	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
  50}
  51
  52static void tcf_free_cookie_rcu(struct rcu_head *p)
  53{
  54	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
  55
  56	kfree(cookie->data);
  57	kfree(cookie);
  58}
  59
  60static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
  61				  struct tc_cookie *new_cookie)
  62{
  63	struct tc_cookie *old;
  64
  65	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
  66	if (old)
  67		call_rcu(&old->rcu, tcf_free_cookie_rcu);
  68}
  69
  70int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
  71			     struct tcf_chain **newchain,
  72			     struct netlink_ext_ack *extack)
  73{
  74	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
  75	u32 chain_index;
  76
  77	if (!opcode)
  78		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
  79	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
  80		ret = 0;
  81	if (ret) {
  82		NL_SET_ERR_MSG(extack, "invalid control action");
  83		goto end;
  84	}
  85
  86	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
  87		chain_index = action & TC_ACT_EXT_VAL_MASK;
  88		if (!tp || !newchain) {
  89			ret = -EINVAL;
  90			NL_SET_ERR_MSG(extack,
  91				       "can't goto NULL proto/chain");
  92			goto end;
  93		}
  94		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
  95		if (!*newchain) {
  96			ret = -ENOMEM;
  97			NL_SET_ERR_MSG(extack,
  98				       "can't allocate goto_chain");
  99		}
 100	}
 101end:
 102	return ret;
 103}
 104EXPORT_SYMBOL(tcf_action_check_ctrlact);
 105
 106struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
 107					 struct tcf_chain *goto_chain)
 108{
 109	a->tcfa_action = action;
 110	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
 111	return goto_chain;
 112}
 113EXPORT_SYMBOL(tcf_action_set_ctrlact);
 114
 115/* XXX: For standalone actions, we don't need a RCU grace period either, because
 116 * actions are always connected to filters and filters are already destroyed in
 117 * RCU callbacks, so after a RCU grace period actions are already disconnected
 118 * from filters. Readers later can not find us.
 119 */
 120static void free_tcf(struct tc_action *p)
 121{
 122	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
 123
 124	free_percpu(p->cpu_bstats);
 125	free_percpu(p->cpu_bstats_hw);
 126	free_percpu(p->cpu_qstats);
 127
 128	tcf_set_action_cookie(&p->user_cookie, NULL);
 129	if (chain)
 130		tcf_chain_put_by_act(chain);
 131
 132	kfree(p);
 133}
 134
 135static void offload_action_hw_count_set(struct tc_action *act,
 136					u32 hw_count)
 137{
 138	act->in_hw_count = hw_count;
 139}
 140
 141static void offload_action_hw_count_inc(struct tc_action *act,
 142					u32 hw_count)
 143{
 144	act->in_hw_count += hw_count;
 145}
 146
 147static void offload_action_hw_count_dec(struct tc_action *act,
 148					u32 hw_count)
 149{
 150	act->in_hw_count = act->in_hw_count > hw_count ?
 151			   act->in_hw_count - hw_count : 0;
 152}
 153
 154static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
 155{
 156	if (is_tcf_pedit(act))
 157		return tcf_pedit_nkeys(act);
 158	else
 159		return 1;
 160}
 161
 162static bool tc_act_skip_hw(u32 flags)
 163{
 164	return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false;
 165}
 166
 167static bool tc_act_skip_sw(u32 flags)
 168{
 169	return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
 170}
 171
 172/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
 173static bool tc_act_flags_valid(u32 flags)
 174{
 175	flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;
 176
 177	return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW);
 178}
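/*
 * Worked example of the check above (only the two skip bits matter):
 *
 *	flags passed in                   value returned (valid?)
 *	---------------                   -----------------------
 *	neither skip flag                 SKIP_HW | SKIP_SW  -> true
 *	TCA_ACT_FLAGS_SKIP_HW only        SKIP_SW bit        -> true
 *	TCA_ACT_FLAGS_SKIP_SW only        SKIP_HW bit        -> true
 *	SKIP_HW | SKIP_SW                 0                  -> false
 */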
 179
 180static int offload_action_init(struct flow_offload_action *fl_action,
 181			       struct tc_action *act,
 182			       enum offload_act_command  cmd,
 183			       struct netlink_ext_ack *extack)
 184{
 185	int err;
 186
 187	fl_action->extack = extack;
 188	fl_action->command = cmd;
 189	fl_action->index = act->tcfa_index;
 190	fl_action->cookie = (unsigned long)act;
 191
 192	if (act->ops->offload_act_setup) {
 193		spin_lock_bh(&act->tcfa_lock);
 194		err = act->ops->offload_act_setup(act, fl_action, NULL,
 195						  false, extack);
 196		spin_unlock_bh(&act->tcfa_lock);
 197		return err;
 198	}
 199
 200	return -EOPNOTSUPP;
 201}
 202
 203static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act,
 204				     u32 *hw_count)
 205{
 206	int err;
 207
 208	err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT,
 209					  fl_act, NULL, NULL);
 210	if (err < 0)
 211		return err;
 212
 213	if (hw_count)
 214		*hw_count = err;
 215
 216	return 0;
 217}
 218
 219static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act,
 220					u32 *hw_count,
 221					flow_indr_block_bind_cb_t *cb,
 222					void *cb_priv)
 223{
 224	int err;
 225
 226	err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);
 227	if (err < 0)
 228		return err;
 229
 230	if (hw_count)
 231		*hw_count = 1;
 232
 233	return 0;
 234}
 235
 236static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
 237				  u32 *hw_count,
 238				  flow_indr_block_bind_cb_t *cb,
 239				  void *cb_priv)
 240{
 241	return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
 242						 cb, cb_priv) :
 243		    tcf_action_offload_cmd_ex(fl_act, hw_count);
 244}
 245
 246static int tcf_action_offload_add_ex(struct tc_action *action,
 247				     struct netlink_ext_ack *extack,
 248				     flow_indr_block_bind_cb_t *cb,
 249				     void *cb_priv)
 250{
 251	bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
 252	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
 253		[0] = action,
 254	};
 255	struct flow_offload_action *fl_action;
 256	u32 in_hw_count = 0;
 257	int num, err = 0;
 258
 259	if (tc_act_skip_hw(action->tcfa_flags))
 260		return 0;
 261
 262	num = tcf_offload_act_num_actions_single(action);
 263	fl_action = offload_action_alloc(num);
 264	if (!fl_action)
 265		return -ENOMEM;
 266
 267	err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack);
 268	if (err)
 269		goto fl_err;
 270
 271	err = tc_setup_action(&fl_action->action, actions, 0, extack);
 272	if (err) {
 273		NL_SET_ERR_MSG_MOD(extack,
 274				   "Failed to setup tc actions for offload");
 275		goto fl_err;
 276	}
 277
 278	err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
 279	if (!err)
 280		cb ? offload_action_hw_count_inc(action, in_hw_count) :
 281		     offload_action_hw_count_set(action, in_hw_count);
 282
 283	if (skip_sw && !tc_act_in_hw(action))
 284		err = -EINVAL;
 285
 286	tc_cleanup_offload_action(&fl_action->action);
 287
 288fl_err:
 289	kfree(fl_action);
 290
 291	return err;
 292}
 293
 294/* offload the tc action after it is inserted */
 295static int tcf_action_offload_add(struct tc_action *action,
 296				  struct netlink_ext_ack *extack)
 297{
 298	return tcf_action_offload_add_ex(action, extack, NULL, NULL);
 299}
 300
 301int tcf_action_update_hw_stats(struct tc_action *action)
 302{
 303	struct flow_offload_action fl_act = {};
 304	int err;
 305
 306	err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL);
 307	if (err)
 308		return err;
 309
 310	err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL);
 311	if (!err) {
 312		preempt_disable();
 313		tcf_action_stats_update(action, fl_act.stats.bytes,
 314					fl_act.stats.pkts,
 315					fl_act.stats.drops,
 316					fl_act.stats.lastused,
 317					true);
 318		preempt_enable();
 319		action->used_hw_stats = fl_act.stats.used_hw_stats;
 320		action->used_hw_stats_valid = true;
 321	} else {
 322		return -EOPNOTSUPP;
 323	}
 324
 325	return 0;
 326}
 327EXPORT_SYMBOL(tcf_action_update_hw_stats);
 328
 329static int tcf_action_offload_del_ex(struct tc_action *action,
 330				     flow_indr_block_bind_cb_t *cb,
 331				     void *cb_priv)
 332{
 333	struct flow_offload_action fl_act = {};
 334	u32 in_hw_count = 0;
 335	int err = 0;
 336
 337	if (!tc_act_in_hw(action))
 338		return 0;
 339
 340	err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
 341	if (err)
 342		return err;
 343
 344	err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
 345	if (err < 0)
 346		return err;
 347
 348	if (!cb && action->in_hw_count != in_hw_count)
 349		return -EINVAL;
 350
 351	/* do not need to update hw state when deleting action */
 352	if (cb && in_hw_count)
 353		offload_action_hw_count_dec(action, in_hw_count);
 354
 355	return 0;
 356}
 357
 358static int tcf_action_offload_del(struct tc_action *action)
 359{
 360	return tcf_action_offload_del_ex(action, NULL, NULL);
 361}
 362
 363static void tcf_action_cleanup(struct tc_action *p)
 364{
 365	tcf_action_offload_del(p);
 366	if (p->ops->cleanup)
 367		p->ops->cleanup(p);
 368
 369	gen_kill_estimator(&p->tcfa_rate_est);
 370	free_tcf(p);
 371}
 372
 373static int __tcf_action_put(struct tc_action *p, bool bind)
 374{
 375	struct tcf_idrinfo *idrinfo = p->idrinfo;
 376
 377	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
 378		if (bind)
 379			atomic_dec(&p->tcfa_bindcnt);
 380		idr_remove(&idrinfo->action_idr, p->tcfa_index);
 381		mutex_unlock(&idrinfo->lock);
 382
 383		tcf_action_cleanup(p);
 384		return 1;
 385	}
 386
 387	if (bind)
 388		atomic_dec(&p->tcfa_bindcnt);
 389
 390	return 0;
 391}
 392
 393static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
 394{
 395	int ret = 0;
 396
 397	/* Release with strict==1 and bind==0 is only called through the act API
 398	 * interface (classifiers always bind). The only case when an action with
 399	 * a positive reference count and a zero bind count can exist is when it
 400	 * was also created with the act API (unbinding the last classifier will
 401	 * destroy the action if it was created by a classifier). So the only
 402	 * case when the bind count can change after the initial check is when an
 403	 * unbound action is destroyed by the act API while a classifier
 404	 * concurrently binds to an action with the same id. This results either
 405	 * in creation of a new action (same behavior as before), or in reuse of
 406	 * the existing action if the concurrent process increments the reference
 407	 * count before the action is deleted. Both scenarios are acceptable.
 408	 */
 409	if (p) {
 410		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
 411			return -EPERM;
 412
 413		if (__tcf_action_put(p, bind))
 414			ret = ACT_P_DELETED;
 415	}
 416
 417	return ret;
 418}
 419
 420int tcf_idr_release(struct tc_action *a, bool bind)
 421{
 422	const struct tc_action_ops *ops = a->ops;
 423	int ret;
 424
 425	ret = __tcf_idr_release(a, bind, false);
 426	if (ret == ACT_P_DELETED)
 427		module_put(ops->owner);
 428	return ret;
 429}
 430EXPORT_SYMBOL(tcf_idr_release);
 431
 432static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
 433{
 434	struct tc_cookie *user_cookie;
 435	u32 cookie_len = 0;
 436
 437	rcu_read_lock();
 438	user_cookie = rcu_dereference(act->user_cookie);
 439
 440	if (user_cookie)
 441		cookie_len = nla_total_size(user_cookie->len);
 442	rcu_read_unlock();
 443
 444	return  nla_total_size(0) /* action number nested */
 445		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
 446		+ cookie_len /* TCA_ACT_COOKIE */
 447		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
 448		+ nla_total_size(0) /* TCA_ACT_STATS nested */
 449		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
 450		/* TCA_STATS_BASIC */
 451		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
 452		/* TCA_STATS_PKT64 */
 453		+ nla_total_size_64bit(sizeof(u64))
 454		/* TCA_STATS_QUEUE */
 455		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
 456		+ nla_total_size(0) /* TCA_ACT_OPTIONS nested */
 457		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
 458}
 459
 460static size_t tcf_action_full_attrs_size(size_t sz)
 461{
 462	return NLMSG_HDRLEN                     /* struct nlmsghdr */
 463		+ sizeof(struct tcamsg)
 464		+ nla_total_size(0)             /* TCA_ACT_TAB nested */
 465		+ sz;
 466}
 467
 468static size_t tcf_action_fill_size(const struct tc_action *act)
 469{
 470	size_t sz = tcf_action_shared_attrs_size(act);
 471
 472	if (act->ops->get_fill_size)
 473		return act->ops->get_fill_size(act) + sz;
 474	return sz;
 475}
 476
 477static int
 478tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
 479{
 480	unsigned char *b = skb_tail_pointer(skb);
 481	struct tc_cookie *cookie;
 482
 483	if (nla_put_string(skb, TCA_ACT_KIND, a->ops->kind))
 484		goto nla_put_failure;
 485	if (tcf_action_copy_stats(skb, a, 0))
 486		goto nla_put_failure;
 487	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
 488		goto nla_put_failure;
 489
 490	rcu_read_lock();
 491	cookie = rcu_dereference(a->user_cookie);
 492	if (cookie) {
 493		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
 494			rcu_read_unlock();
 495			goto nla_put_failure;
 496		}
 497	}
 498	rcu_read_unlock();
 499
 500	return 0;
 501
 502nla_put_failure:
 503	nlmsg_trim(skb, b);
 504	return -1;
 505}
 506
 507static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 508			   struct netlink_callback *cb)
 509{
 510	int err = 0, index = -1, s_i = 0, n_i = 0;
 511	u32 act_flags = cb->args[2];
 512	unsigned long jiffy_since = cb->args[3];
 513	struct nlattr *nest;
 514	struct idr *idr = &idrinfo->action_idr;
 515	struct tc_action *p;
 516	unsigned long id = 1;
 517	unsigned long tmp;
 518
 519	mutex_lock(&idrinfo->lock);
 520
 521	s_i = cb->args[0];
 522
 523	idr_for_each_entry_ul(idr, p, tmp, id) {
 524		index++;
 525		if (index < s_i)
 526			continue;
 527		if (IS_ERR(p))
 528			continue;
 529
 530		if (jiffy_since &&
 531		    time_after(jiffy_since,
 532			       (unsigned long)p->tcfa_tm.lastuse))
 533			continue;
 534
 535		tcf_action_update_hw_stats(p);
 536
 537		nest = nla_nest_start_noflag(skb, n_i);
 538		if (!nest) {
 539			index--;
 540			goto nla_put_failure;
 541		}
 542		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
 543			tcf_action_dump_terse(skb, p, true) :
 544			tcf_action_dump_1(skb, p, 0, 0);
 545		if (err < 0) {
 546			index--;
 547			nlmsg_trim(skb, nest);
 548			goto done;
 549		}
 550		nla_nest_end(skb, nest);
 551		n_i++;
 552		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
 553		    n_i >= TCA_ACT_MAX_PRIO)
 554			goto done;
 555	}
 556done:
 557	if (index >= 0)
 558		cb->args[0] = index + 1;
 559
 560	mutex_unlock(&idrinfo->lock);
 561	if (n_i) {
 562		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
 563			cb->args[1] = n_i;
 564	}
 565	return n_i;
 566
 567nla_put_failure:
 568	nla_nest_cancel(skb, nest);
 569	goto done;
 570}
 571
 572static int tcf_idr_release_unsafe(struct tc_action *p)
 573{
 574	if (atomic_read(&p->tcfa_bindcnt) > 0)
 575		return -EPERM;
 576
 577	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
 578		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
 579		tcf_action_cleanup(p);
 580		return ACT_P_DELETED;
 581	}
 582
 583	return 0;
 584}
 585
 586static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 587			  const struct tc_action_ops *ops,
 588			  struct netlink_ext_ack *extack)
 589{
 590	struct nlattr *nest;
 591	int n_i = 0;
 592	int ret = -EINVAL;
 593	struct idr *idr = &idrinfo->action_idr;
 594	struct tc_action *p;
 595	unsigned long id = 1;
 596	unsigned long tmp;
 597
 598	nest = nla_nest_start_noflag(skb, 0);
 599	if (nest == NULL)
 600		goto nla_put_failure;
 601	if (nla_put_string(skb, TCA_ACT_KIND, ops->kind))
 602		goto nla_put_failure;
 603
 604	ret = 0;
 605	mutex_lock(&idrinfo->lock);
 606	idr_for_each_entry_ul(idr, p, tmp, id) {
 607		if (IS_ERR(p))
 608			continue;
 609		ret = tcf_idr_release_unsafe(p);
 610		if (ret == ACT_P_DELETED)
 611			module_put(ops->owner);
 612		else if (ret < 0)
 613			break;
 614		n_i++;
 615	}
 616	mutex_unlock(&idrinfo->lock);
 617	if (ret < 0) {
 618		if (n_i)
 619			NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
 620		else
 621			goto nla_put_failure;
 622	}
 623
 624	ret = nla_put_u32(skb, TCA_FCNT, n_i);
 625	if (ret)
 626		goto nla_put_failure;
 627	nla_nest_end(skb, nest);
 628
 629	return n_i;
 630nla_put_failure:
 631	nla_nest_cancel(skb, nest);
 632	return ret;
 633}
 634
 635int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
 636		       struct netlink_callback *cb, int type,
 637		       const struct tc_action_ops *ops,
 638		       struct netlink_ext_ack *extack)
 639{
 640	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 641
 642	if (type == RTM_DELACTION) {
 643		return tcf_del_walker(idrinfo, skb, ops, extack);
 644	} else if (type == RTM_GETACTION) {
 645		return tcf_dump_walker(idrinfo, skb, cb);
 646	} else {
 647		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
 648		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
 649		return -EINVAL;
 650	}
 651}
 652EXPORT_SYMBOL(tcf_generic_walker);
 653
 654int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
 655{
 656	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 657	struct tc_action *p;
 658
 659	mutex_lock(&idrinfo->lock);
 660	p = idr_find(&idrinfo->action_idr, index);
 661	if (IS_ERR(p))
 662		p = NULL;
 663	else if (p)
 664		refcount_inc(&p->tcfa_refcnt);
 665	mutex_unlock(&idrinfo->lock);
 666
 667	if (p) {
 668		*a = p;
 669		return true;
 670	}
 671	return false;
 672}
 673EXPORT_SYMBOL(tcf_idr_search);
 674
 675static int __tcf_generic_walker(struct net *net, struct sk_buff *skb,
 676				struct netlink_callback *cb, int type,
 677				const struct tc_action_ops *ops,
 678				struct netlink_ext_ack *extack)
 679{
 680	struct tc_action_net *tn = net_generic(net, ops->net_id);
 681
 682	if (unlikely(ops->walk))
 683		return ops->walk(net, skb, cb, type, ops, extack);
 684
 685	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
 686}
 687
 688static int __tcf_idr_search(struct net *net,
 689			    const struct tc_action_ops *ops,
 690			    struct tc_action **a, u32 index)
 691{
 692	struct tc_action_net *tn = net_generic(net, ops->net_id);
 693
 694	if (unlikely(ops->lookup))
 695		return ops->lookup(net, a, index);
 696
 697	return tcf_idr_search(tn, a, index);
 698}
 699
 700static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
 701{
 702	struct tc_action *p;
 703	int ret = 0;
 704
 705	mutex_lock(&idrinfo->lock);
 706	p = idr_find(&idrinfo->action_idr, index);
 707	if (!p) {
 708		mutex_unlock(&idrinfo->lock);
 709		return -ENOENT;
 710	}
 711
 712	if (!atomic_read(&p->tcfa_bindcnt)) {
 713		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
 714			struct module *owner = p->ops->owner;
 715
 716			WARN_ON(p != idr_remove(&idrinfo->action_idr,
 717						p->tcfa_index));
 718			mutex_unlock(&idrinfo->lock);
 719
 720			tcf_action_cleanup(p);
 721			module_put(owner);
 722			return 0;
 723		}
 724		ret = 0;
 725	} else {
 726		ret = -EPERM;
 727	}
 728
 729	mutex_unlock(&idrinfo->lock);
 730	return ret;
 731}
 732
 733int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 734		   struct tc_action **a, const struct tc_action_ops *ops,
 735		   int bind, bool cpustats, u32 flags)
 736{
 737	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
 738	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 739	int err = -ENOMEM;
 740
 741	if (unlikely(!p))
 742		return -ENOMEM;
 743	refcount_set(&p->tcfa_refcnt, 1);
 744	if (bind)
 745		atomic_set(&p->tcfa_bindcnt, 1);
 746
 747	if (cpustats) {
 748		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
 749		if (!p->cpu_bstats)
 750			goto err1;
 751		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
 752		if (!p->cpu_bstats_hw)
 753			goto err2;
 754		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
 755		if (!p->cpu_qstats)
 756			goto err3;
 757	}
 758	gnet_stats_basic_sync_init(&p->tcfa_bstats);
 759	gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
 760	spin_lock_init(&p->tcfa_lock);
 761	p->tcfa_index = index;
 762	p->tcfa_tm.install = jiffies;
 763	p->tcfa_tm.lastuse = jiffies;
 764	p->tcfa_tm.firstuse = 0;
 765	p->tcfa_flags = flags;
 766	if (est) {
 767		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
 768					&p->tcfa_rate_est,
 769					&p->tcfa_lock, false, est);
 770		if (err)
 771			goto err4;
 772	}
 773
 774	p->idrinfo = idrinfo;
 775	__module_get(ops->owner);
 776	p->ops = ops;
 777	*a = p;
 778	return 0;
 779err4:
 780	free_percpu(p->cpu_qstats);
 781err3:
 782	free_percpu(p->cpu_bstats_hw);
 783err2:
 784	free_percpu(p->cpu_bstats);
 785err1:
 786	kfree(p);
 787	return err;
 788}
 789EXPORT_SYMBOL(tcf_idr_create);
 790
 791int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
 792			      struct nlattr *est, struct tc_action **a,
 793			      const struct tc_action_ops *ops, int bind,
 794			      u32 flags)
 795{
 796	/* Set cpustats according to the action's flags. */
 797	return tcf_idr_create(tn, index, est, a, ops, bind,
 798			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
 799}
 800EXPORT_SYMBOL(tcf_idr_create_from_flags);
 801
 802/* Cleanup idr index that was allocated but not initialized. */
 803
 804void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
 805{
 806	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 807
 808	mutex_lock(&idrinfo->lock);
 809	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
 810	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
 811	mutex_unlock(&idrinfo->lock);
 812}
 813EXPORT_SYMBOL(tcf_idr_cleanup);
 814
 815/* Check if an action with the specified index exists. If the action is found,
 816 * increment its reference and bind counters and return 1. Otherwise insert a
 817 * temporary error pointer (to prevent concurrent users from inserting actions
 818 * with the same index) and return 0.
 819 *
 820 * May return -EAGAIN for binding actions in case of a parallel add/delete on
 821 * the requested index.
 822 */
 823
 824int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
 825			struct tc_action **a, int bind)
 826{
 827	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 828	struct tc_action *p;
 829	int ret;
 830	u32 max;
 831
 832	if (*index) {
 833again:
 834		rcu_read_lock();
 835		p = idr_find(&idrinfo->action_idr, *index);
 836
 837		if (IS_ERR(p)) {
 838			/* This means that another process allocated
 839			 * index but did not assign the pointer yet.
 840			 */
 841			rcu_read_unlock();
 842			goto again;
 843		}
 844
 845		if (!p) {
 846			/* Empty slot, try to allocate it */
 847			max = *index;
 848			rcu_read_unlock();
 849			goto new;
 850		}
 851
 852		if (!refcount_inc_not_zero(&p->tcfa_refcnt)) {
 853			/* Action was deleted in parallel */
 854			rcu_read_unlock();
 855			return -EAGAIN;
 856		}
 857
 858		if (bind)
 859			atomic_inc(&p->tcfa_bindcnt);
 860		*a = p;
 861
 862		rcu_read_unlock();
 863
 864		return 1;
 865	} else {
 866		/* Find a slot */
 867		*index = 1;
 868		max = UINT_MAX;
 869	}
 870
 871new:
 872	*a = NULL;
 873
 874	mutex_lock(&idrinfo->lock);
 875	ret = idr_alloc_u32(&idrinfo->action_idr, ERR_PTR(-EBUSY), index, max,
 876			    GFP_KERNEL);
 877	mutex_unlock(&idrinfo->lock);
 878
 879	/* N binds raced for action allocation,
 880	 * retry for all the ones that failed.
 881	 */
 882	if (ret == -ENOSPC && *index == max)
 883		ret = -EAGAIN;
 884
 885	return ret;
 886}
 887EXPORT_SYMBOL(tcf_idr_check_alloc);
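/*
 * Sketch of how an action's ->init() typically drives the helpers above,
 * modelled loosely on existing act_* modules ("foo" is hypothetical):
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (!err) {
 *		// slot reserved with ERR_PTR(-EBUSY), create the action
 *		err = tcf_idr_create_from_flags(tn, index, est, a,
 *						&act_foo_ops, bind, flags);
 *		if (err) {
 *			tcf_idr_cleanup(tn, index);
 *			return err;
 *		}
 *		ret = ACT_P_CREATED;
 *	} else if (err > 0) {
 *		if (bind)		// existing action, classifier bound
 *			return ACT_P_BOUND;
 *		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
 *			tcf_idr_release(*a, bind);
 *			return -EEXIST;
 *		}
 *	} else {
 *		return err;		// e.g. -EAGAIN, caller will retry
 *	}
 *
 * The reserved index only becomes visible to readers later, when
 * tcf_idr_insert_many() replaces the ERR_PTR(-EBUSY) placeholder.
 */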
 888
 889void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
 890			 struct tcf_idrinfo *idrinfo)
 891{
 892	struct idr *idr = &idrinfo->action_idr;
 893	struct tc_action *p;
 894	int ret;
 895	unsigned long id = 1;
 896	unsigned long tmp;
 897
 898	idr_for_each_entry_ul(idr, p, tmp, id) {
 899		ret = __tcf_idr_release(p, false, true);
 900		if (ret == ACT_P_DELETED)
 901			module_put(ops->owner);
 902		else if (ret < 0)
 903			return;
 904	}
 905	idr_destroy(&idrinfo->action_idr);
 906}
 907EXPORT_SYMBOL(tcf_idrinfo_destroy);
 908
 909static LIST_HEAD(act_base);
 910static DEFINE_RWLOCK(act_mod_lock);
 911/* Since the act ops id is stored in the pernet subsystem list,
 912 * there is no way to walk through only the action
 913 * subsystems, so we keep the tc action pernet ops ids in a
 914 * separate list for reoffload to walk through.
 915 */
 916static LIST_HEAD(act_pernet_id_list);
 917static DEFINE_MUTEX(act_id_mutex);
 918struct tc_act_pernet_id {
 919	struct list_head list;
 920	unsigned int id;
 921};
 922
 923static int tcf_pernet_add_id_list(unsigned int id)
 924{
 925	struct tc_act_pernet_id *id_ptr;
 926	int ret = 0;
 927
 928	mutex_lock(&act_id_mutex);
 929	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
 930		if (id_ptr->id == id) {
 931			ret = -EEXIST;
 932			goto err_out;
 933		}
 934	}
 935
 936	id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL);
 937	if (!id_ptr) {
 938		ret = -ENOMEM;
 939		goto err_out;
 940	}
 941	id_ptr->id = id;
 942
 943	list_add_tail(&id_ptr->list, &act_pernet_id_list);
 944
 945err_out:
 946	mutex_unlock(&act_id_mutex);
 947	return ret;
 948}
 949
 950static void tcf_pernet_del_id_list(unsigned int id)
 951{
 952	struct tc_act_pernet_id *id_ptr;
 953
 954	mutex_lock(&act_id_mutex);
 955	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
 956		if (id_ptr->id == id) {
 957			list_del(&id_ptr->list);
 958			kfree(id_ptr);
 959			break;
 960		}
 961	}
 962	mutex_unlock(&act_id_mutex);
 963}
 964
 965int tcf_register_action(struct tc_action_ops *act,
 966			struct pernet_operations *ops)
 967{
 968	struct tc_action_ops *a;
 969	int ret;
 970
 971	if (!act->act || !act->dump || !act->init)
 972		return -EINVAL;
 973
 974	/* We have to register pernet ops before making the action ops visible,
 975	 * otherwise tcf_action_init_1() could get a partially initialized
 976	 * netns.
 977	 */
 978	ret = register_pernet_subsys(ops);
 979	if (ret)
 980		return ret;
 981
 982	if (ops->id) {
 983		ret = tcf_pernet_add_id_list(*ops->id);
 984		if (ret)
 985			goto err_id;
 986	}
 987
 988	write_lock(&act_mod_lock);
 989	list_for_each_entry(a, &act_base, head) {
 990		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
 991			ret = -EEXIST;
 992			goto err_out;
 993		}
 994	}
 995	list_add_tail(&act->head, &act_base);
 996	write_unlock(&act_mod_lock);
 997
 998	return 0;
 999
1000err_out:
1001	write_unlock(&act_mod_lock);
1002	if (ops->id)
1003		tcf_pernet_del_id_list(*ops->id);
1004err_id:
1005	unregister_pernet_subsys(ops);
1006	return ret;
1007}
1008EXPORT_SYMBOL(tcf_register_action);
1009
1010int tcf_unregister_action(struct tc_action_ops *act,
1011			  struct pernet_operations *ops)
1012{
1013	struct tc_action_ops *a;
1014	int err = -ENOENT;
1015
1016	write_lock(&act_mod_lock);
1017	list_for_each_entry(a, &act_base, head) {
1018		if (a == act) {
1019			list_del(&act->head);
1020			err = 0;
1021			break;
1022		}
1023	}
1024	write_unlock(&act_mod_lock);
1025	if (!err) {
1026		unregister_pernet_subsys(ops);
1027		if (ops->id)
1028			tcf_pernet_del_id_list(*ops->id);
1029	}
1030	return err;
1031}
1032EXPORT_SYMBOL(tcf_unregister_action);
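/*
 * Minimal registration sketch for a hypothetical "act_foo" module; the
 * pernet boilerplate (foo_net_id, tc_action_net_init/exit) follows the
 * pattern of existing act_* modules and is an assumption here, not part of
 * this file:
 *
 *	static unsigned int foo_net_id;
 *	static struct tc_action_ops act_foo_ops = {
 *		.kind	= "foo",
 *		.owner	= THIS_MODULE,
 *		.act	= tcf_foo_act,
 *		.dump	= tcf_foo_dump,
 *		.init	= tcf_foo_init,
 *		.size	= sizeof(struct tcf_foo),
 *		.net_id	= &foo_net_id,
 *	};
 *
 *	static __net_init int foo_init_net(struct net *net)
 *	{
 *		struct tc_action_net *tn = net_generic(net, foo_net_id);
 *
 *		return tc_action_net_init(net, tn, &act_foo_ops);
 *	}
 *
 *	static void __net_exit foo_exit_net(struct list_head *net_list)
 *	{
 *		tc_action_net_exit(net_list, foo_net_id);
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init		= foo_init_net,
 *		.exit_batch	= foo_exit_net,
 *		.id		= &foo_net_id,
 *		.size		= sizeof(struct tc_action_net),
 *	};
 *
 * The module init/exit hooks then just call
 * tcf_register_action(&act_foo_ops, &foo_net_ops) and
 * tcf_unregister_action(&act_foo_ops, &foo_net_ops);
 * tcf_register_action() refuses ops without ->act, ->dump and ->init.
 */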
1033
1034/* lookup by name */
1035static struct tc_action_ops *tc_lookup_action_n(char *kind)
1036{
1037	struct tc_action_ops *a, *res = NULL;
1038
1039	if (kind) {
1040		read_lock(&act_mod_lock);
1041		list_for_each_entry(a, &act_base, head) {
1042			if (strcmp(kind, a->kind) == 0) {
1043				if (try_module_get(a->owner))
1044					res = a;
1045				break;
1046			}
1047		}
1048		read_unlock(&act_mod_lock);
1049	}
1050	return res;
1051}
1052
1053/* lookup by nlattr */
1054static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
1055{
1056	struct tc_action_ops *a, *res = NULL;
1057
1058	if (kind) {
1059		read_lock(&act_mod_lock);
1060		list_for_each_entry(a, &act_base, head) {
1061			if (nla_strcmp(kind, a->kind) == 0) {
1062				if (try_module_get(a->owner))
1063					res = a;
1064				break;
1065			}
1066		}
1067		read_unlock(&act_mod_lock);
1068	}
1069	return res;
1070}
1071
1072/* TCA_ACT_MAX_PRIO is 32, so jump counts only go up to 32 */
1073#define TCA_ACT_MAX_PRIO_MASK 0x1FF
1074int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
1075		    int nr_actions, struct tcf_result *res)
1076{
1077	u32 jmp_prgcnt = 0;
1078	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /*matches actions per filter */
1079	int i;
1080	int ret = TC_ACT_OK;
1081
1082	if (skb_skip_tc_classify(skb))
1083		return TC_ACT_OK;
1084
1085restart_act_graph:
1086	for (i = 0; i < nr_actions; i++) {
1087		const struct tc_action *a = actions[i];
1088		int repeat_ttl;
1089
1090		if (jmp_prgcnt > 0) {
1091			jmp_prgcnt -= 1;
1092			continue;
1093		}
1094
1095		if (tc_act_skip_sw(a->tcfa_flags))
1096			continue;
1097
1098		repeat_ttl = 32;
1099repeat:
1100		ret = tc_act(skb, a, res);
1101		if (unlikely(ret == TC_ACT_REPEAT)) {
1102			if (--repeat_ttl != 0)
1103				goto repeat;
1104			/* suspicious opcode, stop pipeline */
1105			net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
1106			return TC_ACT_OK;
1107		}
1108		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
1109			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
1110			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
1111				/* faulty opcode, stop pipeline */
1112				return TC_ACT_OK;
1113			} else {
1114				jmp_ttl -= 1;
1115				if (jmp_ttl > 0)
1116					goto restart_act_graph;
1117				else /* faulty graph, stop pipeline */
1118					return TC_ACT_OK;
1119			}
1120		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
1121			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
1122				tcf_set_drop_reason(skb,
1123						    SKB_DROP_REASON_TC_CHAIN_NOTFOUND);
1124				return TC_ACT_SHOT;
1125			}
1126			tcf_action_goto_chain_exec(a, res);
1127		}
1128
1129		if (ret != TC_ACT_PIPE)
1130			break;
1131	}
1132
1133	return ret;
1134}
1135EXPORT_SYMBOL(tcf_action_exec);
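/*
 * Worked example of the TC_ACT_JUMP handling above: an action returning
 * TC_ACT_JUMP | 2 makes tcf_action_exec() restart from the top of the
 * actions[] array with jmp_prgcnt == 2, so the first two entries are
 * skipped and execution resumes at actions[2]. jmp_ttl bounds the number
 * of such restarts so a faulty action graph cannot loop forever.
 */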
1136
1137int tcf_action_destroy(struct tc_action *actions[], int bind)
1138{
1139	const struct tc_action_ops *ops;
1140	struct tc_action *a;
1141	int ret = 0, i;
1142
1143	tcf_act_for_each_action(i, a, actions) {
1144		actions[i] = NULL;
1145		ops = a->ops;
1146		ret = __tcf_idr_release(a, bind, true);
1147		if (ret == ACT_P_DELETED)
1148			module_put(ops->owner);
1149		else if (ret < 0)
1150			return ret;
1151	}
1152	return ret;
1153}
1154
1155static int tcf_action_put(struct tc_action *p)
1156{
1157	return __tcf_action_put(p, false);
1158}
1159
1160static void tcf_action_put_many(struct tc_action *actions[])
1161{
1162	struct tc_action *a;
1163	int i;
1164
1165	tcf_act_for_each_action(i, a, actions) {
1166		const struct tc_action_ops *ops = a->ops;
1167		if (tcf_action_put(a))
1168			module_put(ops->owner);
1169	}
1170}
1171
1172static void tca_put_bound_many(struct tc_action *actions[], int init_res[])
1173{
1174	struct tc_action *a;
1175	int i;
1176
1177	tcf_act_for_each_action(i, a, actions) {
1178		const struct tc_action_ops *ops = a->ops;
1179
1180		if (init_res[i] == ACT_P_CREATED)
1181			continue;
1182
1183		if (tcf_action_put(a))
1184			module_put(ops->owner);
1185	}
1186}
1187
1188int
1189tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
1190{
1191	return a->ops->dump(skb, a, bind, ref);
1192}
1193
1194int
1195tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
1196{
1197	int err = -EINVAL;
1198	unsigned char *b = skb_tail_pointer(skb);
1199	struct nlattr *nest;
1200	u32 flags;
1201
1202	if (tcf_action_dump_terse(skb, a, false))
1203		goto nla_put_failure;
1204
1205	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
1206	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
1207			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
1208		goto nla_put_failure;
1209
1210	if (a->used_hw_stats_valid &&
1211	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
1212			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
1213		goto nla_put_failure;
1214
1215	flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
1216	if (flags &&
1217	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
1218			       flags, flags))
1219		goto nla_put_failure;
1220
1221	if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
1222		goto nla_put_failure;
1223
1224	nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS);
1225	if (nest == NULL)
1226		goto nla_put_failure;
1227	err = tcf_action_dump_old(skb, a, bind, ref);
1228	if (err > 0) {
1229		nla_nest_end(skb, nest);
1230		return err;
1231	}
1232
1233nla_put_failure:
1234	nlmsg_trim(skb, b);
1235	return -1;
1236}
1237EXPORT_SYMBOL(tcf_action_dump_1);
1238
1239int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
1240		    int bind, int ref, bool terse)
1241{
1242	struct tc_action *a;
1243	int err = -EINVAL, i;
1244	struct nlattr *nest;
1245
1246	tcf_act_for_each_action(i, a, actions) {
1247		nest = nla_nest_start_noflag(skb, i + 1);
1248		if (nest == NULL)
1249			goto nla_put_failure;
1250		err = terse ? tcf_action_dump_terse(skb, a, false) :
1251			tcf_action_dump_1(skb, a, bind, ref);
1252		if (err < 0)
1253			goto errout;
1254		nla_nest_end(skb, nest);
1255	}
1256
1257	return 0;
1258
1259nla_put_failure:
1260	err = -EINVAL;
1261errout:
1262	nla_nest_cancel(skb, nest);
1263	return err;
1264}
1265
1266static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
1267{
1268	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
1269	if (!c)
1270		return NULL;
1271
1272	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
1273	if (!c->data) {
1274		kfree(c);
1275		return NULL;
1276	}
1277	c->len = nla_len(tb[TCA_ACT_COOKIE]);
1278
1279	return c;
1280}
1281
1282static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
1283{
1284	struct nla_bitfield32 hw_stats_bf;
1285
1286	/* If the user did not pass the attr, they do not care
1287	 * about the type. Return "any" in that case, which
1288	 * means all supported types are selected.
1289	 */
1290	if (!hw_stats_attr)
1291		return TCA_ACT_HW_STATS_ANY;
1292	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
1293	return hw_stats_bf.value;
1294}
1295
1296static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
1297	[TCA_ACT_KIND]		= { .type = NLA_STRING },
1298	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
1299	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
1300				    .len = TC_COOKIE_MAX_SIZE },
1301	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
1302	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
1303							TCA_ACT_FLAGS_SKIP_HW |
1304							TCA_ACT_FLAGS_SKIP_SW),
1305	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
1306};
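/*
 * Shape of the per-action attribute nest this policy validates, as built
 * by userspace inside TCA_ACT_TAB (the values shown are illustrative):
 *
 *	[ 1 ]                    nest, priority/order of the action
 *	    TCA_ACT_KIND         "mirred"                    (string)
 *	    TCA_ACT_OPTIONS      nest, parsed by the action's ->init()
 *	    TCA_ACT_COOKIE       opaque blob, <= TC_COOKIE_MAX_SIZE
 *	    TCA_ACT_FLAGS        bitfield32: NO_PERCPU_STATS, SKIP_HW
 *	                         or SKIP_SW
 *	    TCA_ACT_HW_STATS     bitfield32 within TCA_ACT_HW_STATS_ANY
 *	[ 2 ]                    next action for the same filter, etc.,
 *	                         up to TCA_ACT_MAX_PRIO (32) entries
 */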
1307
1308void tcf_idr_insert_many(struct tc_action *actions[], int init_res[])
1309{
1310	struct tc_action *a;
1311	int i;
1312
1313	tcf_act_for_each_action(i, a, actions) {
1314		struct tcf_idrinfo *idrinfo;
1315
1316		if (init_res[i] == ACT_P_BOUND)
1317			continue;
1318
1319		idrinfo = a->idrinfo;
1320		mutex_lock(&idrinfo->lock);
1321		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
1322		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
1323		mutex_unlock(&idrinfo->lock);
1324	}
1325}
1326
1327struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, u32 flags,
1328					 struct netlink_ext_ack *extack)
1329{
1330	bool police = flags & TCA_ACT_FLAGS_POLICE;
1331	struct nlattr *tb[TCA_ACT_MAX + 1];
1332	struct tc_action_ops *a_o;
1333	char act_name[IFNAMSIZ];
1334	struct nlattr *kind;
1335	int err;
1336
1337	if (!police) {
1338		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1339						  tcf_action_policy, extack);
1340		if (err < 0)
1341			return ERR_PTR(err);
1342		err = -EINVAL;
1343		kind = tb[TCA_ACT_KIND];
1344		if (!kind) {
1345			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
1346			return ERR_PTR(err);
1347		}
1348		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
1349			NL_SET_ERR_MSG(extack, "TC action name too long");
1350			return ERR_PTR(err);
1351		}
1352	} else {
1353		if (strscpy(act_name, "police", IFNAMSIZ) < 0) {
1354			NL_SET_ERR_MSG(extack, "TC action name too long");
1355			return ERR_PTR(-EINVAL);
1356		}
1357	}
1358
1359	a_o = tc_lookup_action_n(act_name);
1360	if (a_o == NULL) {
1361#ifdef CONFIG_MODULES
1362		bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
1363
1364		if (rtnl_held)
1365			rtnl_unlock();
1366		request_module("act_%s", act_name);
1367		if (rtnl_held)
1368			rtnl_lock();
1369
1370		a_o = tc_lookup_action_n(act_name);
1371
1372		/* We dropped the RTNL semaphore in order to
1373		 * perform the module load.  So, even if we
1374		 * succeeded in loading the module we have to
1375		 * tell the caller to replay the request.  We
1376		 * indicate this using -EAGAIN.
1377		 */
1378		if (a_o != NULL) {
1379			module_put(a_o->owner);
1380			return ERR_PTR(-EAGAIN);
1381		}
1382#endif
1383		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
1384		return ERR_PTR(-ENOENT);
1385	}
1386
1387	return a_o;
1388}
1389
1390struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
1391				    struct nlattr *nla, struct nlattr *est,
1392				    struct tc_action_ops *a_o, int *init_res,
1393				    u32 flags, struct netlink_ext_ack *extack)
1394{
1395	bool police = flags & TCA_ACT_FLAGS_POLICE;
1396	struct nla_bitfield32 userflags = { 0, 0 };
1397	struct tc_cookie *user_cookie = NULL;
1398	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
1399	struct nlattr *tb[TCA_ACT_MAX + 1];
1400	struct tc_action *a;
1401	int err;
1402
1403	/* backward compatibility for policer */
1404	if (!police) {
1405		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1406						  tcf_action_policy, extack);
1407		if (err < 0)
1408			return ERR_PTR(err);
1409		if (tb[TCA_ACT_COOKIE]) {
1410			user_cookie = nla_memdup_cookie(tb);
1411			if (!user_cookie) {
1412				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
1413				err = -ENOMEM;
1414				goto err_out;
1415			}
1416		}
1417		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
1418		if (tb[TCA_ACT_FLAGS]) {
1419			userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
1420			if (!tc_act_flags_valid(userflags.value)) {
1421				err = -EINVAL;
1422				goto err_out;
1423			}
1424		}
1425
1426		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
1427				userflags.value | flags, extack);
1428	} else {
1429		err = a_o->init(net, nla, est, &a, tp, userflags.value | flags,
1430				extack);
1431	}
1432	if (err < 0)
1433		goto err_out;
1434	*init_res = err;
1435
1436	if (!police && tb[TCA_ACT_COOKIE])
1437		tcf_set_action_cookie(&a->user_cookie, user_cookie);
1438
1439	if (!police)
1440		a->hw_stats = hw_stats;
1441
1442	return a;
1443
1444err_out:
1445	if (user_cookie) {
1446		kfree(user_cookie->data);
1447		kfree(user_cookie);
1448	}
1449	return ERR_PTR(err);
1450}
1451
1452static bool tc_act_bind(u32 flags)
1453{
1454	return !!(flags & TCA_ACT_FLAGS_BIND);
1455}
1456
1457/* Returns the number of initialized actions or a negative error. */
1458
1459int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
1460		    struct nlattr *est, struct tc_action *actions[],
1461		    int init_res[], size_t *attr_size,
1462		    u32 flags, u32 fl_flags,
1463		    struct netlink_ext_ack *extack)
1464{
1465	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
1466	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1467	struct tc_action *act;
1468	size_t sz = 0;
1469	int err;
1470	int i;
1471
1472	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
1473					  extack);
1474	if (err < 0)
1475		return err;
1476
1477	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1478		struct tc_action_ops *a_o;
1479
1480		a_o = tc_action_load_ops(tb[i], flags, extack);
1481		if (IS_ERR(a_o)) {
1482			err = PTR_ERR(a_o);
1483			goto err_mod;
1484		}
1485		ops[i - 1] = a_o;
1486	}
1487
1488	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1489		act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
1490					&init_res[i - 1], flags, extack);
1491		if (IS_ERR(act)) {
1492			err = PTR_ERR(act);
1493			goto err;
1494		}
1495		sz += tcf_action_fill_size(act);
1496		/* Start from index 0 */
1497		actions[i - 1] = act;
1498		if (tc_act_bind(flags)) {
1499			bool skip_sw = tc_skip_sw(fl_flags);
1500			bool skip_hw = tc_skip_hw(fl_flags);
1501
1502			if (tc_act_bind(act->tcfa_flags))
1503				continue;
1504			if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
1505			    skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
1506				NL_SET_ERR_MSG(extack,
1507					       "Mismatch between action and filter offload flags");
1508				err = -EINVAL;
1509				goto err;
1510			}
1511		} else {
1512			err = tcf_action_offload_add(act, extack);
1513			if (tc_act_skip_sw(act->tcfa_flags) && err)
1514				goto err;
1515		}
1516	}
1517
1518	/* We have to commit them all together, because if any error happened in
1519	 * between, we could not handle the failure gracefully.
1520	 */
1521	tcf_idr_insert_many(actions, init_res);
1522
1523	*attr_size = tcf_action_full_attrs_size(sz);
1524	err = i - 1;
1525	goto err_mod;
1526
1527err:
1528	tcf_action_destroy(actions, flags & TCA_ACT_FLAGS_BIND);
1529err_mod:
1530	for (i = 0; i < TCA_ACT_MAX_PRIO && ops[i]; i++)
1531		module_put(ops[i]->owner);
1532	return err;
1533}
1534
1535void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
1536			     u64 drops, bool hw)
1537{
1538	if (a->cpu_bstats) {
1539		_bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
1540
1541		this_cpu_ptr(a->cpu_qstats)->drops += drops;
1542
1543		if (hw)
1544			_bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
1545				       bytes, packets);
1546		return;
1547	}
1548
1549	_bstats_update(&a->tcfa_bstats, bytes, packets);
1550	a->tcfa_qstats.drops += drops;
1551	if (hw)
1552		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
1553}
1554EXPORT_SYMBOL(tcf_action_update_stats);
1555
1556int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
1557			  int compat_mode)
1558{
1559	int err = 0;
1560	struct gnet_dump d;
1561
1562	if (p == NULL)
1563		goto errout;
1564
1565	/* compat_mode being true specifies a call that is supposed
1566	 * to add additional backward compatibility statistic TLVs.
1567	 */
1568	if (compat_mode) {
1569		if (p->type == TCA_OLD_COMPAT)
1570			err = gnet_stats_start_copy_compat(skb, 0,
1571							   TCA_STATS,
1572							   TCA_XSTATS,
1573							   &p->tcfa_lock, &d,
1574							   TCA_PAD);
1575		else
1576			return 0;
1577	} else
1578		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
1579					    &p->tcfa_lock, &d, TCA_ACT_PAD);
1580
1581	if (err < 0)
1582		goto errout;
1583
1584	if (gnet_stats_copy_basic(&d, p->cpu_bstats,
1585				  &p->tcfa_bstats, false) < 0 ||
1586	    gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
1587				     &p->tcfa_bstats_hw, false) < 0 ||
1588	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
1589	    gnet_stats_copy_queue(&d, p->cpu_qstats,
1590				  &p->tcfa_qstats,
1591				  p->tcfa_qstats.qlen) < 0)
1592		goto errout;
1593
1594	if (gnet_stats_finish_copy(&d) < 0)
1595		goto errout;
1596
1597	return 0;
1598
1599errout:
1600	return -1;
1601}
1602
1603static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
1604			u32 portid, u32 seq, u16 flags, int event, int bind,
1605			int ref, struct netlink_ext_ack *extack)
1606{
1607	struct tcamsg *t;
1608	struct nlmsghdr *nlh;
1609	unsigned char *b = skb_tail_pointer(skb);
1610	struct nlattr *nest;
1611
1612	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
1613	if (!nlh)
1614		goto out_nlmsg_trim;
1615	t = nlmsg_data(nlh);
1616	t->tca_family = AF_UNSPEC;
1617	t->tca__pad1 = 0;
1618	t->tca__pad2 = 0;
1619
1620	if (extack && extack->_msg &&
1621	    nla_put_string(skb, TCA_ROOT_EXT_WARN_MSG, extack->_msg))
1622		goto out_nlmsg_trim;
1623
1624	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1625	if (!nest)
1626		goto out_nlmsg_trim;
1627
1628	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
1629		goto out_nlmsg_trim;
1630
1631	nla_nest_end(skb, nest);
1632
1633	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1634
1635	return skb->len;
1636
1637out_nlmsg_trim:
1638	nlmsg_trim(skb, b);
1639	return -1;
1640}
1641
1642static int
1643tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
1644	       struct tc_action *actions[], int event,
1645	       struct netlink_ext_ack *extack)
1646{
1647	struct sk_buff *skb;
1648
1649	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1650	if (!skb)
1651		return -ENOBUFS;
1652	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
1653			 0, 1, NULL) <= 0) {
1654		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
1655		kfree_skb(skb);
1656		return -EINVAL;
1657	}
1658
1659	return rtnl_unicast(skb, net, portid);
1660}
1661
1662static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
1663					  struct nlmsghdr *n, u32 portid,
1664					  struct netlink_ext_ack *extack)
1665{
1666	struct nlattr *tb[TCA_ACT_MAX + 1];
1667	const struct tc_action_ops *ops;
1668	struct tc_action *a;
1669	int index;
1670	int err;
1671
1672	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1673					  tcf_action_policy, extack);
1674	if (err < 0)
1675		goto err_out;
1676
1677	err = -EINVAL;
1678	if (tb[TCA_ACT_INDEX] == NULL ||
1679	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
1680		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
1681		goto err_out;
1682	}
1683	index = nla_get_u32(tb[TCA_ACT_INDEX]);
1684
1685	err = -EINVAL;
1686	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
1687	if (!ops) { /* could happen in batch of actions */
1688		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
1689		goto err_out;
1690	}
1691	err = -ENOENT;
1692	if (__tcf_idr_search(net, ops, &a, index) == 0) {
1693		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
1694		goto err_mod;
1695	}
1696
1697	module_put(ops->owner);
1698	return a;
1699
1700err_mod:
1701	module_put(ops->owner);
1702err_out:
1703	return ERR_PTR(err);
1704}
1705
1706static int tca_action_flush(struct net *net, struct nlattr *nla,
1707			    struct nlmsghdr *n, u32 portid,
1708			    struct netlink_ext_ack *extack)
1709{
1710	struct sk_buff *skb;
1711	unsigned char *b;
1712	struct nlmsghdr *nlh;
1713	struct tcamsg *t;
1714	struct netlink_callback dcb;
1715	struct nlattr *nest;
1716	struct nlattr *tb[TCA_ACT_MAX + 1];
1717	const struct tc_action_ops *ops;
1718	struct nlattr *kind;
1719	int err = -ENOMEM;
1720
1721	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1722	if (!skb)
1723		return err;
1724
1725	b = skb_tail_pointer(skb);
1726
1727	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1728					  tcf_action_policy, extack);
1729	if (err < 0)
1730		goto err_out;
1731
1732	err = -EINVAL;
1733	kind = tb[TCA_ACT_KIND];
1734	ops = tc_lookup_action(kind);
1735	if (!ops) { /* someone is trying to flush an unknown action */
1736		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
1737		goto err_out;
1738	}
1739
1740	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
1741			sizeof(*t), 0);
1742	if (!nlh) {
1743		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
1744		goto out_module_put;
1745	}
1746	t = nlmsg_data(nlh);
1747	t->tca_family = AF_UNSPEC;
1748	t->tca__pad1 = 0;
1749	t->tca__pad2 = 0;
1750
1751	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1752	if (!nest) {
1753		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
1754		goto out_module_put;
1755	}
1756
1757	err = __tcf_generic_walker(net, skb, &dcb, RTM_DELACTION, ops, extack);
1758	if (err <= 0) {
1759		nla_nest_cancel(skb, nest);
1760		goto out_module_put;
1761	}
1762
1763	nla_nest_end(skb, nest);
1764
1765	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1766	nlh->nlmsg_flags |= NLM_F_ROOT;
1767	module_put(ops->owner);
1768	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1769			     n->nlmsg_flags & NLM_F_ECHO);
1770	if (err < 0)
1771		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");
1772
1773	return err;
1774
1775out_module_put:
1776	module_put(ops->owner);
1777err_out:
1778	kfree_skb(skb);
1779	return err;
1780}
1781
1782static int tcf_action_delete(struct net *net, struct tc_action *actions[])
1783{
1784	struct tc_action *a;
1785	int i;
1786
1787	tcf_act_for_each_action(i, a, actions) {
1788		const struct tc_action_ops *ops = a->ops;
1789		/* Actions can be deleted concurrently so we must save their
1790		 * type and id to search again after reference is released.
1791		 */
1792		struct tcf_idrinfo *idrinfo = a->idrinfo;
1793		u32 act_index = a->tcfa_index;
1794
1795		actions[i] = NULL;
1796		if (tcf_action_put(a)) {
1797			/* last reference, action was deleted concurrently */
1798			module_put(ops->owner);
1799		} else {
1800			int ret;
1801
1802			/* now do the delete */
1803			ret = tcf_idr_delete_index(idrinfo, act_index);
1804			if (ret < 0)
1805				return ret;
1806		}
1807	}
1808	return 0;
1809}
1810
1811static struct sk_buff *tcf_reoffload_del_notify_msg(struct net *net,
1812						    struct tc_action *action)
1813{
1814	size_t attr_size = tcf_action_fill_size(action);
1815	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
1816		[0] = action,
1817	};
1818	struct sk_buff *skb;
1819
1820	skb = alloc_skb(max(attr_size, NLMSG_GOODSIZE), GFP_KERNEL);
1821	if (!skb)
1822		return ERR_PTR(-ENOBUFS);
1823
1824	if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1, NULL) <= 0) {
1825		kfree_skb(skb);
1826		return ERR_PTR(-EINVAL);
1827	}
1828
1829	return skb;
1830}
1831
1832static int tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
1833{
1834	const struct tc_action_ops *ops = action->ops;
1835	struct sk_buff *skb;
1836	int ret;
1837
1838	if (!rtnl_notify_needed(net, 0, RTNLGRP_TC)) {
1839		skb = NULL;
1840	} else {
1841		skb = tcf_reoffload_del_notify_msg(net, action);
1842		if (IS_ERR(skb))
1843			return PTR_ERR(skb);
1844	}
1845
1846	ret = tcf_idr_release_unsafe(action);
1847	if (ret == ACT_P_DELETED) {
1848		module_put(ops->owner);
1849		ret = rtnetlink_maybe_send(skb, net, 0, RTNLGRP_TC, 0);
1850	} else {
1851		kfree_skb(skb);
1852	}
1853
1854	return ret;
1855}
1856
1857int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
1858			    void *cb_priv, bool add)
1859{
1860	struct tc_act_pernet_id *id_ptr;
1861	struct tcf_idrinfo *idrinfo;
1862	struct tc_action_net *tn;
1863	struct tc_action *p;
1864	unsigned int act_id;
1865	unsigned long tmp;
1866	unsigned long id;
1867	struct idr *idr;
1868	struct net *net;
1869	int ret;
1870
1871	if (!cb)
1872		return -EINVAL;
1873
1874	down_read(&net_rwsem);
1875	mutex_lock(&act_id_mutex);
1876
1877	for_each_net(net) {
1878		list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
1879			act_id = id_ptr->id;
1880			tn = net_generic(net, act_id);
1881			if (!tn)
1882				continue;
1883			idrinfo = tn->idrinfo;
1884			if (!idrinfo)
1885				continue;
1886
1887			mutex_lock(&idrinfo->lock);
1888			idr = &idrinfo->action_idr;
1889			idr_for_each_entry_ul(idr, p, tmp, id) {
1890				if (IS_ERR(p) || tc_act_bind(p->tcfa_flags))
1891					continue;
1892				if (add) {
1893					tcf_action_offload_add_ex(p, NULL, cb,
1894								  cb_priv);
1895					continue;
1896				}
1897
1898				/* cb unregister to update hw count */
1899				ret = tcf_action_offload_del_ex(p, cb, cb_priv);
1900				if (ret < 0)
1901					continue;
1902				if (tc_act_skip_sw(p->tcfa_flags) &&
1903				    !tc_act_in_hw(p))
1904					tcf_reoffload_del_notify(net, p);
1905			}
1906			mutex_unlock(&idrinfo->lock);
1907		}
1908	}
1909	mutex_unlock(&act_id_mutex);
1910	up_read(&net_rwsem);
1911
1912	return 0;
1913}
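/* tcf_action_reoffload_cb() is invoked when an indirect block callback
 * is registered or unregistered.  It walks every network namespace
 * (under net_rwsem) and every registered action type (under
 * act_id_mutex), then scans each idrinfo's action IDR.  Actions created
 * as part of a classifier (bind flag set) are skipped.  On "add" each
 * action is offloaded to the new callback; on removal the callback is
 * detached to update the hardware count, and skip_sw actions that are
 * no longer in hardware are deleted via tcf_reoffload_del_notify().
 */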
1914
1915static struct sk_buff *tcf_del_notify_msg(struct net *net, struct nlmsghdr *n,
1916					  struct tc_action *actions[],
1917					  u32 portid, size_t attr_size,
1918					  struct netlink_ext_ack *extack)
1919{
1920	struct sk_buff *skb;
1921
1922	skb = alloc_skb(max(attr_size, NLMSG_GOODSIZE), GFP_KERNEL);
1923	if (!skb)
1924		return ERR_PTR(-ENOBUFS);
1925
1926	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
1927			 0, 2, extack) <= 0) {
1928		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
1929		kfree_skb(skb);
1930		return ERR_PTR(-EINVAL);
1931	}
1932
1933	return skb;
1934}
1935
1936static int tcf_del_notify(struct net *net, struct nlmsghdr *n,
1937			  struct tc_action *actions[], u32 portid,
1938			  size_t attr_size, struct netlink_ext_ack *extack)
1939{
1940	struct sk_buff *skb;
1941	int ret;
1942
1943	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) {
1944		skb = NULL;
1945	} else {
1946		skb = tcf_del_notify_msg(net, n, actions, portid, attr_size,
1947					 extack);
1948		if (IS_ERR(skb))
1949			return PTR_ERR(skb);
1950	}
1951
1952	/* now do the delete */
1953	ret = tcf_action_delete(net, actions);
1954	if (ret < 0) {
1955		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
1956		kfree_skb(skb);
1957		return ret;
1958	}
1959
1960	return rtnetlink_maybe_send(skb, net, portid, RTNLGRP_TC,
1961				    n->nlmsg_flags & NLM_F_ECHO);
1962}
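/* tcf_del_notify() prepares the RTM_DELACTION notification up front
 * (skipped when rtnl_notify_needed() says nobody needs it), performs
 * the actual delete via tcf_action_delete(), and only then sends the
 * message, echoing it to the requester when NLM_F_ECHO is set.
 */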
1963
1964static int
1965tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
1966	      u32 portid, int event, struct netlink_ext_ack *extack)
1967{
1968	int i, ret;
1969	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1970	struct tc_action *act;
1971	size_t attr_size = 0;
1972	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
1973
1974	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
1975					  extack);
1976	if (ret < 0)
1977		return ret;
1978
1979	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
1980		if (tb[1])
1981			return tca_action_flush(net, tb[1], n, portid, extack);
1982
1983		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
1984		return -EINVAL;
1985	}
1986
1987	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1988		act = tcf_action_get_1(net, tb[i], n, portid, extack);
1989		if (IS_ERR(act)) {
1990			ret = PTR_ERR(act);
1991			goto err;
1992		}
1993		attr_size += tcf_action_fill_size(act);
1994		actions[i - 1] = act;
1995	}
1996
1997	attr_size = tcf_action_full_attrs_size(attr_size);
1998
1999	if (event == RTM_GETACTION)
2000		ret = tcf_get_notify(net, portid, n, actions, event, extack);
2001	else { /* delete */
2002		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
2003		if (ret)
2004			goto err;
2005		return 0;
2006	}
2007err:
2008	tcf_action_put_many(actions);
2009	return ret;
2010}
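/* tca_action_gd() is the shared get/delete handler.  A delete with
 * NLM_F_ROOT set flushes a whole action kind via tca_action_flush();
 * otherwise each TCA_ACT_TAB slot is resolved to an existing action
 * with tcf_action_get_1(), and the set is either dumped back to the
 * requester (RTM_GETACTION) or deleted with notification
 * (RTM_DELACTION).  The references taken here are dropped on the error
 * path and after a successful get; a successful delete consumes them in
 * tcf_action_delete().
 */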
2011
2012static struct sk_buff *tcf_add_notify_msg(struct net *net, struct nlmsghdr *n,
2013					  struct tc_action *actions[],
2014					  u32 portid, size_t attr_size,
2015					  struct netlink_ext_ack *extack)
2016{
2017	struct sk_buff *skb;
2018
2019	skb = alloc_skb(max(attr_size, NLMSG_GOODSIZE), GFP_KERNEL);
2020	if (!skb)
2021		return ERR_PTR(-ENOBUFS);
2022
2023	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
2024			 RTM_NEWACTION, 0, 0, extack) <= 0) {
2025		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
2026		kfree_skb(skb);
2027		return ERR_PTR(-EINVAL);
2028	}
2029
2030	return skb;
2031}
2032
2033static int tcf_add_notify(struct net *net, struct nlmsghdr *n,
2034			  struct tc_action *actions[], u32 portid,
2035			  size_t attr_size, struct netlink_ext_ack *extack)
2036{
2037	struct sk_buff *skb;
2038
2039	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) {
2040		skb = NULL;
2041	} else {
2042		skb = tcf_add_notify_msg(net, n, actions, portid, attr_size,
2043					 extack);
2044		if (IS_ERR(skb))
2045			return PTR_ERR(skb);
2046	}
2047
2048	return rtnetlink_maybe_send(skb, net, portid, RTNLGRP_TC,
2049				    n->nlmsg_flags & NLM_F_ECHO);
2050}
2051
2052static int tcf_action_add(struct net *net, struct nlattr *nla,
2053			  struct nlmsghdr *n, u32 portid, u32 flags,
2054			  struct netlink_ext_ack *extack)
2055{
2056	size_t attr_size = 0;
2057	int loop, ret;
2058	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
2059	int init_res[TCA_ACT_MAX_PRIO] = {};
2060
2061	for (loop = 0; loop < 10; loop++) {
2062		ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
2063				      &attr_size, flags, 0, extack);
2064		if (ret != -EAGAIN)
2065			break;
2066	}
2067
2068	if (ret < 0)
2069		return ret;
2070
2071	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
2072
2073	/* only put bound actions */
2074	tca_put_bound_many(actions, init_res);
2075
2076	return ret;
2077}
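/* tcf_action_add() retries tcf_action_init() a bounded number of times
 * because the init path can return -EAGAIN (for instance after an
 * action module had to be auto-loaded).  Once the actions are in place
 * it sends the RTM_NEWACTION notification and then drops only the extra
 * references recorded in init_res[] for actions that were bound to
 * pre-existing entries.
 */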
2078
2079static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
2080	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
2081						 TCA_ACT_FLAG_TERSE_DUMP),
2082	[TCA_ROOT_TIME_DELTA]      = { .type = NLA_U32 },
2083};
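/* tcaa_policy validates the root-level attributes of an action request:
 * TCA_ROOT_FLAGS is a bitfield32 limited to the large-dump and
 * terse-dump flags, and TCA_ROOT_TIME_DELTA is a u32 number of
 * milliseconds used by the dump path below.
 */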
2084
2085static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
2086			 struct netlink_ext_ack *extack)
2087{
2088	struct net *net = sock_net(skb->sk);
2089	struct nlattr *tca[TCA_ROOT_MAX + 1];
2090	u32 portid = NETLINK_CB(skb).portid;
2091	u32 flags = 0;
2092	int ret = 0;
2093
2094	if ((n->nlmsg_type != RTM_GETACTION) &&
2095	    !netlink_capable(skb, CAP_NET_ADMIN))
2096		return -EPERM;
2097
2098	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
2099				     TCA_ROOT_MAX, NULL, extack);
2100	if (ret < 0)
2101		return ret;
2102
2103	if (tca[TCA_ACT_TAB] == NULL) {
2104		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
2105		return -EINVAL;
2106	}
2107
2108	/* n->nlmsg_flags & NLM_F_CREATE */
2109	switch (n->nlmsg_type) {
2110	case RTM_NEWACTION:
2111		/* We are going to assume all other flags
2112		 * imply "create only if it doesn't exist".
2113		 * Note that CREATE | EXCL implies that, but
2114		 * since we want to avoid ambiguity (e.g. when
2115		 * flags is zero) we just set this explicitly.
2116		 */
2117		if (n->nlmsg_flags & NLM_F_REPLACE)
2118			flags = TCA_ACT_FLAGS_REPLACE;
2119		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, flags,
2120				     extack);
2121		break;
2122	case RTM_DELACTION:
2123		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
2124				    portid, RTM_DELACTION, extack);
2125		break;
2126	case RTM_GETACTION:
2127		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
2128				    portid, RTM_GETACTION, extack);
2129		break;
2130	default:
2131		BUG();
2132	}
2133
2134	return ret;
2135}
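/* tc_ctl_action() is the doit handler registered below for
 * RTM_NEWACTION, RTM_DELACTION and RTM_GETACTION; only get requests are
 * allowed without CAP_NET_ADMIN.  For illustration, these are the
 * messages the iproute2 tc utility emits for commands such as
 *
 *	tc actions add action gact drop index 10
 *	tc actions get action gact index 10
 *	tc actions del action gact index 10
 *	tc actions flush action gact
 *
 * where the flush form is the RTM_DELACTION + NLM_F_ROOT case handled
 * in tca_action_gd().
 */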
2136
2137static struct nlattr *find_dump_kind(struct nlattr **nla)
2138{
2139	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
2140	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
2141	struct nlattr *kind;
2142
2143	tb1 = nla[TCA_ACT_TAB];
2144	if (tb1 == NULL)
2145		return NULL;
2146
2147	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
2148		return NULL;
2149
2150	if (tb[1] == NULL)
2151		return NULL;
2152	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0)
2153		return NULL;
2154	kind = tb2[TCA_ACT_KIND];
2155
2156	return kind;
2157}
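/* find_dump_kind() digs the TCA_ACT_KIND string of the first action out
 * of the TCA_ACT_TAB payload of a dump request, since an action dump is
 * always scoped to a single action kind.
 */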
2158
2159static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
2160{
2161	struct net *net = sock_net(skb->sk);
2162	struct nlmsghdr *nlh;
2163	unsigned char *b = skb_tail_pointer(skb);
2164	struct nlattr *nest;
2165	struct tc_action_ops *a_o;
2166	int ret = 0;
2167	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
2168	struct nlattr *tb[TCA_ROOT_MAX + 1];
2169	struct nlattr *count_attr = NULL;
2170	unsigned long jiffy_since = 0;
2171	struct nlattr *kind = NULL;
2172	struct nla_bitfield32 bf;
2173	u32 msecs_since = 0;
2174	u32 act_count = 0;
2175
2176	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
2177				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
2178	if (ret < 0)
2179		return ret;
2180
2181	kind = find_dump_kind(tb);
2182	if (kind == NULL) {
2183		pr_info("tc_dump_action: action bad kind\n");
2184		return 0;
2185	}
2186
2187	a_o = tc_lookup_action(kind);
2188	if (a_o == NULL)
2189		return 0;
2190
2191	cb->args[2] = 0;
2192	if (tb[TCA_ROOT_FLAGS]) {
2193		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
2194		cb->args[2] = bf.value;
2195	}
2196
2197	if (tb[TCA_ROOT_TIME_DELTA]) {
2198		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
2199	}
2200
2201	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2202			cb->nlh->nlmsg_type, sizeof(*t), 0);
2203	if (!nlh)
2204		goto out_module_put;
2205
2206	if (msecs_since)
2207		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);
2208
2209	t = nlmsg_data(nlh);
2210	t->tca_family = AF_UNSPEC;
2211	t->tca__pad1 = 0;
2212	t->tca__pad2 = 0;
2213	cb->args[3] = jiffy_since;
2214	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
2215	if (!count_attr)
2216		goto out_module_put;
2217
2218	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
2219	if (nest == NULL)
2220		goto out_module_put;
2221
2222	ret = __tcf_generic_walker(net, skb, cb, RTM_GETACTION, a_o, NULL);
2223	if (ret < 0)
2224		goto out_module_put;
2225
2226	if (ret > 0) {
2227		nla_nest_end(skb, nest);
2228		ret = skb->len;
2229		act_count = cb->args[1];
2230		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
2231		cb->args[1] = 0;
2232	} else
2233		nlmsg_trim(skb, b);
2234
2235	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2236	if (NETLINK_CB(cb->skb).portid && ret)
2237		nlh->nlmsg_flags |= NLM_F_MULTI;
2238	module_put(a_o->owner);
2239	return skb->len;
2240
2241out_module_put:
2242	module_put(a_o->owner);
2243	nlmsg_trim(skb, b);
2244	return skb->len;
2245}
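/* tc_dump_action() services RTM_GETACTION dump requests.  The
 * TCA_ROOT_FLAGS bitfield (large/terse dump) is stashed in cb->args[2],
 * an optional TCA_ROOT_TIME_DELTA is turned into a jiffies cutoff in
 * cb->args[3], and TCA_ROOT_COUNT is reserved up front so the number of
 * dumped actions can be filled in once the walker has finished.
 */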
2246
2247static int __init tc_action_init(void)
2248{
2249	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
2250	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
2251	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
2252		      0);
2253
2254	return 0;
2255}
2256
2257subsys_initcall(tc_action_init);
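/* tc_action_init() wires the three message types into rtnetlink at
 * subsys_initcall time; only RTM_GETACTION gets a dump callback.  As an
 * illustrative userspace counterpart (iproute2 syntax, assuming the
 * gact action module is available):
 *
 *	tc -s actions ls action gact
 */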