// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <net/net_shaper.h>

#include "shaper_nl_gen.h"

#include "../core/dev.h"

#define NET_SHAPER_SCOPE_SHIFT	26
#define NET_SHAPER_ID_MASK	GENMASK(NET_SHAPER_SCOPE_SHIFT - 1, 0)
#define NET_SHAPER_SCOPE_MASK	GENMASK(31, NET_SHAPER_SCOPE_SHIFT)

#define NET_SHAPER_ID_UNSPEC NET_SHAPER_ID_MASK
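
/* A shaper handle is packed into a single 32-bit xarray index: the scope
 * lives in the top bits (31..26) and the shaper id in the low 26 bits, so
 * NET_SHAPER_ID_UNSPEC is the all-ones id value.
 */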

struct net_shaper_hierarchy {
	struct xarray shapers;
};

struct net_shaper_nl_ctx {
	struct net_shaper_binding binding;
	netdevice_tracker dev_tracker;
	unsigned long start_index;
};

static struct net_shaper_binding *net_shaper_binding_from_ctx(void *ctx)
{
	return &((struct net_shaper_nl_ctx *)ctx)->binding;
}

static void net_shaper_lock(struct net_shaper_binding *binding)
{
	switch (binding->type) {
	case NET_SHAPER_BINDING_TYPE_NETDEV:
		mutex_lock(&binding->netdev->lock);
		break;
	}
}

static void net_shaper_unlock(struct net_shaper_binding *binding)
{
	switch (binding->type) {
	case NET_SHAPER_BINDING_TYPE_NETDEV:
		mutex_unlock(&binding->netdev->lock);
		break;
	}
}

static struct net_shaper_hierarchy *
net_shaper_hierarchy(struct net_shaper_binding *binding)
{
	/* Pairs with WRITE_ONCE() in net_shaper_hierarchy_setup. */
	if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV)
		return READ_ONCE(binding->netdev->net_shaper_hierarchy);

	/* No other type supported yet. */
	return NULL;
}

static const struct net_shaper_ops *
net_shaper_ops(struct net_shaper_binding *binding)
{
	if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV)
		return binding->netdev->netdev_ops->net_shaper_ops;

	/* No other type supported yet. */
	return NULL;
}

/* Count the number of [multi] attributes of the given type. */
static int net_shaper_list_len(struct genl_info *info, int type)
{
	struct nlattr *attr;
	int rem, cnt = 0;

	nla_for_each_attr_type(attr, type, genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem)
		cnt++;
	return cnt;
}

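/* Message size of a handle attribute: a nest carrying the u32 scope and
 * the u32 id.
 */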
static int net_shaper_handle_size(void)
{
	return nla_total_size(nla_total_size(sizeof(u32)) +
			      nla_total_size(sizeof(u32)));
}

static int net_shaper_fill_binding(struct sk_buff *msg,
				   const struct net_shaper_binding *binding,
				   u32 type)
{
	/* Should never happen, as currently only NETDEV is supported. */
	if (WARN_ON_ONCE(binding->type != NET_SHAPER_BINDING_TYPE_NETDEV))
		return -EINVAL;

	if (nla_put_u32(msg, type, binding->netdev->ifindex))
		return -EMSGSIZE;

	return 0;
}

static int net_shaper_fill_handle(struct sk_buff *msg,
				  const struct net_shaper_handle *handle,
				  u32 type)
{
	struct nlattr *handle_attr;

	if (handle->scope == NET_SHAPER_SCOPE_UNSPEC)
		return 0;

	handle_attr = nla_nest_start(msg, type);
	if (!handle_attr)
		return -EMSGSIZE;

	if (nla_put_u32(msg, NET_SHAPER_A_HANDLE_SCOPE, handle->scope) ||
	    (handle->scope >= NET_SHAPER_SCOPE_QUEUE &&
	     nla_put_u32(msg, NET_SHAPER_A_HANDLE_ID, handle->id)))
		goto handle_nest_cancel;

	nla_nest_end(msg, handle_attr);
	return 0;

handle_nest_cancel:
	nla_nest_cancel(msg, handle_attr);
	return -EMSGSIZE;
}

static int
net_shaper_fill_one(struct sk_buff *msg,
		    const struct net_shaper_binding *binding,
		    const struct net_shaper *shaper,
		    const struct genl_info *info)
{
	void *hdr;

	hdr = genlmsg_iput(msg, info);
	if (!hdr)
		return -EMSGSIZE;

	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
	    net_shaper_fill_handle(msg, &shaper->parent,
				   NET_SHAPER_A_PARENT) ||
	    net_shaper_fill_handle(msg, &shaper->handle,
				   NET_SHAPER_A_HANDLE) ||
	    ((shaper->bw_min || shaper->bw_max || shaper->burst) &&
	     nla_put_u32(msg, NET_SHAPER_A_METRIC, shaper->metric)) ||
	    (shaper->bw_min &&
	     nla_put_uint(msg, NET_SHAPER_A_BW_MIN, shaper->bw_min)) ||
	    (shaper->bw_max &&
	     nla_put_uint(msg, NET_SHAPER_A_BW_MAX, shaper->bw_max)) ||
	    (shaper->burst &&
	     nla_put_uint(msg, NET_SHAPER_A_BURST, shaper->burst)) ||
	    (shaper->priority &&
	     nla_put_u32(msg, NET_SHAPER_A_PRIORITY, shaper->priority)) ||
	    (shaper->weight &&
	     nla_put_u32(msg, NET_SHAPER_A_WEIGHT, shaper->weight)))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

/* Initialize the context, fetching the relevant device and
 * acquiring a reference to it.
 */
static int net_shaper_ctx_setup(const struct genl_info *info, int type,
				struct net_shaper_nl_ctx *ctx)
{
	struct net *ns = genl_info_net(info);
	struct net_device *dev;
	int ifindex;

	if (GENL_REQ_ATTR_CHECK(info, type))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[type]);
	dev = netdev_get_by_index(ns, ifindex, &ctx->dev_tracker, GFP_KERNEL);
	if (!dev) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		return -ENOENT;
	}

	if (!dev->netdev_ops->net_shaper_ops) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		netdev_put(dev, &ctx->dev_tracker);
		return -EOPNOTSUPP;
	}

	ctx->binding.type = NET_SHAPER_BINDING_TYPE_NETDEV;
	ctx->binding.netdev = dev;
	return 0;
}

static void net_shaper_ctx_cleanup(struct net_shaper_nl_ctx *ctx)
{
	if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV)
		netdev_put(ctx->binding.netdev, &ctx->dev_tracker);
}

static u32 net_shaper_handle_to_index(const struct net_shaper_handle *handle)
{
	return FIELD_PREP(NET_SHAPER_SCOPE_MASK, handle->scope) |
		FIELD_PREP(NET_SHAPER_ID_MASK, handle->id);
}

static void net_shaper_index_to_handle(u32 index,
				       struct net_shaper_handle *handle)
{
	handle->scope = FIELD_GET(NET_SHAPER_SCOPE_MASK, index);
	handle->id = FIELD_GET(NET_SHAPER_ID_MASK, index);
}
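
/* Example: assuming the uAPI scope enum ordering (UNSPEC = 0, NETDEV = 1,
 * QUEUE = 2, NODE = 3), the handle { NET_SHAPER_SCOPE_QUEUE, 3 } maps to
 * the index (2 << 26) | 3 == 0x08000003, and back.
 */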

static void net_shaper_default_parent(const struct net_shaper_handle *handle,
				      struct net_shaper_handle *parent)
{
	switch (handle->scope) {
	case NET_SHAPER_SCOPE_UNSPEC:
	case NET_SHAPER_SCOPE_NETDEV:
	case __NET_SHAPER_SCOPE_MAX:
		parent->scope = NET_SHAPER_SCOPE_UNSPEC;
		break;

	case NET_SHAPER_SCOPE_QUEUE:
	case NET_SHAPER_SCOPE_NODE:
		parent->scope = NET_SHAPER_SCOPE_NETDEV;
		break;
	}
	parent->id = 0;
}

/*
 * XA_MARK_0 is already in use due to XA_FLAGS_ALLOC; that mark can't be
 * reused, as xa_store() clears it.
 */
#define NET_SHAPER_NOT_VALID XA_MARK_1
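
/* The mark tracks the tentative-insert lifecycle: net_shaper_pre_insert()
 * stores the new entry with the mark set, net_shaper_commit() clears the
 * mark and net_shaper_rollback() erases all still-marked entries; lookups
 * treat marked entries as absent.
 */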

static struct net_shaper *
net_shaper_lookup(struct net_shaper_binding *binding,
		  const struct net_shaper_handle *handle)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	u32 index = net_shaper_handle_to_index(handle);

	if (!hierarchy || xa_get_mark(&hierarchy->shapers, index,
				      NET_SHAPER_NOT_VALID))
		return NULL;

	return xa_load(&hierarchy->shapers, index);
}

/* Allocate on demand the per-device shaper hierarchy container.
 * Called under the net shaper lock.
 */
static struct net_shaper_hierarchy *
net_shaper_hierarchy_setup(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);

	if (hierarchy)
		return hierarchy;

	hierarchy = kmalloc(sizeof(*hierarchy), GFP_KERNEL);
	if (!hierarchy)
		return NULL;

	/* The flag is required for ID allocation. */
	xa_init_flags(&hierarchy->shapers, XA_FLAGS_ALLOC);

	switch (binding->type) {
	case NET_SHAPER_BINDING_TYPE_NETDEV:
		/* Pairs with READ_ONCE in net_shaper_hierarchy. */
		WRITE_ONCE(binding->netdev->net_shaper_hierarchy, hierarchy);
		break;
	}
	return hierarchy;
}

/* Prepare the hierarchy container to actually insert the given shaper,
 * doing the needed allocations in advance.
 */
static int net_shaper_pre_insert(struct net_shaper_binding *binding,
				 struct net_shaper_handle *handle,
				 struct netlink_ext_ack *extack)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *prev, *cur;
	bool id_allocated = false;
	int ret, index;

	if (!hierarchy)
		return -ENOMEM;

	index = net_shaper_handle_to_index(handle);
	cur = xa_load(&hierarchy->shapers, index);
	if (cur)
		return 0;

	/* Allocate a new id, if needed. */
	if (handle->scope == NET_SHAPER_SCOPE_NODE &&
	    handle->id == NET_SHAPER_ID_UNSPEC) {
		u32 min, max;

		handle->id = NET_SHAPER_ID_MASK - 1;
		max = net_shaper_handle_to_index(handle);
		handle->id = 0;
		min = net_shaper_handle_to_index(handle);

		ret = xa_alloc(&hierarchy->shapers, &index, NULL,
			       XA_LIMIT(min, max), GFP_KERNEL);
		if (ret < 0) {
			NL_SET_ERR_MSG(extack, "Can't allocate new id for NODE shaper");
			return ret;
		}

		net_shaper_index_to_handle(index, handle);
		id_allocated = true;
	}

	cur = kzalloc(sizeof(*cur), GFP_KERNEL);
	if (!cur) {
		ret = -ENOMEM;
		goto free_id;
	}

	/* Mark 'tentative' shaper inside the hierarchy container.
	 * xa_set_mark is a no-op if the previous store fails.
	 */
	xa_lock(&hierarchy->shapers);
	prev = __xa_store(&hierarchy->shapers, index, cur, GFP_KERNEL);
	__xa_set_mark(&hierarchy->shapers, index, NET_SHAPER_NOT_VALID);
	xa_unlock(&hierarchy->shapers);
	if (xa_err(prev)) {
		NL_SET_ERR_MSG(extack, "Can't insert shaper into device store");
		kfree_rcu(cur, rcu);
		ret = xa_err(prev);
		goto free_id;
	}
	return 0;

free_id:
	if (id_allocated)
		xa_erase(&hierarchy->shapers, index);
	return ret;
}

/* Commit the tentative insert with the actual values.
 * Must be called only after a successful net_shaper_pre_insert().
 */
static void net_shaper_commit(struct net_shaper_binding *binding,
			      int nr_shapers, const struct net_shaper *shapers)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	int index;
	int i;

	xa_lock(&hierarchy->shapers);
	for (i = 0; i < nr_shapers; ++i) {
		index = net_shaper_handle_to_index(&shapers[i].handle);

		cur = xa_load(&hierarchy->shapers, index);
		if (WARN_ON_ONCE(!cur))
			continue;

		/* Successful update: drop the tentative mark
		 * and update the hierarchy container.
		 */
		__xa_clear_mark(&hierarchy->shapers, index,
				NET_SHAPER_NOT_VALID);
		*cur = shapers[i];
	}
	xa_unlock(&hierarchy->shapers);
}

/* Rollback all the tentative inserts from the hierarchy. */
static void net_shaper_rollback(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	unsigned long index;

	if (!hierarchy)
		return;

	xa_lock(&hierarchy->shapers);
	xa_for_each_marked(&hierarchy->shapers, index, cur,
			   NET_SHAPER_NOT_VALID) {
		__xa_erase(&hierarchy->shapers, index);
		kfree(cur);
	}
	xa_unlock(&hierarchy->shapers);
}

static int net_shaper_parse_handle(const struct nlattr *attr,
				   const struct genl_info *info,
				   struct net_shaper_handle *handle)
{
	struct nlattr *tb[NET_SHAPER_A_HANDLE_MAX + 1];
	struct nlattr *id_attr;
	u32 id = 0;
	int ret;

	ret = nla_parse_nested(tb, NET_SHAPER_A_HANDLE_MAX, attr,
			       net_shaper_handle_nl_policy, info->extack);
	if (ret < 0)
		return ret;

	if (NL_REQ_ATTR_CHECK(info->extack, attr, tb,
			      NET_SHAPER_A_HANDLE_SCOPE))
		return -EINVAL;

	handle->scope = nla_get_u32(tb[NET_SHAPER_A_HANDLE_SCOPE]);

	/* The default id for NODE scope shapers is an invalid one
	 * to help the 'group' operation discriminate between new
	 * NODE shaper creation (ID_UNSPEC) and reuse of existing
	 * shaper (any other value).
	 */
	id_attr = tb[NET_SHAPER_A_HANDLE_ID];
	if (id_attr)
		id = nla_get_u32(id_attr);
	else if (handle->scope == NET_SHAPER_SCOPE_NODE)
		id = NET_SHAPER_ID_UNSPEC;

	handle->id = id;
	return 0;
}

static int net_shaper_validate_caps(struct net_shaper_binding *binding,
				    struct nlattr **tb,
				    const struct genl_info *info,
				    struct net_shaper *shaper)
{
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	struct nlattr *bad = NULL;
	unsigned long caps = 0;

	ops->capabilities(binding, shaper->handle.scope, &caps);

	if (tb[NET_SHAPER_A_PRIORITY] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_PRIORITY)))
		bad = tb[NET_SHAPER_A_PRIORITY];
	if (tb[NET_SHAPER_A_WEIGHT] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_WEIGHT)))
		bad = tb[NET_SHAPER_A_WEIGHT];
	if (tb[NET_SHAPER_A_BW_MIN] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN)))
		bad = tb[NET_SHAPER_A_BW_MIN];
	if (tb[NET_SHAPER_A_BW_MAX] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX)))
		bad = tb[NET_SHAPER_A_BW_MAX];
	if (tb[NET_SHAPER_A_BURST] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BURST)))
		bad = tb[NET_SHAPER_A_BURST];

	if (!caps)
		bad = tb[NET_SHAPER_A_HANDLE];

	if (bad) {
		NL_SET_BAD_ATTR(info->extack, bad);
		return -EOPNOTSUPP;
	}

	if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE &&
	    binding->type == NET_SHAPER_BINDING_TYPE_NETDEV &&
	    shaper->handle.id >= binding->netdev->real_num_tx_queues) {
		NL_SET_ERR_MSG_FMT(info->extack,
				   "Nonexistent queue id %d, max %d",
				   shaper->handle.id,
				   binding->netdev->real_num_tx_queues);
		return -ENOENT;
	}

	/* The metric is really used only if there is *any* rate-related
	 * setting, either in the current attribute set or in pre-existing
	 * values.
	 */
	if (shaper->burst || shaper->bw_min || shaper->bw_max) {
		u32 metric_cap = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS +
				 shaper->metric;

		/* The metric test can fail even when the user did not
		 * specify the METRIC attribute. Pointing to a rate-related
		 * attribute would be confusing, as the attribute itself
		 * could indeed be supported, with a different metric.
		 * Be more specific.
		 */
		if (!(caps & BIT(metric_cap))) {
			NL_SET_ERR_MSG_FMT(info->extack, "Bad metric %d",
					   shaper->metric);
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

static int net_shaper_parse_info(struct net_shaper_binding *binding,
				 struct nlattr **tb,
				 const struct genl_info *info,
				 struct net_shaper *shaper,
				 bool *exists)
{
	struct net_shaper *old;
	int ret;

	/* The shaper handle is the only mandatory attribute. */
	if (NL_REQ_ATTR_CHECK(info->extack, NULL, tb, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	ret = net_shaper_parse_handle(tb[NET_SHAPER_A_HANDLE], info,
				      &shaper->handle);
	if (ret)
		return ret;

	if (shaper->handle.scope == NET_SHAPER_SCOPE_UNSPEC) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	/* Fetch the existing shaper, if any, so that user-provided info
	 * will incrementally update the existing shaper configuration.
	 */
	old = net_shaper_lookup(binding, &shaper->handle);
	if (old)
		*shaper = *old;
	*exists = !!old;

	if (tb[NET_SHAPER_A_METRIC])
		shaper->metric = nla_get_u32(tb[NET_SHAPER_A_METRIC]);

	if (tb[NET_SHAPER_A_BW_MIN])
		shaper->bw_min = nla_get_uint(tb[NET_SHAPER_A_BW_MIN]);

	if (tb[NET_SHAPER_A_BW_MAX])
		shaper->bw_max = nla_get_uint(tb[NET_SHAPER_A_BW_MAX]);

	if (tb[NET_SHAPER_A_BURST])
		shaper->burst = nla_get_uint(tb[NET_SHAPER_A_BURST]);

	if (tb[NET_SHAPER_A_PRIORITY])
		shaper->priority = nla_get_u32(tb[NET_SHAPER_A_PRIORITY]);

	if (tb[NET_SHAPER_A_WEIGHT])
		shaper->weight = nla_get_u32(tb[NET_SHAPER_A_WEIGHT]);

	ret = net_shaper_validate_caps(binding, tb, info, shaper);
	if (ret < 0)
		return ret;

	return 0;
}

static int net_shaper_validate_nesting(struct net_shaper_binding *binding,
				       const struct net_shaper *shaper,
				       struct netlink_ext_ack *extack)
{
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	unsigned long caps = 0;

	ops->capabilities(binding, shaper->handle.scope, &caps);
	if (!(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_NESTING))) {
		NL_SET_ERR_MSG_FMT(extack,
				   "Nesting not supported for scope %d",
				   shaper->handle.scope);
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Fetch the existing leaf and update it with the user-provided
 * attributes.
 */
static int net_shaper_parse_leaf(struct net_shaper_binding *binding,
				 const struct nlattr *attr,
				 const struct genl_info *info,
				 const struct net_shaper *node,
				 struct net_shaper *shaper)
{
	struct nlattr *tb[NET_SHAPER_A_WEIGHT + 1];
	bool exists;
	int ret;

	ret = nla_parse_nested(tb, NET_SHAPER_A_WEIGHT, attr,
			       net_shaper_leaf_info_nl_policy, info->extack);
	if (ret < 0)
		return ret;

	ret = net_shaper_parse_info(binding, tb, info, shaper, &exists);
	if (ret < 0)
		return ret;

	if (shaper->handle.scope != NET_SHAPER_SCOPE_QUEUE) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
		ret = net_shaper_validate_nesting(binding, shaper,
						  info->extack);
		if (ret < 0)
			return ret;
	}

	if (!exists)
		net_shaper_default_parent(&shaper->handle, &shaper->parent);
	return 0;
}

/* Like net_shaper_parse_info(), but additionally allows the user to
 * specify the shaper's parent handle.
 */
static int net_shaper_parse_node(struct net_shaper_binding *binding,
				 struct nlattr **tb,
				 const struct genl_info *info,
				 struct net_shaper *shaper)
{
	bool exists;
	int ret;

	ret = net_shaper_parse_info(binding, tb, info, shaper, &exists);
	if (ret)
		return ret;

	if (shaper->handle.scope != NET_SHAPER_SCOPE_NODE &&
	    shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	if (tb[NET_SHAPER_A_PARENT]) {
		ret = net_shaper_parse_handle(tb[NET_SHAPER_A_PARENT], info,
					      &shaper->parent);
		if (ret)
			return ret;

		if (shaper->parent.scope != NET_SHAPER_SCOPE_NODE &&
		    shaper->parent.scope != NET_SHAPER_SCOPE_NETDEV) {
			NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_PARENT]);
			return -EINVAL;
		}
	}
	return 0;
}

static int net_shaper_generic_pre(struct genl_info *info, int type)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)info->ctx;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx));

	return net_shaper_ctx_setup(info, type, ctx);
}

int net_shaper_nl_pre_doit(const struct genl_split_ops *ops,
			   struct sk_buff *skb, struct genl_info *info)
{
	return net_shaper_generic_pre(info, NET_SHAPER_A_IFINDEX);
}

static void net_shaper_generic_post(struct genl_info *info)
{
	net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)info->ctx);
}

void net_shaper_nl_post_doit(const struct genl_split_ops *ops,
			     struct sk_buff *skb, struct genl_info *info)
{
	net_shaper_generic_post(info);
}

int net_shaper_nl_pre_dumpit(struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);

	return net_shaper_ctx_setup(info, NET_SHAPER_A_IFINDEX, ctx);
}

int net_shaper_nl_post_dumpit(struct netlink_callback *cb)
{
	net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)cb->ctx);
	return 0;
}

int net_shaper_nl_cap_pre_doit(const struct genl_split_ops *ops,
			       struct sk_buff *skb, struct genl_info *info)
{
	return net_shaper_generic_pre(info, NET_SHAPER_A_CAPS_IFINDEX);
}

void net_shaper_nl_cap_post_doit(const struct genl_split_ops *ops,
				 struct sk_buff *skb, struct genl_info *info)
{
	net_shaper_generic_post(info);
}

int net_shaper_nl_cap_pre_dumpit(struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;

	return net_shaper_ctx_setup(genl_info_dump(cb),
				    NET_SHAPER_A_CAPS_IFINDEX, ctx);
}

int net_shaper_nl_cap_post_dumpit(struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;

	net_shaper_ctx_cleanup(ctx);
	return 0;
}

int net_shaper_nl_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_binding *binding;
	struct net_shaper_handle handle;
	struct net_shaper *shaper;
	struct sk_buff *msg;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);
	ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
				      &handle);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rcu_read_lock();
	shaper = net_shaper_lookup(binding, &handle);
	if (!shaper) {
		NL_SET_BAD_ATTR(info->extack,
				info->attrs[NET_SHAPER_A_HANDLE]);
		rcu_read_unlock();
		ret = -ENOENT;
		goto free_msg;
	}

	ret = net_shaper_fill_one(msg, binding, shaper, info);
	rcu_read_unlock();
	if (ret)
		goto free_msg;

	ret = genlmsg_reply(msg, info);
	if (ret)
		goto free_msg;

	return 0;

free_msg:
	nlmsg_free(msg);
	return ret;
}

int net_shaper_nl_get_dumpit(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	struct net_shaper *shaper;
	int ret = 0;

	/* Don't error out dumps performed before any set operation. */
	binding = net_shaper_binding_from_ctx(ctx);
	hierarchy = net_shaper_hierarchy(binding);
	if (!hierarchy)
		return 0;

	rcu_read_lock();
	for (; (shaper = xa_find(&hierarchy->shapers, &ctx->start_index,
				 U32_MAX, XA_PRESENT)); ctx->start_index++) {
		ret = net_shaper_fill_one(skb, binding, shaper, info);
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

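/* The 'set' operation follows a transactional pattern: parse the
 * attributes, stage the shaper via net_shaper_pre_insert(), invoke the
 * device ops->set() and then either commit or roll back the staged entry.
 */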
int net_shaper_nl_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	const struct net_shaper_ops *ops;
	struct net_shaper_handle handle;
	struct net_shaper shaper = {};
	bool exists;
	int ret;

	binding = net_shaper_binding_from_ctx(info->ctx);

	net_shaper_lock(binding);
	ret = net_shaper_parse_info(binding, info->attrs, info, &shaper,
				    &exists);
	if (ret)
		goto unlock;

	if (!exists)
		net_shaper_default_parent(&shaper.handle, &shaper.parent);

	hierarchy = net_shaper_hierarchy_setup(binding);
	if (!hierarchy) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* The 'set' operation can't create node-scope shapers. */
	handle = shaper.handle;
	if (handle.scope == NET_SHAPER_SCOPE_NODE &&
	    !net_shaper_lookup(binding, &handle)) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = net_shaper_pre_insert(binding, &handle, info->extack);
	if (ret)
		goto unlock;

	ops = net_shaper_ops(binding);
	ret = ops->set(binding, &shaper, info->extack);
	if (ret) {
		net_shaper_rollback(binding);
		goto unlock;
	}

	net_shaper_commit(binding, 1, &shaper);

unlock:
	net_shaper_unlock(binding);
	return ret;
}

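/* Remove the given shaper from the hierarchy and notify the device; walk
 * up the parent chain, deleting any NODE scope shaper left without leaves.
 */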
static int __net_shaper_delete(struct net_shaper_binding *binding,
			       struct net_shaper *shaper,
			       struct netlink_ext_ack *extack)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper_handle parent_handle, handle = shaper->handle;
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	int ret;

again:
	parent_handle = shaper->parent;

	ret = ops->delete(binding, &handle, extack);
	if (ret < 0)
		return ret;

	xa_erase(&hierarchy->shapers, net_shaper_handle_to_index(&handle));
	kfree_rcu(shaper, rcu);

	/* Also delete the parent, if it has been left with no leaves. */
	if (parent_handle.scope == NET_SHAPER_SCOPE_NODE) {
		shaper = net_shaper_lookup(binding, &parent_handle);
		if (shaper && !--shaper->leaves) {
			handle = parent_handle;
			goto again;
		}
	}
	return 0;
}

static int net_shaper_handle_cmp(const struct net_shaper_handle *a,
				 const struct net_shaper_handle *b)
{
	/* Must avoid holes in struct net_shaper_handle. */
	BUILD_BUG_ON(sizeof(*a) != 8);

	return memcmp(a, b, sizeof(*a));
}

static int net_shaper_parent_from_leaves(int leaves_count,
					 const struct net_shaper *leaves,
					 struct net_shaper *node,
					 struct netlink_ext_ack *extack)
{
	struct net_shaper_handle parent = leaves[0].parent;
	int i;

	for (i = 1; i < leaves_count; ++i) {
		if (net_shaper_handle_cmp(&leaves[i].parent, &parent)) {
			NL_SET_ERR_MSG_FMT(extack, "All the leaf shapers must have the same old parent");
			return -EINVAL;
		}
	}

	node->parent = parent;
	return 0;
}

static int __net_shaper_group(struct net_shaper_binding *binding,
			      bool update_node, int leaves_count,
			      struct net_shaper *leaves,
			      struct net_shaper *node,
			      struct netlink_ext_ack *extack)
{
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	struct net_shaper_handle leaf_handle;
	struct net_shaper *parent = NULL;
	bool new_node = false;
	int i, ret;

	if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
		new_node = node->handle.id == NET_SHAPER_ID_UNSPEC;

		if (!new_node && !net_shaper_lookup(binding, &node->handle)) {
			/* The related attribute is not available when
			 * reaching here from the delete() op.
			 */
			NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exist",
					   node->handle.scope, node->handle.id);
			return -ENOENT;
		}

		/* When unspecified, the node parent scope is inherited from
		 * the leaves.
		 */
		if (node->parent.scope == NET_SHAPER_SCOPE_UNSPEC) {
			ret = net_shaper_parent_from_leaves(leaves_count,
							    leaves, node,
							    extack);
			if (ret)
				return ret;
		}

	} else {
		net_shaper_default_parent(&node->handle, &node->parent);
	}

	if (node->parent.scope == NET_SHAPER_SCOPE_NODE) {
		parent = net_shaper_lookup(binding, &node->parent);
		if (!parent) {
			NL_SET_ERR_MSG_FMT(extack, "Node parent shaper %d:%d does not exist",
					   node->parent.scope, node->parent.id);
			return -ENOENT;
		}

		ret = net_shaper_validate_nesting(binding, node, extack);
		if (ret < 0)
			return ret;
	}

	if (update_node) {
		/* For a newly created node-scope shaper, the following
		 * updates the handle, due to id allocation.
		 */
		ret = net_shaper_pre_insert(binding, &node->handle, extack);
		if (ret)
			return ret;
	}

	for (i = 0; i < leaves_count; ++i) {
		leaf_handle = leaves[i].handle;

		ret = net_shaper_pre_insert(binding, &leaf_handle, extack);
		if (ret)
			goto rollback;

		if (!net_shaper_handle_cmp(&leaves[i].parent, &node->handle))
			continue;

		/* The leaf shapers will be nested under the node; update
		 * the linking accordingly.
		 */
		leaves[i].parent = node->handle;
		node->leaves++;
	}

	ret = ops->group(binding, leaves_count, leaves, node, extack);
	if (ret < 0)
		goto rollback;

	/* The node's parent gains a new leaf only when the node itself
	 * is created by this group operation.
	 */
	if (new_node && parent)
		parent->leaves++;
	if (update_node)
		net_shaper_commit(binding, 1, node);
	net_shaper_commit(binding, leaves_count, leaves);
	return 0;

rollback:
	net_shaper_rollback(binding);
	return ret;
}

static int net_shaper_pre_del_node(struct net_shaper_binding *binding,
				   const struct net_shaper *shaper,
				   struct netlink_ext_ack *extack)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur, *leaves, node = {};
	int ret, leaves_count = 0;
	unsigned long index;
	bool update_node;

	if (!shaper->leaves)
		return 0;

	/* Fetch the new node information. */
	node.handle = shaper->parent;
	cur = net_shaper_lookup(binding, &node.handle);
	if (cur) {
		node = *cur;
	} else {
		/* A NODE scope shaper can be nested only under the NETDEV
		 * scope shaper without creating the latter; this check can
		 * fail only if the data is in an inconsistent state.
		 */
		if (WARN_ON_ONCE(node.handle.scope != NET_SHAPER_SCOPE_NETDEV))
			return -EINVAL;
	}

	leaves = kcalloc(shaper->leaves, sizeof(struct net_shaper),
			 GFP_KERNEL);
	if (!leaves)
		return -ENOMEM;

	/* Build the leaves array. */
	xa_for_each(&hierarchy->shapers, index, cur) {
		if (net_shaper_handle_cmp(&cur->parent, &shaper->handle))
			continue;

		if (WARN_ON_ONCE(leaves_count == shaper->leaves)) {
			ret = -EINVAL;
			goto free;
		}

		leaves[leaves_count++] = *cur;
	}

	/* When re-linking to the netdev shaper, avoid the implicit creation
	 * of a new node: that would be surprising, since the user is
	 * performing a delete operation.
	 */
	update_node = node.handle.scope != NET_SHAPER_SCOPE_NETDEV;
	ret = __net_shaper_group(binding, update_node, leaves_count,
				 leaves, &node, extack);

free:
	kfree(leaves);
	return ret;
}

int net_shaper_nl_delete_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	struct net_shaper_handle handle;
	struct net_shaper *shaper;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);

	net_shaper_lock(binding);
	ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
				      &handle);
	if (ret)
		goto unlock;

	hierarchy = net_shaper_hierarchy(binding);
	if (!hierarchy) {
		ret = -ENOENT;
		goto unlock;
	}

	shaper = net_shaper_lookup(binding, &handle);
	if (!shaper) {
		ret = -ENOENT;
		goto unlock;
	}

	if (handle.scope == NET_SHAPER_SCOPE_NODE) {
		ret = net_shaper_pre_del_node(binding, shaper, info->extack);
		if (ret)
			goto unlock;
	}

	ret = __net_shaper_delete(binding, shaper, info->extack);

unlock:
	net_shaper_unlock(binding);
	return ret;
}

static int net_shaper_group_send_reply(struct net_shaper_binding *binding,
				       const struct net_shaper_handle *handle,
				       struct genl_info *info,
				       struct sk_buff *msg)
{
	void *hdr;

	hdr = genlmsg_iput(msg, info);
	if (!hdr)
		goto free_msg;

	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
	    net_shaper_fill_handle(msg, handle, NET_SHAPER_A_HANDLE))
		goto free_msg;

	genlmsg_end(msg, hdr);

	return genlmsg_reply(msg, info);

free_msg:
	/* Should never happen as msg is pre-allocated with enough space. */
	WARN_ONCE(true, "calculated message payload length (%d)",
		  net_shaper_handle_size());
	nlmsg_free(msg);
	return -EMSGSIZE;
}

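/* The 'group' operation re-parents a batch of leaf shapers under a node
 * as a single transaction: parse the node and the leaves, stage every
 * insert, invoke the device ops->group() and commit or roll back as a
 * whole.
 */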
int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper **old_nodes, *leaves, node = {};
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	int i, ret, rem, leaves_count;
	int old_nodes_count = 0;
	struct sk_buff *msg;
	struct nlattr *attr;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_LEAVES))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);

	/* The group operation is optional. */
	if (!net_shaper_ops(binding)->group)
		return -EOPNOTSUPP;

	net_shaper_lock(binding);
	leaves_count = net_shaper_list_len(info, NET_SHAPER_A_LEAVES);
	if (!leaves_count) {
		NL_SET_BAD_ATTR(info->extack,
				info->attrs[NET_SHAPER_A_LEAVES]);
		ret = -EINVAL;
		goto unlock;
	}

	leaves = kcalloc(leaves_count, sizeof(struct net_shaper) +
			 sizeof(struct net_shaper *), GFP_KERNEL);
	if (!leaves) {
		ret = -ENOMEM;
		goto unlock;
	}
	old_nodes = (void *)&leaves[leaves_count];

	ret = net_shaper_parse_node(binding, info->attrs, info, &node);
	if (ret)
		goto free_leaves;

	i = 0;
	nla_for_each_attr_type(attr, NET_SHAPER_A_LEAVES,
			       genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem) {
		if (WARN_ON_ONCE(i >= leaves_count)) {
			ret = -EINVAL;
			goto free_leaves;
		}

		ret = net_shaper_parse_leaf(binding, attr, info,
					    &node, &leaves[i]);
		if (ret)
			goto free_leaves;
		i++;
	}

	/* Prepare the msg reply in advance, to avoid device operation
	 * rollback on allocation failure.
	 */
	msg = genlmsg_new(net_shaper_handle_size(), GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto free_leaves;
	}

	hierarchy = net_shaper_hierarchy_setup(binding);
	if (!hierarchy) {
		ret = -ENOMEM;
		goto free_msg;
	}

	/* Record the node shapers that this group() operation can make
	 * childless for later cleanup.
	 */
	for (i = 0; i < leaves_count; i++) {
		if (leaves[i].parent.scope == NET_SHAPER_SCOPE_NODE &&
		    net_shaper_handle_cmp(&leaves[i].parent, &node.handle)) {
			struct net_shaper *tmp;

			tmp = net_shaper_lookup(binding, &leaves[i].parent);
			if (!tmp)
				continue;

			old_nodes[old_nodes_count++] = tmp;
		}
	}

	ret = __net_shaper_group(binding, true, leaves_count, leaves, &node,
				 info->extack);
	if (ret)
		goto free_msg;

	/* Check if we need to delete any node left alone by the new leaves
	 * linkage.
	 */
	for (i = 0; i < old_nodes_count; ++i) {
		struct net_shaper *tmp = old_nodes[i];

		if (--tmp->leaves > 0)
			continue;

		/* Errors here are not fatal: the grouping operation is
		 * completed, and user-space can still explicitly clean up
		 * left-over nodes.
		 */
		__net_shaper_delete(binding, tmp, info->extack);
	}

	ret = net_shaper_group_send_reply(binding, &node.handle, info, msg);
	if (ret)
		GENL_SET_ERR_MSG_FMT(info, "Can't send reply");

free_leaves:
	kfree(leaves);

unlock:
	net_shaper_unlock(binding);
	return ret;

free_msg:
	kfree_skb(msg);
	goto free_leaves;
}

static int
net_shaper_cap_fill_one(struct sk_buff *msg,
			struct net_shaper_binding *binding,
			enum net_shaper_scope scope, unsigned long flags,
			const struct genl_info *info)
{
	unsigned long cur;
	void *hdr;

	hdr = genlmsg_iput(msg, info);
	if (!hdr)
		return -EMSGSIZE;

	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_CAPS_IFINDEX) ||
	    nla_put_u32(msg, NET_SHAPER_A_CAPS_SCOPE, scope))
		goto nla_put_failure;

	for (cur = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS;
	     cur <= NET_SHAPER_A_CAPS_MAX; ++cur) {
		if (flags & BIT(cur) && nla_put_flag(msg, cur))
			goto nla_put_failure;
	}

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

int net_shaper_nl_cap_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_binding *binding;
	const struct net_shaper_ops *ops;
	enum net_shaper_scope scope;
	unsigned long flags = 0;
	struct sk_buff *msg;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_CAPS_SCOPE))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);
	scope = nla_get_u32(info->attrs[NET_SHAPER_A_CAPS_SCOPE]);
	ops = net_shaper_ops(binding);
	ops->capabilities(binding, scope, &flags);
	if (!flags)
		return -EOPNOTSUPP;

	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	ret = net_shaper_cap_fill_one(msg, binding, scope, flags, info);
	if (ret)
		goto free_msg;

	ret = genlmsg_reply(msg, info);
	if (ret)
		goto free_msg;
	return 0;

free_msg:
	nlmsg_free(msg);
	return ret;
}

int net_shaper_nl_cap_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	const struct genl_info *info = genl_info_dump(cb);
	struct net_shaper_binding *binding;
	const struct net_shaper_ops *ops;
	enum net_shaper_scope scope;
	int ret;

	binding = net_shaper_binding_from_ctx(cb->ctx);
	ops = net_shaper_ops(binding);
	for (scope = 0; scope <= NET_SHAPER_SCOPE_MAX; ++scope) {
		unsigned long flags = 0;

		ops->capabilities(binding, scope, &flags);
		if (!flags)
			continue;

		ret = net_shaper_cap_fill_one(skb, binding, scope, flags,
					      info);
		if (ret)
			return ret;
	}

	return 0;
}

static void net_shaper_flush(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	unsigned long index;

	if (!hierarchy)
		return;

	net_shaper_lock(binding);
	xa_lock(&hierarchy->shapers);
	xa_for_each(&hierarchy->shapers, index, cur) {
		__xa_erase(&hierarchy->shapers, index);
		kfree(cur);
	}
	xa_unlock(&hierarchy->shapers);
	net_shaper_unlock(binding);

	kfree(hierarchy);
}

void net_shaper_flush_netdev(struct net_device *dev)
{
	struct net_shaper_binding binding = {
		.type = NET_SHAPER_BINDING_TYPE_NETDEV,
		.netdev = dev,
	};

	net_shaper_flush(&binding);
}

void net_shaper_set_real_num_tx_queues(struct net_device *dev,
				       unsigned int txq)
{
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding binding;
	int i;

	binding.type = NET_SHAPER_BINDING_TYPE_NETDEV;
	binding.netdev = dev;
	hierarchy = net_shaper_hierarchy(&binding);
	if (!hierarchy)
		return;

	/* Only drivers implementing shaper support ensure
	 * the lock is acquired in advance.
	 */
	lockdep_assert_held(&dev->lock);

	/* Take action only when decreasing the tx queue number. */
	for (i = txq; i < dev->real_num_tx_queues; ++i) {
		struct net_shaper_handle handle, parent_handle;
		struct net_shaper *shaper;
		u32 index;

		handle.scope = NET_SHAPER_SCOPE_QUEUE;
		handle.id = i;
		shaper = net_shaper_lookup(&binding, &handle);
		if (!shaper)
			continue;

		/* Don't touch the H/W for the queue shaper; the driver has
		 * already deleted the queue and related resources.
		 */
		parent_handle = shaper->parent;
		index = net_shaper_handle_to_index(&handle);
		xa_erase(&hierarchy->shapers, index);
		kfree_rcu(shaper, rcu);

		/* The recursion on parent does the full job. */
		if (parent_handle.scope != NET_SHAPER_SCOPE_NODE)
			continue;

		shaper = net_shaper_lookup(&binding, &parent_handle);
		if (shaper && !--shaper->leaves)
			__net_shaper_delete(&binding, shaper, NULL);
	}
}

static int __init shaper_init(void)
{
	return genl_register_family(&net_shaper_nl_family);
}

subsys_initcall(shaper_init);