net/ipv4/nexthop.c (Linux v5.14.15)
   1// SPDX-License-Identifier: GPL-2.0
   2/* Generic nexthop implementation
   3 *
   4 * Copyright (c) 2017-19 Cumulus Networks
   5 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
   6 */
   7
   8#include <linux/nexthop.h>
   9#include <linux/rtnetlink.h>
  10#include <linux/slab.h>
  11#include <net/arp.h>
  12#include <net/ipv6_stubs.h>
  13#include <net/lwtunnel.h>
  14#include <net/ndisc.h>
  15#include <net/nexthop.h>
  16#include <net/route.h>
  17#include <net/sock.h>
  18
  19#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
  20#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */
  21
  22static void remove_nexthop(struct net *net, struct nexthop *nh,
  23			   struct nl_info *nlinfo);
  24
  25#define NH_DEV_HASHBITS  8
  26#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
  27
  28static const struct nla_policy rtm_nh_policy_new[] = {
  29	[NHA_ID]		= { .type = NLA_U32 },
  30	[NHA_GROUP]		= { .type = NLA_BINARY },
  31	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
  32	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
  33	[NHA_OIF]		= { .type = NLA_U32 },
  34	[NHA_GATEWAY]		= { .type = NLA_BINARY },
  35	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
  36	[NHA_ENCAP]		= { .type = NLA_NESTED },
  37	[NHA_FDB]		= { .type = NLA_FLAG },
  38	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
  39};
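
/* Illustrative sketch (not from the kernel tree): RTM_NEWNEXTHOP requests
 * validated by the policy above are normally generated with iproute2,
 * for example:
 *
 *	ip nexthop add id 1 via 192.0.2.1 dev eth0   (NHA_GATEWAY, NHA_OIF)
 *	ip nexthop add id 2 blackhole                (NHA_BLACKHOLE)
 *	ip nexthop add id 3 group 1/2                (NHA_GROUP)
 */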
  40
  41static const struct nla_policy rtm_nh_policy_get[] = {
  42	[NHA_ID]		= { .type = NLA_U32 },
  43};
  44
  45static const struct nla_policy rtm_nh_policy_dump[] = {
  46	[NHA_OIF]		= { .type = NLA_U32 },
  47	[NHA_GROUPS]		= { .type = NLA_FLAG },
  48	[NHA_MASTER]		= { .type = NLA_U32 },
  49	[NHA_FDB]		= { .type = NLA_FLAG },
  50};
  51
  52static const struct nla_policy rtm_nh_res_policy_new[] = {
  53	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
  54	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
  55	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
  56};
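
/* Illustrative sketch: the attributes above map to iproute2 parameters of
 * the same name, assuming an 'ip' binary with resilient nexthop support:
 *
 *	ip nexthop add id 10 group 1/2 type resilient buckets 32 idle_timer 10
 */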
  57
  58static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
  59	[NHA_ID]		= { .type = NLA_U32 },
  60	[NHA_OIF]		= { .type = NLA_U32 },
  61	[NHA_MASTER]		= { .type = NLA_U32 },
  62	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
  63};
  64
  65static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
  66	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
  67};
  68
  69static const struct nla_policy rtm_nh_policy_get_bucket[] = {
  70	[NHA_ID]		= { .type = NLA_U32 },
  71	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
  72};
  73
  74static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
  75	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
  76};
  77
  78static bool nexthop_notifiers_is_empty(struct net *net)
  79{
  80	return !net->nexthop.notifier_chain.head;
  81}
  82
  83static void
  84__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
  85			       const struct nh_info *nhi)
  86{
  87	nh_info->dev = nhi->fib_nhc.nhc_dev;
  88	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
  89	if (nh_info->gw_family == AF_INET)
  90		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
  91	else if (nh_info->gw_family == AF_INET6)
  92		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;
  93
  94	nh_info->is_reject = nhi->reject_nh;
  95	nh_info->is_fdb = nhi->fdb_nh;
  96	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
  97}
  98
  99static int nh_notifier_single_info_init(struct nh_notifier_info *info,
 100					const struct nexthop *nh)
 101{
 102	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
 103
 104	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
 105	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
 106	if (!info->nh)
 107		return -ENOMEM;
 108
 109	__nh_notifier_single_info_init(info->nh, nhi);
 110
 111	return 0;
 112}
 113
 114static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
 115{
 116	kfree(info->nh);
 117}
 118
 119static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
 120				       struct nh_group *nhg)
 121{
 122	u16 num_nh = nhg->num_nh;
 123	int i;
 124
 125	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
 126	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
 127			       GFP_KERNEL);
 128	if (!info->nh_grp)
 129		return -ENOMEM;
 130
 131	info->nh_grp->num_nh = num_nh;
 132	info->nh_grp->is_fdb = nhg->fdb_nh;
 133
 134	for (i = 0; i < num_nh; i++) {
 135		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
 136		struct nh_info *nhi;
 137
 138		nhi = rtnl_dereference(nhge->nh->nh_info);
 139		info->nh_grp->nh_entries[i].id = nhge->nh->id;
 140		info->nh_grp->nh_entries[i].weight = nhge->weight;
 141		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
 142					       nhi);
 143	}
 144
 145	return 0;
 146}
 147
 148static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
 149					   struct nh_group *nhg)
 150{
 151	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
 152	u16 num_nh_buckets = res_table->num_nh_buckets;
 153	unsigned long size;
 154	u16 i;
 155
 156	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
 157	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
 158	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
 159				       __GFP_NOWARN);
 160	if (!info->nh_res_table)
 161		return -ENOMEM;
 162
 163	info->nh_res_table->num_nh_buckets = num_nh_buckets;
 164
 165	for (i = 0; i < num_nh_buckets; i++) {
 166		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
 167		struct nh_grp_entry *nhge;
 168		struct nh_info *nhi;
 169
 170		nhge = rtnl_dereference(bucket->nh_entry);
 171		nhi = rtnl_dereference(nhge->nh->nh_info);
 172		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
 173					       nhi);
 174	}
 175
 176	return 0;
 177}
 178
 179static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
 180				     const struct nexthop *nh)
 181{
 182	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
 183
 184	if (nhg->hash_threshold)
 185		return nh_notifier_mpath_info_init(info, nhg);
 186	else if (nhg->resilient)
 187		return nh_notifier_res_table_info_init(info, nhg);
 188	return -EINVAL;
 189}
 190
 191static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
 192				      const struct nexthop *nh)
 193{
 194	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
 195
 196	if (nhg->hash_threshold)
 197		kfree(info->nh_grp);
 198	else if (nhg->resilient)
 199		vfree(info->nh_res_table);
 200}
 201
 202static int nh_notifier_info_init(struct nh_notifier_info *info,
 203				 const struct nexthop *nh)
 204{
 205	info->id = nh->id;
 206
 207	if (nh->is_group)
 208		return nh_notifier_grp_info_init(info, nh);
 209	else
 210		return nh_notifier_single_info_init(info, nh);
 211}
 212
 213static void nh_notifier_info_fini(struct nh_notifier_info *info,
 214				  const struct nexthop *nh)
 215{
 216	if (nh->is_group)
 217		nh_notifier_grp_info_fini(info, nh);
 218	else
 219		nh_notifier_single_info_fini(info);
 220}
 221
 222static int call_nexthop_notifiers(struct net *net,
 223				  enum nexthop_event_type event_type,
 224				  struct nexthop *nh,
 225				  struct netlink_ext_ack *extack)
 226{
 227	struct nh_notifier_info info = {
 228		.net = net,
 229		.extack = extack,
 230	};
 231	int err;
 232
 233	ASSERT_RTNL();
 234
 235	if (nexthop_notifiers_is_empty(net))
 236		return 0;
 237
 238	err = nh_notifier_info_init(&info, nh);
 239	if (err) {
 240		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
 241		return err;
 242	}
 243
 244	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
 245					   event_type, &info);
 246	nh_notifier_info_fini(&info, nh);
 247
 248	return notifier_to_errno(err);
 249}
 250
 251static int
 252nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
 253				      bool force, unsigned int *p_idle_timer_ms)
 254{
 255	struct nh_res_table *res_table;
 256	struct nh_group *nhg;
 257	struct nexthop *nh;
 258	int err = 0;
 259
 260	/* When 'force' is false, nexthop bucket replacement is performed
 261	 * because the bucket was deemed to be idle. In this case, capable
 262	 * listeners can choose to perform an atomic replacement: The bucket is
 263	 * only replaced if it is inactive. However, if the idle timer interval
 264	 * is smaller than the interval in which a listener is querying
 265	 * buckets' activity from the device, then atomic replacement should
 266	 * not be tried. Pass the idle timer value to listeners, so that they
 267	 * could determine which type of replacement to perform.
 268	 */
 269	if (force) {
 270		*p_idle_timer_ms = 0;
 271		return 0;
 272	}
 273
 274	rcu_read_lock();
 275
 276	nh = nexthop_find_by_id(info->net, info->id);
 277	if (!nh) {
 278		err = -EINVAL;
 279		goto out;
 280	}
 281
 282	nhg = rcu_dereference(nh->nh_grp);
 283	res_table = rcu_dereference(nhg->res_table);
 284	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);
 285
 286out:
 287	rcu_read_unlock();
 288
 289	return err;
 290}
 291
 292static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
 293					    u16 bucket_index, bool force,
 294					    struct nh_info *oldi,
 295					    struct nh_info *newi)
 296{
 297	unsigned int idle_timer_ms;
 298	int err;
 299
 300	err = nh_notifier_res_bucket_idle_timer_get(info, force,
 301						    &idle_timer_ms);
 302	if (err)
 303		return err;
 304
 305	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
 306	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
 307				      GFP_KERNEL);
 308	if (!info->nh_res_bucket)
 309		return -ENOMEM;
 310
 311	info->nh_res_bucket->bucket_index = bucket_index;
 312	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
 313	info->nh_res_bucket->force = force;
 314	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
 315	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
 316	return 0;
 317}
 318
 319static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
 320{
 321	kfree(info->nh_res_bucket);
 322}
 323
 324static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
 325					       u16 bucket_index, bool force,
 326					       struct nh_info *oldi,
 327					       struct nh_info *newi,
 328					       struct netlink_ext_ack *extack)
 329{
 330	struct nh_notifier_info info = {
 331		.net = net,
 332		.extack = extack,
 333		.id = nhg_id,
 334	};
 335	int err;
 336
 337	if (nexthop_notifiers_is_empty(net))
 338		return 0;
 339
 340	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
 341					       oldi, newi);
 342	if (err)
 343		return err;
 344
 345	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
 346					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
 347	nh_notifier_res_bucket_info_fini(&info);
 348
 349	return notifier_to_errno(err);
 350}
 351
 352/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 353 *
 354 * 1) a collection of callbacks for NH maintenance. This operates under
 355 *    RTNL,
 356 * 2) the delayed work that gradually balances the resilient table,
 357 * 3) and nexthop_select_path(), operating under RCU.
 358 *
 359 * Both the delayed work and the RTNL block are writers, and need to
 360 * maintain mutual exclusion. Since there are only two and well-known
 361 * writers for each table, the RTNL code can make sure it has exclusive
 362 * access thus:
 363 *
 364 * - Have the DW operate without locking;
 365 * - synchronously cancel the DW;
 366 * - do the writing;
 367 * - if the write was not actually a delete, call upkeep, which schedules
 368 *   DW again if necessary.
 369 *
 370 * The functions that are always called from the RTNL context use
 371 * rtnl_dereference(). The functions that can also be called from the DW do
 372 * a raw dereference and rely on the above mutual exclusion scheme.
 373 */
 374#define nh_res_dereference(p) (rcu_dereference_raw(p))
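
/* A minimal sketch of the RTNL-side write sequence described above, using
 * helpers defined later in this file (replace_nexthop_grp_res() follows
 * this exact pattern):
 *
 *	nh_res_table_cancel_upkeep(res_table);       (synchronously cancel DW)
 *	... modify the table under RTNL ...          (do the writing)
 *	nh_res_table_upkeep(res_table, true, false); (reschedule if unbalanced)
 */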
 375
 376static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
 377					     u16 bucket_index, bool force,
 378					     struct nexthop *old_nh,
 379					     struct nexthop *new_nh,
 380					     struct netlink_ext_ack *extack)
 381{
 382	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
 383	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);
 384
 385	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
 386						   force, oldi, newi, extack);
 387}
 388
 389static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
 390					    struct netlink_ext_ack *extack)
 391{
 392	struct nh_notifier_info info = {
 393		.net = net,
 394		.extack = extack,
 395	};
 396	struct nh_group *nhg;
 397	int err;
 398
 399	ASSERT_RTNL();
 400
 401	if (nexthop_notifiers_is_empty(net))
 402		return 0;
 403
 404	/* At this point, the nexthop buckets are still not populated. Only
 405	 * emit a notification with the logical nexthops, so that a listener
 406	 * could potentially veto it in case of unsupported configuration.
 407	 */
 408	nhg = rtnl_dereference(nh->nh_grp);
 409	err = nh_notifier_mpath_info_init(&info, nhg);
 410	if (err) {
 411		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
 412		return err;
 413	}
 414
 415	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
 416					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
 417					   &info);
 418	kfree(info.nh_grp);
 419
 420	return notifier_to_errno(err);
 421}
 422
 423static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
 424				 enum nexthop_event_type event_type,
 425				 struct nexthop *nh,
 426				 struct netlink_ext_ack *extack)
 427{
 428	struct nh_notifier_info info = {
 429		.net = net,
 430		.extack = extack,
 431	};
 432	int err;
 433
 434	err = nh_notifier_info_init(&info, nh);
 435	if (err)
 436		return err;
 437
 438	err = nb->notifier_call(nb, event_type, &info);
 439	nh_notifier_info_fini(&info, nh);
 440
 441	return notifier_to_errno(err);
 442}
 443
 444static unsigned int nh_dev_hashfn(unsigned int val)
 445{
 446	unsigned int mask = NH_DEV_HASHSIZE - 1;
 447
 448	return (val ^
 449		(val >> NH_DEV_HASHBITS) ^
 450		(val >> (NH_DEV_HASHBITS * 2))) & mask;
 451}
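
/* Worked example: nh_dev_hashfn() XORs every NH_DEV_HASHBITS-wide slice of
 * the ifindex into an 8-bit devhash index. For ifindex 0x0102:
 *
 *	(0x0102 ^ (0x0102 >> 8) ^ (0x0102 >> 16)) & 0xff
 *	    = (0x0102 ^ 0x0001 ^ 0x0000) & 0xff = 0x03
 */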
 452
 453static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
 454{
 455	struct net_device *dev = nhi->fib_nhc.nhc_dev;
 456	struct hlist_head *head;
 457	unsigned int hash;
 458
 459	WARN_ON(!dev);
 460
 461	hash = nh_dev_hashfn(dev->ifindex);
 462	head = &net->nexthop.devhash[hash];
 463	hlist_add_head(&nhi->dev_hash, head);
 464}
 465
 466static void nexthop_free_group(struct nexthop *nh)
 467{
 468	struct nh_group *nhg;
 469	int i;
 470
 471	nhg = rcu_dereference_raw(nh->nh_grp);
 472	for (i = 0; i < nhg->num_nh; ++i) {
 473		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
 474
 475		WARN_ON(!list_empty(&nhge->nh_list));
 476		nexthop_put(nhge->nh);
 477	}
 478
 479	WARN_ON(nhg->spare == nhg);
 480
 481	if (nhg->resilient)
 482		vfree(rcu_dereference_raw(nhg->res_table));
 483
 484	kfree(nhg->spare);
 485	kfree(nhg);
 486}
 487
 488static void nexthop_free_single(struct nexthop *nh)
 489{
 490	struct nh_info *nhi;
 491
 492	nhi = rcu_dereference_raw(nh->nh_info);
 493	switch (nhi->family) {
 494	case AF_INET:
 495		fib_nh_release(nh->net, &nhi->fib_nh);
 496		break;
 497	case AF_INET6:
 498		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
 499		break;
 500	}
 501	kfree(nhi);
 502}
 503
 504void nexthop_free_rcu(struct rcu_head *head)
 505{
 506	struct nexthop *nh = container_of(head, struct nexthop, rcu);
 507
 508	if (nh->is_group)
 509		nexthop_free_group(nh);
 510	else
 511		nexthop_free_single(nh);
 512
 513	kfree(nh);
 514}
 515EXPORT_SYMBOL_GPL(nexthop_free_rcu);
 516
 517static struct nexthop *nexthop_alloc(void)
 518{
 519	struct nexthop *nh;
 520
 521	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
 522	if (nh) {
 523		INIT_LIST_HEAD(&nh->fi_list);
 524		INIT_LIST_HEAD(&nh->f6i_list);
 525		INIT_LIST_HEAD(&nh->grp_list);
 526		INIT_LIST_HEAD(&nh->fdb_list);
 527	}
 528	return nh;
 529}
 530
 531static struct nh_group *nexthop_grp_alloc(u16 num_nh)
 532{
 533	struct nh_group *nhg;
 534
 535	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
 536	if (nhg)
 537		nhg->num_nh = num_nh;
 538
 539	return nhg;
 540}
 541
 542static void nh_res_table_upkeep_dw(struct work_struct *work);
 543
 544static struct nh_res_table *
 545nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
 546{
 547	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
 548	struct nh_res_table *res_table;
 549	unsigned long size;
 550
 551	size = struct_size(res_table, nh_buckets, num_nh_buckets);
 552	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
 553	if (!res_table)
 554		return NULL;
 555
 556	res_table->net = net;
 557	res_table->nhg_id = nhg_id;
 558	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
 559	INIT_LIST_HEAD(&res_table->uw_nh_entries);
 560	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
 561	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
 562	res_table->num_nh_buckets = num_nh_buckets;
 563	return res_table;
 564}
 565
 566static void nh_base_seq_inc(struct net *net)
 567{
 568	while (++net->nexthop.seq == 0)
 569		;
 570}
 571
 572/* no reference taken; rcu lock or rtnl must be held */
 573struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
 574{
 575	struct rb_node **pp, *parent = NULL, *next;
 576
 577	pp = &net->nexthop.rb_root.rb_node;
 578	while (1) {
 579		struct nexthop *nh;
 580
 581		next = rcu_dereference_raw(*pp);
 582		if (!next)
 583			break;
 584		parent = next;
 585
 586		nh = rb_entry(parent, struct nexthop, rb_node);
 587		if (id < nh->id)
 588			pp = &next->rb_left;
 589		else if (id > nh->id)
 590			pp = &next->rb_right;
 591		else
 592			return nh;
 593	}
 594	return NULL;
 595}
 596EXPORT_SYMBOL_GPL(nexthop_find_by_id);
 597
 598/* used for auto id allocation; called with rtnl held */
 599static u32 nh_find_unused_id(struct net *net)
 600{
 601	u32 id_start = net->nexthop.last_id_allocated;
 602
 603	while (1) {
 604		net->nexthop.last_id_allocated++;
 605		if (net->nexthop.last_id_allocated == id_start)
 606			break;
 607
 608		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
 609			return net->nexthop.last_id_allocated;
 610	}
 611	return 0;
 612}
 613
 614static void nh_res_time_set_deadline(unsigned long next_time,
 615				     unsigned long *deadline)
 616{
 617	if (time_before(next_time, *deadline))
 618		*deadline = next_time;
 619}
 620
 621static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
 622{
 623	if (list_empty(&res_table->uw_nh_entries))
 624		return 0;
 625	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
 626}
 627
 628static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
 629{
 630	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
 631	struct nlattr *nest;
 632
 633	nest = nla_nest_start(skb, NHA_RES_GROUP);
 634	if (!nest)
 635		return -EMSGSIZE;
 636
 637	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
 638			res_table->num_nh_buckets) ||
 639	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
 640			jiffies_to_clock_t(res_table->idle_timer)) ||
 641	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
 642			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
 643	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
 644			      nh_res_table_unbalanced_time(res_table),
 645			      NHA_RES_GROUP_PAD))
 646		goto nla_put_failure;
 647
 648	nla_nest_end(skb, nest);
 649	return 0;
 650
 651nla_put_failure:
 652	nla_nest_cancel(skb, nest);
 653	return -EMSGSIZE;
 654}
 655
 656static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
 657{
 658	struct nexthop_grp *p;
 659	size_t len = nhg->num_nh * sizeof(*p);
 660	struct nlattr *nla;
 661	u16 group_type = 0;
 662	int i;
 663
 664	if (nhg->hash_threshold)
 665		group_type = NEXTHOP_GRP_TYPE_MPATH;
 666	else if (nhg->resilient)
 667		group_type = NEXTHOP_GRP_TYPE_RES;
 668
 669	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
 670		goto nla_put_failure;
 671
 672	nla = nla_reserve(skb, NHA_GROUP, len);
 673	if (!nla)
 674		goto nla_put_failure;
 675
 676	p = nla_data(nla);
 677	for (i = 0; i < nhg->num_nh; ++i) {
 678		p->id = nhg->nh_entries[i].nh->id;
 679		p->weight = nhg->nh_entries[i].weight - 1;
 680		p += 1;
 681	}
 682
 683	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
 684		goto nla_put_failure;
 685
 686	return 0;
 687
 688nla_put_failure:
 689	return -EMSGSIZE;
 690}
 691
 692static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
 693			int event, u32 portid, u32 seq, unsigned int nlflags)
 694{
 695	struct fib6_nh *fib6_nh;
 696	struct fib_nh *fib_nh;
 697	struct nlmsghdr *nlh;
 698	struct nh_info *nhi;
 699	struct nhmsg *nhm;
 700
 701	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
 702	if (!nlh)
 703		return -EMSGSIZE;
 704
 705	nhm = nlmsg_data(nlh);
 706	nhm->nh_family = AF_UNSPEC;
 707	nhm->nh_flags = nh->nh_flags;
 708	nhm->nh_protocol = nh->protocol;
 709	nhm->nh_scope = 0;
 710	nhm->resvd = 0;
 711
 712	if (nla_put_u32(skb, NHA_ID, nh->id))
 713		goto nla_put_failure;
 714
 715	if (nh->is_group) {
 716		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
 717
 718		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
 719			goto nla_put_failure;
 720		if (nla_put_nh_group(skb, nhg))
 721			goto nla_put_failure;
 722		goto out;
 723	}
 724
 725	nhi = rtnl_dereference(nh->nh_info);
 726	nhm->nh_family = nhi->family;
 727	if (nhi->reject_nh) {
 728		if (nla_put_flag(skb, NHA_BLACKHOLE))
 729			goto nla_put_failure;
 730		goto out;
 731	} else if (nhi->fdb_nh) {
 732		if (nla_put_flag(skb, NHA_FDB))
 733			goto nla_put_failure;
 734	} else {
 735		const struct net_device *dev;
 736
 737		dev = nhi->fib_nhc.nhc_dev;
 738		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
 739			goto nla_put_failure;
 740	}
 741
 742	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
 743	switch (nhi->family) {
 744	case AF_INET:
 745		fib_nh = &nhi->fib_nh;
 746		if (fib_nh->fib_nh_gw_family &&
 747		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
 748			goto nla_put_failure;
 749		break;
 750
 751	case AF_INET6:
 752		fib6_nh = &nhi->fib6_nh;
 753		if (fib6_nh->fib_nh_gw_family &&
 754		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
 755			goto nla_put_failure;
 756		break;
 757	}
 758
 759	if (nhi->fib_nhc.nhc_lwtstate &&
 760	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
 761				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
 762		goto nla_put_failure;
 763
 764out:
 765	nlmsg_end(skb, nlh);
 766	return 0;
 767
 768nla_put_failure:
 769	nlmsg_cancel(skb, nlh);
 770	return -EMSGSIZE;
 771}
 772
 773static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
 774{
 775	return nla_total_size(0) +	/* NHA_RES_GROUP */
 776		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
 777		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
 778		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
 779		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
 780}
 781
 782static size_t nh_nlmsg_size_grp(struct nexthop *nh)
 783{
 784	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
 785	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
 786	size_t tot = nla_total_size(sz) +
 787		nla_total_size(2); /* NHA_GROUP_TYPE */
 788
 789	if (nhg->resilient)
 790		tot += nh_nlmsg_size_grp_res(nhg);
 791
 792	return tot;
 793}
 794
 795static size_t nh_nlmsg_size_single(struct nexthop *nh)
 796{
 797	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
 798	size_t sz;
 799
 800	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
 801	 * are mutually exclusive
 802	 */
 803	sz = nla_total_size(4);  /* NHA_OIF */
 804
 805	switch (nhi->family) {
 806	case AF_INET:
 807		if (nhi->fib_nh.fib_nh_gw_family)
 808			sz += nla_total_size(4);  /* NHA_GATEWAY */
 809		break;
 810
 811	case AF_INET6:
 812		/* NHA_GATEWAY */
 813		if (nhi->fib6_nh.fib_nh_gw_family)
 814			sz += nla_total_size(sizeof(const struct in6_addr));
 815		break;
 816	}
 817
 818	if (nhi->fib_nhc.nhc_lwtstate) {
 819		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
 820		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
 821	}
 822
 823	return sz;
 824}
 825
 826static size_t nh_nlmsg_size(struct nexthop *nh)
 827{
 828	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));
 829
 830	sz += nla_total_size(4); /* NHA_ID */
 831
 832	if (nh->is_group)
 833		sz += nh_nlmsg_size_grp(nh);
 834	else
 835		sz += nh_nlmsg_size_single(nh);
 836
 837	return sz;
 838}
 839
 840static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
 841{
 842	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
 843	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
 844	struct sk_buff *skb;
 845	int err = -ENOBUFS;
 846
 847	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
 848	if (!skb)
 849		goto errout;
 850
 851	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
 852	if (err < 0) {
 853		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
 854		WARN_ON(err == -EMSGSIZE);
 855		kfree_skb(skb);
 856		goto errout;
 857	}
 858
 859	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
 860		    info->nlh, gfp_any());
 861	return;
 862errout:
 863	if (err < 0)
 864		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
 865}
 866
 867static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
 868{
 869	return (unsigned long)atomic_long_read(&bucket->used_time);
 870}
 871
 872static unsigned long
 873nh_res_bucket_idle_point(const struct nh_res_table *res_table,
 874			 const struct nh_res_bucket *bucket,
 875			 unsigned long now)
 876{
 877	unsigned long time = nh_res_bucket_used_time(bucket);
 878
 879	/* Bucket was not used since it was migrated. The idle time is now. */
 880	if (time == bucket->migrated_time)
 881		return now;
 882
 883	return time + res_table->idle_timer;
 884}
 885
 886static unsigned long
 887nh_res_table_unb_point(const struct nh_res_table *res_table)
 888{
 889	return res_table->unbalanced_since + res_table->unbalanced_timer;
 890}
 891
 892static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
 893				   struct nh_res_bucket *bucket)
 894{
 895	unsigned long now = jiffies;
 896
 897	atomic_long_set(&bucket->used_time, (long)now);
 898	bucket->migrated_time = now;
 899}
 900
 901static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
 902{
 903	atomic_long_set(&bucket->used_time, (long)jiffies);
 904}
 905
 906static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
 907{
 908	unsigned long used_time = nh_res_bucket_used_time(bucket);
 909
 910	return jiffies_delta_to_clock_t(jiffies - used_time);
 911}
 912
 913static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
 914			      struct nh_res_bucket *bucket, u16 bucket_index,
 915			      int event, u32 portid, u32 seq,
 916			      unsigned int nlflags,
 917			      struct netlink_ext_ack *extack)
 918{
 919	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
 920	struct nlmsghdr *nlh;
 921	struct nlattr *nest;
 922	struct nhmsg *nhm;
 923
 924	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
 925	if (!nlh)
 926		return -EMSGSIZE;
 927
 928	nhm = nlmsg_data(nlh);
 929	nhm->nh_family = AF_UNSPEC;
 930	nhm->nh_flags = bucket->nh_flags;
 931	nhm->nh_protocol = nh->protocol;
 932	nhm->nh_scope = 0;
 933	nhm->resvd = 0;
 934
 935	if (nla_put_u32(skb, NHA_ID, nh->id))
 936		goto nla_put_failure;
 937
 938	nest = nla_nest_start(skb, NHA_RES_BUCKET);
 939	if (!nest)
 940		goto nla_put_failure;
 941
 942	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
 943	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
 944	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
 945			      nh_res_bucket_idle_time(bucket),
 946			      NHA_RES_BUCKET_PAD))
 947		goto nla_put_failure_nest;
 948
 949	nla_nest_end(skb, nest);
 950	nlmsg_end(skb, nlh);
 951	return 0;
 952
 953nla_put_failure_nest:
 954	nla_nest_cancel(skb, nest);
 955nla_put_failure:
 956	nlmsg_cancel(skb, nlh);
 957	return -EMSGSIZE;
 958}
 959
 960static void nexthop_bucket_notify(struct nh_res_table *res_table,
 961				  u16 bucket_index)
 962{
 963	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
 964	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
 965	struct nexthop *nh = nhge->nh_parent;
 966	struct sk_buff *skb;
 967	int err = -ENOBUFS;
 968
 969	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 970	if (!skb)
 971		goto errout;
 972
 973	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
 974				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
 975				 NULL);
 976	if (err < 0) {
 977		kfree_skb(skb);
 978		goto errout;
 979	}
 980
 981	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
 982	return;
 983errout:
 984	if (err < 0)
 985		rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
 986}
 987
 988static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
 989			   bool *is_fdb, struct netlink_ext_ack *extack)
 990{
 991	if (nh->is_group) {
 992		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
 993
 994		/* Nesting groups within groups is not supported. */
 995		if (nhg->hash_threshold) {
 996			NL_SET_ERR_MSG(extack,
 997				       "Hash-threshold group can not be a nexthop within a group");
 998			return false;
 999		}
1000		if (nhg->resilient) {
1001			NL_SET_ERR_MSG(extack,
1002				       "Resilient group can not be a nexthop within a group");
1003			return false;
1004		}
1005		*is_fdb = nhg->fdb_nh;
1006	} else {
1007		struct nh_info *nhi = rtnl_dereference(nh->nh_info);
1008
1009		if (nhi->reject_nh && npaths > 1) {
1010			NL_SET_ERR_MSG(extack,
1011				       "Blackhole nexthop can not be used in a group with more than 1 path");
1012			return false;
1013		}
1014		*is_fdb = nhi->fdb_nh;
1015	}
1016
1017	return true;
1018}
1019
1020static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
1021				   struct netlink_ext_ack *extack)
1022{
1023	struct nh_info *nhi;
1024
1025	nhi = rtnl_dereference(nh->nh_info);
1026
1027	if (!nhi->fdb_nh) {
1028		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
1029		return -EINVAL;
1030	}
1031
1032	if (*nh_family == AF_UNSPEC) {
1033		*nh_family = nhi->family;
1034	} else if (*nh_family != nhi->family) {
1035		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
1036		return -EINVAL;
1037	}
1038
1039	return 0;
1040}
1041
1042static int nh_check_attr_group(struct net *net,
1043			       struct nlattr *tb[], size_t tb_size,
1044			       u16 nh_grp_type, struct netlink_ext_ack *extack)
1045{
1046	unsigned int len = nla_len(tb[NHA_GROUP]);
1047	u8 nh_family = AF_UNSPEC;
1048	struct nexthop_grp *nhg;
1049	unsigned int i, j;
1050	u8 nhg_fdb = 0;
1051
1052	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
1053		NL_SET_ERR_MSG(extack,
1054			       "Invalid length for nexthop group attribute");
1055		return -EINVAL;
1056	}
1057
1058	/* convert len to number of nexthop ids */
1059	len /= sizeof(*nhg);
1060
1061	nhg = nla_data(tb[NHA_GROUP]);
1062	for (i = 0; i < len; ++i) {
1063		if (nhg[i].resvd1 || nhg[i].resvd2) {
1064			NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
1065			return -EINVAL;
1066		}
1067		if (nhg[i].weight > 254) {
1068			NL_SET_ERR_MSG(extack, "Invalid value for weight");
1069			return -EINVAL;
1070		}
1071		for (j = i + 1; j < len; ++j) {
1072			if (nhg[i].id == nhg[j].id) {
1073				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
1074				return -EINVAL;
1075			}
1076		}
1077	}
1078
1079	if (tb[NHA_FDB])
1080		nhg_fdb = 1;
1081	nhg = nla_data(tb[NHA_GROUP]);
1082	for (i = 0; i < len; ++i) {
1083		struct nexthop *nh;
1084		bool is_fdb_nh;
1085
1086		nh = nexthop_find_by_id(net, nhg[i].id);
1087		if (!nh) {
1088			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
1089			return -EINVAL;
1090		}
1091		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
1092			return -EINVAL;
1093
1094		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
1095			return -EINVAL;
1096
1097		if (!nhg_fdb && is_fdb_nh) {
1098			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
1099			return -EINVAL;
1100		}
1101	}
1102	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
1103		if (!tb[i])
1104			continue;
1105		switch (i) {
1106		case NHA_FDB:
1107			continue;
1108		case NHA_RES_GROUP:
1109			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
1110				continue;
1111			break;
1112		}
1113		NL_SET_ERR_MSG(extack,
1114			       "No other attributes can be set in nexthop groups");
1115		return -EINVAL;
1116	}
1117
1118	return 0;
1119}
1120
1121static bool ipv6_good_nh(const struct fib6_nh *nh)
1122{
1123	int state = NUD_REACHABLE;
1124	struct neighbour *n;
1125
1126	rcu_read_lock_bh();
1127
1128	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
1129	if (n)
1130		state = n->nud_state;
1131
1132	rcu_read_unlock_bh();
1133
1134	return !!(state & NUD_VALID);
1135}
1136
1137static bool ipv4_good_nh(const struct fib_nh *nh)
1138{
1139	int state = NUD_REACHABLE;
1140	struct neighbour *n;
1141
1142	rcu_read_lock_bh();
1143
1144	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
1145				      (__force u32)nh->fib_nh_gw4);
1146	if (n)
1147		state = n->nud_state;
1148
1149	rcu_read_unlock_bh();
1150
1151	return !!(state & NUD_VALID);
1152}
1153
1154static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
1155{
1156	struct nexthop *rc = NULL;
1157	int i;
1158
1159	for (i = 0; i < nhg->num_nh; ++i) {
1160		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1161		struct nh_info *nhi;
1162
1163		if (hash > atomic_read(&nhge->hthr.upper_bound))
1164			continue;
1165
1166		nhi = rcu_dereference(nhge->nh->nh_info);
1167		if (nhi->fdb_nh)
1168			return nhge->nh;
1169
1170		/* nexthop groups always check that a nexthop is good and do
1171		 * not rely on a sysctl for this behavior
1172		 */
1173		switch (nhi->family) {
1174		case AF_INET:
1175			if (ipv4_good_nh(&nhi->fib_nh))
1176				return nhge->nh;
1177			break;
1178		case AF_INET6:
1179			if (ipv6_good_nh(&nhi->fib6_nh))
1180				return nhge->nh;
1181			break;
1182		}
1183
1184		if (!rc)
1185			rc = nhge->nh;
1186	}
1187
1188	return rc;
1189}
1190
1191static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
1192{
1193	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
1194	u16 bucket_index = hash % res_table->num_nh_buckets;
1195	struct nh_res_bucket *bucket;
1196	struct nh_grp_entry *nhge;
1197
1198	/* nexthop_select_path() is expected to return a non-NULL value, so
1199	 * skip protocol validation and just hand out whatever there is.
1200	 */
1201	bucket = &res_table->nh_buckets[bucket_index];
1202	nh_res_bucket_set_busy(bucket);
1203	nhge = rcu_dereference(bucket->nh_entry);
1204	return nhge->nh;
1205}
1206
1207struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
1208{
1209	struct nh_group *nhg;
1210
1211	if (!nh->is_group)
1212		return nh;
1213
1214	nhg = rcu_dereference(nh->nh_grp);
1215	if (nhg->hash_threshold)
1216		return nexthop_select_path_hthr(nhg, hash);
1217	else if (nhg->resilient)
1218		return nexthop_select_path_res(nhg, hash);
1219
1220	/* Unreachable. */
1221	return NULL;
1222}
1223EXPORT_SYMBOL_GPL(nexthop_select_path);
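
/* Caller sketch (hypothetical; compare nexthop_path_fib_result() in
 * include/net/nexthop.h): under RCU, a multipath lookup resolves a group
 * to a single path with
 *
 *	struct nexthop *nh = nexthop_select_path(fi->nh, hash);
 *
 * where 'hash' is a flow hash such as the one from fib_multipath_hash().
 */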
1224
1225int nexthop_for_each_fib6_nh(struct nexthop *nh,
1226			     int (*cb)(struct fib6_nh *nh, void *arg),
1227			     void *arg)
1228{
1229	struct nh_info *nhi;
1230	int err;
1231
1232	if (nh->is_group) {
1233		struct nh_group *nhg;
1234		int i;
1235
1236		nhg = rcu_dereference_rtnl(nh->nh_grp);
1237		for (i = 0; i < nhg->num_nh; i++) {
1238			struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1239
1240			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
1241			err = cb(&nhi->fib6_nh, arg);
1242			if (err)
1243				return err;
1244		}
1245	} else {
1246		nhi = rcu_dereference_rtnl(nh->nh_info);
1247		err = cb(&nhi->fib6_nh, arg);
1248		if (err)
1249			return err;
1250	}
1251
1252	return 0;
1253}
1254EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);
1255
1256static int check_src_addr(const struct in6_addr *saddr,
1257			  struct netlink_ext_ack *extack)
1258{
1259	if (!ipv6_addr_any(saddr)) {
1260		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
1261		return -EINVAL;
1262	}
1263	return 0;
1264}
1265
1266int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
1267		       struct netlink_ext_ack *extack)
1268{
1269	struct nh_info *nhi;
1270	bool is_fdb_nh;
1271
1272	/* fib6_src is unique to a fib6_info and limits the ability to cache
1273	 * routes in fib6_nh within a nexthop that is potentially shared
1274	 * across multiple fib entries. If the config wants to use source
1275	 * routing it can not use nexthop objects. mlxsw also does not allow
1276	 * fib6_src on routes.
1277	 */
1278	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
1279		return -EINVAL;
1280
1281	if (nh->is_group) {
1282		struct nh_group *nhg;
1283
1284		nhg = rtnl_dereference(nh->nh_grp);
1285		if (nhg->has_v4)
1286			goto no_v4_nh;
1287		is_fdb_nh = nhg->fdb_nh;
1288	} else {
1289		nhi = rtnl_dereference(nh->nh_info);
1290		if (nhi->family == AF_INET)
1291			goto no_v4_nh;
1292		is_fdb_nh = nhi->fdb_nh;
1293	}
1294
1295	if (is_fdb_nh) {
1296		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1297		return -EINVAL;
1298	}
1299
1300	return 0;
1301no_v4_nh:
1302	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
1303	return -EINVAL;
1304}
1305EXPORT_SYMBOL_GPL(fib6_check_nexthop);
1306
1307/* if existing nexthop has ipv6 routes linked to it, need
1308 * to verify this new spec works with ipv6
1309 */
1310static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
1311			      struct netlink_ext_ack *extack)
1312{
1313	struct fib6_info *f6i;
1314
1315	if (list_empty(&old->f6i_list))
1316		return 0;
1317
1318	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
1319		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
1320			return -EINVAL;
1321	}
1322
1323	return fib6_check_nexthop(new, NULL, extack);
1324}
1325
1326static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
1327			       struct netlink_ext_ack *extack)
1328{
1329	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
1330		NL_SET_ERR_MSG(extack,
1331			       "Route with host scope can not have a gateway");
1332		return -EINVAL;
1333	}
1334
1335	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
1336		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
1337		return -EINVAL;
1338	}
1339
1340	return 0;
1341}
1342
1343/* Invoked by fib add code to verify nexthop by id is ok with
1344 * config for prefix; parts of fib_check_nh not done when nexthop
1345 * object is used.
1346 */
1347int fib_check_nexthop(struct nexthop *nh, u8 scope,
1348		      struct netlink_ext_ack *extack)
1349{
1350	struct nh_info *nhi;
1351	int err = 0;
1352
1353	if (nh->is_group) {
1354		struct nh_group *nhg;
1355
1356		nhg = rtnl_dereference(nh->nh_grp);
1357		if (nhg->fdb_nh) {
1358			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1359			err = -EINVAL;
1360			goto out;
1361		}
1362
1363		if (scope == RT_SCOPE_HOST) {
1364			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
1365			err = -EINVAL;
1366			goto out;
1367		}
1368
1369		/* all nexthops in a group have the same scope */
1370		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
1371		err = nexthop_check_scope(nhi, scope, extack);
1372	} else {
1373		nhi = rtnl_dereference(nh->nh_info);
1374		if (nhi->fdb_nh) {
1375			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1376			err = -EINVAL;
1377			goto out;
1378		}
1379		err = nexthop_check_scope(nhi, scope, extack);
1380	}
1381
1382out:
1383	return err;
1384}
1385
1386static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
1387			     struct netlink_ext_ack *extack)
1388{
1389	struct fib_info *fi;
1390
1391	list_for_each_entry(fi, &old->fi_list, nh_list) {
1392		int err;
1393
1394		err = fib_check_nexthop(new, fi->fib_scope, extack);
1395		if (err)
1396			return err;
1397	}
1398	return 0;
1399}
1400
1401static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
1402{
1403	return nhge->res.count_buckets == nhge->res.wants_buckets;
1404}
1405
1406static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
1407{
1408	return nhge->res.count_buckets > nhge->res.wants_buckets;
1409}
1410
1411static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
1412{
1413	return nhge->res.count_buckets < nhge->res.wants_buckets;
1414}
1415
1416static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
1417{
1418	return list_empty(&res_table->uw_nh_entries);
1419}
1420
1421static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
1422{
1423	struct nh_grp_entry *nhge;
1424
1425	if (bucket->occupied) {
1426		nhge = nh_res_dereference(bucket->nh_entry);
1427		nhge->res.count_buckets--;
1428		bucket->occupied = false;
1429	}
1430}
1431
1432static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
1433				 struct nh_grp_entry *nhge)
1434{
1435	nh_res_bucket_unset_nh(bucket);
1436
1437	bucket->occupied = true;
1438	rcu_assign_pointer(bucket->nh_entry, nhge);
1439	nhge->res.count_buckets++;
1440}
1441
1442static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
1443					 struct nh_res_bucket *bucket,
1444					 unsigned long *deadline, bool *force)
1445{
1446	unsigned long now = jiffies;
1447	struct nh_grp_entry *nhge;
1448	unsigned long idle_point;
1449
1450	if (!bucket->occupied) {
1451		/* The bucket is not occupied, its NHGE pointer is either
1452		 * NULL or obsolete. We _have to_ migrate: set force.
1453		 */
1454		*force = true;
1455		return true;
1456	}
1457
1458	nhge = nh_res_dereference(bucket->nh_entry);
1459
1460	/* If the bucket is populated by an underweight or balanced
1461	 * nexthop, do not migrate.
1462	 */
1463	if (!nh_res_nhge_is_ow(nhge))
1464		return false;
1465
1466	/* At this point we know that the bucket is populated with an
1467	 * overweight nexthop. It needs to be migrated to a new nexthop if
1468	 * the idle timer or the unbalanced timer expired.
1469	 */
1470
1471	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
1472	if (time_after_eq(now, idle_point)) {
1473		/* The bucket is idle. We _can_ migrate: unset force. */
1474		*force = false;
1475		return true;
1476	}
1477
1478	/* Unbalanced timer of 0 means "never force". */
1479	if (res_table->unbalanced_timer) {
1480		unsigned long unb_point;
1481
1482		unb_point = nh_res_table_unb_point(res_table);
1483		if (time_after(now, unb_point)) {
1484			/* The bucket is not idle, but the unbalanced timer
1485			 * expired. We _can_ migrate, but set force anyway,
1486			 * so that drivers know to ignore activity reports
1487			 * from the HW.
1488			 */
1489			*force = true;
1490			return true;
1491		}
1492
1493		nh_res_time_set_deadline(unb_point, deadline);
1494	}
1495
1496	nh_res_time_set_deadline(idle_point, deadline);
1497	return false;
1498}
1499
1500static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
1501				  u16 bucket_index, bool notify,
1502				  bool notify_nl, bool force)
1503{
1504	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
1505	struct nh_grp_entry *new_nhge;
1506	struct netlink_ext_ack extack;
1507	int err;
1508
1509	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
1510					    struct nh_grp_entry,
1511					    res.uw_nh_entry);
1512	if (WARN_ON_ONCE(!new_nhge))
1513		/* If this function is called, "bucket" is either not
1514		 * occupied, or it belongs to a next hop that is
1515		 * overweight. In either case, there ought to be a
1516		 * corresponding underweight next hop.
1517		 */
1518		return false;
1519
1520	if (notify) {
1521		struct nh_grp_entry *old_nhge;
1522
1523		old_nhge = nh_res_dereference(bucket->nh_entry);
1524		err = call_nexthop_res_bucket_notifiers(res_table->net,
1525							res_table->nhg_id,
1526							bucket_index, force,
1527							old_nhge->nh,
1528							new_nhge->nh, &extack);
1529		if (err) {
1530			pr_err_ratelimited("%s\n", extack._msg);
1531			if (!force)
1532				return false;
1533			/* It is not possible to veto a forced replacement, so
1534			 * just clear the hardware flags from the nexthop
1535			 * bucket to indicate to user space that this bucket is
1536			 * not correctly populated in hardware.
1537			 */
1538			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
1539		}
1540	}
1541
1542	nh_res_bucket_set_nh(bucket, new_nhge);
1543	nh_res_bucket_set_idle(res_table, bucket);
1544
1545	if (notify_nl)
1546		nexthop_bucket_notify(res_table, bucket_index);
1547
1548	if (nh_res_nhge_is_balanced(new_nhge))
1549		list_del(&new_nhge->res.uw_nh_entry);
1550	return true;
1551}
1552
1553#define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)
1554
1555static void nh_res_table_upkeep(struct nh_res_table *res_table,
1556				bool notify, bool notify_nl)
1557{
1558	unsigned long now = jiffies;
1559	unsigned long deadline;
1560	u16 i;
1561
1562	/* Deadline is the next time that upkeep should be run. It is the
1563	 * earliest time at which one of the buckets might be migrated.
1564	 * Start at the most pessimistic estimate: either unbalanced_timer
1565	 * from now, or if there is none, idle_timer from now. For each
1566	 * encountered time point, call nh_res_time_set_deadline() to
1567	 * refine the estimate.
1568	 */
1569	if (res_table->unbalanced_timer)
1570		deadline = now + res_table->unbalanced_timer;
1571	else
1572		deadline = now + res_table->idle_timer;
1573
1574	for (i = 0; i < res_table->num_nh_buckets; i++) {
1575		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1576		bool force;
1577
1578		if (nh_res_bucket_should_migrate(res_table, bucket,
1579						 &deadline, &force)) {
1580			if (!nh_res_bucket_migrate(res_table, i, notify,
1581						   notify_nl, force)) {
1582				unsigned long idle_point;
1583
1584				/* A driver can override the migration
1585				 * decision if the HW reports that the
1586				 * bucket is actually not idle. Therefore
1587				 * remark the bucket as busy again and
1588				 * update the deadline.
1589				 */
1590				nh_res_bucket_set_busy(bucket);
1591				idle_point = nh_res_bucket_idle_point(res_table,
1592								      bucket,
1593								      now);
1594				nh_res_time_set_deadline(idle_point, &deadline);
1595			}
1596		}
1597	}
1598
1599	/* If the group is still unbalanced, schedule the next upkeep to
1600	 * either the deadline computed above, or the minimum deadline,
1601	 * whichever comes later.
1602	 */
1603	if (!nh_res_table_is_balanced(res_table)) {
1604		unsigned long now = jiffies;
1605		unsigned long min_deadline;
1606
1607		min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
1608		if (time_before(deadline, min_deadline))
1609			deadline = min_deadline;
1610
1611		queue_delayed_work(system_power_efficient_wq,
1612				   &res_table->upkeep_dw, deadline - now);
1613	}
1614}
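
/* Deadline refinement example (assumed numbers): with an idle_timer of
 * 120s, an unbalanced_timer of 0, and one overweight bucket last used 30s
 * ago, upkeep starts from deadline = now + 120s. The bucket is not yet
 * idle, so nh_res_bucket_should_migrate() computes idle_point =
 * used_time + 120s = now + 90s, and nh_res_time_set_deadline() pulls the
 * next upkeep run in to 90s from now.
 */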
1615
1616static void nh_res_table_upkeep_dw(struct work_struct *work)
1617{
1618	struct delayed_work *dw = to_delayed_work(work);
1619	struct nh_res_table *res_table;
1620
1621	res_table = container_of(dw, struct nh_res_table, upkeep_dw);
1622	nh_res_table_upkeep(res_table, true, true);
1623}
1624
1625static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
1626{
1627	cancel_delayed_work_sync(&res_table->upkeep_dw);
1628}
1629
1630static void nh_res_group_rebalance(struct nh_group *nhg,
1631				   struct nh_res_table *res_table)
1632{
1633	int prev_upper_bound = 0;
1634	int total = 0;
1635	int w = 0;
1636	int i;
1637
1638	INIT_LIST_HEAD(&res_table->uw_nh_entries);
1639
1640	for (i = 0; i < nhg->num_nh; ++i)
1641		total += nhg->nh_entries[i].weight;
1642
1643	for (i = 0; i < nhg->num_nh; ++i) {
1644		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1645		int upper_bound;
1646
1647		w += nhge->weight;
1648		upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w,
1649						total);
1650		nhge->res.wants_buckets = upper_bound - prev_upper_bound;
1651		prev_upper_bound = upper_bound;
1652
1653		if (nh_res_nhge_is_uw(nhge)) {
1654			if (list_empty(&res_table->uw_nh_entries))
1655				res_table->unbalanced_since = jiffies;
1656			list_add(&nhge->res.uw_nh_entry,
1657				 &res_table->uw_nh_entries);
1658		}
1659	}
1660}
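
/* Worked example (assumed weights): with 8 buckets and two entries of
 * weight 1 and 3, total = 4. The cumulative upper bounds are
 * DIV_ROUND_CLOSEST(8 * 1, 4) = 2 and DIV_ROUND_CLOSEST(8 * 4, 4) = 8,
 * so the entries want 2 and 6 buckets respectively. An entry holding
 * fewer buckets than it wants is queued on uw_nh_entries for upkeep.
 */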
1661
1662/* Migrate buckets in res_table so that they reference NHGE's from NHG with
1663 * the right NH ID. Set those buckets that do not have a corresponding NHGE
1664 * entry in NHG as not occupied.
1665 */
1666static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
1667					 struct nh_group *nhg)
1668{
1669	u16 i;
1670
1671	for (i = 0; i < res_table->num_nh_buckets; i++) {
1672		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1673		u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
1674		bool found = false;
1675		int j;
1676
1677		for (j = 0; j < nhg->num_nh; j++) {
1678			struct nh_grp_entry *nhge = &nhg->nh_entries[j];
1679
1680			if (nhge->nh->id == id) {
1681				nh_res_bucket_set_nh(bucket, nhge);
1682				found = true;
1683				break;
1684			}
1685		}
1686
1687		if (!found)
1688			nh_res_bucket_unset_nh(bucket);
1689	}
1690}
1691
1692static void replace_nexthop_grp_res(struct nh_group *oldg,
1693				    struct nh_group *newg)
1694{
1695	/* For NH group replacement, the new NHG might only have a stub
1696	 * hash table with 0 buckets, because the number of buckets was not
1697	 * specified. For NH removal, oldg and newg both reference the same
1698	 * res_table. So in any case, in the following, we want to work
1699	 * with oldg->res_table.
1700	 */
1701	struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
1702	unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
1703	bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);
1704
1705	nh_res_table_cancel_upkeep(old_res_table);
1706	nh_res_table_migrate_buckets(old_res_table, newg);
1707	nh_res_group_rebalance(newg, old_res_table);
1708	if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
1709		old_res_table->unbalanced_since = prev_unbalanced_since;
1710	nh_res_table_upkeep(old_res_table, true, false);
1711}
1712
1713static void nh_hthr_group_rebalance(struct nh_group *nhg)
1714{
1715	int total = 0;
1716	int w = 0;
1717	int i;
1718
1719	for (i = 0; i < nhg->num_nh; ++i)
1720		total += nhg->nh_entries[i].weight;
1721
1722	for (i = 0; i < nhg->num_nh; ++i) {
1723		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1724		int upper_bound;
1725
1726		w += nhge->weight;
1727		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
1728		atomic_set(&nhge->hthr.upper_bound, upper_bound);
1729	}
1730}
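
/* Worked example (assumed weights): for two entries of weight 1 and 3,
 * total = 4. The upper bounds are
 * DIV_ROUND_CLOSEST_ULL((u64)1 << 31, 4) - 1 = 0x1fffffff and
 * DIV_ROUND_CLOSEST_ULL((u64)4 << 31, 4) - 1 = 0x7fffffff, so in
 * nexthop_select_path_hthr() a 31-bit hash selects the first entry with
 * probability 1/4 and the second with probability 3/4.
 */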
1731
1732static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
1733				struct nl_info *nlinfo)
1734{
1735	struct nh_grp_entry *nhges, *new_nhges;
1736	struct nexthop *nhp = nhge->nh_parent;
1737	struct netlink_ext_ack extack;
1738	struct nexthop *nh = nhge->nh;
1739	struct nh_group *nhg, *newg;
1740	int i, j, err;
1741
1742	WARN_ON(!nh);
1743
1744	nhg = rtnl_dereference(nhp->nh_grp);
1745	newg = nhg->spare;
1746
1747	/* last entry, keep it visible and remove the parent */
1748	if (nhg->num_nh == 1) {
1749		remove_nexthop(net, nhp, nlinfo);
1750		return;
1751	}
1752
1753	newg->has_v4 = false;
1754	newg->is_multipath = nhg->is_multipath;
1755	newg->hash_threshold = nhg->hash_threshold;
1756	newg->resilient = nhg->resilient;
1757	newg->fdb_nh = nhg->fdb_nh;
1758	newg->num_nh = nhg->num_nh;
1759
1760	/* copy old entries to new except the one getting removed */
1761	nhges = nhg->nh_entries;
1762	new_nhges = newg->nh_entries;
1763	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
1764		struct nh_info *nhi;
1765
1766		/* current nexthop getting removed */
1767		if (nhg->nh_entries[i].nh == nh) {
1768			newg->num_nh--;
1769			continue;
1770		}
1771
1772		nhi = rtnl_dereference(nhges[i].nh->nh_info);
1773		if (nhi->family == AF_INET)
1774			newg->has_v4 = true;
1775
1776		list_del(&nhges[i].nh_list);
1777		new_nhges[j].nh_parent = nhges[i].nh_parent;
1778		new_nhges[j].nh = nhges[i].nh;
1779		new_nhges[j].weight = nhges[i].weight;
1780		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
1781		j++;
1782	}
1783
1784	if (newg->hash_threshold)
1785		nh_hthr_group_rebalance(newg);
1786	else if (newg->resilient)
1787		replace_nexthop_grp_res(nhg, newg);
1788
1789	rcu_assign_pointer(nhp->nh_grp, newg);
1790
1791	list_del(&nhge->nh_list);
1792	nexthop_put(nhge->nh);
1793
1794	/* Removal of a NH from a resilient group is notified through
1795	 * bucket notifications.
1796	 */
1797	if (newg->hash_threshold) {
1798		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
1799					     &extack);
1800		if (err)
1801			pr_err("%s\n", extack._msg);
1802	}
1803
1804	if (nlinfo)
1805		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
1806}
1807
1808static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
1809				       struct nl_info *nlinfo)
1810{
1811	struct nh_grp_entry *nhge, *tmp;
1812
1813	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
1814		remove_nh_grp_entry(net, nhge, nlinfo);
1815
1816	/* make sure all see the newly published array before releasing rtnl */
1817	synchronize_net();
1818}
1819
1820static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
1821{
1822	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
1823	struct nh_res_table *res_table;
1824	int i, num_nh = nhg->num_nh;
1825
1826	for (i = 0; i < num_nh; ++i) {
1827		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1828
1829		if (WARN_ON(!nhge->nh))
1830			continue;
1831
1832		list_del_init(&nhge->nh_list);
1833	}
1834
1835	if (nhg->resilient) {
1836		res_table = rtnl_dereference(nhg->res_table);
1837		nh_res_table_cancel_upkeep(res_table);
1838	}
1839}
1840
1841/* not called for nexthop replace */
1842static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
1843{
1844	struct fib6_info *f6i, *tmp;
1845	bool do_flush = false;
1846	struct fib_info *fi;
1847
1848	list_for_each_entry(fi, &nh->fi_list, nh_list) {
1849		fi->fib_flags |= RTNH_F_DEAD;
1850		do_flush = true;
1851	}
1852	if (do_flush)
1853		fib_flush(net);
1854
1855	/* ip6_del_rt removes the entry from this list hence the _safe */
1856	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
1857		/* __ip6_del_rt does a release, so do a hold here */
1858		fib6_info_hold(f6i);
1859		ipv6_stub->ip6_del_rt(net, f6i,
1860				      !net->ipv4.sysctl_nexthop_compat_mode);
1861	}
1862}
1863
1864static void __remove_nexthop(struct net *net, struct nexthop *nh,
1865			     struct nl_info *nlinfo)
1866{
1867	__remove_nexthop_fib(net, nh);
1868
1869	if (nh->is_group) {
1870		remove_nexthop_group(nh, nlinfo);
1871	} else {
1872		struct nh_info *nhi;
1873
1874		nhi = rtnl_dereference(nh->nh_info);
1875		if (nhi->fib_nhc.nhc_dev)
1876			hlist_del(&nhi->dev_hash);
1877
1878		remove_nexthop_from_groups(net, nh, nlinfo);
1879	}
1880}
1881
1882static void remove_nexthop(struct net *net, struct nexthop *nh,
1883			   struct nl_info *nlinfo)
1884{
1885	call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);
1886
1887	/* remove from the tree */
1888	rb_erase(&nh->rb_node, &net->nexthop.rb_root);
1889
1890	if (nlinfo)
1891		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);
1892
1893	__remove_nexthop(net, nh, nlinfo);
1894	nh_base_seq_inc(net);
1895
1896	nexthop_put(nh);
1897}
1898
1899/* if any FIB entries reference this nexthop, any dst entries
1900 * need to be regenerated
1901 */
1902static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
1903{
1904	struct fib6_info *f6i;
1905
1906	if (!list_empty(&nh->fi_list))
1907		rt_cache_flush(net);
1908
1909	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
1910		ipv6_stub->fib6_update_sernum(net, f6i);
1911}
1912
1913static int replace_nexthop_grp(struct net *net, struct nexthop *old,
1914			       struct nexthop *new, const struct nh_config *cfg,
1915			       struct netlink_ext_ack *extack)
1916{
1917	struct nh_res_table *tmp_table = NULL;
1918	struct nh_res_table *new_res_table;
1919	struct nh_res_table *old_res_table;
1920	struct nh_group *oldg, *newg;
1921	int i, err;
1922
1923	if (!new->is_group) {
1924		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
1925		return -EINVAL;
1926	}
1927
1928	oldg = rtnl_dereference(old->nh_grp);
1929	newg = rtnl_dereference(new->nh_grp);
1930
1931	if (newg->hash_threshold != oldg->hash_threshold) {
1932		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
1933		return -EINVAL;
1934	}
1935
1936	if (newg->hash_threshold) {
1937		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
1938					     extack);
1939		if (err)
1940			return err;
1941	} else if (newg->resilient) {
1942		new_res_table = rtnl_dereference(newg->res_table);
1943		old_res_table = rtnl_dereference(oldg->res_table);
1944
1945		/* Accept if num_nh_buckets was not given, but if it was
1946		 * given, demand that the value be correct.
1947		 */
1948		if (cfg->nh_grp_res_has_num_buckets &&
1949		    cfg->nh_grp_res_num_buckets !=
1950		    old_res_table->num_nh_buckets) {
1951			NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
1952			return -EINVAL;
1953		}
1954
1955		/* Emit a pre-replace notification so that listeners could veto
1956		 * a potentially unsupported configuration. Otherwise,
1957		 * individual bucket replacement notifications would need to be
1958		 * vetoed, which is something that should only happen if the
1959		 * bucket is currently active.
1960		 */
1961		err = call_nexthop_res_table_notifiers(net, new, extack);
1962		if (err)
1963			return err;
1964
1965		if (cfg->nh_grp_res_has_idle_timer)
1966			old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
1967		if (cfg->nh_grp_res_has_unbalanced_timer)
1968			old_res_table->unbalanced_timer =
1969				cfg->nh_grp_res_unbalanced_timer;
1970
1971		replace_nexthop_grp_res(oldg, newg);
1972
1973		tmp_table = new_res_table;
1974		rcu_assign_pointer(newg->res_table, old_res_table);
1975		rcu_assign_pointer(newg->spare->res_table, old_res_table);
1976	}
1977
1978	/* update parents - used by nexthop code for cleanup */
1979	for (i = 0; i < newg->num_nh; i++)
1980		newg->nh_entries[i].nh_parent = old;
1981
1982	rcu_assign_pointer(old->nh_grp, newg);
1983
1984	if (newg->resilient) {
1985		/* Make sure concurrent readers are not using 'oldg' anymore. */
1986		synchronize_net();
1987		rcu_assign_pointer(oldg->res_table, tmp_table);
1988		rcu_assign_pointer(oldg->spare->res_table, tmp_table);
1989	}
1990
1991	for (i = 0; i < oldg->num_nh; i++)
1992		oldg->nh_entries[i].nh_parent = new;
1993
1994	rcu_assign_pointer(new->nh_grp, oldg);
1995
1996	return 0;
1997}
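
/* Illustrative sketch, not part of the original file: a listener vetoing
 * the NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE event emitted through
 * call_nexthop_res_table_notifiers() above. example_nh_event() and the
 * 4096-bucket limit are hypothetical; the event type and the layout of
 * struct nh_notifier_info come from include/net/nexthop.h.
 */
static int example_nh_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct nh_notifier_info *info = ptr;

	if (event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE)
		return NOTIFY_DONE;

	/* Hypothetical device limit: veto tables we could not program. */
	if (info->nh_res_table->num_nh_buckets > 4096) {
		NL_SET_ERR_MSG(info->extack, "Too many buckets for this device");
		return notifier_from_errno(-EINVAL);
	}

	return NOTIFY_DONE;
}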
1998
1999static void nh_group_v4_update(struct nh_group *nhg)
2000{
2001	struct nh_grp_entry *nhges;
2002	bool has_v4 = false;
2003	int i;
2004
2005	nhges = nhg->nh_entries;
2006	for (i = 0; i < nhg->num_nh; i++) {
2007		struct nh_info *nhi;
2008
2009		nhi = rtnl_dereference(nhges[i].nh->nh_info);
2010		if (nhi->family == AF_INET)
2011			has_v4 = true;
2012	}
2013	nhg->has_v4 = has_v4;
2014}
2015
2016static int replace_nexthop_single_notify_res(struct net *net,
2017					     struct nh_res_table *res_table,
2018					     struct nexthop *old,
2019					     struct nh_info *oldi,
2020					     struct nh_info *newi,
2021					     struct netlink_ext_ack *extack)
2022{
2023	u32 nhg_id = res_table->nhg_id;
2024	int err;
2025	u16 i;
2026
2027	for (i = 0; i < res_table->num_nh_buckets; i++) {
2028		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2029		struct nh_grp_entry *nhge;
2030
2031		nhge = rtnl_dereference(bucket->nh_entry);
2032		if (nhge->nh == old) {
2033			err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
2034								  i, true,
2035								  oldi, newi,
2036								  extack);
2037			if (err)
2038				goto err_notify;
2039		}
2040	}
2041
2042	return 0;
2043
2044err_notify:
2045	while (i-- > 0) {
2046		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2047		struct nh_grp_entry *nhge;
2048
2049		nhge = rtnl_dereference(bucket->nh_entry);
2050		if (nhge->nh == old)
2051			__call_nexthop_res_bucket_notifiers(net, nhg_id, i,
2052							    true, newi, oldi,
2053							    extack);
2054	}
2055	return err;
2056}
2057
2058static int replace_nexthop_single_notify(struct net *net,
2059					 struct nexthop *group_nh,
2060					 struct nexthop *old,
2061					 struct nh_info *oldi,
2062					 struct nh_info *newi,
2063					 struct netlink_ext_ack *extack)
2064{
2065	struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
2066	struct nh_res_table *res_table;
2067
2068	if (nhg->hash_threshold) {
2069		return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
2070					      group_nh, extack);
2071	} else if (nhg->resilient) {
2072		res_table = rtnl_dereference(nhg->res_table);
2073		return replace_nexthop_single_notify_res(net, res_table,
2074							 old, oldi, newi,
2075							 extack);
2076	}
2077
2078	return -EINVAL;
2079}
2080
2081static int replace_nexthop_single(struct net *net, struct nexthop *old,
2082				  struct nexthop *new,
2083				  struct netlink_ext_ack *extack)
2084{
2085	u8 old_protocol, old_nh_flags;
2086	struct nh_info *oldi, *newi;
2087	struct nh_grp_entry *nhge;
2088	int err;
2089
2090	if (new->is_group) {
2091		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
2092		return -EINVAL;
2093	}
2094
2095	err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
2096	if (err)
2097		return err;
2098
2099	/* Hardware flags were set on 'old' as 'new' is not in the red-black
2100	 * tree. Therefore, inherit the flags from 'old' to 'new'.
2101	 */
2102	new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);
2103
2104	oldi = rtnl_dereference(old->nh_info);
2105	newi = rtnl_dereference(new->nh_info);
2106
2107	newi->nh_parent = old;
2108	oldi->nh_parent = new;
2109
2110	old_protocol = old->protocol;
2111	old_nh_flags = old->nh_flags;
2112
2113	old->protocol = new->protocol;
2114	old->nh_flags = new->nh_flags;
2115
2116	rcu_assign_pointer(old->nh_info, newi);
2117	rcu_assign_pointer(new->nh_info, oldi);
2118
2119	/* Send a replace notification for all the groups using the nexthop. */
2120	list_for_each_entry(nhge, &old->grp_list, nh_list) {
2121		struct nexthop *nhp = nhge->nh_parent;
2122
2123		err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
2124						    extack);
2125		if (err)
2126			goto err_notify;
2127	}
2128
2129	/* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
2130	 * update IPv4 indication in all the groups using the nexthop.
2131	 */
2132	if (oldi->family == AF_INET && newi->family == AF_INET6) {
2133		list_for_each_entry(nhge, &old->grp_list, nh_list) {
2134			struct nexthop *nhp = nhge->nh_parent;
2135			struct nh_group *nhg;
2136
2137			nhg = rtnl_dereference(nhp->nh_grp);
2138			nh_group_v4_update(nhg);
2139		}
2140	}
2141
2142	return 0;
2143
2144err_notify:
2145	rcu_assign_pointer(new->nh_info, newi);
2146	rcu_assign_pointer(old->nh_info, oldi);
2147	old->nh_flags = old_nh_flags;
2148	old->protocol = old_protocol;
2149	oldi->nh_parent = old;
2150	newi->nh_parent = new;
2151	list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
2152		struct nexthop *nhp = nhge->nh_parent;
2153
2154		replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
2155	}
2156	call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
2157	return err;
2158}
2159
2160static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
2161				     struct nl_info *info)
2162{
2163	struct fib6_info *f6i;
2164
2165	if (!list_empty(&nh->fi_list)) {
2166		struct fib_info *fi;
2167
2168		/* expectation is a few fib_info per nexthop and then
2169		 * a lot of routes per fib_info. So mark the fib_info
2170		 * and then walk the fib tables once
2171		 */
2172		list_for_each_entry(fi, &nh->fi_list, nh_list)
2173			fi->nh_updated = true;
2174
2175		fib_info_notify_update(net, info);
2176
2177		list_for_each_entry(fi, &nh->fi_list, nh_list)
2178			fi->nh_updated = false;
2179	}
2180
2181	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2182		ipv6_stub->fib6_rt_update(net, f6i, info);
2183}
2184
2185/* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
2186 * linked to this nexthop and for all groups that the nexthop
2187 * is a member of
2188 */
2189static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
2190				   struct nl_info *info)
2191{
2192	struct nh_grp_entry *nhge;
2193
2194	__nexthop_replace_notify(net, nh, info);
2195
2196	list_for_each_entry(nhge, &nh->grp_list, nh_list)
2197		__nexthop_replace_notify(net, nhge->nh_parent, info);
2198}
2199
2200static int replace_nexthop(struct net *net, struct nexthop *old,
2201			   struct nexthop *new, const struct nh_config *cfg,
2202			   struct netlink_ext_ack *extack)
2203{
2204	bool new_is_reject = false;
2205	struct nh_grp_entry *nhge;
2206	int err;
2207
2208	/* check that existing FIB entries are ok with the
2209	 * new nexthop definition
2210	 */
2211	err = fib_check_nh_list(old, new, extack);
2212	if (err)
2213		return err;
2214
2215	err = fib6_check_nh_list(old, new, extack);
2216	if (err)
2217		return err;
2218
2219	if (!new->is_group) {
2220		struct nh_info *nhi = rtnl_dereference(new->nh_info);
2221
2222		new_is_reject = nhi->reject_nh;
2223	}
2224
2225	list_for_each_entry(nhge, &old->grp_list, nh_list) {
2226		/* if new nexthop is a blackhole, any groups using this
2227		 * nexthop cannot have more than 1 path
2228		 */
2229		if (new_is_reject &&
2230		    nexthop_num_path(nhge->nh_parent) > 1) {
2231			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
2232			return -EINVAL;
2233		}
2234
2235		err = fib_check_nh_list(nhge->nh_parent, new, extack);
2236		if (err)
2237			return err;
2238
2239		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
2240		if (err)
2241			return err;
2242	}
2243
2244	if (old->is_group)
2245		err = replace_nexthop_grp(net, old, new, cfg, extack);
2246	else
2247		err = replace_nexthop_single(net, old, new, extack);
2248
2249	if (!err) {
2250		nh_rt_cache_flush(net, old);
2251
2252		__remove_nexthop(net, new, NULL);
2253		nexthop_put(new);
2254	}
2255
2256	return err;
2257}
2258
2259/* called with rtnl_lock held */
2260static int insert_nexthop(struct net *net, struct nexthop *new_nh,
2261			  struct nh_config *cfg, struct netlink_ext_ack *extack)
2262{
2263	struct rb_node **pp, *parent = NULL, *next;
2264	struct rb_root *root = &net->nexthop.rb_root;
2265	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
2266	bool create = !!(cfg->nlflags & NLM_F_CREATE);
2267	u32 new_id = new_nh->id;
2268	int replace_notify = 0;
2269	int rc = -EEXIST;
2270
2271	pp = &root->rb_node;
2272	while (1) {
2273		struct nexthop *nh;
2274
2275		next = *pp;
2276		if (!next)
2277			break;
2278
2279		parent = next;
2280
2281		nh = rb_entry(parent, struct nexthop, rb_node);
2282		if (new_id < nh->id) {
2283			pp = &next->rb_left;
2284		} else if (new_id > nh->id) {
2285			pp = &next->rb_right;
2286		} else if (replace) {
2287			rc = replace_nexthop(net, nh, new_nh, cfg, extack);
2288			if (!rc) {
2289				new_nh = nh; /* send notification with old nh */
2290				replace_notify = 1;
2291			}
2292			goto out;
2293		} else {
2294			/* id already exists and not a replace */
2295			goto out;
2296		}
2297	}
2298
2299	if (replace && !create) {
2300		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
2301		rc = -ENOENT;
2302		goto out;
2303	}
2304
2305	if (new_nh->is_group) {
2306		struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
2307		struct nh_res_table *res_table;
2308
2309		if (nhg->resilient) {
2310			res_table = rtnl_dereference(nhg->res_table);
2311
2312			/* Not passing the number of buckets is OK when
2313			 * replacing, but not when creating a new group.
2314			 */
2315			if (!cfg->nh_grp_res_has_num_buckets) {
2316				NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
2317				rc = -EINVAL;
2318				goto out;
2319			}
2320
2321			nh_res_group_rebalance(nhg, res_table);
2322
2323			/* Do not send bucket notifications; we do a full
2324			 * notification below.
2325			 */
2326			nh_res_table_upkeep(res_table, false, false);
2327		}
2328	}
2329
2330	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
2331	rb_insert_color(&new_nh->rb_node, root);
2332
2333	/* The initial insertion is a full notification for hash-threshold as
2334	 * well as resilient groups.
2335	 */
2336	rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
2337	if (rc)
2338		rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
2339
2340out:
2341	if (!rc) {
2342		nh_base_seq_inc(net);
2343		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
2344		if (replace_notify && net->ipv4.sysctl_nexthop_compat_mode)
2345			nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
2346	}
2347
2348	return rc;
2349}
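
/* Illustrative summary of the nlmsg flag handling above, using iproute2
 * commands as examples:
 *
 *   ip nexthop add id 7 ...      sends NLM_F_CREATE | NLM_F_EXCL and
 *                                fails with -EEXIST if id 7 exists;
 *   ip nexthop replace id 7 ...  sends NLM_F_CREATE | NLM_F_REPLACE and
 *                                replaces id 7 in place, or inserts it
 *                                when the id is not in the tree.
 *
 * NLM_F_REPLACE without NLM_F_CREATE fails with -ENOENT when the id does
 * not exist yet.
 */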
2350
2351/* rtnl */
2352/* remove all nexthops tied to a device being deleted */
2353static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
2354{
2355	unsigned int hash = nh_dev_hashfn(dev->ifindex);
2356	struct net *net = dev_net(dev);
2357	struct hlist_head *head = &net->nexthop.devhash[hash];
2358	struct hlist_node *n;
2359	struct nh_info *nhi;
2360
2361	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
2362		if (nhi->fib_nhc.nhc_dev != dev)
2363			continue;
2364
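		/* Blackhole nexthops are bound to the loopback device (see
		 * nexthop_create()), so an admin down or carrier change must
		 * not remove them; only NETDEV_UNREGISTER does.
		 */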
2365		if (nhi->reject_nh &&
2366		    (event == NETDEV_DOWN || event == NETDEV_CHANGE))
2367			continue;
2368
2369		remove_nexthop(net, nhi->nh_parent, NULL);
2370	}
2371}
2372
2373/* rtnl; called when net namespace is deleted */
2374static void flush_all_nexthops(struct net *net)
2375{
2376	struct rb_root *root = &net->nexthop.rb_root;
2377	struct rb_node *node;
2378	struct nexthop *nh;
2379
2380	while ((node = rb_first(root))) {
2381		nh = rb_entry(node, struct nexthop, rb_node);
2382		remove_nexthop(net, nh, NULL);
2383		cond_resched();
2384	}
2385}
2386
2387static struct nexthop *nexthop_create_group(struct net *net,
2388					    struct nh_config *cfg)
2389{
2390	struct nlattr *grps_attr = cfg->nh_grp;
2391	struct nexthop_grp *entry = nla_data(grps_attr);
2392	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
2393	struct nh_group *nhg;
2394	struct nexthop *nh;
2395	int err;
2396	int i;
2397
2398	if (WARN_ON(!num_nh))
2399		return ERR_PTR(-EINVAL);
2400
2401	nh = nexthop_alloc();
2402	if (!nh)
2403		return ERR_PTR(-ENOMEM);
2404
2405	nh->is_group = 1;
2406
2407	nhg = nexthop_grp_alloc(num_nh);
2408	if (!nhg) {
2409		kfree(nh);
2410		return ERR_PTR(-ENOMEM);
2411	}
2412
2413	/* spare group used for removals */
2414	nhg->spare = nexthop_grp_alloc(num_nh);
2415	if (!nhg->spare) {
2416		kfree(nhg);
2417		kfree(nh);
2418		return ERR_PTR(-ENOMEM);
2419	}
2420	nhg->spare->spare = nhg;
2421
2422	for (i = 0; i < nhg->num_nh; ++i) {
2423		struct nexthop *nhe;
2424		struct nh_info *nhi;
2425
2426		nhe = nexthop_find_by_id(net, entry[i].id);
2427		if (!nexthop_get(nhe)) {
2428			err = -ENOENT;
2429			goto out_no_nh;
2430		}
2431
2432		nhi = rtnl_dereference(nhe->nh_info);
2433		if (nhi->family == AF_INET)
2434			nhg->has_v4 = true;
2435
2436		nhg->nh_entries[i].nh = nhe;
2437		nhg->nh_entries[i].weight = entry[i].weight + 1;
2438		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
2439		nhg->nh_entries[i].nh_parent = nh;
2440	}
2441
2442	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
2443		nhg->hash_threshold = 1;
2444		nhg->is_multipath = true;
2445	} else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
2446		struct nh_res_table *res_table;
2447
2448		res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
2449		if (!res_table) {
2450			err = -ENOMEM;
2451			goto out_no_nh;
2452		}
2453
2454		rcu_assign_pointer(nhg->spare->res_table, res_table);
2455		rcu_assign_pointer(nhg->res_table, res_table);
2456		nhg->resilient = true;
2457		nhg->is_multipath = true;
2458	}
2459
2460	WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);
2461
2462	if (nhg->hash_threshold)
2463		nh_hthr_group_rebalance(nhg);
2464
2465	if (cfg->nh_fdb)
2466		nhg->fdb_nh = 1;
2467
2468	rcu_assign_pointer(nh->nh_grp, nhg);
2469
2470	return nh;
2471
2472out_no_nh:
2473	for (i--; i >= 0; --i) {
2474		list_del(&nhg->nh_entries[i].nh_list);
2475		nexthop_put(nhg->nh_entries[i].nh);
2476	}
2477
2478	kfree(nhg->spare);
2479	kfree(nhg);
2480	kfree(nh);
2481
2482	return ERR_PTR(err);
2483}
2484
2485static int nh_create_ipv4(struct net *net, struct nexthop *nh,
2486			  struct nh_info *nhi, struct nh_config *cfg,
2487			  struct netlink_ext_ack *extack)
2488{
2489	struct fib_nh *fib_nh = &nhi->fib_nh;
2490	struct fib_config fib_cfg = {
2491		.fc_oif   = cfg->nh_ifindex,
2492		.fc_gw4   = cfg->gw.ipv4,
2493		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
2494		.fc_flags = cfg->nh_flags,
2495		.fc_nlinfo = cfg->nlinfo,
2496		.fc_encap = cfg->nh_encap,
2497		.fc_encap_type = cfg->nh_encap_type,
2498	};
2499	u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
2500	int err;
2501
2502	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
2503	if (err) {
2504		fib_nh_release(net, fib_nh);
2505		goto out;
2506	}
2507
2508	if (nhi->fdb_nh)
2509		goto out;
2510
2511	/* sets nh_dev if successful */
2512	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
2513	if (!err) {
2514		nh->nh_flags = fib_nh->fib_nh_flags;
2515		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
2516					  fib_nh->fib_nh_scope);
2517	} else {
2518		fib_nh_release(net, fib_nh);
2519	}
2520out:
2521	return err;
2522}
2523
2524static int nh_create_ipv6(struct net *net,  struct nexthop *nh,
2525			  struct nh_info *nhi, struct nh_config *cfg,
2526			  struct netlink_ext_ack *extack)
2527{
2528	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
2529	struct fib6_config fib6_cfg = {
2530		.fc_table = l3mdev_fib_table(cfg->dev),
2531		.fc_ifindex = cfg->nh_ifindex,
2532		.fc_gateway = cfg->gw.ipv6,
2533		.fc_flags = cfg->nh_flags,
2534		.fc_nlinfo = cfg->nlinfo,
2535		.fc_encap = cfg->nh_encap,
2536		.fc_encap_type = cfg->nh_encap_type,
2537		.fc_is_fdb = cfg->nh_fdb,
2538	};
2539	int err;
2540
2541	if (!ipv6_addr_any(&cfg->gw.ipv6))
2542		fib6_cfg.fc_flags |= RTF_GATEWAY;
2543
2544	/* sets nh_dev if successful */
2545	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
2546				      extack);
2547	if (err)
2548		ipv6_stub->fib6_nh_release(fib6_nh);
2549	else
2550		nh->nh_flags = fib6_nh->fib_nh_flags;
2551
2552	return err;
2553}
2554
2555static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
2556				      struct netlink_ext_ack *extack)
2557{
2558	struct nh_info *nhi;
2559	struct nexthop *nh;
2560	int err = 0;
2561
2562	nh = nexthop_alloc();
2563	if (!nh)
2564		return ERR_PTR(-ENOMEM);
2565
2566	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
2567	if (!nhi) {
2568		kfree(nh);
2569		return ERR_PTR(-ENOMEM);
2570	}
2571
2572	nh->nh_flags = cfg->nh_flags;
2573	nh->net = net;
2574
2575	nhi->nh_parent = nh;
2576	nhi->family = cfg->nh_family;
2577	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
2578
2579	if (cfg->nh_fdb)
2580		nhi->fdb_nh = 1;
2581
2582	if (cfg->nh_blackhole) {
2583		nhi->reject_nh = 1;
2584		cfg->nh_ifindex = net->loopback_dev->ifindex;
2585	}
2586
2587	switch (cfg->nh_family) {
2588	case AF_INET:
2589		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
2590		break;
2591	case AF_INET6:
2592		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
2593		break;
2594	}
2595
2596	if (err) {
2597		kfree(nhi);
2598		kfree(nh);
2599		return ERR_PTR(err);
2600	}
2601
2602	/* add the entry to the device based hash */
2603	if (!nhi->fdb_nh)
2604		nexthop_devhash_add(net, nhi);
2605
2606	rcu_assign_pointer(nh->nh_info, nhi);
2607
2608	return nh;
2609}
2610
2611/* called with rtnl lock held */
2612static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
2613				   struct netlink_ext_ack *extack)
2614{
2615	struct nexthop *nh;
2616	int err;
2617
2618	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
2619		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
2620		return ERR_PTR(-EINVAL);
2621	}
2622
2623	if (!cfg->nh_id) {
2624		cfg->nh_id = nh_find_unused_id(net);
2625		if (!cfg->nh_id) {
2626			NL_SET_ERR_MSG(extack, "No unused id");
2627			return ERR_PTR(-EINVAL);
2628		}
2629	}
2630
2631	if (cfg->nh_grp)
2632		nh = nexthop_create_group(net, cfg);
2633	else
2634		nh = nexthop_create(net, cfg, extack);
2635
2636	if (IS_ERR(nh))
2637		return nh;
2638
2639	refcount_set(&nh->refcnt, 1);
2640	nh->id = cfg->nh_id;
2641	nh->protocol = cfg->nh_protocol;
2642	nh->net = net;
2643
2644	err = insert_nexthop(net, nh, cfg, extack);
2645	if (err) {
2646		__remove_nexthop(net, nh, NULL);
2647		nexthop_put(nh);
2648		nh = ERR_PTR(err);
2649	}
2650
2651	return nh;
2652}
2653
2654static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
2655			    unsigned long *timer_p, bool *has_p,
2656			    struct netlink_ext_ack *extack)
2657{
2658	unsigned long timer;
2659	u32 value;
2660
2661	if (!attr) {
2662		*timer_p = fallback;
2663		*has_p = false;
2664		return 0;
2665	}
2666
2667	value = nla_get_u32(attr);
2668	timer = clock_t_to_jiffies(value);
2669	if (timer == ~0UL) {
2670		NL_SET_ERR_MSG(extack, "Timer value too large");
2671		return -EINVAL;
2672	}
2673
2674	*timer_p = timer;
2675	*has_p = true;
2676	return 0;
2677}
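
/* Example of the conversion above: the timers are passed from userspace
 * in clock_t units (USER_HZ ticks, typically 100 per second), so an
 * NHA_RES_GROUP_IDLE_TIMER of 12000 means 120 seconds, and
 * clock_t_to_jiffies() turns it into 120 * HZ -- the same value as the
 * NH_RES_DEFAULT_IDLE_TIMER default. Values that do not fit the jiffies
 * range come back as ~0UL and are rejected with -EINVAL.
 */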
2678
2679static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
2680				    struct netlink_ext_ack *extack)
2681{
2682	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
2683	int err;
2684
2685	if (res) {
2686		err = nla_parse_nested(tb,
2687				       ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
2688				       res, rtm_nh_res_policy_new, extack);
2689		if (err < 0)
2690			return err;
2691	}
2692
2693	if (tb[NHA_RES_GROUP_BUCKETS]) {
2694		cfg->nh_grp_res_num_buckets =
2695			nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
2696		cfg->nh_grp_res_has_num_buckets = true;
2697		if (!cfg->nh_grp_res_num_buckets) {
2698			NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
2699			return -EINVAL;
2700		}
2701	}
2702
2703	err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
2704			       NH_RES_DEFAULT_IDLE_TIMER,
2705			       &cfg->nh_grp_res_idle_timer,
2706			       &cfg->nh_grp_res_has_idle_timer,
2707			       extack);
2708	if (err)
2709		return err;
2710
2711	return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
2712				NH_RES_DEFAULT_UNBALANCED_TIMER,
2713				&cfg->nh_grp_res_unbalanced_timer,
2714				&cfg->nh_grp_res_has_unbalanced_timer,
2715				extack);
2716}
2717
2718static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
2719			    struct nlmsghdr *nlh, struct nh_config *cfg,
2720			    struct netlink_ext_ack *extack)
2721{
2722	struct nhmsg *nhm = nlmsg_data(nlh);
2723	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
2724	int err;
2725
2726	err = nlmsg_parse(nlh, sizeof(*nhm), tb,
2727			  ARRAY_SIZE(rtm_nh_policy_new) - 1,
2728			  rtm_nh_policy_new, extack);
2729	if (err < 0)
2730		return err;
2731
2732	err = -EINVAL;
2733	if (nhm->resvd || nhm->nh_scope) {
2734		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
2735		goto out;
2736	}
2737	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
2738		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
2739		goto out;
2740	}
2741
2742	switch (nhm->nh_family) {
2743	case AF_INET:
2744	case AF_INET6:
2745		break;
2746	case AF_UNSPEC:
2747		if (tb[NHA_GROUP])
2748			break;
2749		fallthrough;
2750	default:
2751		NL_SET_ERR_MSG(extack, "Invalid address family");
2752		goto out;
2753	}
2754
2755	memset(cfg, 0, sizeof(*cfg));
2756	cfg->nlflags = nlh->nlmsg_flags;
2757	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
2758	cfg->nlinfo.nlh = nlh;
2759	cfg->nlinfo.nl_net = net;
2760
2761	cfg->nh_family = nhm->nh_family;
2762	cfg->nh_protocol = nhm->nh_protocol;
2763	cfg->nh_flags = nhm->nh_flags;
2764
2765	if (tb[NHA_ID])
2766		cfg->nh_id = nla_get_u32(tb[NHA_ID]);
2767
2768	if (tb[NHA_FDB]) {
2769		if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
2770		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE]) {
2771			NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
2772			goto out;
2773		}
2774		if (nhm->nh_flags) {
2775			NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
2776			goto out;
2777		}
2778		cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
2779	}
2780
2781	if (tb[NHA_GROUP]) {
2782		if (nhm->nh_family != AF_UNSPEC) {
2783			NL_SET_ERR_MSG(extack, "Invalid family for group");
2784			goto out;
2785		}
2786		cfg->nh_grp = tb[NHA_GROUP];
2787
2788		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
2789		if (tb[NHA_GROUP_TYPE])
2790			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
2791
2792		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
2793			NL_SET_ERR_MSG(extack, "Invalid group type");
2794			goto out;
2795		}
2796		err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb),
2797					  cfg->nh_grp_type, extack);
2798		if (err)
2799			goto out;
2800
2801		if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
2802			err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
2803						       cfg, extack);
2804
2805		/* no other attributes should be set */
2806		goto out;
2807	}
2808
2809	if (tb[NHA_BLACKHOLE]) {
2810		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
2811		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
2812			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
2813			goto out;
2814		}
2815
2816		cfg->nh_blackhole = 1;
2817		err = 0;
2818		goto out;
2819	}
2820
2821	if (!cfg->nh_fdb && !tb[NHA_OIF]) {
2822		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
2823		goto out;
2824	}
2825
2826	if (!cfg->nh_fdb && tb[NHA_OIF]) {
2827		cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
2828		if (cfg->nh_ifindex)
2829			cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
2830
2831		if (!cfg->dev) {
2832			NL_SET_ERR_MSG(extack, "Invalid device index");
2833			goto out;
2834		} else if (!(cfg->dev->flags & IFF_UP)) {
2835			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
2836			err = -ENETDOWN;
2837			goto out;
2838		} else if (!netif_carrier_ok(cfg->dev)) {
2839			NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
2840			err = -ENETDOWN;
2841			goto out;
2842		}
2843	}
2844
2845	err = -EINVAL;
2846	if (tb[NHA_GATEWAY]) {
2847		struct nlattr *gwa = tb[NHA_GATEWAY];
2848
2849		switch (cfg->nh_family) {
2850		case AF_INET:
2851			if (nla_len(gwa) != sizeof(u32)) {
2852				NL_SET_ERR_MSG(extack, "Invalid gateway");
2853				goto out;
2854			}
2855			cfg->gw.ipv4 = nla_get_be32(gwa);
2856			break;
2857		case AF_INET6:
2858			if (nla_len(gwa) != sizeof(struct in6_addr)) {
2859				NL_SET_ERR_MSG(extack, "Invalid gateway");
2860				goto out;
2861			}
2862			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
2863			break;
2864		default:
2865			NL_SET_ERR_MSG(extack,
2866				       "Unknown address family for gateway");
2867			goto out;
2868		}
2869	} else {
2870		/* device only nexthop (no gateway) */
2871		if (cfg->nh_flags & RTNH_F_ONLINK) {
2872			NL_SET_ERR_MSG(extack,
2873				       "ONLINK flag can not be set for nexthop without a gateway");
2874			goto out;
2875		}
2876	}
2877
2878	if (tb[NHA_ENCAP]) {
2879		cfg->nh_encap = tb[NHA_ENCAP];
2880
2881		if (!tb[NHA_ENCAP_TYPE]) {
2882			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
2883			goto out;
2884		}
2885
2886		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
2887		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
2888		if (err < 0)
2889			goto out;
2890
2891	} else if (tb[NHA_ENCAP_TYPE]) {
2892		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
2893		goto out;
2894	}
2895
2896
2897	err = 0;
2898out:
2899	return err;
2900}
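
/* Illustrative mapping of the attributes parsed above onto iproute2
 * commands (ids, addresses and device names are arbitrary examples):
 *
 *   ip nexthop add id 1 via 10.0.0.2 dev eth0
 *     -> NHA_ID, NHA_GATEWAY (4 bytes, AF_INET), NHA_OIF
 *   ip nexthop add id 2 blackhole
 *     -> NHA_ID, NHA_BLACKHOLE (gateway/oif/encap/fdb are rejected)
 *   ip nexthop add id 3 group 1/2 type resilient buckets 32
 *     -> NHA_ID, NHA_GROUP, NHA_GROUP_TYPE, nested NHA_RES_GROUP
 *        carrying NHA_RES_GROUP_BUCKETS
 */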
2901
2902/* rtnl */
2903static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
2904			   struct netlink_ext_ack *extack)
2905{
2906	struct net *net = sock_net(skb->sk);
2907	struct nh_config cfg;
2908	struct nexthop *nh;
2909	int err;
2910
2911	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
2912	if (!err) {
2913		nh = nexthop_add(net, &cfg, extack);
2914		if (IS_ERR(nh))
2915			err = PTR_ERR(nh);
2916	}
2917
2918	return err;
2919}
2920
2921static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
2922				  struct nlattr **tb, u32 *id,
2923				  struct netlink_ext_ack *extack)
2924{
2925	struct nhmsg *nhm = nlmsg_data(nlh);
2926
2927	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
2928		NL_SET_ERR_MSG(extack, "Invalid values in header");
2929		return -EINVAL;
2930	}
2931
2932	if (!tb[NHA_ID]) {
2933		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
2934		return -EINVAL;
2935	}
2936
2937	*id = nla_get_u32(tb[NHA_ID]);
2938	if (!(*id)) {
2939		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
2940		return -EINVAL;
2941	}
2942
2943	return 0;
2944}
2945
2946static int nh_valid_get_del_req(const struct nlmsghdr *nlh, u32 *id,
2947				struct netlink_ext_ack *extack)
2948{
2949	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
2950	int err;
2951
2952	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
2953			  ARRAY_SIZE(rtm_nh_policy_get) - 1,
2954			  rtm_nh_policy_get, extack);
2955	if (err < 0)
2956		return err;
2957
2958	return __nh_valid_get_del_req(nlh, tb, id, extack);
2959}
2960
2961/* rtnl */
2962static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
2963			   struct netlink_ext_ack *extack)
2964{
2965	struct net *net = sock_net(skb->sk);
2966	struct nl_info nlinfo = {
2967		.nlh = nlh,
2968		.nl_net = net,
2969		.portid = NETLINK_CB(skb).portid,
2970	};
2971	struct nexthop *nh;
2972	int err;
2973	u32 id;
2974
2975	err = nh_valid_get_del_req(nlh, &id, extack);
2976	if (err)
2977		return err;
2978
2979	nh = nexthop_find_by_id(net, id);
2980	if (!nh)
2981		return -ENOENT;
2982
2983	remove_nexthop(net, nh, &nlinfo);
2984
2985	return 0;
2986}
2987
2988/* rtnl */
2989static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2990			   struct netlink_ext_ack *extack)
2991{
2992	struct net *net = sock_net(in_skb->sk);
2993	struct sk_buff *skb = NULL;
2994	struct nexthop *nh;
2995	int err;
2996	u32 id;
2997
2998	err = nh_valid_get_del_req(nlh, &id, extack);
2999	if (err)
3000		return err;
3001
3002	err = -ENOBUFS;
3003	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3004	if (!skb)
3005		goto out;
3006
3007	err = -ENOENT;
3008	nh = nexthop_find_by_id(net, id);
3009	if (!nh)
3010		goto errout_free;
3011
3012	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
3013			   nlh->nlmsg_seq, 0);
3014	if (err < 0) {
3015		WARN_ON(err == -EMSGSIZE);
3016		goto errout_free;
3017	}
3018
3019	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3020out:
3021	return err;
3022errout_free:
3023	kfree_skb(skb);
3024	goto out;
3025}
3026
3027struct nh_dump_filter {
3028	u32 nh_id;
3029	int dev_idx;
3030	int master_idx;
3031	bool group_filter;
3032	bool fdb_filter;
3033	u32 res_bucket_nh_id;
3034};
3035
3036static bool nh_dump_filtered(struct nexthop *nh,
3037			     struct nh_dump_filter *filter, u8 family)
3038{
3039	const struct net_device *dev;
3040	const struct nh_info *nhi;
3041
3042	if (filter->group_filter && !nh->is_group)
3043		return true;
3044
3045	if (!filter->dev_idx && !filter->master_idx && !family)
3046		return false;
3047
3048	if (nh->is_group)
3049		return true;
3050
3051	nhi = rtnl_dereference(nh->nh_info);
3052	if (family && nhi->family != family)
3053		return true;
3054
3055	dev = nhi->fib_nhc.nhc_dev;
3056	if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
3057		return true;
3058
3059	if (filter->master_idx) {
3060		struct net_device *master;
3061
3062		if (!dev)
3063			return true;
3064
3065		master = netdev_master_upper_dev_get((struct net_device *)dev);
3066		if (!master || master->ifindex != filter->master_idx)
3067			return true;
3068	}
3069
3070	return false;
3071}
3072
3073static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
3074			       struct nh_dump_filter *filter,
3075			       struct netlink_ext_ack *extack)
3076{
3077	struct nhmsg *nhm;
3078	u32 idx;
3079
3080	if (tb[NHA_OIF]) {
3081		idx = nla_get_u32(tb[NHA_OIF]);
3082		if (idx > INT_MAX) {
3083			NL_SET_ERR_MSG(extack, "Invalid device index");
3084			return -EINVAL;
3085		}
3086		filter->dev_idx = idx;
3087	}
3088	if (tb[NHA_MASTER]) {
3089		idx = nla_get_u32(tb[NHA_MASTER]);
3090		if (idx > INT_MAX) {
3091			NL_SET_ERR_MSG(extack, "Invalid master device index");
3092			return -EINVAL;
3093		}
3094		filter->master_idx = idx;
3095	}
3096	filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
3097	filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);
3098
3099	nhm = nlmsg_data(nlh);
3100	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3101		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
3102		return -EINVAL;
3103	}
3104
3105	return 0;
3106}
3107
3108static int nh_valid_dump_req(const struct nlmsghdr *nlh,
3109			     struct nh_dump_filter *filter,
3110			     struct netlink_callback *cb)
3111{
3112	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
3113	int err;
3114
3115	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3116			  ARRAY_SIZE(rtm_nh_policy_dump) - 1,
3117			  rtm_nh_policy_dump, cb->extack);
3118	if (err < 0)
3119		return err;
3120
3121	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3122}
3123
3124struct rtm_dump_nh_ctx {
3125	u32 idx;
3126};
3127
3128static struct rtm_dump_nh_ctx *
3129rtm_dump_nh_ctx(struct netlink_callback *cb)
3130{
3131	struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;
3132
3133	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3134	return ctx;
3135}
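
/* Note: dump state is kept directly in the scratch area of struct
 * netlink_callback (cb->ctx), which persists across the successive
 * recvmsg() calls of a single dump; the BUILD_BUG_ON() above proves at
 * compile time that the context fits. rtm_dump_res_bucket_ctx() below
 * reuses the same pattern with a larger context structure.
 */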
3136
3137static int rtm_dump_walk_nexthops(struct sk_buff *skb,
3138				  struct netlink_callback *cb,
3139				  struct rb_root *root,
3140				  struct rtm_dump_nh_ctx *ctx,
3141				  int (*nh_cb)(struct sk_buff *skb,
3142					       struct netlink_callback *cb,
3143					       struct nexthop *nh, void *data),
3144				  void *data)
3145{
3146	struct rb_node *node;
3147	int s_idx;
3148	int err;
3149
3150	s_idx = ctx->idx;
3151	for (node = rb_first(root); node; node = rb_next(node)) {
3152		struct nexthop *nh;
3153
3154		nh = rb_entry(node, struct nexthop, rb_node);
3155		if (nh->id < s_idx)
3156			continue;
3157
3158		ctx->idx = nh->id;
3159		err = nh_cb(skb, cb, nh, data);
3160		if (err)
3161			return err;
3162	}
3163
3164	ctx->idx++;
3165	return 0;
3166}
3167
3168static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
3169			       struct nexthop *nh, void *data)
3170{
3171	struct nhmsg *nhm = nlmsg_data(cb->nlh);
3172	struct nh_dump_filter *filter = data;
3173
3174	if (nh_dump_filtered(nh, filter, nhm->nh_family))
3175		return 0;
3176
3177	return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
3178			    NETLINK_CB(cb->skb).portid,
3179			    cb->nlh->nlmsg_seq, NLM_F_MULTI);
3180}
3181
3182/* rtnl */
3183static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
3184{
3185	struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
3186	struct net *net = sock_net(skb->sk);
3187	struct rb_root *root = &net->nexthop.rb_root;
3188	struct nh_dump_filter filter = {};
3189	int err;
3190
3191	err = nh_valid_dump_req(cb->nlh, &filter, cb);
3192	if (err < 0)
3193		return err;
3194
3195	err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
3196				     &rtm_dump_nexthop_cb, &filter);
3197	if (err < 0) {
3198		if (likely(skb->len))
3199			goto out;
3200		goto out_err;
3201	}
3202
3203out:
3204	err = skb->len;
3205out_err:
3206	cb->seq = net->nexthop.seq;
3207	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3208	return err;
3209}
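
/* Note on the dump protocol above: returning skb->len tells the netlink
 * core whether to call back with a fresh skb (non-zero) or to terminate
 * the dump (zero, once the walk adds no more messages). The cb->seq /
 * nl_dump_check_consistent() pair marks messages with NLM_F_DUMP_INTR
 * when net->nexthop.seq changed mid-dump, i.e. the nexthop set was
 * modified while the dump was in progress.
 */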
3210
3211static struct nexthop *
3212nexthop_find_group_resilient(struct net *net, u32 id,
3213			     struct netlink_ext_ack *extack)
3214{
3215	struct nh_group *nhg;
3216	struct nexthop *nh;
3217
3218	nh = nexthop_find_by_id(net, id);
3219	if (!nh)
3220		return ERR_PTR(-ENOENT);
3221
3222	if (!nh->is_group) {
3223		NL_SET_ERR_MSG(extack, "Not a nexthop group");
3224		return ERR_PTR(-EINVAL);
3225	}
3226
3227	nhg = rtnl_dereference(nh->nh_grp);
3228	if (!nhg->resilient) {
3229		NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
3230		return ERR_PTR(-EINVAL);
3231	}
3232
3233	return nh;
3234}
3235
3236static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
3237			      struct netlink_ext_ack *extack)
3238{
3239	u32 idx;
3240
3241	if (attr) {
3242		idx = nla_get_u32(attr);
3243		if (!idx) {
3244			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3245			return -EINVAL;
3246		}
3247		*nh_id_p = idx;
3248	} else {
3249		*nh_id_p = 0;
3250	}
3251
3252	return 0;
3253}
3254
3255static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
3256				    struct nh_dump_filter *filter,
3257				    struct netlink_callback *cb)
3258{
3259	struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
3260	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
3261	int err;
3262
3263	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3264			  ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
3265			  rtm_nh_policy_dump_bucket, NULL);
3266	if (err < 0)
3267		return err;
3268
3269	err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
3270	if (err)
3271		return err;
3272
3273	if (tb[NHA_RES_BUCKET]) {
3274		size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;
3275
3276		err = nla_parse_nested(res_tb, max,
3277				       tb[NHA_RES_BUCKET],
3278				       rtm_nh_res_bucket_policy_dump,
3279				       cb->extack);
3280		if (err < 0)
3281			return err;
3282
3283		err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
3284					 &filter->res_bucket_nh_id,
3285					 cb->extack);
3286		if (err)
3287			return err;
3288	}
3289
3290	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3291}
3292
3293struct rtm_dump_res_bucket_ctx {
3294	struct rtm_dump_nh_ctx nh;
3295	u16 bucket_index;
3296	u32 done_nh_idx; /* 1 + the index of the last fully processed NH. */
3297};
3298
3299static struct rtm_dump_res_bucket_ctx *
3300rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
3301{
3302	struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;
3303
3304	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3305	return ctx;
3306}
3307
3308struct rtm_dump_nexthop_bucket_data {
3309	struct rtm_dump_res_bucket_ctx *ctx;
3310	struct nh_dump_filter filter;
3311};
3312
3313static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
3314				      struct netlink_callback *cb,
3315				      struct nexthop *nh,
3316				      struct rtm_dump_nexthop_bucket_data *dd)
3317{
3318	u32 portid = NETLINK_CB(cb->skb).portid;
3319	struct nhmsg *nhm = nlmsg_data(cb->nlh);
3320	struct nh_res_table *res_table;
3321	struct nh_group *nhg;
3322	u16 bucket_index;
3323	int err;
3324
3325	if (dd->ctx->nh.idx < dd->ctx->done_nh_idx)
3326		return 0;
3327
3328	nhg = rtnl_dereference(nh->nh_grp);
3329	res_table = rtnl_dereference(nhg->res_table);
3330	for (bucket_index = dd->ctx->bucket_index;
3331	     bucket_index < res_table->num_nh_buckets;
3332	     bucket_index++) {
3333		struct nh_res_bucket *bucket;
3334		struct nh_grp_entry *nhge;
3335
3336		bucket = &res_table->nh_buckets[bucket_index];
3337		nhge = rtnl_dereference(bucket->nh_entry);
3338		if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
3339			continue;
3340
3341		if (dd->filter.res_bucket_nh_id &&
3342		    dd->filter.res_bucket_nh_id != nhge->nh->id)
3343			continue;
3344
3345		err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
3346					 RTM_NEWNEXTHOPBUCKET, portid,
3347					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3348					 cb->extack);
3349		if (err < 0) {
3350			if (likely(skb->len))
3351				goto out;
3352			goto out_err;
3353		}
3354	}
3355
3356	dd->ctx->done_nh_idx = dd->ctx->nh.idx + 1;
3357	bucket_index = 0;
3358
3359out:
3360	err = skb->len;
3361out_err:
3362	dd->ctx->bucket_index = bucket_index;
3363	return err;
3364}
3365
3366static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
3367				      struct netlink_callback *cb,
3368				      struct nexthop *nh, void *data)
3369{
3370	struct rtm_dump_nexthop_bucket_data *dd = data;
3371	struct nh_group *nhg;
3372
3373	if (!nh->is_group)
3374		return 0;
3375
3376	nhg = rtnl_dereference(nh->nh_grp);
3377	if (!nhg->resilient)
3378		return 0;
3379
3380	return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
3381}
3382
3383/* rtnl */
3384static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
3385				   struct netlink_callback *cb)
3386{
3387	struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
3388	struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
3389	struct net *net = sock_net(skb->sk);
3390	struct nexthop *nh;
3391	int err;
3392
3393	err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
3394	if (err)
3395		return err;
3396
3397	if (dd.filter.nh_id) {
3398		nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
3399						  cb->extack);
3400		if (IS_ERR(nh))
3401			return PTR_ERR(nh);
3402		err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
3403	} else {
3404		struct rb_root *root = &net->nexthop.rb_root;
3405
3406		err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
3407					     &rtm_dump_nexthop_bucket_cb, &dd);
3408	}
3409
3410	if (err < 0) {
3411		if (likely(skb->len))
3412			goto out;
3413		goto out_err;
3414	}
3415
3416out:
3417	err = skb->len;
3418out_err:
3419	cb->seq = net->nexthop.seq;
3420	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3421	return err;
3422}
3423
3424static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
3425					      u16 *bucket_index,
3426					      struct netlink_ext_ack *extack)
3427{
3428	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
3429	int err;
3430
3431	err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
3432			       res, rtm_nh_res_bucket_policy_get, extack);
3433	if (err < 0)
3434		return err;
3435
3436	if (!tb[NHA_RES_BUCKET_INDEX]) {
3437		NL_SET_ERR_MSG(extack, "Bucket index is missing");
3438		return -EINVAL;
3439	}
3440
3441	*bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
3442	return 0;
3443}
3444
3445static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
3446				   u32 *id, u16 *bucket_index,
3447				   struct netlink_ext_ack *extack)
3448{
3449	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
3450	int err;
3451
3452	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3453			  ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
3454			  rtm_nh_policy_get_bucket, extack);
3455	if (err < 0)
3456		return err;
3457
3458	err = __nh_valid_get_del_req(nlh, tb, id, extack);
3459	if (err)
3460		return err;
3461
3462	if (!tb[NHA_RES_BUCKET]) {
3463		NL_SET_ERR_MSG(extack, "Bucket information is missing");
3464		return -EINVAL;
3465	}
3466
3467	err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
3468						 bucket_index, extack);
3469	if (err)
3470		return err;
3471
3472	return 0;
3473}
3474
3475/* rtnl */
3476static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3477				  struct netlink_ext_ack *extack)
3478{
3479	struct net *net = sock_net(in_skb->sk);
3480	struct nh_res_table *res_table;
3481	struct sk_buff *skb = NULL;
3482	struct nh_group *nhg;
3483	struct nexthop *nh;
3484	u16 bucket_index;
3485	int err;
3486	u32 id;
3487
3488	err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
3489	if (err)
3490		return err;
3491
3492	nh = nexthop_find_group_resilient(net, id, extack);
3493	if (IS_ERR(nh))
3494		return PTR_ERR(nh);
3495
3496	nhg = rtnl_dereference(nh->nh_grp);
3497	res_table = rtnl_dereference(nhg->res_table);
3498	if (bucket_index >= res_table->num_nh_buckets) {
3499		NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
3500		return -ENOENT;
3501	}
3502
3503	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3504	if (!skb)
3505		return -ENOBUFS;
3506
3507	err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
3508				 bucket_index, RTM_NEWNEXTHOPBUCKET,
3509				 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
3510				 0, extack);
3511	if (err < 0) {
3512		WARN_ON(err == -EMSGSIZE);
3513		goto errout_free;
3514	}
3515
3516	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3517
3518errout_free:
3519	kfree_skb(skb);
3520	return err;
3521}
3522
3523static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
3524{
3525	unsigned int hash = nh_dev_hashfn(dev->ifindex);
3526	struct net *net = dev_net(dev);
3527	struct hlist_head *head = &net->nexthop.devhash[hash];
3528	struct hlist_node *n;
3529	struct nh_info *nhi;
3530
3531	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
3532		if (nhi->fib_nhc.nhc_dev == dev) {
3533			if (nhi->family == AF_INET)
3534				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
3535						   orig_mtu);
3536		}
3537	}
3538}
3539
3540/* rtnl */
3541static int nh_netdev_event(struct notifier_block *this,
3542			   unsigned long event, void *ptr)
3543{
3544	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3545	struct netdev_notifier_info_ext *info_ext;
3546
3547	switch (event) {
3548	case NETDEV_DOWN:
3549	case NETDEV_UNREGISTER:
3550		nexthop_flush_dev(dev, event);
3551		break;
3552	case NETDEV_CHANGE:
3553		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
3554			nexthop_flush_dev(dev, event);
3555		break;
3556	case NETDEV_CHANGEMTU:
3557		info_ext = ptr;
3558		nexthop_sync_mtu(dev, info_ext->ext.mtu);
3559		rt_cache_flush(dev_net(dev));
3560		break;
3561	}
3562	return NOTIFY_DONE;
3563}
3564
3565static struct notifier_block nh_netdev_notifier = {
3566	.notifier_call = nh_netdev_event,
3567};
3568
3569static int nexthops_dump(struct net *net, struct notifier_block *nb,
3570			 enum nexthop_event_type event_type,
3571			 struct netlink_ext_ack *extack)
3572{
3573	struct rb_root *root = &net->nexthop.rb_root;
3574	struct rb_node *node;
3575	int err = 0;
3576
3577	for (node = rb_first(root); node; node = rb_next(node)) {
3578		struct nexthop *nh;
3579
3580		nh = rb_entry(node, struct nexthop, rb_node);
3581		err = call_nexthop_notifier(nb, net, event_type, nh, extack);
3582		if (err)
3583			break;
3584	}
3585
3586	return err;
3587}
3588
3589int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
3590			      struct netlink_ext_ack *extack)
3591{
3592	int err;
3593
3594	rtnl_lock();
3595	err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
3596	if (err)
3597		goto unlock;
3598	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
3599					       nb);
3600unlock:
3601	rtnl_unlock();
3602	return err;
3603}
3604EXPORT_SYMBOL(register_nexthop_notifier);
3605
3606int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3607{
3608	int err;
3609
3610	rtnl_lock();
3611	err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
3612						 nb);
3613	if (err)
3614		goto unlock;
3615	nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
3616unlock:
3617	rtnl_unlock();
3618	return err;
3619}
3620EXPORT_SYMBOL(unregister_nexthop_notifier);
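
/* Illustrative sketch, not part of the original file: minimal use of the
 * two entry points exported above. example_nh_event() is the hypothetical
 * handler sketched after replace_nexthop_grp(); the other names here are
 * likewise hypothetical.
 */
static struct notifier_block example_nb = {
	.notifier_call = example_nh_event,
};

static int example_attach(struct net *net, struct netlink_ext_ack *extack)
{
	/* Replays every existing nexthop to example_nh_event() as a
	 * NEXTHOP_EVENT_REPLACE (via nexthops_dump()) before returning.
	 */
	return register_nexthop_notifier(net, &example_nb, extack);
}

static void example_detach(struct net *net)
{
	/* Replays NEXTHOP_EVENT_DEL for all nexthops after unregistering. */
	unregister_nexthop_notifier(net, &example_nb);
}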
3621
3622void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
3623{
3624	struct nexthop *nexthop;
3625
3626	rcu_read_lock();
3627
3628	nexthop = nexthop_find_by_id(net, id);
3629	if (!nexthop)
3630		goto out;
3631
3632	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3633	if (offload)
3634		nexthop->nh_flags |= RTNH_F_OFFLOAD;
3635	if (trap)
3636		nexthop->nh_flags |= RTNH_F_TRAP;
3637
3638out:
3639	rcu_read_unlock();
3640}
3641EXPORT_SYMBOL(nexthop_set_hw_flags);
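
/* Illustrative usage: once a driver has programmed nexthop 'id' into
 * hardware, it reports the outcome so that "ip nexthop show" can display
 * the offload/trap state, e.g.:
 *
 *	nexthop_set_hw_flags(net, id, true, false);	(offloaded)
 *	nexthop_set_hw_flags(net, id, false, true);	(trapped to CPU)
 */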
3642
3643void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
3644				 bool offload, bool trap)
3645{
3646	struct nh_res_table *res_table;
3647	struct nh_res_bucket *bucket;
3648	struct nexthop *nexthop;
3649	struct nh_group *nhg;
3650
3651	rcu_read_lock();
3652
3653	nexthop = nexthop_find_by_id(net, id);
3654	if (!nexthop || !nexthop->is_group)
3655		goto out;
3656
3657	nhg = rcu_dereference(nexthop->nh_grp);
3658	if (!nhg->resilient)
3659		goto out;
3660
3661	res_table = rcu_dereference(nhg->res_table);
3662	if (bucket_index >= res_table->num_nh_buckets)
3663		goto out;
3664
3665	bucket = &res_table->nh_buckets[bucket_index];
3666	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3667	if (offload)
3668		bucket->nh_flags |= RTNH_F_OFFLOAD;
3669	if (trap)
3670		bucket->nh_flags |= RTNH_F_TRAP;
3671
3672out:
3673	rcu_read_unlock();
3674}
3675EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
3676
3677void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
3678				     unsigned long *activity)
3679{
3680	struct nh_res_table *res_table;
3681	struct nexthop *nexthop;
3682	struct nh_group *nhg;
3683	u16 i;
3684
3685	rcu_read_lock();
3686
3687	nexthop = nexthop_find_by_id(net, id);
3688	if (!nexthop || !nexthop->is_group)
3689		goto out;
3690
3691	nhg = rcu_dereference(nexthop->nh_grp);
3692	if (!nhg->resilient)
3693		goto out;
3694
3695	/* Instead of silently ignoring some buckets, demand that the sizes
3696	 * be the same.
3697	 */
3698	res_table = rcu_dereference(nhg->res_table);
3699	if (num_buckets != res_table->num_nh_buckets)
3700		goto out;
3701
3702	for (i = 0; i < num_buckets; i++) {
3703		if (test_bit(i, activity))
3704			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
3705	}
3706
3707out:
3708	rcu_read_unlock();
3709}
3710EXPORT_SYMBOL(nexthop_res_grp_activity_update);
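
/* Illustrative sketch, not part of the original file: a driver
 * periodically queries per-bucket activity from its hardware and reports
 * it here, so that genuinely idle buckets can be migrated by the upkeep
 * logic. Every name below except nexthop_res_grp_activity_update() is
 * hypothetical.
 */
static void example_report_activity(struct net *net, u32 nhg_id,
				    u16 num_buckets)
{
	unsigned long *activity;

	activity = bitmap_zalloc(num_buckets, GFP_KERNEL);
	if (!activity)
		return;

	/* Hypothetical helper: sets one bit per bucket that the device
	 * forwarded traffic through since the previous query.
	 */
	example_hw_query_activity(activity, num_buckets);

	nexthop_res_grp_activity_update(net, nhg_id, num_buckets, activity);
	bitmap_free(activity);
}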
3711
3712static void __net_exit nexthop_net_exit(struct net *net)
3713{
3714	rtnl_lock();
3715	flush_all_nexthops(net);
3716	rtnl_unlock();
3717	kfree(net->nexthop.devhash);
3718}
3719
3720static int __net_init nexthop_net_init(struct net *net)
3721{
3722	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;
3723
3724	net->nexthop.rb_root = RB_ROOT;
3725	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
3726	if (!net->nexthop.devhash)
3727		return -ENOMEM;
3728	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);
3729
3730	return 0;
3731}
3732
3733static struct pernet_operations nexthop_net_ops = {
3734	.init = nexthop_net_init,
3735	.exit = nexthop_net_exit,
3736};
3737
3738static int __init nexthop_init(void)
3739{
3740	register_pernet_subsys(&nexthop_net_ops);
3741
3742	register_netdevice_notifier(&nh_netdev_notifier);
3743
3744	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
3745	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
3746	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
3747		      rtm_dump_nexthop, 0);
3748
3749	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
3750	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
3751
3752	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
3753	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
3754
3755	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOPBUCKET, rtm_get_nexthop_bucket,
3756		      rtm_dump_nexthop_bucket, 0);
3757
3758	return 0;
3759}
3760subsys_initcall(nexthop_init);
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/* Generic nexthop implementation
   3 *
   4 * Copyright (c) 2017-19 Cumulus Networks
   5 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
   6 */
   7
   8#include <linux/nexthop.h>
   9#include <linux/rtnetlink.h>
  10#include <linux/slab.h>
  11#include <linux/vmalloc.h>
  12#include <net/arp.h>
  13#include <net/ipv6_stubs.h>
  14#include <net/lwtunnel.h>
  15#include <net/ndisc.h>
  16#include <net/nexthop.h>
  17#include <net/route.h>
  18#include <net/sock.h>
  19
  20#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
  21#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */
  22
  23static void remove_nexthop(struct net *net, struct nexthop *nh,
  24			   struct nl_info *nlinfo);
  25
  26#define NH_DEV_HASHBITS  8
  27#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
  28
  29#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS |		\
  30			       NHA_OP_FLAG_DUMP_HW_STATS)
  31
  32static const struct nla_policy rtm_nh_policy_new[] = {
  33	[NHA_ID]		= { .type = NLA_U32 },
  34	[NHA_GROUP]		= { .type = NLA_BINARY },
  35	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
  36	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
  37	[NHA_OIF]		= { .type = NLA_U32 },
  38	[NHA_GATEWAY]		= { .type = NLA_BINARY },
  39	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
  40	[NHA_ENCAP]		= { .type = NLA_NESTED },
  41	[NHA_FDB]		= { .type = NLA_FLAG },
  42	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
  43	[NHA_HW_STATS_ENABLE]	= NLA_POLICY_MAX(NLA_U32, true),
  44};
  45
  46static const struct nla_policy rtm_nh_policy_get[] = {
  47	[NHA_ID]		= { .type = NLA_U32 },
  48	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
  49						  NHA_OP_FLAGS_DUMP_ALL),
  50};
  51
  52static const struct nla_policy rtm_nh_policy_del[] = {
  53	[NHA_ID]		= { .type = NLA_U32 },
  54};
  55
  56static const struct nla_policy rtm_nh_policy_dump[] = {
  57	[NHA_OIF]		= { .type = NLA_U32 },
  58	[NHA_GROUPS]		= { .type = NLA_FLAG },
  59	[NHA_MASTER]		= { .type = NLA_U32 },
  60	[NHA_FDB]		= { .type = NLA_FLAG },
  61	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
  62						  NHA_OP_FLAGS_DUMP_ALL),
  63};
  64
  65static const struct nla_policy rtm_nh_res_policy_new[] = {
  66	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
  67	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
  68	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
  69};
  70
  71static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
  72	[NHA_ID]		= { .type = NLA_U32 },
  73	[NHA_OIF]		= { .type = NLA_U32 },
  74	[NHA_MASTER]		= { .type = NLA_U32 },
  75	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
  76};
  77
  78static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
  79	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
  80};
  81
  82static const struct nla_policy rtm_nh_policy_get_bucket[] = {
  83	[NHA_ID]		= { .type = NLA_U32 },
  84	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
  85};
  86
  87static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
  88	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
  89};
  90
  91static bool nexthop_notifiers_is_empty(struct net *net)
  92{
  93	return !net->nexthop.notifier_chain.head;
  94}
  95
  96static void
  97__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
  98			       const struct nh_info *nhi)
  99{
 100	nh_info->dev = nhi->fib_nhc.nhc_dev;
 101	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
 102	if (nh_info->gw_family == AF_INET)
 103		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
 104	else if (nh_info->gw_family == AF_INET6)
 105		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;
 106
 107	nh_info->id = nhi->nh_parent->id;
 108	nh_info->is_reject = nhi->reject_nh;
 109	nh_info->is_fdb = nhi->fdb_nh;
 110	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
 111}
 112
 113static int nh_notifier_single_info_init(struct nh_notifier_info *info,
 114					const struct nexthop *nh)
 115{
 116	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
 117
 118	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
 119	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
 120	if (!info->nh)
 121		return -ENOMEM;
 122
 123	__nh_notifier_single_info_init(info->nh, nhi);
 124
 125	return 0;
 126}
 127
 128static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
 129{
 130	kfree(info->nh);
 131}
 132
 133static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
 134				       struct nh_group *nhg)
 135{
 136	u16 num_nh = nhg->num_nh;
 137	int i;
 138
 139	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
 140	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
 141			       GFP_KERNEL);
 142	if (!info->nh_grp)
 143		return -ENOMEM;
 144
 145	info->nh_grp->num_nh = num_nh;
 146	info->nh_grp->is_fdb = nhg->fdb_nh;
 147	info->nh_grp->hw_stats = nhg->hw_stats;
 148
 149	for (i = 0; i < num_nh; i++) {
 150		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
 151		struct nh_info *nhi;
 152
 153		nhi = rtnl_dereference(nhge->nh->nh_info);
 
 154		info->nh_grp->nh_entries[i].weight = nhge->weight;
 155		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
 156					       nhi);
 157	}
 158
 159	return 0;
 160}
 161
 162static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
 163					   struct nh_group *nhg)
 164{
 165	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
 166	u16 num_nh_buckets = res_table->num_nh_buckets;
 167	unsigned long size;
 168	u16 i;
 169
 170	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
 171	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
 172	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
 173				       __GFP_NOWARN);
 174	if (!info->nh_res_table)
 175		return -ENOMEM;
 176
 177	info->nh_res_table->num_nh_buckets = num_nh_buckets;
 178	info->nh_res_table->hw_stats = nhg->hw_stats;
 179
 180	for (i = 0; i < num_nh_buckets; i++) {
 181		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
 182		struct nh_grp_entry *nhge;
 183		struct nh_info *nhi;
 184
 185		nhge = rtnl_dereference(bucket->nh_entry);
 186		nhi = rtnl_dereference(nhge->nh->nh_info);
 187		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
 188					       nhi);
 189	}
 190
 191	return 0;
 192}
 193
 194static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
 195				     const struct nexthop *nh)
 196{
 197	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
 198
 199	if (nhg->hash_threshold)
 200		return nh_notifier_mpath_info_init(info, nhg);
 201	else if (nhg->resilient)
 202		return nh_notifier_res_table_info_init(info, nhg);
 203	return -EINVAL;
 204}
 205
 206static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
 207				      const struct nexthop *nh)
 208{
 209	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
 210
 211	if (nhg->hash_threshold)
 212		kfree(info->nh_grp);
 213	else if (nhg->resilient)
 214		vfree(info->nh_res_table);
 215}
 216
 217static int nh_notifier_info_init(struct nh_notifier_info *info,
 218				 const struct nexthop *nh)
 219{
 220	info->id = nh->id;
 221
 222	if (nh->is_group)
 223		return nh_notifier_grp_info_init(info, nh);
 224	else
 225		return nh_notifier_single_info_init(info, nh);
 226}
 227
 228static void nh_notifier_info_fini(struct nh_notifier_info *info,
 229				  const struct nexthop *nh)
 230{
 231	if (nh->is_group)
 232		nh_notifier_grp_info_fini(info, nh);
 233	else
 234		nh_notifier_single_info_fini(info);
 235}
 236
 237static int call_nexthop_notifiers(struct net *net,
 238				  enum nexthop_event_type event_type,
 239				  struct nexthop *nh,
 240				  struct netlink_ext_ack *extack)
 241{
 242	struct nh_notifier_info info = {
 243		.net = net,
 244		.extack = extack,
 245	};
 246	int err;
 247
 248	ASSERT_RTNL();
 249
 250	if (nexthop_notifiers_is_empty(net))
 251		return 0;
 252
 253	err = nh_notifier_info_init(&info, nh);
 254	if (err) {
 255		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
 256		return err;
 257	}
 258
 259	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
 260					   event_type, &info);
 261	nh_notifier_info_fini(&info, nh);
 262
 263	return notifier_to_errno(err);
 264}
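/* Example (illustrative sketch, not part of this file): a driver-side
 * listener for these events. Names prefixed "example_" are hypothetical;
 * registration itself would use the real register_nexthop_notifier() API.
 *
 *	static int example_nh_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *
 *		switch (event) {
 *		case NEXTHOP_EVENT_REPLACE:
 *			if (info->type != NH_NOTIFIER_INFO_TYPE_SINGLE)
 *				return NOTIFY_DONE;
 *			return notifier_from_errno(example_offload(info));
 *		case NEXTHOP_EVENT_DEL:
 *			example_unoffload(info->id);
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 */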
 265
 266static int
 267nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
 268				      bool force, unsigned int *p_idle_timer_ms)
 269{
 270	struct nh_res_table *res_table;
 271	struct nh_group *nhg;
 272	struct nexthop *nh;
 273	int err = 0;
 274
 275	/* When 'force' is false, nexthop bucket replacement is performed
 276	 * because the bucket was deemed to be idle. In this case, capable
 277	 * listeners can choose to perform an atomic replacement: the bucket is
 278	 * only replaced if it is inactive. However, if the idle timer interval
 279	 * is smaller than the interval at which a listener queries the
 280	 * buckets' activity from the device, then atomic replacement should
 281	 * not be tried. Pass the idle timer value to listeners, so that they
 282	 * can determine which type of replacement to perform.
 283	 */
 284	if (force) {
 285		*p_idle_timer_ms = 0;
 286		return 0;
 287	}
 288
 289	rcu_read_lock();
 290
 291	nh = nexthop_find_by_id(info->net, info->id);
 292	if (!nh) {
 293		err = -EINVAL;
 294		goto out;
 295	}
 296
 297	nhg = rcu_dereference(nh->nh_grp);
 298	res_table = rcu_dereference(nhg->res_table);
 299	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);
 300
 301out:
 302	rcu_read_unlock();
 303
 304	return err;
 305}
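/* Illustrative sketch of how a listener might act on the value computed
 * above; "example_poll_interval_ms" and both helpers are hypothetical:
 *
 *	res = info->nh_res_bucket;
 *	if (!res->force && res->idle_timer_ms > example_poll_interval_ms)
 *		return example_replace_atomically(info);
 *	return example_replace_unconditionally(info);
 */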
 306
 307static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
 308					    u16 bucket_index, bool force,
 309					    struct nh_info *oldi,
 310					    struct nh_info *newi)
 311{
 312	unsigned int idle_timer_ms;
 313	int err;
 314
 315	err = nh_notifier_res_bucket_idle_timer_get(info, force,
 316						    &idle_timer_ms);
 317	if (err)
 318		return err;
 319
 320	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
 321	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
 322				      GFP_KERNEL);
 323	if (!info->nh_res_bucket)
 324		return -ENOMEM;
 325
 326	info->nh_res_bucket->bucket_index = bucket_index;
 327	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
 328	info->nh_res_bucket->force = force;
 329	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
 330	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
 331	return 0;
 332}
 333
 334static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
 335{
 336	kfree(info->nh_res_bucket);
 337}
 338
 339static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
 340					       u16 bucket_index, bool force,
 341					       struct nh_info *oldi,
 342					       struct nh_info *newi,
 343					       struct netlink_ext_ack *extack)
 344{
 345	struct nh_notifier_info info = {
 346		.net = net,
 347		.extack = extack,
 348		.id = nhg_id,
 349	};
 350	int err;
 351
 352	if (nexthop_notifiers_is_empty(net))
 353		return 0;
 354
 355	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
 356					       oldi, newi);
 357	if (err)
 358		return err;
 359
 360	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
 361					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
 362	nh_notifier_res_bucket_info_fini(&info);
 363
 364	return notifier_to_errno(err);
 365}
 366
 367/* There are three users of RES_TABLE, and of the NHs etc. referenced from it:
 368 *
 369 * 1) a collection of callbacks for NH maintenance. This operates under
 370 *    RTNL,
 371 * 2) the delayed work that gradually balances the resilient table,
 372 * 3) and nexthop_select_path(), operating under RCU.
 373 *
 374 * Both the delayed work and the RTNL-protected code are writers, and
 375 * need to maintain mutual exclusion. Since each table has only these
 376 * two, well-known writers, the RTNL code can ensure it has exclusive
 377 * access thus:
 378 *
 379 * - Have the DW operate without locking;
 380 * - synchronously cancel the DW;
 381 * - do the writing;
 382 * - if the write was not actually a delete, call upkeep, which schedules
 383 *   DW again if necessary.
 384 *
 385 * The functions that are always called from the RTNL context use
 386 * rtnl_dereference(). The functions that can also be called from the DW do
 387 * a raw dereference and rely on the above mutual exclusion scheme.
 388 */
 389#define nh_res_dereference(p) (rcu_dereference_raw(p))
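/* Illustrative condensed view of the RTNL-side write pattern described
 * above, as implemented by e.g. replace_nexthop_grp_res() below:
 *
 *	nh_res_table_cancel_upkeep(res_table);	// synchronously cancel DW
 *	...rebalance / migrate buckets...	// now the exclusive writer
 *	nh_res_table_upkeep(res_table, ...);	// reschedules DW if needed
 */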
 390
 391static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
 392					     u16 bucket_index, bool force,
 393					     struct nexthop *old_nh,
 394					     struct nexthop *new_nh,
 395					     struct netlink_ext_ack *extack)
 396{
 397	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
 398	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);
 399
 400	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
 401						   force, oldi, newi, extack);
 402}
 403
 404static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
 405					    struct netlink_ext_ack *extack)
 406{
 407	struct nh_notifier_info info = {
 408		.net = net,
 409		.extack = extack,
 410		.id = nh->id,
 411	};
 412	struct nh_group *nhg;
 413	int err;
 414
 415	ASSERT_RTNL();
 416
 417	if (nexthop_notifiers_is_empty(net))
 418		return 0;
 419
 420	/* At this point, the nexthop buckets are not yet populated. Only
 421	 * emit a notification with the logical nexthops, so that a listener
 422	 * can veto it in case of an unsupported configuration.
 423	 */
 424	nhg = rtnl_dereference(nh->nh_grp);
 425	err = nh_notifier_mpath_info_init(&info, nhg);
 426	if (err) {
 427		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
 428		return err;
 429	}
 430
 431	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
 432					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
 433					   &info);
 434	kfree(info.nh_grp);
 435
 436	return notifier_to_errno(err);
 437}
 438
 439static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
 440				 enum nexthop_event_type event_type,
 441				 struct nexthop *nh,
 442				 struct netlink_ext_ack *extack)
 443{
 444	struct nh_notifier_info info = {
 445		.net = net,
 446		.extack = extack,
 447	};
 448	int err;
 449
 450	err = nh_notifier_info_init(&info, nh);
 451	if (err)
 452		return err;
 453
 454	err = nb->notifier_call(nb, event_type, &info);
 455	nh_notifier_info_fini(&info, nh);
 456
 457	return notifier_to_errno(err);
 458}
 459
 460static unsigned int nh_dev_hashfn(unsigned int val)
 461{
 462	unsigned int mask = NH_DEV_HASHSIZE - 1;
 463
 464	return (val ^
 465		(val >> NH_DEV_HASHBITS) ^
 466		(val >> (NH_DEV_HASHBITS * 2))) & mask;
 467}
 468
 469static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
 470{
 471	struct net_device *dev = nhi->fib_nhc.nhc_dev;
 472	struct hlist_head *head;
 473	unsigned int hash;
 474
 475	WARN_ON(!dev);
 476
 477	hash = nh_dev_hashfn(dev->ifindex);
 478	head = &net->nexthop.devhash[hash];
 479	hlist_add_head(&nhi->dev_hash, head);
 480}
 481
 482static void nexthop_free_group(struct nexthop *nh)
 483{
 484	struct nh_group *nhg;
 485	int i;
 486
 487	nhg = rcu_dereference_raw(nh->nh_grp);
 488	for (i = 0; i < nhg->num_nh; ++i) {
 489		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
 490
 491		WARN_ON(!list_empty(&nhge->nh_list));
 492		free_percpu(nhge->stats);
 493		nexthop_put(nhge->nh);
 494	}
 495
 496	WARN_ON(nhg->spare == nhg);
 497
 498	if (nhg->resilient)
 499		vfree(rcu_dereference_raw(nhg->res_table));
 500
 501	kfree(nhg->spare);
 502	kfree(nhg);
 503}
 504
 505static void nexthop_free_single(struct nexthop *nh)
 506{
 507	struct nh_info *nhi;
 508
 509	nhi = rcu_dereference_raw(nh->nh_info);
 510	switch (nhi->family) {
 511	case AF_INET:
 512		fib_nh_release(nh->net, &nhi->fib_nh);
 513		break;
 514	case AF_INET6:
 515		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
 516		break;
 517	}
 518	kfree(nhi);
 519}
 520
 521void nexthop_free_rcu(struct rcu_head *head)
 522{
 523	struct nexthop *nh = container_of(head, struct nexthop, rcu);
 524
 525	if (nh->is_group)
 526		nexthop_free_group(nh);
 527	else
 528		nexthop_free_single(nh);
 529
 530	kfree(nh);
 531}
 532EXPORT_SYMBOL_GPL(nexthop_free_rcu);
 533
 534static struct nexthop *nexthop_alloc(void)
 535{
 536	struct nexthop *nh;
 537
 538	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
 539	if (nh) {
 540		INIT_LIST_HEAD(&nh->fi_list);
 541		INIT_LIST_HEAD(&nh->f6i_list);
 542		INIT_LIST_HEAD(&nh->grp_list);
 543		INIT_LIST_HEAD(&nh->fdb_list);
 544	}
 545	return nh;
 546}
 547
 548static struct nh_group *nexthop_grp_alloc(u16 num_nh)
 549{
 550	struct nh_group *nhg;
 551
 552	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
 553	if (nhg)
 554		nhg->num_nh = num_nh;
 555
 556	return nhg;
 557}
 558
 559static void nh_res_table_upkeep_dw(struct work_struct *work);
 560
 561static struct nh_res_table *
 562nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
 563{
 564	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
 565	struct nh_res_table *res_table;
 566	unsigned long size;
 567
 568	size = struct_size(res_table, nh_buckets, num_nh_buckets);
 569	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
 570	if (!res_table)
 571		return NULL;
 572
 573	res_table->net = net;
 574	res_table->nhg_id = nhg_id;
 575	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
 576	INIT_LIST_HEAD(&res_table->uw_nh_entries);
 577	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
 578	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
 579	res_table->num_nh_buckets = num_nh_buckets;
 580	return res_table;
 581}
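/* For reference (illustrative values): user space reaches this path with
 * iproute2 along the lines of
 *
 *	ip nexthop add id 10 group 1/2 type resilient buckets 8 \
 *		idle_timer 60 unbalanced_timer 300
 *
 * where "buckets" becomes cfg->nh_grp_res_num_buckets and the timers
 * arrive as clock_t values that the config parsing converts to jiffies.
 */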
 582
 583static void nh_base_seq_inc(struct net *net)
 584{
 585	while (++net->nexthop.seq == 0)
 586		;
 587}
 588
 589/* no reference taken; rcu lock or rtnl must be held */
 590struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
 591{
 592	struct rb_node **pp, *parent = NULL, *next;
 593
 594	pp = &net->nexthop.rb_root.rb_node;
 595	while (1) {
 596		struct nexthop *nh;
 597
 598		next = rcu_dereference_raw(*pp);
 599		if (!next)
 600			break;
 601		parent = next;
 602
 603		nh = rb_entry(parent, struct nexthop, rb_node);
 604		if (id < nh->id)
 605			pp = &next->rb_left;
 606		else if (id > nh->id)
 607			pp = &next->rb_right;
 608		else
 609			return nh;
 610	}
 611	return NULL;
 612}
 613EXPORT_SYMBOL_GPL(nexthop_find_by_id);
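/* Example usage (illustrative): resolve an ID under RCU, and do not cache
 * the pointer past the critical section without taking a reference:
 *
 *	rcu_read_lock();
 *	nh = nexthop_find_by_id(net, id);
 *	if (nh)
 *		... use nh ...
 *	rcu_read_unlock();
 */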
 614
 615/* used for auto id allocation; called with rtnl held */
 616static u32 nh_find_unused_id(struct net *net)
 617{
 618	u32 id_start = net->nexthop.last_id_allocated;
 619
 620	while (1) {
 621		net->nexthop.last_id_allocated++;
 622		if (net->nexthop.last_id_allocated == id_start)
 623			break;
 624
 625		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
 626			return net->nexthop.last_id_allocated;
 627	}
 628	return 0;
 629}
 630
 631static void nh_res_time_set_deadline(unsigned long next_time,
 632				     unsigned long *deadline)
 633{
 634	if (time_before(next_time, *deadline))
 635		*deadline = next_time;
 636}
 637
 638static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
 639{
 640	if (list_empty(&res_table->uw_nh_entries))
 641		return 0;
 642	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
 643}
 644
 645static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
 646{
 647	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
 648	struct nlattr *nest;
 649
 650	nest = nla_nest_start(skb, NHA_RES_GROUP);
 651	if (!nest)
 652		return -EMSGSIZE;
 653
 654	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
 655			res_table->num_nh_buckets) ||
 656	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
 657			jiffies_to_clock_t(res_table->idle_timer)) ||
 658	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
 659			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
 660	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
 661			      nh_res_table_unbalanced_time(res_table),
 662			      NHA_RES_GROUP_PAD))
 663		goto nla_put_failure;
 664
 665	nla_nest_end(skb, nest);
 666	return 0;
 667
 668nla_put_failure:
 669	nla_nest_cancel(skb, nest);
 670	return -EMSGSIZE;
 671}
 672
 673static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
 674{
 675	struct nh_grp_entry_stats *cpu_stats;
 676
 677	cpu_stats = get_cpu_ptr(nhge->stats);
 678	u64_stats_update_begin(&cpu_stats->syncp);
 679	u64_stats_inc(&cpu_stats->packets);
 680	u64_stats_update_end(&cpu_stats->syncp);
 681	put_cpu_ptr(cpu_stats);
 682}
 683
 684static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
 685				    u64 *ret_packets)
 686{
 687	int i;
 688
 689	*ret_packets = 0;
 690
 691	for_each_possible_cpu(i) {
 692		struct nh_grp_entry_stats *cpu_stats;
 693		unsigned int start;
 694		u64 packets;
 695
 696		cpu_stats = per_cpu_ptr(nhge->stats, i);
 697		do {
 698			start = u64_stats_fetch_begin(&cpu_stats->syncp);
 699			packets = u64_stats_read(&cpu_stats->packets);
 700		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
 701
 702		*ret_packets += packets;
 703	}
 704}
 705
 706static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
 707					 const struct nexthop *nh)
 708{
 709	struct nh_group *nhg;
 710	int i;
 711
 712	ASSERT_RTNL();
 713	nhg = rtnl_dereference(nh->nh_grp);
 714
 715	info->id = nh->id;
 716	info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
 717	info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
 718						    stats, nhg->num_nh),
 719					GFP_KERNEL);
 720	if (!info->nh_grp_hw_stats)
 721		return -ENOMEM;
 722
 723	info->nh_grp_hw_stats->num_nh = nhg->num_nh;
 724	for (i = 0; i < nhg->num_nh; i++) {
 725		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
 726
 727		info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
 728	}
 729
 730	return 0;
 731}
 732
 733static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
 734{
 735	kfree(info->nh_grp_hw_stats);
 736}
 737
 738void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
 739				  unsigned int nh_idx,
 740				  u64 delta_packets)
 741{
 742	info->hw_stats_used = true;
 743	info->stats[nh_idx].packets += delta_packets;
 744}
 745EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);
 746
 747static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
 748					 struct nh_notifier_info *info)
 749{
 750	struct nh_group *nhg;
 751	int i;
 752
 753	ASSERT_RTNL();
 754	nhg = rtnl_dereference(nh->nh_grp);
 755
 756	for (i = 0; i < nhg->num_nh; i++) {
 757		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
 758
 759		nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
 760	}
 761}
 762
 763static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
 764{
 765	struct nh_notifier_info info = {
 766		.net = nh->net,
 767	};
 768	struct net *net = nh->net;
 769	int err;
 770
 771	if (nexthop_notifiers_is_empty(net)) {
 772		*hw_stats_used = false;
 773		return 0;
 774	}
 775
 776	err = nh_notifier_grp_hw_stats_init(&info, nh);
 777	if (err)
 778		return err;
 779
 780	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
 781					   NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
 782					   &info);
 783
 784	/* Cache whatever we got, even if there was an error, otherwise the
 785	 * successful stats retrievals would get lost.
 786	 */
 787	nh_grp_hw_stats_apply_update(nh, &info);
 788	*hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;
 789
 790	nh_notifier_grp_hw_stats_fini(&info);
 791	return notifier_to_errno(err);
 792}
 793
 794static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
 795					struct nh_grp_entry *nhge,
 796					u32 op_flags)
 797{
 798	struct nlattr *nest;
 799	u64 packets;
 800
 801	nh_grp_entry_stats_read(nhge, &packets);
 802
 803	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
 804	if (!nest)
 805		return -EMSGSIZE;
 806
 807	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
 808	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
 809			 packets + nhge->packets_hw))
 810		goto nla_put_failure;
 811
 812	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
 813	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
 814			 nhge->packets_hw))
 815		goto nla_put_failure;
 816
 817	nla_nest_end(skb, nest);
 818	return 0;
 819
 820nla_put_failure:
 821	nla_nest_cancel(skb, nest);
 822	return -EMSGSIZE;
 823}
 824
 825static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
 826				  u32 op_flags)
 827{
 828	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
 829	struct nlattr *nest;
 830	bool hw_stats_used;
 831	int err;
 832	int i;
 833
 834	if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
 835		goto err_out;
 836
 837	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
 838	    nhg->hw_stats) {
 839		err = nh_grp_hw_stats_update(nh, &hw_stats_used);
 840		if (err)
 841			goto out;
 842
 843		if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
 844			goto err_out;
 845	}
 846
 847	nest = nla_nest_start(skb, NHA_GROUP_STATS);
 848	if (!nest)
 849		goto err_out;
 850
 851	for (i = 0; i < nhg->num_nh; i++)
 852		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
 853						 op_flags))
 854			goto cancel_out;
 855
 856	nla_nest_end(skb, nest);
 857	return 0;
 858
 859cancel_out:
 860	nla_nest_cancel(skb, nest);
 861err_out:
 862	err = -EMSGSIZE;
 863out:
 864	return err;
 865}
 866
 867static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
 868			    u32 op_flags)
 869{
 870	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
 871	struct nexthop_grp *p;
 872	size_t len = nhg->num_nh * sizeof(*p);
 873	struct nlattr *nla;
 874	u16 group_type = 0;
 875	int i;
 876
 877	if (nhg->hash_threshold)
 878		group_type = NEXTHOP_GRP_TYPE_MPATH;
 879	else if (nhg->resilient)
 880		group_type = NEXTHOP_GRP_TYPE_RES;
 881
 882	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
 883		goto nla_put_failure;
 884
 885	nla = nla_reserve(skb, NHA_GROUP, len);
 886	if (!nla)
 887		goto nla_put_failure;
 888
 889	p = nla_data(nla);
 890	for (i = 0; i < nhg->num_nh; ++i) {
 891		p->id = nhg->nh_entries[i].nh->id;
 892		p->weight = nhg->nh_entries[i].weight - 1;
 893		p += 1;
 894	}
 895
 896	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
 897		goto nla_put_failure;
 898
 899	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
 900	    (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
 901	     nla_put_nh_group_stats(skb, nh, op_flags)))
 902		goto nla_put_failure;
 903
 904	return 0;
 905
 906nla_put_failure:
 907	return -EMSGSIZE;
 908}
 909
 910static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
 911			int event, u32 portid, u32 seq, unsigned int nlflags,
 912			u32 op_flags)
 913{
 914	struct fib6_nh *fib6_nh;
 915	struct fib_nh *fib_nh;
 916	struct nlmsghdr *nlh;
 917	struct nh_info *nhi;
 918	struct nhmsg *nhm;
 919
 920	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
 921	if (!nlh)
 922		return -EMSGSIZE;
 923
 924	nhm = nlmsg_data(nlh);
 925	nhm->nh_family = AF_UNSPEC;
 926	nhm->nh_flags = nh->nh_flags;
 927	nhm->nh_protocol = nh->protocol;
 928	nhm->nh_scope = 0;
 929	nhm->resvd = 0;
 930
 931	if (nla_put_u32(skb, NHA_ID, nh->id))
 932		goto nla_put_failure;
 933
 934	if (nh->is_group) {
 935		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
 936
 937		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
 938			goto nla_put_failure;
 939		if (nla_put_nh_group(skb, nh, op_flags))
 940			goto nla_put_failure;
 941		goto out;
 942	}
 943
 944	nhi = rtnl_dereference(nh->nh_info);
 945	nhm->nh_family = nhi->family;
 946	if (nhi->reject_nh) {
 947		if (nla_put_flag(skb, NHA_BLACKHOLE))
 948			goto nla_put_failure;
 949		goto out;
 950	} else if (nhi->fdb_nh) {
 951		if (nla_put_flag(skb, NHA_FDB))
 952			goto nla_put_failure;
 953	} else {
 954		const struct net_device *dev;
 955
 956		dev = nhi->fib_nhc.nhc_dev;
 957		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
 958			goto nla_put_failure;
 959	}
 960
 961	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
 962	switch (nhi->family) {
 963	case AF_INET:
 964		fib_nh = &nhi->fib_nh;
 965		if (fib_nh->fib_nh_gw_family &&
 966		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
 967			goto nla_put_failure;
 968		break;
 969
 970	case AF_INET6:
 971		fib6_nh = &nhi->fib6_nh;
 972		if (fib6_nh->fib_nh_gw_family &&
 973		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
 974			goto nla_put_failure;
 975		break;
 976	}
 977
 978	if (nhi->fib_nhc.nhc_lwtstate &&
 979	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
 980				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
 981		goto nla_put_failure;
 982
 983out:
 984	nlmsg_end(skb, nlh);
 985	return 0;
 986
 987nla_put_failure:
 988	nlmsg_cancel(skb, nlh);
 989	return -EMSGSIZE;
 990}
 991
 992static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
 993{
 994	return nla_total_size(0) +	/* NHA_RES_GROUP */
 995		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
 996		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
 997		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
 998		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
 999}
1000
1001static size_t nh_nlmsg_size_grp(struct nexthop *nh)
1002{
1003	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
1004	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
1005	size_t tot = nla_total_size(sz) +
1006		nla_total_size(2); /* NHA_GROUP_TYPE */
1007
1008	if (nhg->resilient)
1009		tot += nh_nlmsg_size_grp_res(nhg);
1010
1011	return tot;
1012}
1013
1014static size_t nh_nlmsg_size_single(struct nexthop *nh)
1015{
1016	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
1017	size_t sz;
1018
1019	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
1020	 * are mutually exclusive
1021	 */
1022	sz = nla_total_size(4);  /* NHA_OIF */
1023
1024	switch (nhi->family) {
1025	case AF_INET:
1026		if (nhi->fib_nh.fib_nh_gw_family)
1027			sz += nla_total_size(4);  /* NHA_GATEWAY */
1028		break;
1029
1030	case AF_INET6:
1031		/* NHA_GATEWAY */
1032		if (nhi->fib6_nh.fib_nh_gw_family)
1033			sz += nla_total_size(sizeof(const struct in6_addr));
1034		break;
1035	}
1036
1037	if (nhi->fib_nhc.nhc_lwtstate) {
1038		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
1039		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
1040	}
1041
1042	return sz;
1043}
1044
1045static size_t nh_nlmsg_size(struct nexthop *nh)
1046{
1047	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));
1048
1049	sz += nla_total_size(4); /* NHA_ID */
1050
1051	if (nh->is_group)
1052		sz += nh_nlmsg_size_grp(nh);
1053	else
1054		sz += nh_nlmsg_size_single(nh);
1055
1056	return sz;
1057}
1058
1059static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
1060{
1061	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
1062	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
1063	struct sk_buff *skb;
1064	int err = -ENOBUFS;
1065
1066	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
1067	if (!skb)
1068		goto errout;
1069
1070	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
1071	if (err < 0) {
1072		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
1073		WARN_ON(err == -EMSGSIZE);
1074		kfree_skb(skb);
1075		goto errout;
1076	}
1077
1078	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
1079		    info->nlh, gfp_any());
1080	return;
1081errout:
1082	if (err < 0)
1083		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
1084}
1085
1086static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
1087{
1088	return (unsigned long)atomic_long_read(&bucket->used_time);
1089}
1090
1091static unsigned long
1092nh_res_bucket_idle_point(const struct nh_res_table *res_table,
1093			 const struct nh_res_bucket *bucket,
1094			 unsigned long now)
1095{
1096	unsigned long time = nh_res_bucket_used_time(bucket);
1097
1098	/* Bucket was not used since it was migrated. The idle time is now. */
1099	if (time == bucket->migrated_time)
1100		return now;
1101
1102	return time + res_table->idle_timer;
1103}
1104
1105static unsigned long
1106nh_res_table_unb_point(const struct nh_res_table *res_table)
1107{
1108	return res_table->unbalanced_since + res_table->unbalanced_timer;
1109}
1110
1111static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
1112				   struct nh_res_bucket *bucket)
1113{
1114	unsigned long now = jiffies;
1115
1116	atomic_long_set(&bucket->used_time, (long)now);
1117	bucket->migrated_time = now;
1118}
1119
1120static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
1121{
1122	atomic_long_set(&bucket->used_time, (long)jiffies);
1123}
1124
1125static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
1126{
1127	unsigned long used_time = nh_res_bucket_used_time(bucket);
1128
1129	return jiffies_delta_to_clock_t(jiffies - used_time);
1130}
1131
1132static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
1133			      struct nh_res_bucket *bucket, u16 bucket_index,
1134			      int event, u32 portid, u32 seq,
1135			      unsigned int nlflags,
1136			      struct netlink_ext_ack *extack)
1137{
1138	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
1139	struct nlmsghdr *nlh;
1140	struct nlattr *nest;
1141	struct nhmsg *nhm;
1142
1143	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
1144	if (!nlh)
1145		return -EMSGSIZE;
1146
1147	nhm = nlmsg_data(nlh);
1148	nhm->nh_family = AF_UNSPEC;
1149	nhm->nh_flags = bucket->nh_flags;
1150	nhm->nh_protocol = nh->protocol;
1151	nhm->nh_scope = 0;
1152	nhm->resvd = 0;
1153
1154	if (nla_put_u32(skb, NHA_ID, nh->id))
1155		goto nla_put_failure;
1156
1157	nest = nla_nest_start(skb, NHA_RES_BUCKET);
1158	if (!nest)
1159		goto nla_put_failure;
1160
1161	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
1162	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
1163	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
1164			      nh_res_bucket_idle_time(bucket),
1165			      NHA_RES_BUCKET_PAD))
1166		goto nla_put_failure_nest;
1167
1168	nla_nest_end(skb, nest);
1169	nlmsg_end(skb, nlh);
1170	return 0;
1171
1172nla_put_failure_nest:
1173	nla_nest_cancel(skb, nest);
1174nla_put_failure:
1175	nlmsg_cancel(skb, nlh);
1176	return -EMSGSIZE;
1177}
1178
1179static void nexthop_bucket_notify(struct nh_res_table *res_table,
1180				  u16 bucket_index)
1181{
1182	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
1183	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
1184	struct nexthop *nh = nhge->nh_parent;
1185	struct sk_buff *skb;
1186	int err = -ENOBUFS;
1187
1188	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1189	if (!skb)
1190		goto errout;
1191
1192	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
1193				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
1194				 NULL);
1195	if (err < 0) {
1196		kfree_skb(skb);
1197		goto errout;
1198	}
1199
1200	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
1201	return;
1202errout:
1203	if (err < 0)
1204		rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
1205}
1206
1207static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
1208			   bool *is_fdb, struct netlink_ext_ack *extack)
1209{
1210	if (nh->is_group) {
1211		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
1212
1213		/* Nesting groups within groups is not supported. */
1214		if (nhg->hash_threshold) {
1215			NL_SET_ERR_MSG(extack,
1216				       "Hash-threshold group can not be a nexthop within a group");
1217			return false;
1218		}
1219		if (nhg->resilient) {
1220			NL_SET_ERR_MSG(extack,
1221				       "Resilient group can not be a nexthop within a group");
1222			return false;
1223		}
1224		*is_fdb = nhg->fdb_nh;
1225	} else {
1226		struct nh_info *nhi = rtnl_dereference(nh->nh_info);
1227
1228		if (nhi->reject_nh && npaths > 1) {
1229			NL_SET_ERR_MSG(extack,
1230				       "Blackhole nexthop can not be used in a group with more than 1 path");
1231			return false;
1232		}
1233		*is_fdb = nhi->fdb_nh;
1234	}
1235
1236	return true;
1237}
1238
1239static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
1240				   struct netlink_ext_ack *extack)
1241{
1242	struct nh_info *nhi;
1243
1244	nhi = rtnl_dereference(nh->nh_info);
1245
1246	if (!nhi->fdb_nh) {
1247		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
1248		return -EINVAL;
1249	}
1250
1251	if (*nh_family == AF_UNSPEC) {
1252		*nh_family = nhi->family;
1253	} else if (*nh_family != nhi->family) {
1254		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
1255		return -EINVAL;
1256	}
1257
1258	return 0;
1259}
1260
1261static int nh_check_attr_group(struct net *net,
1262			       struct nlattr *tb[], size_t tb_size,
1263			       u16 nh_grp_type, struct netlink_ext_ack *extack)
1264{
1265	unsigned int len = nla_len(tb[NHA_GROUP]);
1266	u8 nh_family = AF_UNSPEC;
1267	struct nexthop_grp *nhg;
1268	unsigned int i, j;
1269	u8 nhg_fdb = 0;
1270
1271	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
1272		NL_SET_ERR_MSG(extack,
1273			       "Invalid length for nexthop group attribute");
1274		return -EINVAL;
1275	}
1276
1277	/* convert len to number of nexthop ids */
1278	len /= sizeof(*nhg);
1279
1280	nhg = nla_data(tb[NHA_GROUP]);
1281	for (i = 0; i < len; ++i) {
1282		if (nhg[i].resvd1 || nhg[i].resvd2) {
1283			NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
1284			return -EINVAL;
1285		}
1286		if (nhg[i].weight > 254) {
1287			NL_SET_ERR_MSG(extack, "Invalid value for weight");
1288			return -EINVAL;
1289		}
1290		for (j = i + 1; j < len; ++j) {
1291			if (nhg[i].id == nhg[j].id) {
1292				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
1293				return -EINVAL;
1294			}
1295		}
1296	}
1297
1298	if (tb[NHA_FDB])
1299		nhg_fdb = 1;
1300	nhg = nla_data(tb[NHA_GROUP]);
1301	for (i = 0; i < len; ++i) {
1302		struct nexthop *nh;
1303		bool is_fdb_nh;
1304
1305		nh = nexthop_find_by_id(net, nhg[i].id);
1306		if (!nh) {
1307			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
1308			return -EINVAL;
1309		}
1310		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
1311			return -EINVAL;
1312
1313		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
1314			return -EINVAL;
1315
1316		if (!nhg_fdb && is_fdb_nh) {
1317			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
1318			return -EINVAL;
1319		}
1320	}
1321	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
1322		if (!tb[i])
1323			continue;
1324		switch (i) {
1325		case NHA_HW_STATS_ENABLE:
1326		case NHA_FDB:
1327			continue;
1328		case NHA_RES_GROUP:
1329			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
1330				continue;
1331			break;
1332		}
1333		NL_SET_ERR_MSG(extack,
1334			       "No other attributes can be set in nexthop groups");
1335		return -EINVAL;
1336	}
1337
1338	return 0;
1339}
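/* For reference (illustrative): NHA_GROUP carries a flat array of struct
 * nexthop_grp from <uapi/linux/nexthop.h>, with the reserved fields zero
 * and the wire weight biased by one (a value of 0 means weight 1):
 *
 *	struct nexthop_grp grp[2] = {
 *		{ .id = 1, .weight = 0 },	// weight 1
 *		{ .id = 2, .weight = 2 },	// weight 3
 *	};
 */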
1340
1341static bool ipv6_good_nh(const struct fib6_nh *nh)
1342{
1343	int state = NUD_REACHABLE;
1344	struct neighbour *n;
1345
1346	rcu_read_lock();
1347
1348	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
1349	if (n)
1350		state = READ_ONCE(n->nud_state);
1351
1352	rcu_read_unlock();
1353
1354	return !!(state & NUD_VALID);
1355}
1356
1357static bool ipv4_good_nh(const struct fib_nh *nh)
1358{
1359	int state = NUD_REACHABLE;
1360	struct neighbour *n;
1361
1362	rcu_read_lock();
1363
1364	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
1365				      (__force u32)nh->fib_nh_gw4);
1366	if (n)
1367		state = READ_ONCE(n->nud_state);
1368
1369	rcu_read_unlock();
1370
1371	return !!(state & NUD_VALID);
1372}
1373
1374static bool nexthop_is_good_nh(const struct nexthop *nh)
1375{
1376	struct nh_info *nhi = rcu_dereference(nh->nh_info);
1377
1378	switch (nhi->family) {
1379	case AF_INET:
1380		return ipv4_good_nh(&nhi->fib_nh);
1381	case AF_INET6:
1382		return ipv6_good_nh(&nhi->fib6_nh);
1383	}
1384
1385	return false;
1386}
1387
1388static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
1389{
1390	int i;
1391
1392	for (i = 0; i < nhg->num_nh; i++) {
1393		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1394
1395		if (hash > atomic_read(&nhge->hthr.upper_bound))
1396			continue;
1397
1398		nh_grp_entry_stats_inc(nhge);
1399		return nhge->nh;
1400	}
1401
1402	WARN_ON_ONCE(1);
1403	return NULL;
1404}
1405
1406static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
1407{
1408	struct nh_grp_entry *nhge0 = NULL;
1409	int i;
1410
1411	if (nhg->fdb_nh)
1412		return nexthop_select_path_fdb(nhg, hash);
1413
1414	for (i = 0; i < nhg->num_nh; ++i) {
1415		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1416
1417		/* Nexthop groups always check whether a nexthop is good;
1418		 * they do not rely on a sysctl for this behavior.
1419		 */
1420		if (!nexthop_is_good_nh(nhge->nh))
1421			continue;
1422
1423		if (!nhge0)
1424			nhge0 = nhge;
1425
1426		if (hash > atomic_read(&nhge->hthr.upper_bound))
1427			continue;
1428
1429		nh_grp_entry_stats_inc(nhge);
1430		return nhge->nh;
1431	}
1432
1433	if (!nhge0)
1434		nhge0 = &nhg->nh_entries[0];
1435	nh_grp_entry_stats_inc(nhge0);
1436	return nhge0->nh;
1437}
1438
1439static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
1440{
1441	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
1442	u16 bucket_index = hash % res_table->num_nh_buckets;
1443	struct nh_res_bucket *bucket;
1444	struct nh_grp_entry *nhge;
1445
1446	/* nexthop_select_path() is expected to return a non-NULL value, so
1447	 * skip protocol validation and just hand out whatever there is.
1448	 */
1449	bucket = &res_table->nh_buckets[bucket_index];
1450	nh_res_bucket_set_busy(bucket);
1451	nhge = rcu_dereference(bucket->nh_entry);
1452	nh_grp_entry_stats_inc(nhge);
1453	return nhge->nh;
1454}
1455
1456struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
1457{
1458	struct nh_group *nhg;
1459
1460	if (!nh->is_group)
1461		return nh;
1462
1463	nhg = rcu_dereference(nh->nh_grp);
1464	if (nhg->hash_threshold)
1465		return nexthop_select_path_hthr(nhg, hash);
1466	else if (nhg->resilient)
1467		return nexthop_select_path_res(nhg, hash);
1468
1469	/* Unreachable. */
1470	return NULL;
1471}
1472EXPORT_SYMBOL_GPL(nexthop_select_path);
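/* Example usage (illustrative): the IPv4 forwarding path resolves a group
 * roughly like this, with the hash computed by fib_multipath_hash():
 *
 *	rcu_read_lock();
 *	hash = fib_multipath_hash(net, fl4, skb, NULL);
 *	nh = nexthop_select_path(nh, hash);
 *	rcu_read_unlock();
 */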
1473
1474int nexthop_for_each_fib6_nh(struct nexthop *nh,
1475			     int (*cb)(struct fib6_nh *nh, void *arg),
1476			     void *arg)
1477{
1478	struct nh_info *nhi;
1479	int err;
1480
1481	if (nh->is_group) {
1482		struct nh_group *nhg;
1483		int i;
1484
1485		nhg = rcu_dereference_rtnl(nh->nh_grp);
1486		for (i = 0; i < nhg->num_nh; i++) {
1487			struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1488
1489			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
1490			err = cb(&nhi->fib6_nh, arg);
1491			if (err)
1492				return err;
1493		}
1494	} else {
1495		nhi = rcu_dereference_rtnl(nh->nh_info);
1496		err = cb(&nhi->fib6_nh, arg);
1497		if (err)
1498			return err;
1499	}
1500
1501	return 0;
1502}
1503EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);
1504
1505static int check_src_addr(const struct in6_addr *saddr,
1506			  struct netlink_ext_ack *extack)
1507{
1508	if (!ipv6_addr_any(saddr)) {
1509		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
1510		return -EINVAL;
1511	}
1512	return 0;
1513}
1514
1515int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
1516		       struct netlink_ext_ack *extack)
1517{
1518	struct nh_info *nhi;
1519	bool is_fdb_nh;
1520
1521	/* fib6_src is unique to a fib6_info and limits the ability to cache
1522	 * routes in fib6_nh within a nexthop that is potentially shared
1523	 * across multiple fib entries. If the config wants to use source
1524	 * routing it can not use nexthop objects. mlxsw also does not allow
1525	 * fib6_src on routes.
1526	 */
1527	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
1528		return -EINVAL;
1529
1530	if (nh->is_group) {
1531		struct nh_group *nhg;
1532
1533		nhg = rtnl_dereference(nh->nh_grp);
1534		if (nhg->has_v4)
1535			goto no_v4_nh;
1536		is_fdb_nh = nhg->fdb_nh;
1537	} else {
1538		nhi = rtnl_dereference(nh->nh_info);
1539		if (nhi->family == AF_INET)
1540			goto no_v4_nh;
1541		is_fdb_nh = nhi->fdb_nh;
1542	}
1543
1544	if (is_fdb_nh) {
1545		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1546		return -EINVAL;
1547	}
1548
1549	return 0;
1550no_v4_nh:
1551	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
1552	return -EINVAL;
1553}
1554EXPORT_SYMBOL_GPL(fib6_check_nexthop);
1555
1556/* if the existing nexthop has ipv6 routes linked to it, we need
1557 * to verify that this new spec works with ipv6
1558 */
1559static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
1560			      struct netlink_ext_ack *extack)
1561{
1562	struct fib6_info *f6i;
1563
1564	if (list_empty(&old->f6i_list))
1565		return 0;
1566
1567	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
1568		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
1569			return -EINVAL;
1570	}
1571
1572	return fib6_check_nexthop(new, NULL, extack);
1573}
1574
1575static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
1576			       struct netlink_ext_ack *extack)
1577{
1578	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
1579		NL_SET_ERR_MSG(extack,
1580			       "Route with host scope can not have a gateway");
1581		return -EINVAL;
1582	}
1583
1584	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
1585		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
1586		return -EINVAL;
1587	}
1588
1589	return 0;
1590}
1591
1592/* Invoked by fib add code to verify nexthop by id is ok with
1593 * config for prefix; parts of fib_check_nh not done when nexthop
1594 * object is used.
1595 */
1596int fib_check_nexthop(struct nexthop *nh, u8 scope,
1597		      struct netlink_ext_ack *extack)
1598{
1599	struct nh_info *nhi;
1600	int err = 0;
1601
1602	if (nh->is_group) {
1603		struct nh_group *nhg;
1604
1605		nhg = rtnl_dereference(nh->nh_grp);
1606		if (nhg->fdb_nh) {
1607			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1608			err = -EINVAL;
1609			goto out;
1610		}
1611
1612		if (scope == RT_SCOPE_HOST) {
1613			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
1614			err = -EINVAL;
1615			goto out;
1616		}
1617
1618		/* all nexthops in a group have the same scope */
1619		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
1620		err = nexthop_check_scope(nhi, scope, extack);
1621	} else {
1622		nhi = rtnl_dereference(nh->nh_info);
1623		if (nhi->fdb_nh) {
1624			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1625			err = -EINVAL;
1626			goto out;
1627		}
1628		err = nexthop_check_scope(nhi, scope, extack);
1629	}
1630
1631out:
1632	return err;
1633}
1634
1635static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
1636			     struct netlink_ext_ack *extack)
1637{
1638	struct fib_info *fi;
1639
1640	list_for_each_entry(fi, &old->fi_list, nh_list) {
1641		int err;
1642
1643		err = fib_check_nexthop(new, fi->fib_scope, extack);
1644		if (err)
1645			return err;
1646	}
1647	return 0;
1648}
1649
1650static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
1651{
1652	return nhge->res.count_buckets == nhge->res.wants_buckets;
1653}
1654
1655static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
1656{
1657	return nhge->res.count_buckets > nhge->res.wants_buckets;
1658}
1659
1660static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
1661{
1662	return nhge->res.count_buckets < nhge->res.wants_buckets;
1663}
1664
1665static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
1666{
1667	return list_empty(&res_table->uw_nh_entries);
1668}
1669
1670static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
1671{
1672	struct nh_grp_entry *nhge;
1673
1674	if (bucket->occupied) {
1675		nhge = nh_res_dereference(bucket->nh_entry);
1676		nhge->res.count_buckets--;
1677		bucket->occupied = false;
1678	}
1679}
1680
1681static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
1682				 struct nh_grp_entry *nhge)
1683{
1684	nh_res_bucket_unset_nh(bucket);
1685
1686	bucket->occupied = true;
1687	rcu_assign_pointer(bucket->nh_entry, nhge);
1688	nhge->res.count_buckets++;
1689}
1690
1691static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
1692					 struct nh_res_bucket *bucket,
1693					 unsigned long *deadline, bool *force)
1694{
1695	unsigned long now = jiffies;
1696	struct nh_grp_entry *nhge;
1697	unsigned long idle_point;
1698
1699	if (!bucket->occupied) {
1700		/* The bucket is not occupied, its NHGE pointer is either
1701		 * NULL or obsolete. We _have to_ migrate: set force.
1702		 */
1703		*force = true;
1704		return true;
1705	}
1706
1707	nhge = nh_res_dereference(bucket->nh_entry);
1708
1709	/* If the bucket is populated by an underweight or balanced
1710	 * nexthop, do not migrate.
1711	 */
1712	if (!nh_res_nhge_is_ow(nhge))
1713		return false;
1714
1715	/* At this point we know that the bucket is populated with an
1716	 * overweight nexthop. It needs to be migrated to a new nexthop if
1717	 * the idle timer or the unbalanced timer expired.
1718	 */
1719
1720	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
1721	if (time_after_eq(now, idle_point)) {
1722		/* The bucket is idle. We _can_ migrate: unset force. */
1723		*force = false;
1724		return true;
1725	}
1726
1727	/* Unbalanced timer of 0 means "never force". */
1728	if (res_table->unbalanced_timer) {
1729		unsigned long unb_point;
1730
1731		unb_point = nh_res_table_unb_point(res_table);
1732		if (time_after(now, unb_point)) {
1733			/* The bucket is not idle, but the unbalanced timer
1734			 * expired. We _can_ migrate, but set force anyway,
1735			 * so that drivers know to ignore activity reports
1736			 * from the HW.
1737			 */
1738			*force = true;
1739			return true;
1740		}
1741
1742		nh_res_time_set_deadline(unb_point, deadline);
1743	}
1744
1745	nh_res_time_set_deadline(idle_point, deadline);
1746	return false;
1747}
1748
1749static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
1750				  u16 bucket_index, bool notify,
1751				  bool notify_nl, bool force)
1752{
1753	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
1754	struct nh_grp_entry *new_nhge;
1755	struct netlink_ext_ack extack;
1756	int err;
1757
1758	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
1759					    struct nh_grp_entry,
1760					    res.uw_nh_entry);
1761	if (WARN_ON_ONCE(!new_nhge))
1762		/* If this function is called, "bucket" is either not
1763		 * occupied, or it belongs to a next hop that is
1764		 * overweight. In either case, there ought to be a
1765		 * corresponding underweight next hop.
1766		 */
1767		return false;
1768
1769	if (notify) {
1770		struct nh_grp_entry *old_nhge;
1771
1772		old_nhge = nh_res_dereference(bucket->nh_entry);
1773		err = call_nexthop_res_bucket_notifiers(res_table->net,
1774							res_table->nhg_id,
1775							bucket_index, force,
1776							old_nhge->nh,
1777							new_nhge->nh, &extack);
1778		if (err) {
1779			pr_err_ratelimited("%s\n", extack._msg);
1780			if (!force)
1781				return false;
1782			/* It is not possible to veto a forced replacement, so
1783			 * just clear the hardware flags from the nexthop
1784			 * bucket to indicate to user space that this bucket is
1785			 * not correctly populated in hardware.
1786			 */
1787			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
1788		}
1789	}
1790
1791	nh_res_bucket_set_nh(bucket, new_nhge);
1792	nh_res_bucket_set_idle(res_table, bucket);
1793
1794	if (notify_nl)
1795		nexthop_bucket_notify(res_table, bucket_index);
1796
1797	if (nh_res_nhge_is_balanced(new_nhge))
1798		list_del(&new_nhge->res.uw_nh_entry);
1799	return true;
1800}
1801
1802#define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)
1803
1804static void nh_res_table_upkeep(struct nh_res_table *res_table,
1805				bool notify, bool notify_nl)
1806{
1807	unsigned long now = jiffies;
1808	unsigned long deadline;
1809	u16 i;
1810
1811	/* Deadline is the next time that upkeep should be run. It is the
1812	 * earliest time at which one of the buckets might be migrated.
1813	 * Start at the most pessimistic estimate: either unbalanced_timer
1814	 * from now, or if there is none, idle_timer from now. For each
1815	 * encountered time point, call nh_res_time_set_deadline() to
1816	 * refine the estimate.
1817	 */
1818	if (res_table->unbalanced_timer)
1819		deadline = now + res_table->unbalanced_timer;
1820	else
1821		deadline = now + res_table->idle_timer;
1822
1823	for (i = 0; i < res_table->num_nh_buckets; i++) {
1824		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1825		bool force;
1826
1827		if (nh_res_bucket_should_migrate(res_table, bucket,
1828						 &deadline, &force)) {
1829			if (!nh_res_bucket_migrate(res_table, i, notify,
1830						   notify_nl, force)) {
1831				unsigned long idle_point;
1832
1833				/* A driver can override the migration
1834				 * decision if the HW reports that the
1835				 * bucket is actually not idle. Therefore
1836				 * remark the bucket as busy again and
1837				 * update the deadline.
1838				 */
1839				nh_res_bucket_set_busy(bucket);
1840				idle_point = nh_res_bucket_idle_point(res_table,
1841								      bucket,
1842								      now);
1843				nh_res_time_set_deadline(idle_point, &deadline);
1844			}
1845		}
1846	}
1847
1848	/* If the group is still unbalanced, schedule the next upkeep to
1849	 * either the deadline computed above, or the minimum deadline,
1850	 * whichever comes later.
1851	 */
1852	if (!nh_res_table_is_balanced(res_table)) {
1853		unsigned long now = jiffies;
1854		unsigned long min_deadline;
1855
1856		min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
1857		if (time_before(deadline, min_deadline))
1858			deadline = min_deadline;
1859
1860		queue_delayed_work(system_power_efficient_wq,
1861				   &res_table->upkeep_dw, deadline - now);
1862	}
1863}
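/* Worked example (illustrative): with idle_timer = 120s, no unbalanced
 * timer and one still-busy overweight bucket, the loop above leaves the
 * deadline at that bucket's idle point and the work is requeued for
 * then, but never sooner than NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)
 * from now.
 */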
1864
1865static void nh_res_table_upkeep_dw(struct work_struct *work)
1866{
1867	struct delayed_work *dw = to_delayed_work(work);
1868	struct nh_res_table *res_table;
1869
1870	res_table = container_of(dw, struct nh_res_table, upkeep_dw);
1871	nh_res_table_upkeep(res_table, true, true);
1872}
1873
1874static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
1875{
1876	cancel_delayed_work_sync(&res_table->upkeep_dw);
1877}
1878
1879static void nh_res_group_rebalance(struct nh_group *nhg,
1880				   struct nh_res_table *res_table)
1881{
1882	int prev_upper_bound = 0;
1883	int total = 0;
1884	int w = 0;
1885	int i;
1886
1887	INIT_LIST_HEAD(&res_table->uw_nh_entries);
1888
1889	for (i = 0; i < nhg->num_nh; ++i)
1890		total += nhg->nh_entries[i].weight;
1891
1892	for (i = 0; i < nhg->num_nh; ++i) {
1893		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1894		int upper_bound;
1895
1896		w += nhge->weight;
1897		upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w,
1898						total);
1899		nhge->res.wants_buckets = upper_bound - prev_upper_bound;
1900		prev_upper_bound = upper_bound;
1901
1902		if (nh_res_nhge_is_uw(nhge)) {
1903			if (list_empty(&res_table->uw_nh_entries))
1904				res_table->unbalanced_since = jiffies;
1905			list_add(&nhge->res.uw_nh_entry,
1906				 &res_table->uw_nh_entries);
1907		}
1908	}
1909}
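/* Worked example (illustrative): with num_nh_buckets = 8 and entries of
 * weight 1 and 3, total = 4. The cumulative upper bounds come out as
 * DIV_ROUND_CLOSEST(8 * 1, 4) = 2 and DIV_ROUND_CLOSEST(8 * 4, 4) = 8,
 * so wants_buckets is 2 and 6. Entries holding fewer buckets than they
 * want are queued on uw_nh_entries for the upkeep work to fill.
 */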
1910
1911/* Migrate buckets in res_table so that they reference NHGE's from NHG with
1912 * the right NH ID. Set those buckets that do not have a corresponding NHGE
1913 * entry in NHG as not occupied.
1914 */
1915static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
1916					 struct nh_group *nhg)
1917{
1918	u16 i;
1919
1920	for (i = 0; i < res_table->num_nh_buckets; i++) {
1921		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1922		u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
1923		bool found = false;
1924		int j;
1925
1926		for (j = 0; j < nhg->num_nh; j++) {
1927			struct nh_grp_entry *nhge = &nhg->nh_entries[j];
1928
1929			if (nhge->nh->id == id) {
1930				nh_res_bucket_set_nh(bucket, nhge);
1931				found = true;
1932				break;
1933			}
1934		}
1935
1936		if (!found)
1937			nh_res_bucket_unset_nh(bucket);
1938	}
1939}
1940
1941static void replace_nexthop_grp_res(struct nh_group *oldg,
1942				    struct nh_group *newg)
1943{
1944	/* For NH group replacement, the new NHG might only have a stub
1945	 * hash table with 0 buckets, because the number of buckets was not
1946	 * specified. For NH removal, oldg and newg both reference the same
1947	 * res_table. So in any case, in the following, we want to work
1948	 * with oldg->res_table.
1949	 */
1950	struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
1951	unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
1952	bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);
1953
1954	nh_res_table_cancel_upkeep(old_res_table);
1955	nh_res_table_migrate_buckets(old_res_table, newg);
1956	nh_res_group_rebalance(newg, old_res_table);
1957	if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
1958		old_res_table->unbalanced_since = prev_unbalanced_since;
1959	nh_res_table_upkeep(old_res_table, true, false);
1960}
1961
1962static void nh_hthr_group_rebalance(struct nh_group *nhg)
1963{
1964	int total = 0;
1965	int w = 0;
1966	int i;
1967
1968	for (i = 0; i < nhg->num_nh; ++i)
1969		total += nhg->nh_entries[i].weight;
1970
1971	for (i = 0; i < nhg->num_nh; ++i) {
1972		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1973		int upper_bound;
1974
1975		w += nhge->weight;
1976		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
1977		atomic_set(&nhge->hthr.upper_bound, upper_bound);
1978	}
1979}
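/* Worked example (illustrative): with entries of weight 1 and 3,
 * total = 4, so the cumulative upper bounds over the 31-bit hash space
 * are (1 << 31) / 4 - 1 = 0x1fffffff and (1 << 31) - 1 = 0x7fffffff;
 * entry 0 therefore matches hashes in [0, 0x1fffffff], a quarter of the
 * space, matching its share of the total weight.
 */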
1980
1981static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
1982				struct nl_info *nlinfo)
1983{
1984	struct nh_grp_entry *nhges, *new_nhges;
1985	struct nexthop *nhp = nhge->nh_parent;
1986	struct netlink_ext_ack extack;
1987	struct nexthop *nh = nhge->nh;
1988	struct nh_group *nhg, *newg;
1989	int i, j, err;
1990
1991	WARN_ON(!nh);
1992
1993	nhg = rtnl_dereference(nhp->nh_grp);
1994	newg = nhg->spare;
1995
1996	/* last entry, keep it visible and remove the parent */
1997	if (nhg->num_nh == 1) {
1998		remove_nexthop(net, nhp, nlinfo);
1999		return;
2000	}
2001
2002	newg->has_v4 = false;
2003	newg->is_multipath = nhg->is_multipath;
2004	newg->hash_threshold = nhg->hash_threshold;
2005	newg->resilient = nhg->resilient;
2006	newg->fdb_nh = nhg->fdb_nh;
2007	newg->num_nh = nhg->num_nh;
2008
2009	/* copy old entries to new except the one getting removed */
2010	nhges = nhg->nh_entries;
2011	new_nhges = newg->nh_entries;
2012	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
2013		struct nh_info *nhi;
2014
2015		/* current nexthop getting removed */
2016		if (nhg->nh_entries[i].nh == nh) {
2017			newg->num_nh--;
2018			continue;
2019		}
2020
2021		nhi = rtnl_dereference(nhges[i].nh->nh_info);
2022		if (nhi->family == AF_INET)
2023			newg->has_v4 = true;
2024
2025		list_del(&nhges[i].nh_list);
2026		new_nhges[j].stats = nhges[i].stats;
2027		new_nhges[j].nh_parent = nhges[i].nh_parent;
2028		new_nhges[j].nh = nhges[i].nh;
2029		new_nhges[j].weight = nhges[i].weight;
2030		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
2031		j++;
2032	}
2033
2034	if (newg->hash_threshold)
2035		nh_hthr_group_rebalance(newg);
2036	else if (newg->resilient)
2037		replace_nexthop_grp_res(nhg, newg);
2038
2039	rcu_assign_pointer(nhp->nh_grp, newg);
2040
2041	list_del(&nhge->nh_list);
2042	free_percpu(nhge->stats);
2043	nexthop_put(nhge->nh);
2044
2045	/* Removal of a NH from a resilient group is notified through
2046	 * bucket notifications.
2047	 */
2048	if (newg->hash_threshold) {
2049		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
2050					     &extack);
2051		if (err)
2052			pr_err("%s\n", extack._msg);
2053	}
2054
2055	if (nlinfo)
2056		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
2057}
2058
2059static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
2060				       struct nl_info *nlinfo)
2061{
2062	struct nh_grp_entry *nhge, *tmp;
2063
2064	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
2065		remove_nh_grp_entry(net, nhge, nlinfo);
2066
2067	/* make sure all see the newly published array before releasing rtnl */
2068	synchronize_net();
2069}
2070
2071static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
2072{
2073	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
2074	struct nh_res_table *res_table;
2075	int i, num_nh = nhg->num_nh;
2076
2077	for (i = 0; i < num_nh; ++i) {
2078		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2079
2080		if (WARN_ON(!nhge->nh))
2081			continue;
2082
2083		list_del_init(&nhge->nh_list);
2084	}
2085
2086	if (nhg->resilient) {
2087		res_table = rtnl_dereference(nhg->res_table);
2088		nh_res_table_cancel_upkeep(res_table);
2089	}
2090}
2091
2092/* not called for nexthop replace */
2093static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
2094{
2095	struct fib6_info *f6i, *tmp;
2096	bool do_flush = false;
2097	struct fib_info *fi;
2098
2099	list_for_each_entry(fi, &nh->fi_list, nh_list) {
2100		fi->fib_flags |= RTNH_F_DEAD;
2101		do_flush = true;
2102	}
2103	if (do_flush)
2104		fib_flush(net);
2105
2106	/* ip6_del_rt removes the entry from this list hence the _safe */
2107	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
2108		/* __ip6_del_rt does a release, so do a hold here */
2109		fib6_info_hold(f6i);
2110		ipv6_stub->ip6_del_rt(net, f6i,
2111				      !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
2112	}
2113}
2114
2115static void __remove_nexthop(struct net *net, struct nexthop *nh,
2116			     struct nl_info *nlinfo)
2117{
2118	__remove_nexthop_fib(net, nh);
2119
2120	if (nh->is_group) {
2121		remove_nexthop_group(nh, nlinfo);
2122	} else {
2123		struct nh_info *nhi;
2124
2125		nhi = rtnl_dereference(nh->nh_info);
2126		if (nhi->fib_nhc.nhc_dev)
2127			hlist_del(&nhi->dev_hash);
2128
2129		remove_nexthop_from_groups(net, nh, nlinfo);
2130	}
2131}
2132
2133static void remove_nexthop(struct net *net, struct nexthop *nh,
2134			   struct nl_info *nlinfo)
2135{
2136	call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);
2137
2138	/* remove from the tree */
2139	rb_erase(&nh->rb_node, &net->nexthop.rb_root);
2140
2141	if (nlinfo)
2142		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);
2143
2144	__remove_nexthop(net, nh, nlinfo);
2145	nh_base_seq_inc(net);
2146
2147	nexthop_put(nh);
2148}
2149
2150/* If any FIB entries reference this nexthop, the dst entries
2151 * cached for them need to be regenerated.
2152 */
2153static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
2154			      struct nexthop *replaced_nh)
2155{
2156	struct fib6_info *f6i;
2157	struct nh_group *nhg;
2158	int i;
2159
2160	if (!list_empty(&nh->fi_list))
2161		rt_cache_flush(net);
2162
2163	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2164		ipv6_stub->fib6_update_sernum(net, f6i);
2165
2166	/* if an IPv6 group was replaced, we have to release all old
2167	 * dsts to make sure all refcounts are released
2168	 */
2169	if (!replaced_nh->is_group)
2170		return;
2171
2172	nhg = rtnl_dereference(replaced_nh->nh_grp);
2173	for (i = 0; i < nhg->num_nh; i++) {
2174		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2175		struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
2176
2177		if (nhi->family == AF_INET6)
2178			ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
2179	}
2180}
2181
2182static int replace_nexthop_grp(struct net *net, struct nexthop *old,
2183			       struct nexthop *new, const struct nh_config *cfg,
2184			       struct netlink_ext_ack *extack)
2185{
2186	struct nh_res_table *tmp_table = NULL;
2187	struct nh_res_table *new_res_table;
2188	struct nh_res_table *old_res_table;
2189	struct nh_group *oldg, *newg;
2190	int i, err;
2191
2192	if (!new->is_group) {
2193		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
2194		return -EINVAL;
2195	}
2196
2197	oldg = rtnl_dereference(old->nh_grp);
2198	newg = rtnl_dereference(new->nh_grp);
2199
2200	if (newg->hash_threshold != oldg->hash_threshold) {
2201		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
2202		return -EINVAL;
2203	}
2204
2205	if (newg->hash_threshold) {
2206		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
2207					     extack);
2208		if (err)
2209			return err;
2210	} else if (newg->resilient) {
2211		new_res_table = rtnl_dereference(newg->res_table);
2212		old_res_table = rtnl_dereference(oldg->res_table);
2213
2214		/* Accept if num_nh_buckets was not given, but if it was
2215		 * given, require that it match the current bucket count.
2216		 */
2217		if (cfg->nh_grp_res_has_num_buckets &&
2218		    cfg->nh_grp_res_num_buckets !=
2219		    old_res_table->num_nh_buckets) {
2220			NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
2221			return -EINVAL;
2222		}
2223
2224		/* Emit a pre-replace notification so that listeners can veto
2225		 * a potentially unsupported configuration. Otherwise,
2226		 * individual bucket replacement notifications would need to be
2227		 * vetoed, and that should only happen if the bucket is
2228		 * currently active.
2229		 */
2230		err = call_nexthop_res_table_notifiers(net, new, extack);
2231		if (err)
2232			return err;
2233
2234		if (cfg->nh_grp_res_has_idle_timer)
2235			old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
2236		if (cfg->nh_grp_res_has_unbalanced_timer)
2237			old_res_table->unbalanced_timer =
2238				cfg->nh_grp_res_unbalanced_timer;
2239
2240		replace_nexthop_grp_res(oldg, newg);
2241
2242		tmp_table = new_res_table;
2243		rcu_assign_pointer(newg->res_table, old_res_table);
2244		rcu_assign_pointer(newg->spare->res_table, old_res_table);
2245	}
2246
2247	/* update parents - used by nexthop code for cleanup */
2248	for (i = 0; i < newg->num_nh; i++)
2249		newg->nh_entries[i].nh_parent = old;
2250
2251	rcu_assign_pointer(old->nh_grp, newg);
2252
2253	/* Make sure concurrent readers are not using 'oldg' anymore. */
2254	synchronize_net();
2255
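	/* The new group took over the old group's bucket table above.
	 * Hand the table that came with 'new' to the old group so that
	 * the unused table is freed along with it.
	 */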
2256	if (newg->resilient) {
2257		rcu_assign_pointer(oldg->res_table, tmp_table);
2258		rcu_assign_pointer(oldg->spare->res_table, tmp_table);
2259	}
2260
2261	for (i = 0; i < oldg->num_nh; i++)
2262		oldg->nh_entries[i].nh_parent = new;
2263
2264	rcu_assign_pointer(new->nh_grp, oldg);
2265
2266	return 0;
2267}
2268
2269static void nh_group_v4_update(struct nh_group *nhg)
2270{
2271	struct nh_grp_entry *nhges;
2272	bool has_v4 = false;
2273	int i;
2274
2275	nhges = nhg->nh_entries;
2276	for (i = 0; i < nhg->num_nh; i++) {
2277		struct nh_info *nhi;
2278
2279		nhi = rtnl_dereference(nhges[i].nh->nh_info);
2280		if (nhi->family == AF_INET)
2281			has_v4 = true;
2282	}
2283	nhg->has_v4 = has_v4;
2284}
2285
2286static int replace_nexthop_single_notify_res(struct net *net,
2287					     struct nh_res_table *res_table,
2288					     struct nexthop *old,
2289					     struct nh_info *oldi,
2290					     struct nh_info *newi,
2291					     struct netlink_ext_ack *extack)
2292{
2293	u32 nhg_id = res_table->nhg_id;
2294	int err;
2295	u16 i;
2296
2297	for (i = 0; i < res_table->num_nh_buckets; i++) {
2298		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2299		struct nh_grp_entry *nhge;
2300
2301		nhge = rtnl_dereference(bucket->nh_entry);
2302		if (nhge->nh == old) {
2303			err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
2304								  i, true,
2305								  oldi, newi,
2306								  extack);
2307			if (err)
2308				goto err_notify;
2309		}
2310	}
2311
2312	return 0;
2313
2314err_notify:
2315	while (i-- > 0) {
2316		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2317		struct nh_grp_entry *nhge;
2318
2319		nhge = rtnl_dereference(bucket->nh_entry);
2320		if (nhge->nh == old)
2321			__call_nexthop_res_bucket_notifiers(net, nhg_id, i,
2322							    true, newi, oldi,
2323							    extack);
2324	}
2325	return err;
2326}
2327
2328static int replace_nexthop_single_notify(struct net *net,
2329					 struct nexthop *group_nh,
2330					 struct nexthop *old,
2331					 struct nh_info *oldi,
2332					 struct nh_info *newi,
2333					 struct netlink_ext_ack *extack)
2334{
2335	struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
2336	struct nh_res_table *res_table;
2337
2338	if (nhg->hash_threshold) {
2339		return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
2340					      group_nh, extack);
2341	} else if (nhg->resilient) {
2342		res_table = rtnl_dereference(nhg->res_table);
2343		return replace_nexthop_single_notify_res(net, res_table,
2344							 old, oldi, newi,
2345							 extack);
2346	}
2347
2348	return -EINVAL;
2349}
2350
2351static int replace_nexthop_single(struct net *net, struct nexthop *old,
2352				  struct nexthop *new,
2353				  struct netlink_ext_ack *extack)
2354{
2355	u8 old_protocol, old_nh_flags;
2356	struct nh_info *oldi, *newi;
2357	struct nh_grp_entry *nhge;
2358	int err;
2359
2360	if (new->is_group) {
2361		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
2362		return -EINVAL;
2363	}
2364
2365	err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
2366	if (err)
2367		return err;
2368
2369	/* Hardware flags were set on 'old' as 'new' is not in the red-black
2370	 * tree. Therefore, inherit the flags from 'old' to 'new'.
2371	 */
2372	new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);
2373
2374	oldi = rtnl_dereference(old->nh_info);
2375	newi = rtnl_dereference(new->nh_info);
2376
2377	newi->nh_parent = old;
2378	oldi->nh_parent = new;
2379
2380	old_protocol = old->protocol;
2381	old_nh_flags = old->nh_flags;
2382
2383	old->protocol = new->protocol;
2384	old->nh_flags = new->nh_flags;
2385
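	/* Swap the payloads: 'old', which stays in the tree, now carries
	 * the new configuration, while 'new', which the caller releases
	 * on success, carries the old one.
	 */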
2386	rcu_assign_pointer(old->nh_info, newi);
2387	rcu_assign_pointer(new->nh_info, oldi);
2388
2389	/* Send a replace notification for all the groups using the nexthop. */
2390	list_for_each_entry(nhge, &old->grp_list, nh_list) {
2391		struct nexthop *nhp = nhge->nh_parent;
2392
2393		err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
2394						    extack);
2395		if (err)
2396			goto err_notify;
2397	}
2398
2399	/* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
2400	 * update IPv4 indication in all the groups using the nexthop.
2401	 */
2402	if (oldi->family == AF_INET && newi->family == AF_INET6) {
2403		list_for_each_entry(nhge, &old->grp_list, nh_list) {
2404			struct nexthop *nhp = nhge->nh_parent;
2405			struct nh_group *nhg;
2406
2407			nhg = rtnl_dereference(nhp->nh_grp);
2408			nh_group_v4_update(nhg);
2409		}
2410	}
2411
2412	return 0;
2413
2414err_notify:
2415	rcu_assign_pointer(new->nh_info, newi);
2416	rcu_assign_pointer(old->nh_info, oldi);
2417	old->nh_flags = old_nh_flags;
2418	old->protocol = old_protocol;
2419	oldi->nh_parent = old;
2420	newi->nh_parent = new;
2421	list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
2422		struct nexthop *nhp = nhge->nh_parent;
2423
2424		replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
2425	}
2426	call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
2427	return err;
2428}
2429
2430static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
2431				     struct nl_info *info)
2432{
2433	struct fib6_info *f6i;
2434
2435	if (!list_empty(&nh->fi_list)) {
2436		struct fib_info *fi;
2437
2438		/* The expectation is a few fib_info entries per nexthop
2439		 * and many routes per fib_info. So mark each fib_info
2440		 * and then walk the fib tables once.
2441		 */
2442		list_for_each_entry(fi, &nh->fi_list, nh_list)
2443			fi->nh_updated = true;
2444
2445		fib_info_notify_update(net, info);
2446
2447		list_for_each_entry(fi, &nh->fi_list, nh_list)
2448			fi->nh_updated = false;
2449	}
2450
2451	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2452		ipv6_stub->fib6_rt_update(net, f6i, info);
2453}
2454
2455/* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
2456 * linked to this nexthop and for all groups that the nexthop
2457 * is a member of
2458 */
2459static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
2460				   struct nl_info *info)
2461{
2462	struct nh_grp_entry *nhge;
2463
2464	__nexthop_replace_notify(net, nh, info);
2465
2466	list_for_each_entry(nhge, &nh->grp_list, nh_list)
2467		__nexthop_replace_notify(net, nhge->nh_parent, info);
2468}
2469
2470static int replace_nexthop(struct net *net, struct nexthop *old,
2471			   struct nexthop *new, const struct nh_config *cfg,
2472			   struct netlink_ext_ack *extack)
2473{
2474	bool new_is_reject = false;
2475	struct nh_grp_entry *nhge;
2476	int err;
2477
2478	/* check that existing FIB entries are ok with the
2479	 * new nexthop definition
2480	 */
2481	err = fib_check_nh_list(old, new, extack);
2482	if (err)
2483		return err;
2484
2485	err = fib6_check_nh_list(old, new, extack);
2486	if (err)
2487		return err;
2488
2489	if (!new->is_group) {
2490		struct nh_info *nhi = rtnl_dereference(new->nh_info);
2491
2492		new_is_reject = nhi->reject_nh;
2493	}
2494
2495	list_for_each_entry(nhge, &old->grp_list, nh_list) {
2496		/* if new nexthop is a blackhole, any groups using this
2497		 * nexthop cannot have more than 1 path
2498		 */
2499		if (new_is_reject &&
2500		    nexthop_num_path(nhge->nh_parent) > 1) {
2501			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
2502			return -EINVAL;
2503		}
2504
2505		err = fib_check_nh_list(nhge->nh_parent, new, extack);
2506		if (err)
2507			return err;
2508
2509		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
2510		if (err)
2511			return err;
2512	}
2513
2514	if (old->is_group)
2515		err = replace_nexthop_grp(net, old, new, cfg, extack);
2516	else
2517		err = replace_nexthop_single(net, old, new, extack);
2518
2519	if (!err) {
2520		nh_rt_cache_flush(net, old, new);
2521
2522		__remove_nexthop(net, new, NULL);
2523		nexthop_put(new);
2524	}
2525
2526	return err;
2527}
2528
2529/* called with rtnl_lock held */
2530static int insert_nexthop(struct net *net, struct nexthop *new_nh,
2531			  struct nh_config *cfg, struct netlink_ext_ack *extack)
2532{
2533	struct rb_node **pp, *parent = NULL, *next;
2534	struct rb_root *root = &net->nexthop.rb_root;
2535	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
2536	bool create = !!(cfg->nlflags & NLM_F_CREATE);
2537	u32 new_id = new_nh->id;
2538	int replace_notify = 0;
2539	int rc = -EEXIST;
2540
2541	pp = &root->rb_node;
2542	while (1) {
2543		struct nexthop *nh;
2544
2545		next = *pp;
2546		if (!next)
2547			break;
2548
2549		parent = next;
2550
2551		nh = rb_entry(parent, struct nexthop, rb_node);
2552		if (new_id < nh->id) {
2553			pp = &next->rb_left;
2554		} else if (new_id > nh->id) {
2555			pp = &next->rb_right;
2556		} else if (replace) {
2557			rc = replace_nexthop(net, nh, new_nh, cfg, extack);
2558			if (!rc) {
2559				new_nh = nh; /* send notification with old nh */
2560				replace_notify = 1;
2561			}
2562			goto out;
2563		} else {
2564			/* id already exists and not a replace */
2565			goto out;
2566		}
2567	}
2568
2569	if (replace && !create) {
2570		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
2571		rc = -ENOENT;
2572		goto out;
2573	}
2574
2575	if (new_nh->is_group) {
2576		struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
2577		struct nh_res_table *res_table;
2578
2579		if (nhg->resilient) {
2580			res_table = rtnl_dereference(nhg->res_table);
2581
2582			/* Not passing the number of buckets is OK when
2583			 * replacing, but not when creating a new group.
2584			 */
2585			if (!cfg->nh_grp_res_has_num_buckets) {
2586				NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
2587				rc = -EINVAL;
2588				goto out;
2589			}
2590
2591			nh_res_group_rebalance(nhg, res_table);
2592
2593			/* Do not send bucket notifications; a full
2594			 * notification is sent below.
2595			 */
2596			nh_res_table_upkeep(res_table, false, false);
2597		}
2598	}
2599
2600	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
2601	rb_insert_color(&new_nh->rb_node, root);
2602
2603	/* The initial insertion is a full notification for hash-threshold as
2604	 * well as resilient groups.
2605	 */
2606	rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
2607	if (rc)
2608		rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
2609
2610out:
2611	if (!rc) {
2612		nh_base_seq_inc(net);
2613		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
2614		if (replace_notify &&
2615		    READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
2616			nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
2617	}
2618
2619	return rc;
2620}
2621
2622/* rtnl */
2623/* remove all nexthops tied to a device going down or being deleted */
2624static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
2625{
2626	unsigned int hash = nh_dev_hashfn(dev->ifindex);
2627	struct net *net = dev_net(dev);
2628	struct hlist_head *head = &net->nexthop.devhash[hash];
2629	struct hlist_node *n;
2630	struct nh_info *nhi;
2631
2632	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
2633		if (nhi->fib_nhc.nhc_dev != dev)
2634			continue;
2635
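		/* Blackhole nexthops are bound to the loopback device;
		 * only an unregister of that device removes them.
		 */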
2636		if (nhi->reject_nh &&
2637		    (event == NETDEV_DOWN || event == NETDEV_CHANGE))
2638			continue;
2639
2640		remove_nexthop(net, nhi->nh_parent, NULL);
2641	}
2642}
2643
2644/* rtnl; called when net namespace is deleted */
2645static void flush_all_nexthops(struct net *net)
2646{
2647	struct rb_root *root = &net->nexthop.rb_root;
2648	struct rb_node *node;
2649	struct nexthop *nh;
2650
2651	while ((node = rb_first(root))) {
2652		nh = rb_entry(node, struct nexthop, rb_node);
2653		remove_nexthop(net, nh, NULL);
2654		cond_resched();
2655	}
2656}
2657
2658static struct nexthop *nexthop_create_group(struct net *net,
2659					    struct nh_config *cfg)
2660{
2661	struct nlattr *grps_attr = cfg->nh_grp;
2662	struct nexthop_grp *entry = nla_data(grps_attr);
2663	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
2664	struct nh_group *nhg;
2665	struct nexthop *nh;
2666	int err;
2667	int i;
2668
2669	if (WARN_ON(!num_nh))
2670		return ERR_PTR(-EINVAL);
2671
2672	nh = nexthop_alloc();
2673	if (!nh)
2674		return ERR_PTR(-ENOMEM);
2675
2676	nh->is_group = 1;
2677
2678	nhg = nexthop_grp_alloc(num_nh);
2679	if (!nhg) {
2680		kfree(nh);
2681		return ERR_PTR(-ENOMEM);
2682	}
2683
2684	/* spare group used for removals */
2685	nhg->spare = nexthop_grp_alloc(num_nh);
2686	if (!nhg->spare) {
2687		kfree(nhg);
2688		kfree(nh);
2689		return ERR_PTR(-ENOMEM);
2690	}
2691	nhg->spare->spare = nhg;
2692
2693	for (i = 0; i < nhg->num_nh; ++i) {
2694		struct nexthop *nhe;
2695		struct nh_info *nhi;
2696
2697		nhe = nexthop_find_by_id(net, entry[i].id);
2698		if (!nexthop_get(nhe)) {
2699			err = -ENOENT;
2700			goto out_no_nh;
2701		}
2702
2703		nhi = rtnl_dereference(nhe->nh_info);
2704		if (nhi->family == AF_INET)
2705			nhg->has_v4 = true;
2706
2707		nhg->nh_entries[i].stats =
2708			netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
2709		if (!nhg->nh_entries[i].stats) {
2710			err = -ENOMEM;
2711			nexthop_put(nhe);
2712			goto out_no_nh;
2713		}
2714		nhg->nh_entries[i].nh = nhe;
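		/* The netlink attribute carries weight - 1: a value of
		 * 0 means a weight of 1.
		 */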
2715		nhg->nh_entries[i].weight = entry[i].weight + 1;
2716		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
2717		nhg->nh_entries[i].nh_parent = nh;
2718	}
2719
2720	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
2721		nhg->hash_threshold = 1;
2722		nhg->is_multipath = true;
2723	} else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
2724		struct nh_res_table *res_table;
2725
2726		res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
2727		if (!res_table) {
2728			err = -ENOMEM;
2729			goto out_no_nh;
2730		}
2731
2732		rcu_assign_pointer(nhg->spare->res_table, res_table);
2733		rcu_assign_pointer(nhg->res_table, res_table);
2734		nhg->resilient = true;
2735		nhg->is_multipath = true;
2736	}
2737
2738	WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);
2739
2740	if (nhg->hash_threshold)
2741		nh_hthr_group_rebalance(nhg);
2742
2743	if (cfg->nh_fdb)
2744		nhg->fdb_nh = 1;
2745
2746	if (cfg->nh_hw_stats)
2747		nhg->hw_stats = true;
2748
2749	rcu_assign_pointer(nh->nh_grp, nhg);
2750
2751	return nh;
2752
2753out_no_nh:
2754	for (i--; i >= 0; --i) {
2755		list_del(&nhg->nh_entries[i].nh_list);
2756		free_percpu(nhg->nh_entries[i].stats);
2757		nexthop_put(nhg->nh_entries[i].nh);
2758	}
2759
2760	kfree(nhg->spare);
2761	kfree(nhg);
2762	kfree(nh);
2763
2764	return ERR_PTR(err);
2765}
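
/* Illustrative example (not part of the kernel source): with iproute2,
 * a two-path group with weights 3 and 1 could be created as
 *
 *	ip nexthop add id 1 via 192.0.2.1 dev eth0
 *	ip nexthop add id 2 via 192.0.2.2 dev eth0
 *	ip nexthop add id 10 group 1,3/2,1
 *
 * which reaches nexthop_create_group() as an NHA_GROUP attribute
 * holding two struct nexthop_grp entries with weight 2 and 0.
 */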
2766
2767static int nh_create_ipv4(struct net *net, struct nexthop *nh,
2768			  struct nh_info *nhi, struct nh_config *cfg,
2769			  struct netlink_ext_ack *extack)
2770{
2771	struct fib_nh *fib_nh = &nhi->fib_nh;
2772	struct fib_config fib_cfg = {
2773		.fc_oif   = cfg->nh_ifindex,
2774		.fc_gw4   = cfg->gw.ipv4,
2775		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
2776		.fc_flags = cfg->nh_flags,
2777		.fc_nlinfo = cfg->nlinfo,
2778		.fc_encap = cfg->nh_encap,
2779		.fc_encap_type = cfg->nh_encap_type,
2780	};
2781	u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
2782	int err;
2783
2784	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
2785	if (err) {
2786		fib_nh_release(net, fib_nh);
2787		goto out;
2788	}
2789
2790	if (nhi->fdb_nh)
2791		goto out;
2792
2793	/* sets nh_dev if successful */
2794	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
2795	if (!err) {
2796		nh->nh_flags = fib_nh->fib_nh_flags;
2797		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
2798					  !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
2799	} else {
2800		fib_nh_release(net, fib_nh);
2801	}
2802out:
2803	return err;
2804}
2805
2806static int nh_create_ipv6(struct net *net, struct nexthop *nh,
2807			  struct nh_info *nhi, struct nh_config *cfg,
2808			  struct netlink_ext_ack *extack)
2809{
2810	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
2811	struct fib6_config fib6_cfg = {
2812		.fc_table = l3mdev_fib_table(cfg->dev),
2813		.fc_ifindex = cfg->nh_ifindex,
2814		.fc_gateway = cfg->gw.ipv6,
2815		.fc_flags = cfg->nh_flags,
2816		.fc_nlinfo = cfg->nlinfo,
2817		.fc_encap = cfg->nh_encap,
2818		.fc_encap_type = cfg->nh_encap_type,
2819		.fc_is_fdb = cfg->nh_fdb,
2820	};
2821	int err;
2822
2823	if (!ipv6_addr_any(&cfg->gw.ipv6))
2824		fib6_cfg.fc_flags |= RTF_GATEWAY;
2825
2826	/* sets nh_dev if successful */
2827	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
2828				      extack);
2829	if (err) {
2830		/* IPv6 is not enabled, don't call fib6_nh_release */
2831		if (err == -EAFNOSUPPORT)
2832			goto out;
2833		ipv6_stub->fib6_nh_release(fib6_nh);
2834	} else {
2835		nh->nh_flags = fib6_nh->fib_nh_flags;
2836	}
2837out:
2838	return err;
2839}
2840
2841static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
2842				      struct netlink_ext_ack *extack)
2843{
2844	struct nh_info *nhi;
2845	struct nexthop *nh;
2846	int err = 0;
2847
2848	nh = nexthop_alloc();
2849	if (!nh)
2850		return ERR_PTR(-ENOMEM);
2851
2852	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
2853	if (!nhi) {
2854		kfree(nh);
2855		return ERR_PTR(-ENOMEM);
2856	}
2857
2858	nh->nh_flags = cfg->nh_flags;
2859	nh->net = net;
2860
2861	nhi->nh_parent = nh;
2862	nhi->family = cfg->nh_family;
2863	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
2864
2865	if (cfg->nh_fdb)
2866		nhi->fdb_nh = 1;
2867
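	/* A blackhole nexthop needs some device for the FIB plumbing,
	 * so bind it to loopback, which is always present.
	 */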
2868	if (cfg->nh_blackhole) {
2869		nhi->reject_nh = 1;
2870		cfg->nh_ifindex = net->loopback_dev->ifindex;
2871	}
2872
2873	switch (cfg->nh_family) {
2874	case AF_INET:
2875		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
2876		break;
2877	case AF_INET6:
2878		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
2879		break;
2880	}
2881
2882	if (err) {
2883		kfree(nhi);
2884		kfree(nh);
2885		return ERR_PTR(err);
2886	}
2887
2888	/* add the entry to the device-based hash */
2889	if (!nhi->fdb_nh)
2890		nexthop_devhash_add(net, nhi);
2891
2892	rcu_assign_pointer(nh->nh_info, nhi);
2893
2894	return nh;
2895}
2896
2897/* called with rtnl lock held */
2898static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
2899				   struct netlink_ext_ack *extack)
2900{
2901	struct nexthop *nh;
2902	int err;
2903
2904	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
2905		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
2906		return ERR_PTR(-EINVAL);
2907	}
2908
2909	if (!cfg->nh_id) {
2910		cfg->nh_id = nh_find_unused_id(net);
2911		if (!cfg->nh_id) {
2912			NL_SET_ERR_MSG(extack, "No unused id");
2913			return ERR_PTR(-EINVAL);
2914		}
2915	}
2916
2917	if (cfg->nh_grp)
2918		nh = nexthop_create_group(net, cfg);
2919	else
2920		nh = nexthop_create(net, cfg, extack);
2921
2922	if (IS_ERR(nh))
2923		return nh;
2924
2925	refcount_set(&nh->refcnt, 1);
2926	nh->id = cfg->nh_id;
2927	nh->protocol = cfg->nh_protocol;
2928	nh->net = net;
2929
2930	err = insert_nexthop(net, nh, cfg, extack);
2931	if (err) {
2932		__remove_nexthop(net, nh, NULL);
2933		nexthop_put(nh);
2934		nh = ERR_PTR(err);
2935	}
2936
2937	return nh;
2938}
2939
2940static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
2941			    unsigned long *timer_p, bool *has_p,
2942			    struct netlink_ext_ack *extack)
2943{
2944	unsigned long timer;
2945	u32 value;
2946
2947	if (!attr) {
2948		*timer_p = fallback;
2949		*has_p = false;
2950		return 0;
2951	}
2952
2953	value = nla_get_u32(attr);
2954	timer = clock_t_to_jiffies(value);
2955	if (timer == ~0UL) {
2956		NL_SET_ERR_MSG(extack, "Timer value too large");
2957		return -EINVAL;
2958	}
2959
2960	*timer_p = timer;
2961	*has_p = true;
2962	return 0;
2963}
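
/* Worked example: the attribute is interpreted in clock_t units
 * (USER_HZ, which is 100 ticks per second on Linux), so a netlink
 * value of 12000 converts to 120 seconds' worth of jiffies here.
 */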
2964
2965static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
2966				    struct netlink_ext_ack *extack)
2967{
2968	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
2969	int err;
2970
2971	if (res) {
2972		err = nla_parse_nested(tb,
2973				       ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
2974				       res, rtm_nh_res_policy_new, extack);
2975		if (err < 0)
2976			return err;
2977	}
2978
2979	if (tb[NHA_RES_GROUP_BUCKETS]) {
2980		cfg->nh_grp_res_num_buckets =
2981			nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
2982		cfg->nh_grp_res_has_num_buckets = true;
2983		if (!cfg->nh_grp_res_num_buckets) {
2984			NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
2985			return -EINVAL;
2986		}
2987	}
2988
2989	err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
2990			       NH_RES_DEFAULT_IDLE_TIMER,
2991			       &cfg->nh_grp_res_idle_timer,
2992			       &cfg->nh_grp_res_has_idle_timer,
2993			       extack);
2994	if (err)
2995		return err;
2996
2997	return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
2998				NH_RES_DEFAULT_UNBALANCED_TIMER,
2999				&cfg->nh_grp_res_unbalanced_timer,
3000				&cfg->nh_grp_res_has_unbalanced_timer,
3001				extack);
3002}
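
/* Illustrative example (assuming current iproute2 syntax):
 *
 *	ip nexthop add id 10 group 1/2 type resilient \
 *		buckets 32 idle_timer 120 unbalanced_timer 300
 *
 * arrives as an NHA_RES_GROUP nest carrying NHA_RES_GROUP_BUCKETS and
 * the two timer attributes parsed above.
 */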
3003
3004static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
3005			    struct nlmsghdr *nlh, struct nh_config *cfg,
3006			    struct netlink_ext_ack *extack)
3007{
3008	struct nhmsg *nhm = nlmsg_data(nlh);
3009	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
3010	int err;
3011
3012	err = nlmsg_parse(nlh, sizeof(*nhm), tb,
3013			  ARRAY_SIZE(rtm_nh_policy_new) - 1,
3014			  rtm_nh_policy_new, extack);
3015	if (err < 0)
3016		return err;
3017
3018	err = -EINVAL;
3019	if (nhm->resvd || nhm->nh_scope) {
3020		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
3021		goto out;
3022	}
3023	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
3024		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
3025		goto out;
3026	}
3027
3028	switch (nhm->nh_family) {
3029	case AF_INET:
3030	case AF_INET6:
3031		break;
3032	case AF_UNSPEC:
3033		if (tb[NHA_GROUP])
3034			break;
3035		fallthrough;
3036	default:
3037		NL_SET_ERR_MSG(extack, "Invalid address family");
3038		goto out;
3039	}
3040
3041	memset(cfg, 0, sizeof(*cfg));
3042	cfg->nlflags = nlh->nlmsg_flags;
3043	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
3044	cfg->nlinfo.nlh = nlh;
3045	cfg->nlinfo.nl_net = net;
3046
3047	cfg->nh_family = nhm->nh_family;
3048	cfg->nh_protocol = nhm->nh_protocol;
3049	cfg->nh_flags = nhm->nh_flags;
3050
3051	if (tb[NHA_ID])
3052		cfg->nh_id = nla_get_u32(tb[NHA_ID]);
3053
3054	if (tb[NHA_FDB]) {
3055		if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
3056		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE]) {
3057			NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
3058			goto out;
3059		}
3060		if (nhm->nh_flags) {
3061			NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
3062			goto out;
3063		}
3064		cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
3065	}
3066
3067	if (tb[NHA_GROUP]) {
3068		if (nhm->nh_family != AF_UNSPEC) {
3069			NL_SET_ERR_MSG(extack, "Invalid family for group");
3070			goto out;
3071		}
3072		cfg->nh_grp = tb[NHA_GROUP];
3073
3074		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
3075		if (tb[NHA_GROUP_TYPE])
3076			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
3077
3078		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
3079			NL_SET_ERR_MSG(extack, "Invalid group type");
3080			goto out;
3081		}
3082		err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb),
3083					  cfg->nh_grp_type, extack);
3084		if (err)
3085			goto out;
3086
3087		if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
3088			err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
3089						       cfg, extack);
3090
3091		if (tb[NHA_HW_STATS_ENABLE])
3092			cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);
3093
3094		/* no other attributes should be set */
3095		goto out;
3096	}
3097
3098	if (tb[NHA_BLACKHOLE]) {
3099		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
3100		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
3101			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
3102			goto out;
3103		}
3104
3105		cfg->nh_blackhole = 1;
3106		err = 0;
3107		goto out;
3108	}
3109
3110	if (!cfg->nh_fdb && !tb[NHA_OIF]) {
3111		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
3112		goto out;
3113	}
3114
3115	if (!cfg->nh_fdb && tb[NHA_OIF]) {
3116		cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
3117		if (cfg->nh_ifindex)
3118			cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
3119
3120		if (!cfg->dev) {
3121			NL_SET_ERR_MSG(extack, "Invalid device index");
3122			goto out;
3123		} else if (!(cfg->dev->flags & IFF_UP)) {
3124			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3125			err = -ENETDOWN;
3126			goto out;
3127		} else if (!netif_carrier_ok(cfg->dev)) {
3128			NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
3129			err = -ENETDOWN;
3130			goto out;
3131		}
3132	}
3133
3134	err = -EINVAL;
3135	if (tb[NHA_GATEWAY]) {
3136		struct nlattr *gwa = tb[NHA_GATEWAY];
3137
3138		switch (cfg->nh_family) {
3139		case AF_INET:
3140			if (nla_len(gwa) != sizeof(u32)) {
3141				NL_SET_ERR_MSG(extack, "Invalid gateway");
3142				goto out;
3143			}
3144			cfg->gw.ipv4 = nla_get_be32(gwa);
3145			break;
3146		case AF_INET6:
3147			if (nla_len(gwa) != sizeof(struct in6_addr)) {
3148				NL_SET_ERR_MSG(extack, "Invalid gateway");
3149				goto out;
3150			}
3151			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
3152			break;
3153		default:
3154			NL_SET_ERR_MSG(extack,
3155				       "Unknown address family for gateway");
3156			goto out;
3157		}
3158	} else {
3159		/* device-only nexthop (no gateway) */
3160		if (cfg->nh_flags & RTNH_F_ONLINK) {
3161			NL_SET_ERR_MSG(extack,
3162				       "ONLINK flag can not be set for nexthop without a gateway");
3163			goto out;
3164		}
3165	}
3166
3167	if (tb[NHA_ENCAP]) {
3168		cfg->nh_encap = tb[NHA_ENCAP];
3169
3170		if (!tb[NHA_ENCAP_TYPE]) {
3171			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
3172			goto out;
3173		}
3174
3175		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
3176		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
3177		if (err < 0)
3178			goto out;
3179
3180	} else if (tb[NHA_ENCAP_TYPE]) {
3181		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
3182		goto out;
3183	}
3184
3185	if (tb[NHA_HW_STATS_ENABLE]) {
3186		NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
3187		goto out;
3188	}
3189
3190	err = 0;
3191out:
3192	return err;
3193}
3194
3195/* rtnl */
3196static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3197			   struct netlink_ext_ack *extack)
3198{
3199	struct net *net = sock_net(skb->sk);
3200	struct nh_config cfg;
3201	struct nexthop *nh;
3202	int err;
3203
3204	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
3205	if (!err) {
3206		nh = nexthop_add(net, &cfg, extack);
3207		if (IS_ERR(nh))
3208			err = PTR_ERR(nh);
3209	}
3210
3211	return err;
3212}
3213
3214static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
3215				struct nlattr **tb, u32 *id, u32 *op_flags,
3216				struct netlink_ext_ack *extack)
3217{
3218	struct nhmsg *nhm = nlmsg_data(nlh);
3219
3220	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3221		NL_SET_ERR_MSG(extack, "Invalid values in header");
3222		return -EINVAL;
3223	}
3224
3225	if (!tb[NHA_ID]) {
3226		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
3227		return -EINVAL;
3228	}
3229
3230	*id = nla_get_u32(tb[NHA_ID]);
3231	if (!(*id)) {
3232		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3233		return -EINVAL;
3234	}
3235
3236	if (op_flags) {
3237		if (tb[NHA_OP_FLAGS])
3238			*op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
3239		else
3240			*op_flags = 0;
3241	}
3242
3243	return 0;
3244}
3245
3246/* rtnl */
3247static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3248			   struct netlink_ext_ack *extack)
3249{
3250	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)];
3251	struct net *net = sock_net(skb->sk);
3252	struct nl_info nlinfo = {
3253		.nlh = nlh,
3254		.nl_net = net,
3255		.portid = NETLINK_CB(skb).portid,
3256	};
3257	struct nexthop *nh;
3258	int err;
3259	u32 id;
3260
3261	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3262			  ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del,
3263			  extack);
3264	if (err < 0)
3265		return err;
3266
3267	err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack);
3268	if (err)
3269		return err;
3270
3271	nh = nexthop_find_by_id(net, id);
3272	if (!nh)
3273		return -ENOENT;
3274
3275	remove_nexthop(net, nh, &nlinfo);
3276
3277	return 0;
3278}
3279
3280/* rtnl */
3281static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3282			   struct netlink_ext_ack *extack)
3283{
3284	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
3285	struct net *net = sock_net(in_skb->sk);
3286	struct sk_buff *skb = NULL;
3287	struct nexthop *nh;
3288	u32 op_flags;
3289	int err;
3290	u32 id;
3291
3292	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3293			  ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get,
3294			  extack);
3295	if (err < 0)
3296		return err;
3297
3298	err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
3299	if (err)
3300		return err;
3301
3302	err = -ENOBUFS;
3303	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3304	if (!skb)
3305		goto out;
3306
3307	err = -ENOENT;
3308	nh = nexthop_find_by_id(net, id);
3309	if (!nh)
3310		goto errout_free;
3311
3312	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
3313			   nlh->nlmsg_seq, 0, op_flags);
3314	if (err < 0) {
3315		WARN_ON(err == -EMSGSIZE);
3316		goto errout_free;
3317	}
3318
3319	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3320out:
3321	return err;
3322errout_free:
3323	kfree_skb(skb);
3324	goto out;
3325}
3326
3327struct nh_dump_filter {
3328	u32 nh_id;
3329	int dev_idx;
3330	int master_idx;
3331	bool group_filter;
3332	bool fdb_filter;
3333	u32 res_bucket_nh_id;
3334	u32 op_flags;
3335};
3336
3337static bool nh_dump_filtered(struct nexthop *nh,
3338			     struct nh_dump_filter *filter, u8 family)
3339{
3340	const struct net_device *dev;
3341	const struct nh_info *nhi;
3342
3343	if (filter->group_filter && !nh->is_group)
3344		return true;
3345
3346	if (!filter->dev_idx && !filter->master_idx && !family)
3347		return false;
3348
3349	if (nh->is_group)
3350		return true;
3351
3352	nhi = rtnl_dereference(nh->nh_info);
3353	if (family && nhi->family != family)
3354		return true;
3355
3356	dev = nhi->fib_nhc.nhc_dev;
3357	if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
3358		return true;
3359
3360	if (filter->master_idx) {
3361		struct net_device *master;
3362
3363		if (!dev)
3364			return true;
3365
3366		master = netdev_master_upper_dev_get((struct net_device *)dev);
3367		if (!master || master->ifindex != filter->master_idx)
3368			return true;
3369	}
3370
3371	return false;
3372}
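
/* Illustrative mapping of the filters to dump requests (assuming
 * current iproute2 syntax; device names are hypothetical):
 *
 *	ip nexthop show groups		-> NHA_GROUPS
 *	ip nexthop show dev eth0	-> NHA_OIF
 *	ip nexthop show master br0	-> NHA_MASTER
 *	ip nexthop show fdb		-> NHA_FDB
 */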
3373
3374static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
3375			       struct nh_dump_filter *filter,
3376			       struct netlink_ext_ack *extack)
3377{
3378	struct nhmsg *nhm;
3379	u32 idx;
3380
3381	if (tb[NHA_OIF]) {
3382		idx = nla_get_u32(tb[NHA_OIF]);
3383		if (idx > INT_MAX) {
3384			NL_SET_ERR_MSG(extack, "Invalid device index");
3385			return -EINVAL;
3386		}
3387		filter->dev_idx = idx;
3388	}
3389	if (tb[NHA_MASTER]) {
3390		idx = nla_get_u32(tb[NHA_MASTER]);
3391		if (idx > INT_MAX) {
3392			NL_SET_ERR_MSG(extack, "Invalid master device index");
3393			return -EINVAL;
3394		}
3395		filter->master_idx = idx;
3396	}
3397	filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
3398	filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);
3399
3400	nhm = nlmsg_data(nlh);
3401	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3402		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
3403		return -EINVAL;
3404	}
3405
3406	return 0;
3407}
3408
3409static int nh_valid_dump_req(const struct nlmsghdr *nlh,
3410			     struct nh_dump_filter *filter,
3411			     struct netlink_callback *cb)
3412{
3413	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
3414	int err;
3415
3416	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3417			  ARRAY_SIZE(rtm_nh_policy_dump) - 1,
3418			  rtm_nh_policy_dump, cb->extack);
3419	if (err < 0)
3420		return err;
3421
3422	if (tb[NHA_OP_FLAGS])
3423		filter->op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
3424	else
3425		filter->op_flags = 0;
3426
3427	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3428}
3429
3430struct rtm_dump_nh_ctx {
3431	u32 idx;
3432};
3433
3434static struct rtm_dump_nh_ctx *
3435rtm_dump_nh_ctx(struct netlink_callback *cb)
3436{
3437	struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;
3438
3439	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3440	return ctx;
3441}
3442
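/* Walk the id-ordered rb-tree, resuming at ctx->idx. A non-zero return
 * from @nh_cb (typically once the dump skb is full) stops the walk;
 * because ctx->idx is updated before each callback, the next dump
 * invocation retries that nexthop and continues from there.
 */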
3443static int rtm_dump_walk_nexthops(struct sk_buff *skb,
3444				  struct netlink_callback *cb,
3445				  struct rb_root *root,
3446				  struct rtm_dump_nh_ctx *ctx,
3447				  int (*nh_cb)(struct sk_buff *skb,
3448					       struct netlink_callback *cb,
3449					       struct nexthop *nh, void *data),
3450				  void *data)
3451{
3452	struct rb_node *node;
3453	int s_idx;
3454	int err;
3455
3456	s_idx = ctx->idx;
3457	for (node = rb_first(root); node; node = rb_next(node)) {
3458		struct nexthop *nh;
3459
3460		nh = rb_entry(node, struct nexthop, rb_node);
3461		if (nh->id < s_idx)
3462			continue;
3463
3464		ctx->idx = nh->id;
3465		err = nh_cb(skb, cb, nh, data);
3466		if (err)
3467			return err;
3468	}
3469
3470	return 0;
3471}
3472
3473static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
3474			       struct nexthop *nh, void *data)
3475{
3476	struct nhmsg *nhm = nlmsg_data(cb->nlh);
3477	struct nh_dump_filter *filter = data;
3478
3479	if (nh_dump_filtered(nh, filter, nhm->nh_family))
3480		return 0;
3481
3482	return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
3483			    NETLINK_CB(cb->skb).portid,
3484			    cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
3485}
3486
3487/* rtnl */
3488static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
3489{
3490	struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
3491	struct net *net = sock_net(skb->sk);
3492	struct rb_root *root = &net->nexthop.rb_root;
3493	struct nh_dump_filter filter = {};
3494	int err;
3495
3496	err = nh_valid_dump_req(cb->nlh, &filter, cb);
3497	if (err < 0)
3498		return err;
3499
3500	err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
3501				     &rtm_dump_nexthop_cb, &filter);
3502
3503	cb->seq = net->nexthop.seq;
3504	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3505	return err;
3506}
3507
3508static struct nexthop *
3509nexthop_find_group_resilient(struct net *net, u32 id,
3510			     struct netlink_ext_ack *extack)
3511{
3512	struct nh_group *nhg;
3513	struct nexthop *nh;
3514
3515	nh = nexthop_find_by_id(net, id);
3516	if (!nh)
3517		return ERR_PTR(-ENOENT);
3518
3519	if (!nh->is_group) {
3520		NL_SET_ERR_MSG(extack, "Not a nexthop group");
3521		return ERR_PTR(-EINVAL);
3522	}
3523
3524	nhg = rtnl_dereference(nh->nh_grp);
3525	if (!nhg->resilient) {
3526		NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
3527		return ERR_PTR(-EINVAL);
3528	}
3529
3530	return nh;
3531}
3532
3533static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
3534			      struct netlink_ext_ack *extack)
3535{
3536	u32 idx;
3537
3538	if (attr) {
3539		idx = nla_get_u32(attr);
3540		if (!idx) {
3541			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3542			return -EINVAL;
3543		}
3544		*nh_id_p = idx;
3545	} else {
3546		*nh_id_p = 0;
3547	}
3548
3549	return 0;
3550}
3551
3552static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
3553				    struct nh_dump_filter *filter,
3554				    struct netlink_callback *cb)
3555{
3556	struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
3557	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
3558	int err;
3559
3560	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3561			  ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
3562			  rtm_nh_policy_dump_bucket, NULL);
3563	if (err < 0)
3564		return err;
3565
3566	err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
3567	if (err)
3568		return err;
3569
3570	if (tb[NHA_RES_BUCKET]) {
3571		size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;
3572
3573		err = nla_parse_nested(res_tb, max,
3574				       tb[NHA_RES_BUCKET],
3575				       rtm_nh_res_bucket_policy_dump,
3576				       cb->extack);
3577		if (err < 0)
3578			return err;
3579
3580		err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
3581					 &filter->res_bucket_nh_id,
3582					 cb->extack);
3583		if (err)
3584			return err;
3585	}
3586
3587	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3588}
3589
3590struct rtm_dump_res_bucket_ctx {
3591	struct rtm_dump_nh_ctx nh;
3592	u16 bucket_index;
3593};
3594
3595static struct rtm_dump_res_bucket_ctx *
3596rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
3597{
3598	struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;
3599
3600	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3601	return ctx;
3602}
3603
3604struct rtm_dump_nexthop_bucket_data {
3605	struct rtm_dump_res_bucket_ctx *ctx;
3606	struct nh_dump_filter filter;
3607};
3608
3609static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
3610				      struct netlink_callback *cb,
3611				      struct nexthop *nh,
3612				      struct rtm_dump_nexthop_bucket_data *dd)
3613{
3614	u32 portid = NETLINK_CB(cb->skb).portid;
3615	struct nhmsg *nhm = nlmsg_data(cb->nlh);
3616	struct nh_res_table *res_table;
3617	struct nh_group *nhg;
3618	u16 bucket_index;
3619	int err;
3620
3621	nhg = rtnl_dereference(nh->nh_grp);
3622	res_table = rtnl_dereference(nhg->res_table);
3623	for (bucket_index = dd->ctx->bucket_index;
3624	     bucket_index < res_table->num_nh_buckets;
3625	     bucket_index++) {
3626		struct nh_res_bucket *bucket;
3627		struct nh_grp_entry *nhge;
3628
3629		bucket = &res_table->nh_buckets[bucket_index];
3630		nhge = rtnl_dereference(bucket->nh_entry);
3631		if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
3632			continue;
3633
3634		if (dd->filter.res_bucket_nh_id &&
3635		    dd->filter.res_bucket_nh_id != nhge->nh->id)
3636			continue;
3637
3638		dd->ctx->bucket_index = bucket_index;
3639		err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
3640					 RTM_NEWNEXTHOPBUCKET, portid,
3641					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3642					 cb->extack);
3643		if (err)
3644			return err;
3645	}
3646
3647	dd->ctx->bucket_index = 0;
3648
3649	return 0;
3650}
3651
3652static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
3653				      struct netlink_callback *cb,
3654				      struct nexthop *nh, void *data)
3655{
3656	struct rtm_dump_nexthop_bucket_data *dd = data;
3657	struct nh_group *nhg;
3658
3659	if (!nh->is_group)
3660		return 0;
3661
3662	nhg = rtnl_dereference(nh->nh_grp);
3663	if (!nhg->resilient)
3664		return 0;
3665
3666	return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
3667}
3668
3669/* rtnl */
3670static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
3671				   struct netlink_callback *cb)
3672{
3673	struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
3674	struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
3675	struct net *net = sock_net(skb->sk);
3676	struct nexthop *nh;
3677	int err;
3678
3679	err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
3680	if (err)
3681		return err;
3682
3683	if (dd.filter.nh_id) {
3684		nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
3685						  cb->extack);
3686		if (IS_ERR(nh))
3687			return PTR_ERR(nh);
3688		err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
3689	} else {
3690		struct rb_root *root = &net->nexthop.rb_root;
3691
3692		err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
3693					     &rtm_dump_nexthop_bucket_cb, &dd);
3694	}
3695
3696	cb->seq = net->nexthop.seq;
3697	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3698	return err;
3699}
3700
3701static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
3702					      u16 *bucket_index,
3703					      struct netlink_ext_ack *extack)
3704{
3705	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
3706	int err;
3707
3708	err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
3709			       res, rtm_nh_res_bucket_policy_get, extack);
3710	if (err < 0)
3711		return err;
3712
3713	if (!tb[NHA_RES_BUCKET_INDEX]) {
3714		NL_SET_ERR_MSG(extack, "Bucket index is missing");
3715		return -EINVAL;
3716	}
3717
3718	*bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
3719	return 0;
3720}
3721
3722static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
3723				   u32 *id, u16 *bucket_index,
3724				   struct netlink_ext_ack *extack)
3725{
3726	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
3727	int err;
3728
3729	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3730			  ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
3731			  rtm_nh_policy_get_bucket, extack);
3732	if (err < 0)
3733		return err;
3734
3735	err = nh_valid_get_del_req(nlh, tb, id, NULL, extack);
3736	if (err)
3737		return err;
3738
3739	if (!tb[NHA_RES_BUCKET]) {
3740		NL_SET_ERR_MSG(extack, "Bucket information is missing");
3741		return -EINVAL;
3742	}
3743
3744	err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
3745						 bucket_index, extack);
3746	if (err)
3747		return err;
3748
3749	return 0;
3750}
3751
3752/* rtnl */
3753static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3754				  struct netlink_ext_ack *extack)
3755{
3756	struct net *net = sock_net(in_skb->sk);
3757	struct nh_res_table *res_table;
3758	struct sk_buff *skb = NULL;
3759	struct nh_group *nhg;
3760	struct nexthop *nh;
3761	u16 bucket_index;
3762	int err;
3763	u32 id;
3764
3765	err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
3766	if (err)
3767		return err;
3768
3769	nh = nexthop_find_group_resilient(net, id, extack);
3770	if (IS_ERR(nh))
3771		return PTR_ERR(nh);
3772
3773	nhg = rtnl_dereference(nh->nh_grp);
3774	res_table = rtnl_dereference(nhg->res_table);
3775	if (bucket_index >= res_table->num_nh_buckets) {
3776		NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
3777		return -ENOENT;
3778	}
3779
3780	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3781	if (!skb)
3782		return -ENOBUFS;
3783
3784	err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
3785				 bucket_index, RTM_NEWNEXTHOPBUCKET,
3786				 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
3787				 0, extack);
3788	if (err < 0) {
3789		WARN_ON(err == -EMSGSIZE);
3790		goto errout_free;
3791	}
3792
3793	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3794
3795errout_free:
3796	kfree_skb(skb);
3797	return err;
3798}
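
/* Illustrative example (assuming current iproute2 syntax):
 *
 *	ip nexthop bucket get id 10 index 0
 *
 * is answered from here with a single RTM_NEWNEXTHOPBUCKET message.
 */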
3799
3800static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
3801{
3802	unsigned int hash = nh_dev_hashfn(dev->ifindex);
3803	struct net *net = dev_net(dev);
3804	struct hlist_head *head = &net->nexthop.devhash[hash];
3805	struct hlist_node *n;
3806	struct nh_info *nhi;
3807
3808	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
3809		if (nhi->fib_nhc.nhc_dev == dev) {
3810			if (nhi->family == AF_INET)
3811				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
3812						   orig_mtu);
3813		}
3814	}
3815}
3816
3817/* rtnl */
3818static int nh_netdev_event(struct notifier_block *this,
3819			   unsigned long event, void *ptr)
3820{
3821	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3822	struct netdev_notifier_info_ext *info_ext;
3823
3824	switch (event) {
3825	case NETDEV_DOWN:
3826	case NETDEV_UNREGISTER:
3827		nexthop_flush_dev(dev, event);
3828		break;
3829	case NETDEV_CHANGE:
3830		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
3831			nexthop_flush_dev(dev, event);
3832		break;
3833	case NETDEV_CHANGEMTU:
3834		info_ext = ptr;
3835		nexthop_sync_mtu(dev, info_ext->ext.mtu);
3836		rt_cache_flush(dev_net(dev));
3837		break;
3838	}
3839	return NOTIFY_DONE;
3840}
3841
3842static struct notifier_block nh_netdev_notifier = {
3843	.notifier_call = nh_netdev_event,
3844};
3845
3846static int nexthops_dump(struct net *net, struct notifier_block *nb,
3847			 enum nexthop_event_type event_type,
3848			 struct netlink_ext_ack *extack)
3849{
3850	struct rb_root *root = &net->nexthop.rb_root;
3851	struct rb_node *node;
3852	int err = 0;
3853
3854	for (node = rb_first(root); node; node = rb_next(node)) {
3855		struct nexthop *nh;
3856
3857		nh = rb_entry(node, struct nexthop, rb_node);
3858		err = call_nexthop_notifier(nb, net, event_type, nh, extack);
3859		if (err)
3860			break;
3861	}
3862
3863	return err;
3864}
3865
3866int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
3867			      struct netlink_ext_ack *extack)
3868{
3869	int err;
3870
3871	rtnl_lock();
3872	err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
3873	if (err)
3874		goto unlock;
3875	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
3876					       nb);
3877unlock:
3878	rtnl_unlock();
3879	return err;
3880}
3881EXPORT_SYMBOL(register_nexthop_notifier);
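
/* Illustrative driver-side usage (a sketch with hypothetical names,
 * not part of this file):
 *
 *	static int my_nh_event(struct notifier_block *nb,
 *			       unsigned long event, void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *
 *		switch (event) {
 *		case NEXTHOP_EVENT_REPLACE:
 *			return notifier_from_errno(my_program_nh(info));
 *		case NEXTHOP_EVENT_DEL:
 *			my_forget_nh(info->id);
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block my_nh_nb = {
 *		.notifier_call = my_nh_event,
 *	};
 *
 *	err = register_nexthop_notifier(net, &my_nh_nb, extack);
 *
 * Registration replays all existing nexthops to the new listener as
 * NEXTHOP_EVENT_REPLACE events via nexthops_dump() above.
 */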
3882
3883int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3884{
3885	int err;
3886
3887	err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
3888						 nb);
3889	if (!err)
3890		nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
3891	return err;
3892}
3893EXPORT_SYMBOL(__unregister_nexthop_notifier);
3894
3895int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3896{
3897	int err;
3898
3899	rtnl_lock();
3900	err = __unregister_nexthop_notifier(net, nb);
3901	rtnl_unlock();
3902	return err;
3903}
3904EXPORT_SYMBOL(unregister_nexthop_notifier);
3905
3906void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
3907{
3908	struct nexthop *nexthop;
3909
3910	rcu_read_lock();
3911
3912	nexthop = nexthop_find_by_id(net, id);
3913	if (!nexthop)
3914		goto out;
3915
3916	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3917	if (offload)
3918		nexthop->nh_flags |= RTNH_F_OFFLOAD;
3919	if (trap)
3920		nexthop->nh_flags |= RTNH_F_TRAP;
3921
3922out:
3923	rcu_read_unlock();
3924}
3925EXPORT_SYMBOL(nexthop_set_hw_flags);
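
/* Illustrative driver-side usage: after installing nexthop @id into
 * hardware, a driver reports
 *
 *	nexthop_set_hw_flags(net, id, true, false);
 *
 * and subsequent dumps of that nexthop carry the "offload" flag.
 */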
3926
3927void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
3928				 bool offload, bool trap)
3929{
3930	struct nh_res_table *res_table;
3931	struct nh_res_bucket *bucket;
3932	struct nexthop *nexthop;
3933	struct nh_group *nhg;
3934
3935	rcu_read_lock();
3936
3937	nexthop = nexthop_find_by_id(net, id);
3938	if (!nexthop || !nexthop->is_group)
3939		goto out;
3940
3941	nhg = rcu_dereference(nexthop->nh_grp);
3942	if (!nhg->resilient)
3943		goto out;
3944
3945	res_table = rcu_dereference(nhg->res_table);
3946	if (bucket_index >= res_table->num_nh_buckets)
3947		goto out;
3948
3949	bucket = &res_table->nh_buckets[bucket_index];
3950	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3951	if (offload)
3952		bucket->nh_flags |= RTNH_F_OFFLOAD;
3953	if (trap)
3954		bucket->nh_flags |= RTNH_F_TRAP;
3955
3956out:
3957	rcu_read_unlock();
3958}
3959EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
3960
3961void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
3962				     unsigned long *activity)
3963{
3964	struct nh_res_table *res_table;
3965	struct nexthop *nexthop;
3966	struct nh_group *nhg;
3967	u16 i;
3968
3969	rcu_read_lock();
3970
3971	nexthop = nexthop_find_by_id(net, id);
3972	if (!nexthop || !nexthop->is_group)
3973		goto out;
3974
3975	nhg = rcu_dereference(nexthop->nh_grp);
3976	if (!nhg->resilient)
3977		goto out;
3978
3979	/* Instead of silently ignoring some buckets, demand that the sizes
3980	 * be the same.
3981	 */
3982	res_table = rcu_dereference(nhg->res_table);
3983	if (num_buckets != res_table->num_nh_buckets)
3984		goto out;
3985
3986	for (i = 0; i < num_buckets; i++) {
3987		if (test_bit(i, activity))
3988			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
3989	}
3990
3991out:
3992	rcu_read_unlock();
3993}
3994EXPORT_SYMBOL(nexthop_res_grp_activity_update);
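
/* Illustrative driver-side usage (hypothetical helper name): a driver
 * periodically reads per-bucket activity from hardware and reports it,
 * so that busy buckets are not reassigned during upkeep:
 *
 *	unsigned long *activity;
 *
 *	activity = bitmap_zalloc(num_buckets, GFP_KERNEL);
 *	if (!activity)
 *		return;
 *	my_hw_read_bucket_activity(activity, num_buckets);
 *	nexthop_res_grp_activity_update(net, grp_id, num_buckets,
 *					activity);
 *	bitmap_free(activity);
 */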
3995
3996static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list,
3997						   struct list_head *dev_to_kill)
3998{
3999	struct net *net;
4000
4001	ASSERT_RTNL();
4002	list_for_each_entry(net, net_list, exit_list)
4003		flush_all_nexthops(net);
4004}
4005
4006static void __net_exit nexthop_net_exit(struct net *net)
4007{
4008	kfree(net->nexthop.devhash);
4009	net->nexthop.devhash = NULL;
4010}
4011
4012static int __net_init nexthop_net_init(struct net *net)
4013{
4014	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;
4015
4016	net->nexthop.rb_root = RB_ROOT;
4017	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
4018	if (!net->nexthop.devhash)
4019		return -ENOMEM;
4020	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);
4021
4022	return 0;
4023}
4024
4025static struct pernet_operations nexthop_net_ops = {
4026	.init = nexthop_net_init,
4027	.exit = nexthop_net_exit,
4028	.exit_batch_rtnl = nexthop_net_exit_batch_rtnl,
4029};
4030
4031static int __init nexthop_init(void)
4032{
4033	register_pernet_subsys(&nexthop_net_ops);
4034
4035	register_netdevice_notifier(&nh_netdev_notifier);
4036
4037	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
4038	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
4039	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
4040		      rtm_dump_nexthop, 0);
4041
4042	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
4043	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
4044
4045	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
4046	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
4047
4048	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOPBUCKET, rtm_get_nexthop_bucket,
4049		      rtm_dump_nexthop_bucket, 0);
4050
4051	return 0;
4052}
4053subsys_initcall(nexthop_init);