v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2
   3#include <linux/mrp_bridge.h>
   4#include "br_private_mrp.h"
   5
   6static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
   7static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 };
   8
   9static bool br_mrp_is_ring_port(struct net_bridge_port *p_port,
  10				struct net_bridge_port *s_port,
  11				struct net_bridge_port *port)
  12{
  13	if (port == p_port ||
  14	    port == s_port)
  15		return true;
  16
  17	return false;
  18}
  19
  20static bool br_mrp_is_in_port(struct net_bridge_port *i_port,
  21			      struct net_bridge_port *port)
  22{
  23	if (port == i_port)
  24		return true;
  25
  26	return false;
  27}
  28
  29static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
  30					       u32 ifindex)
  31{
  32	struct net_bridge_port *res = NULL;
  33	struct net_bridge_port *port;
  34
  35	list_for_each_entry(port, &br->port_list, list) {
  36		if (port->dev->ifindex == ifindex) {
  37			res = port;
  38			break;
  39		}
  40	}
  41
  42	return res;
  43}
  44
  45static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
  46{
  47	struct br_mrp *res = NULL;
  48	struct br_mrp *mrp;
  49
  50	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
  51				lockdep_rtnl_is_held()) {
  52		if (mrp->ring_id == ring_id) {
  53			res = mrp;
  54			break;
  55		}
  56	}
  57
  58	return res;
  59}
  60
  61static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id)
  62{
  63	struct br_mrp *res = NULL;
  64	struct br_mrp *mrp;
  65
  66	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
  67				lockdep_rtnl_is_held()) {
  68		if (mrp->in_id == in_id) {
  69			res = mrp;
  70			break;
  71		}
  72	}
  73
  74	return res;
  75}
  76
  77static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
  78{
  79	struct br_mrp *mrp;
  80
  81	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
  82				lockdep_rtnl_is_held()) {
  83		struct net_bridge_port *p;
  84
  85		p = rtnl_dereference(mrp->p_port);
  86		if (p && p->dev->ifindex == ifindex)
  87			return false;
  88
  89		p = rtnl_dereference(mrp->s_port);
  90		if (p && p->dev->ifindex == ifindex)
  91			return false;
  92
  93		p = rtnl_dereference(mrp->i_port);
  94		if (p && p->dev->ifindex == ifindex)
  95			return false;
  96	}
  97
  98	return true;
  99}
 100
 101static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
 102				       struct net_bridge_port *p)
 103{
 104	struct br_mrp *res = NULL;
 105	struct br_mrp *mrp;
 106
 107	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
 108				lockdep_rtnl_is_held()) {
 109		if (rcu_access_pointer(mrp->p_port) == p ||
 110		    rcu_access_pointer(mrp->s_port) == p ||
 111		    rcu_access_pointer(mrp->i_port) == p) {
 112			res = mrp;
 113			break;
 114		}
 115	}
 116
 117	return res;
 118}
 119
 120static int br_mrp_next_seq(struct br_mrp *mrp)
 121{
 122	mrp->seq_id++;
 123	return mrp->seq_id;
 124}
 125
 126static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
 127					const u8 *src, const u8 *dst)
 128{
 129	struct ethhdr *eth_hdr;
 130	struct sk_buff *skb;
 131	__be16 *version;
 132
 133	skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
 134	if (!skb)
 135		return NULL;
 136
 137	skb->dev = p->dev;
 138	skb->protocol = htons(ETH_P_MRP);
 139	skb->priority = MRP_FRAME_PRIO;
 140	skb_reserve(skb, sizeof(*eth_hdr));
 141
 142	eth_hdr = skb_push(skb, sizeof(*eth_hdr));
 143	ether_addr_copy(eth_hdr->h_dest, dst);
 144	ether_addr_copy(eth_hdr->h_source, src);
 145	eth_hdr->h_proto = htons(ETH_P_MRP);
 146
 147	version = skb_put(skb, sizeof(*version));
 148	*version = cpu_to_be16(MRP_VERSION);
 149
 150	return skb;
 151}
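/* A minimal sketch of the PDU that br_mrp_skb_alloc() starts building,
 * using only helpers defined in this file: an Ethernet frame with
 * EtherType ETH_P_MRP, a 16-bit version word, then a sequence of TLVs
 * terminated by an END TLV. A hypothetical, non-compiled usage sketch:
 */
#if 0
	struct sk_buff *skb;

	/* dst is one of the multicast DMACs declared at the top of the file */
	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
	if (skb) {
		/* callers append their payload TLVs here, then close the PDU */
		br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);
		skb_reset_network_header(skb);
		dev_queue_xmit(skb);
	}
#endif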
 152
 153static void br_mrp_skb_tlv(struct sk_buff *skb,
 154			   enum br_mrp_tlv_header_type type,
 155			   u8 length)
 156{
 157	struct br_mrp_tlv_hdr *hdr;
 158
 159	hdr = skb_put(skb, sizeof(*hdr));
 160	hdr->type = type;
 161	hdr->length = length;
 162}
 163
 164static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp)
 165{
 166	struct br_mrp_common_hdr *hdr;
 167
 168	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr));
 169
 170	hdr = skb_put(skb, sizeof(*hdr));
 171	hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp));
 172	memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH);
 173}
 174
 175static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
 176					     struct net_bridge_port *p,
 177					     enum br_mrp_port_role_type port_role)
 178{
 179	struct br_mrp_ring_test_hdr *hdr = NULL;
 180	struct sk_buff *skb = NULL;
 181
 182	if (!p)
 183		return NULL;
 184
 185	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
 186	if (!skb)
 187		return NULL;
 188
 189	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr));
 190	hdr = skb_put(skb, sizeof(*hdr));
 191
 192	hdr->prio = cpu_to_be16(mrp->prio);
 193	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
 194	hdr->port_role = cpu_to_be16(port_role);
 195	hdr->state = cpu_to_be16(mrp->ring_state);
 196	hdr->transitions = cpu_to_be16(mrp->ring_transitions);
 197	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));
 198
 199	br_mrp_skb_common(skb, mrp);
 200	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);
 201
 202	return skb;
 203}
 204
 205static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp,
 206						struct net_bridge_port *p,
 207						enum br_mrp_port_role_type port_role)
 208{
 209	struct br_mrp_in_test_hdr *hdr = NULL;
 210	struct sk_buff *skb = NULL;
 211
 212	if (!p)
 213		return NULL;
 214
 215	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac);
 216	if (!skb)
 217		return NULL;
 218
 219	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr));
 220	hdr = skb_put(skb, sizeof(*hdr));
 221
 222	hdr->id = cpu_to_be16(mrp->in_id);
 223	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
 224	hdr->port_role = cpu_to_be16(port_role);
 225	hdr->state = cpu_to_be16(mrp->in_state);
 226	hdr->transitions = cpu_to_be16(mrp->in_transitions);
 227	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));
 228
 229	br_mrp_skb_common(skb, mrp);
 230	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);
 231
 232	return skb;
 233}
 234
  235/* This function is continuously called in the following cases:
  236 * - when the node role is MRM, in this case test_monitor is always set to
  237 *   false because it needs to notify userspace that the ring is open and
  238 *   needs to send MRP_Test frames
  239 * - when the node role is MRA, there are 2 subcases:
  240 *     - when the MRA behaves as MRM, this case is similar to the MRM role
  241 *     - when the MRA behaves as MRC, test_monitor is set to true because
  242 *       it needs to detect when it stops seeing MRP_Test frames from the
  243 *       MRM node, but it doesn't need to send MRP_Test frames itself.
  244 */
 245static void br_mrp_test_work_expired(struct work_struct *work)
 246{
 247	struct delayed_work *del_work = to_delayed_work(work);
 248	struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work);
 249	struct net_bridge_port *p;
 250	bool notify_open = false;
 251	struct sk_buff *skb;
 252
 253	if (time_before_eq(mrp->test_end, jiffies))
 254		return;
 255
 256	if (mrp->test_count_miss < mrp->test_max_miss) {
 257		mrp->test_count_miss++;
 258	} else {
 259		/* Notify that the ring is open only if the ring state is
 260		 * closed, otherwise it would continue to notify at every
 261		 * interval.
 262		 * Also notify that the ring is open when the node has the
  263		 * role MRA and behaves as MRC. The reason is that userspace
  264		 * needs to know when the MRM stopped sending MRP_Test frames
  265		 * so that the current node can try to take over the MRM
  266		 * role.
 267		 */
 268		if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED ||
 269		    mrp->test_monitor)
 270			notify_open = true;
 271	}
 272
 273	rcu_read_lock();
 274
 275	p = rcu_dereference(mrp->p_port);
 276	if (p) {
 277		if (!mrp->test_monitor) {
 278			skb = br_mrp_alloc_test_skb(mrp, p,
 279						    BR_MRP_PORT_ROLE_PRIMARY);
 280			if (!skb)
 281				goto out;
 282
 283			skb_reset_network_header(skb);
 284			dev_queue_xmit(skb);
 285		}
 286
 287		if (notify_open && !mrp->ring_role_offloaded)
 288			br_mrp_ring_port_open(p->dev, true);
 289	}
 290
 291	p = rcu_dereference(mrp->s_port);
 292	if (p) {
 293		if (!mrp->test_monitor) {
 294			skb = br_mrp_alloc_test_skb(mrp, p,
 295						    BR_MRP_PORT_ROLE_SECONDARY);
 296			if (!skb)
 297				goto out;
 298
 299			skb_reset_network_header(skb);
 300			dev_queue_xmit(skb);
 301		}
 302
 303		if (notify_open && !mrp->ring_role_offloaded)
 304			br_mrp_ring_port_open(p->dev, true);
 305	}
 306
 307out:
 308	rcu_read_unlock();
 309
 310	queue_delayed_work(system_wq, &mrp->test_work,
 311			   usecs_to_jiffies(mrp->test_interval));
 312}
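/* A worked example of the miss accounting above, assuming illustrative
 * values test_interval = 500000 (500 ms) and test_max_miss = 3: the
 * counter is cleared whenever br_mrp_mrm_process()/br_mrp_mra_process()
 * below sees a valid MRP_Test frame, so notify_open only becomes true
 * after test_max_miss + 1 consecutive work expirations without such a
 * frame, i.e. after roughly 2 s of silence on the ring.
 */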
 313
  314/* This function is continuously called when the node has the interconnect role
  315 * MIM. It generates interconnect test frames, sends them on all 3 ports and
  316 * also checks whether it has stopped receiving interconnect test frames.
 317 */
 318static void br_mrp_in_test_work_expired(struct work_struct *work)
 319{
 320	struct delayed_work *del_work = to_delayed_work(work);
 321	struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work);
 322	struct net_bridge_port *p;
 323	bool notify_open = false;
 324	struct sk_buff *skb;
 325
 326	if (time_before_eq(mrp->in_test_end, jiffies))
 327		return;
 328
 329	if (mrp->in_test_count_miss < mrp->in_test_max_miss) {
 330		mrp->in_test_count_miss++;
 331	} else {
 332		/* Notify that the interconnect ring is open only if the
 333		 * interconnect ring state is closed, otherwise it would
 334		 * continue to notify at every interval.
 335		 */
 336		if (mrp->in_state == BR_MRP_IN_STATE_CLOSED)
 337			notify_open = true;
 338	}
 339
 340	rcu_read_lock();
 341
 342	p = rcu_dereference(mrp->p_port);
 343	if (p) {
 344		skb = br_mrp_alloc_in_test_skb(mrp, p,
 345					       BR_MRP_PORT_ROLE_PRIMARY);
 346		if (!skb)
 347			goto out;
 348
 349		skb_reset_network_header(skb);
 350		dev_queue_xmit(skb);
 351
 352		if (notify_open && !mrp->in_role_offloaded)
 353			br_mrp_in_port_open(p->dev, true);
 354	}
 355
 356	p = rcu_dereference(mrp->s_port);
 357	if (p) {
 358		skb = br_mrp_alloc_in_test_skb(mrp, p,
 359					       BR_MRP_PORT_ROLE_SECONDARY);
 360		if (!skb)
 361			goto out;
 362
 363		skb_reset_network_header(skb);
 364		dev_queue_xmit(skb);
 365
 366		if (notify_open && !mrp->in_role_offloaded)
 367			br_mrp_in_port_open(p->dev, true);
 368	}
 369
 370	p = rcu_dereference(mrp->i_port);
 371	if (p) {
 372		skb = br_mrp_alloc_in_test_skb(mrp, p,
 373					       BR_MRP_PORT_ROLE_INTER);
 374		if (!skb)
 375			goto out;
 376
 377		skb_reset_network_header(skb);
 378		dev_queue_xmit(skb);
 379
 380		if (notify_open && !mrp->in_role_offloaded)
 381			br_mrp_in_port_open(p->dev, true);
 382	}
 383
 384out:
 385	rcu_read_unlock();
 386
 387	queue_delayed_work(system_wq, &mrp->in_test_work,
 388			   usecs_to_jiffies(mrp->in_test_interval));
 389}
 390
 391/* Deletes the MRP instance.
 392 * note: called under rtnl_lock
 393 */
 394static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
 395{
 396	struct net_bridge_port *p;
 397	u8 state;
 398
 399	/* Stop sending MRP_Test frames */
 400	cancel_delayed_work_sync(&mrp->test_work);
 401	br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0);
 402
  403	/* Stop sending MRP_InTest frames if it has an interconnect role */
 404	cancel_delayed_work_sync(&mrp->in_test_work);
 405	br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);
 406
 407	br_mrp_switchdev_del(br, mrp);
 408
 409	/* Reset the ports */
 410	p = rtnl_dereference(mrp->p_port);
 411	if (p) {
 412		spin_lock_bh(&br->lock);
 413		state = netif_running(br->dev) ?
 414				BR_STATE_FORWARDING : BR_STATE_DISABLED;
 415		p->state = state;
 416		p->flags &= ~BR_MRP_AWARE;
 417		spin_unlock_bh(&br->lock);
 418		br_mrp_port_switchdev_set_state(p, state);
 419		rcu_assign_pointer(mrp->p_port, NULL);
 420	}
 421
 422	p = rtnl_dereference(mrp->s_port);
 423	if (p) {
 424		spin_lock_bh(&br->lock);
 425		state = netif_running(br->dev) ?
 426				BR_STATE_FORWARDING : BR_STATE_DISABLED;
 427		p->state = state;
 428		p->flags &= ~BR_MRP_AWARE;
 429		spin_unlock_bh(&br->lock);
 430		br_mrp_port_switchdev_set_state(p, state);
 431		rcu_assign_pointer(mrp->s_port, NULL);
 432	}
 433
 434	p = rtnl_dereference(mrp->i_port);
 435	if (p) {
 436		spin_lock_bh(&br->lock);
 437		state = netif_running(br->dev) ?
 438				BR_STATE_FORWARDING : BR_STATE_DISABLED;
 439		p->state = state;
 440		p->flags &= ~BR_MRP_AWARE;
 441		spin_unlock_bh(&br->lock);
 442		br_mrp_port_switchdev_set_state(p, state);
 443		rcu_assign_pointer(mrp->i_port, NULL);
 444	}
 445
 446	list_del_rcu(&mrp->list);
 447	kfree_rcu(mrp, rcu);
 448}
 449
 450/* Adds a new MRP instance.
 451 * note: called under rtnl_lock
 452 */
 453int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
 454{
 455	struct net_bridge_port *p;
 456	struct br_mrp *mrp;
 457	int err;
 458
 459	/* If the ring exists, it is not possible to create another one with the
 460	 * same ring_id
 461	 */
 462	mrp = br_mrp_find_id(br, instance->ring_id);
 463	if (mrp)
 464		return -EINVAL;
 465
 466	if (!br_mrp_get_port(br, instance->p_ifindex) ||
 467	    !br_mrp_get_port(br, instance->s_ifindex))
 468		return -EINVAL;
 469
  470	/* It is not possible to have the same port be part of multiple rings */
 471	if (!br_mrp_unique_ifindex(br, instance->p_ifindex) ||
 472	    !br_mrp_unique_ifindex(br, instance->s_ifindex))
 473		return -EINVAL;
 474
 475	mrp = kzalloc(sizeof(*mrp), GFP_KERNEL);
 476	if (!mrp)
 477		return -ENOMEM;
 478
 479	mrp->ring_id = instance->ring_id;
 480	mrp->prio = instance->prio;
 481
 482	p = br_mrp_get_port(br, instance->p_ifindex);
 483	spin_lock_bh(&br->lock);
 484	p->state = BR_STATE_FORWARDING;
 485	p->flags |= BR_MRP_AWARE;
 486	spin_unlock_bh(&br->lock);
 487	rcu_assign_pointer(mrp->p_port, p);
 488
 489	p = br_mrp_get_port(br, instance->s_ifindex);
 490	spin_lock_bh(&br->lock);
 491	p->state = BR_STATE_FORWARDING;
 492	p->flags |= BR_MRP_AWARE;
 493	spin_unlock_bh(&br->lock);
 494	rcu_assign_pointer(mrp->s_port, p);
 495
 496	INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
 497	INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
 498	list_add_tail_rcu(&mrp->list, &br->mrp_list);
 499
 500	err = br_mrp_switchdev_add(br, mrp);
 501	if (err)
 502		goto delete_mrp;
 503
 504	return 0;
 505
 506delete_mrp:
 507	br_mrp_del_impl(br, mrp);
 508
 509	return err;
 510}
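/* A hypothetical sketch of creating an instance with the function above;
 * the ring_id, prio and ifindex values (two bridge ports, ifindex 2 and
 * 3) are illustrative only, and the real caller fills
 * struct br_mrp_instance from netlink attributes. Non-compiled:
 */
#if 0
	struct br_mrp_instance inst = {
		.ring_id   = 1,
		.prio      = 0x8000,	/* assumed default manager priority */
		.p_ifindex = 2,		/* primary ring port */
		.s_ifindex = 3,		/* secondary ring port */
	};
	int err = br_mrp_add(br, &inst);	/* called under rtnl_lock */
#endif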
 511
  512/* Deletes the MRP instance that the port is part of
 513 * note: called under rtnl_lock
 514 */
 515void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
 516{
 517	struct br_mrp *mrp = br_mrp_find_port(br, p);
 518
  519	/* If the port is not part of an MRP instance just bail out */
 520	if (!mrp)
 521		return;
 522
 523	br_mrp_del_impl(br, mrp);
 524}
 525
 526/* Deletes existing MRP instance based on ring_id
 527 * note: called under rtnl_lock
 528 */
 529int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
 530{
 531	struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id);
 532
 533	if (!mrp)
 534		return -EINVAL;
 535
 536	br_mrp_del_impl(br, mrp);
 537
 538	return 0;
 539}
 540
 541/* Set port state, port state can be forwarding, blocked or disabled
 542 * note: already called with rtnl_lock
 543 */
 544int br_mrp_set_port_state(struct net_bridge_port *p,
 545			  enum br_mrp_port_state_type state)
 546{
 547	if (!p || !(p->flags & BR_MRP_AWARE))
 548		return -EINVAL;
 549
 550	spin_lock_bh(&p->br->lock);
 551
 552	if (state == BR_MRP_PORT_STATE_FORWARDING)
 553		p->state = BR_STATE_FORWARDING;
 554	else
 555		p->state = BR_STATE_BLOCKING;
 556
 557	spin_unlock_bh(&p->br->lock);
 558
 559	br_mrp_port_switchdev_set_state(p, state);
 560
 561	return 0;
 562}
 563
 564/* Set port role, port role can be primary or secondary
 565 * note: already called with rtnl_lock
 566 */
 567int br_mrp_set_port_role(struct net_bridge_port *p,
 568			 enum br_mrp_port_role_type role)
 569{
 570	struct br_mrp *mrp;
 571
 572	if (!p || !(p->flags & BR_MRP_AWARE))
 573		return -EINVAL;
 574
 575	mrp = br_mrp_find_port(p->br, p);
 576
 577	if (!mrp)
 578		return -EINVAL;
 579
 580	switch (role) {
 581	case BR_MRP_PORT_ROLE_PRIMARY:
 582		rcu_assign_pointer(mrp->p_port, p);
 583		break;
 584	case BR_MRP_PORT_ROLE_SECONDARY:
 585		rcu_assign_pointer(mrp->s_port, p);
 586		break;
 587	default:
 588		return -EINVAL;
 589	}
 590
 591	br_mrp_port_switchdev_set_role(p, role);
 592
 593	return 0;
 594}
 595
  596/* Set ring state, ring state can only be Open or Closed
 597 * note: already called with rtnl_lock
 598 */
 599int br_mrp_set_ring_state(struct net_bridge *br,
 600			  struct br_mrp_ring_state *state)
 601{
 602	struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id);
 603
 604	if (!mrp)
 605		return -EINVAL;
 606
 607	if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED &&
 608	    state->ring_state != BR_MRP_RING_STATE_CLOSED)
 609		mrp->ring_transitions++;
 610
 611	mrp->ring_state = state->ring_state;
 612
 613	br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state);
 614
 615	return 0;
 616}
 617
  618/* Set ring role, ring role can only be MRM (Media Redundancy Manager) or
  619 * MRC (Media Redundancy Client).
 620 * note: already called with rtnl_lock
 621 */
 622int br_mrp_set_ring_role(struct net_bridge *br,
 623			 struct br_mrp_ring_role *role)
 624{
 625	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
 626	int err;
 627
 628	if (!mrp)
 629		return -EINVAL;
 630
 631	mrp->ring_role = role->ring_role;
 632
  633	/* If there is an error just bail out */
 634	err = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role);
 635	if (err && err != -EOPNOTSUPP)
 636		return err;
 637
  638	/* Now detect if the HW actually applied the role or not. If the HW
  639	 * applied the role it means that the SW will not need to do those
  640	 * operations anymore. For example if the role is MRM then the HW will
  641	 * notify the SW when the ring is open; if it is not pushed to the HW,
  642	 * the SW will need to detect when the ring is open.
  643	 */
 644	mrp->ring_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;
 645
 646	return 0;
 647}
 648
  649/* Start to generate or monitor MRP test frames; the frames are generated by
  650 * the HW and, if that fails, by the SW.
 651 * note: already called with rtnl_lock
 652 */
 653int br_mrp_start_test(struct net_bridge *br,
 654		      struct br_mrp_start_test *test)
 655{
 656	struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id);
 657
 658	if (!mrp)
 659		return -EINVAL;
 660
  661	/* Try to push it to the HW; if that fails then continue with the SW
  662	 * implementation, and if that also fails then return an error.
  663	 */
 664	if (!br_mrp_switchdev_send_ring_test(br, mrp, test->interval,
 665					     test->max_miss, test->period,
 666					     test->monitor))
 667		return 0;
 668
 669	mrp->test_interval = test->interval;
 670	mrp->test_end = jiffies + usecs_to_jiffies(test->period);
 671	mrp->test_max_miss = test->max_miss;
 672	mrp->test_monitor = test->monitor;
 673	mrp->test_count_miss = 0;
 674	queue_delayed_work(system_wq, &mrp->test_work,
 675			   usecs_to_jiffies(test->interval));
 676
 677	return 0;
 678}
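/* A hypothetical sketch of arming the test machinery above; interval and
 * period are in microseconds (see usecs_to_jiffies() above) and the
 * values here are illustrative only. Non-compiled:
 */
#if 0
	struct br_mrp_start_test test = {
		.ring_id  = 1,
		.interval = 500000,	/* time between MRP_Test frames */
		.max_miss = 3,		/* tolerated misses before "open" */
		.period   = 10000000,	/* total run time until test_end */
		.monitor  = 0,		/* 1 = only monitor (MRA as MRC) */
	};
	int err = br_mrp_start_test(br, &test);
#endif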
 679
  680/* Set in state, in state can only be Open or Closed
 681 * note: already called with rtnl_lock
 682 */
 683int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
 684{
 685	struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id);
 686
 687	if (!mrp)
 688		return -EINVAL;
 689
 690	if (mrp->in_state == BR_MRP_IN_STATE_CLOSED &&
 691	    state->in_state != BR_MRP_IN_STATE_CLOSED)
 692		mrp->in_transitions++;
 693
 694	mrp->in_state = state->in_state;
 695
 696	br_mrp_switchdev_set_in_state(br, mrp, state->in_state);
 697
 698	return 0;
 699}
 700
  701/* Set in role, in role can only be MIM (Media Interconnection Manager) or
  702 * MIC (Media Interconnection Client).
 703 * note: already called with rtnl_lock
 704 */
 705int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role)
 706{
 707	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
 708	struct net_bridge_port *p;
 709	int err;
 710
 711	if (!mrp)
 712		return -EINVAL;
 713
 714	if (!br_mrp_get_port(br, role->i_ifindex))
 715		return -EINVAL;
 716
 717	if (role->in_role == BR_MRP_IN_ROLE_DISABLED) {
 718		u8 state;
 719
 720		/* It is not allowed to disable a port that doesn't exist */
 721		p = rtnl_dereference(mrp->i_port);
 722		if (!p)
 723			return -EINVAL;
 724
  725		/* Stop generating MRP_InTest frames */
 726		cancel_delayed_work_sync(&mrp->in_test_work);
 727		br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);
 728
 729		/* Remove the port */
 730		spin_lock_bh(&br->lock);
 731		state = netif_running(br->dev) ?
 732				BR_STATE_FORWARDING : BR_STATE_DISABLED;
 733		p->state = state;
 734		p->flags &= ~BR_MRP_AWARE;
 735		spin_unlock_bh(&br->lock);
 736		br_mrp_port_switchdev_set_state(p, state);
 737		rcu_assign_pointer(mrp->i_port, NULL);
 738
 739		mrp->in_role = role->in_role;
 740		mrp->in_id = 0;
 741
 742		return 0;
 743	}
 744
  745	/* It is not possible to have the same port be part of multiple rings */
 746	if (!br_mrp_unique_ifindex(br, role->i_ifindex))
 747		return -EINVAL;
 748
  749	/* It is not allowed to set a different interconnect port if the MRP
  750	 * instance already has one. It first needs to be disabled, and only
  751	 * after that can the new port be set.
 752	 */
 753	if (rcu_access_pointer(mrp->i_port))
 754		return -EINVAL;
 755
 756	p = br_mrp_get_port(br, role->i_ifindex);
 757	spin_lock_bh(&br->lock);
 758	p->state = BR_STATE_FORWARDING;
 759	p->flags |= BR_MRP_AWARE;
 760	spin_unlock_bh(&br->lock);
 761	rcu_assign_pointer(mrp->i_port, p);
 762
 763	mrp->in_role = role->in_role;
 764	mrp->in_id = role->in_id;
 765
  766	/* If there is an error just bail out */
 767	err = br_mrp_switchdev_set_in_role(br, mrp, role->in_id,
 768					   role->ring_id, role->in_role);
 769	if (err && err != -EOPNOTSUPP)
 770		return err;
 771
  772	/* Now detect if the HW actually applied the role or not. If the HW
  773	 * applied the role it means that the SW will not need to do those
  774	 * operations anymore. For example if the role is MIM then the HW will
  775	 * notify the SW when the interconnect ring is open; if it is not pushed
  776	 * to the HW, the SW will need to detect when the interconnect ring is open.
  777	 */
 778	mrp->in_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;
 779
 780	return 0;
 781}
 782
  783/* Start to generate MRP_InTest frames; the frames are generated by the
  784 * HW and, if that fails, by the SW.
 785 * note: already called with rtnl_lock
 786 */
 787int br_mrp_start_in_test(struct net_bridge *br,
 788			 struct br_mrp_start_in_test *in_test)
 789{
 790	struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id);
 791
 792	if (!mrp)
 793		return -EINVAL;
 794
 795	if (mrp->in_role != BR_MRP_IN_ROLE_MIM)
 796		return -EINVAL;
 797
  798	/* Try to push it to the HW; if that fails then continue with the SW
  799	 * implementation, and if that also fails then return an error.
  800	 */
 801	if (!br_mrp_switchdev_send_in_test(br, mrp, in_test->interval,
 802					   in_test->max_miss, in_test->period))
 803		return 0;
 804
 805	mrp->in_test_interval = in_test->interval;
 806	mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
 807	mrp->in_test_max_miss = in_test->max_miss;
 808	mrp->in_test_count_miss = 0;
 809	queue_delayed_work(system_wq, &mrp->in_test_work,
 810			   usecs_to_jiffies(in_test->interval));
 811
 812	return 0;
 813}
 814
  815/* Determine if the frame type is a ring frame */
 816static bool br_mrp_ring_frame(struct sk_buff *skb)
 817{
 818	const struct br_mrp_tlv_hdr *hdr;
 819	struct br_mrp_tlv_hdr _hdr;
 820
 821	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
 822	if (!hdr)
 823		return false;
 824
 825	if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST ||
 826	    hdr->type == BR_MRP_TLV_HEADER_RING_TOPO ||
 827	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN ||
 828	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP ||
 829	    hdr->type == BR_MRP_TLV_HEADER_OPTION)
 830		return true;
 831
 832	return false;
 833}
 834
  835/* Determine if the frame type is an interconnect frame */
 836static bool br_mrp_in_frame(struct sk_buff *skb)
 837{
 838	const struct br_mrp_tlv_hdr *hdr;
 839	struct br_mrp_tlv_hdr _hdr;
 840
 841	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
 842	if (!hdr)
 843		return false;
 844
 845	if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST ||
 846	    hdr->type == BR_MRP_TLV_HEADER_IN_TOPO ||
 847	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN ||
 848	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP)
 849		return true;
 850
 851	return false;
 852}
 853
  854/* Process only MRP Test frames. All the other MRP frames are processed by
  855 * the userspace application.
 856 * note: already called with rcu_read_lock
 857 */
 858static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
 859			       struct sk_buff *skb)
 860{
 861	const struct br_mrp_tlv_hdr *hdr;
 862	struct br_mrp_tlv_hdr _hdr;
 863
 864	/* Each MRP header starts with a version field which is 16 bits.
  865	 * Therefore skip the version and fetch the TLV header directly.
 866	 */
 867	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
 868	if (!hdr)
 869		return;
 870
 871	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
 872		return;
 873
 874	mrp->test_count_miss = 0;
 875
  876	/* Notify userspace that the ring is closed only if it was not
  877	 * already closed
 878	 */
 879	if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
 880		br_mrp_ring_port_open(port->dev, false);
 881}
 882
  883/* Determine if the test hdr has a better priority than the node */
 884static bool br_mrp_test_better_than_own(struct br_mrp *mrp,
 885					struct net_bridge *br,
 886					const struct br_mrp_ring_test_hdr *hdr)
 887{
 888	u16 prio = be16_to_cpu(hdr->prio);
 889
 890	if (prio < mrp->prio ||
 891	    (prio == mrp->prio &&
 892	    ether_addr_to_u64(hdr->sa) < ether_addr_to_u64(br->dev->dev_addr)))
 893		return true;
 894
 895	return false;
 896}
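/* A worked example of the comparison above, with illustrative values:
 * with mrp->prio == 0xA000 and a received hdr->prio of 0x8000, the
 * remote node wins because the lower numerical value is the better
 * priority; when the priorities are equal, the numerically smaller
 * source MAC, compared via ether_addr_to_u64(), breaks the tie.
 */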
 897
  898/* Process only MRP Test frames. All the other MRP frames are processed by
  899 * the userspace application.
 900 * note: already called with rcu_read_lock
 901 */
 902static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br,
 903			       struct net_bridge_port *port,
 904			       struct sk_buff *skb)
 905{
 906	const struct br_mrp_ring_test_hdr *test_hdr;
 907	struct br_mrp_ring_test_hdr _test_hdr;
 908	const struct br_mrp_tlv_hdr *hdr;
 909	struct br_mrp_tlv_hdr _hdr;
 910
 911	/* Each MRP header starts with a version field which is 16 bits.
  912	 * Therefore skip the version and fetch the TLV header directly.
 913	 */
 914	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
 915	if (!hdr)
 916		return;
 917
 918	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
 919		return;
 920
 921	test_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
 922				      sizeof(_test_hdr), &_test_hdr);
 923	if (!test_hdr)
 924		return;
 925
 926	/* Only frames that have a better priority than the node will
 927	 * clear the miss counter because otherwise the node will need to behave
 928	 * as MRM.
 929	 */
 930	if (br_mrp_test_better_than_own(mrp, br, test_hdr))
 931		mrp->test_count_miss = 0;
 932}
 933
  934/* Process only MRP InTest frames. All the other MRP frames are processed by
  935 * the userspace application.
 936 * note: already called with rcu_read_lock
 937 */
 938static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port,
 939			       struct sk_buff *skb)
 940{
 941	const struct br_mrp_in_test_hdr *in_hdr;
 942	struct br_mrp_in_test_hdr _in_hdr;
 943	const struct br_mrp_tlv_hdr *hdr;
 944	struct br_mrp_tlv_hdr _hdr;
 945
 946	/* Each MRP header starts with a version field which is 16 bits.
  947	 * Therefore skip the version and fetch the TLV header directly.
 948	 */
 949	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
 950	if (!hdr)
 951		return false;
 952
 953	/* The check for InTest frame type was already done */
 954	in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
 955				    sizeof(_in_hdr), &_in_hdr);
 956	if (!in_hdr)
 957		return false;
 958
  959	/* It needs to process only its own InTest frames. */
 960	if (mrp->in_id != ntohs(in_hdr->id))
 961		return false;
 962
 963	mrp->in_test_count_miss = 0;
 964
  965	/* Notify userspace that the interconnect ring is closed only if it
  966	 * was not already closed
 967	 */
 968	if (mrp->in_state != BR_MRP_IN_STATE_CLOSED)
 969		br_mrp_in_port_open(port->dev, false);
 970
 971	return true;
 972}
 973
 974/* Get the MRP frame type
 975 * note: already called with rcu_read_lock
 976 */
 977static u8 br_mrp_get_frame_type(struct sk_buff *skb)
 978{
 979	const struct br_mrp_tlv_hdr *hdr;
 980	struct br_mrp_tlv_hdr _hdr;
 981
 982	/* Each MRP header starts with a version field which is 16 bits.
  983	 * Therefore skip the version and fetch the TLV header directly.
 984	 */
 985	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
 986	if (!hdr)
 987		return 0xff;
 988
 989	return hdr->type;
 990}
 991
 992static bool br_mrp_mrm_behaviour(struct br_mrp *mrp)
 993{
 994	if (mrp->ring_role == BR_MRP_RING_ROLE_MRM ||
 995	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor))
 996		return true;
 997
 998	return false;
 999}
1000
1001static bool br_mrp_mrc_behaviour(struct br_mrp *mrp)
1002{
1003	if (mrp->ring_role == BR_MRP_RING_ROLE_MRC ||
1004	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor))
1005		return true;
1006
1007	return false;
1008}
1009
 1010/* This will just forward the frame to the other MRP ring ports, depending on
1011 * the frame type, ring role and interconnect role
1012 * note: already called with rcu_read_lock
1013 */
1014static int br_mrp_rcv(struct net_bridge_port *p,
1015		      struct sk_buff *skb, struct net_device *dev)
1016{
1017	struct net_bridge_port *p_port, *s_port, *i_port = NULL;
1018	struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL;
1019	struct net_bridge *br;
1020	struct br_mrp *mrp;
1021
 1022	/* If the port is disabled don't accept any frames */
1023	if (p->state == BR_STATE_DISABLED)
1024		return 0;
1025
1026	br = p->br;
 1027	mrp = br_mrp_find_port(br, p);
1028	if (unlikely(!mrp))
1029		return 0;
1030
1031	p_port = rcu_dereference(mrp->p_port);
1032	if (!p_port)
1033		return 0;
1034	p_dst = p_port;
1035
1036	s_port = rcu_dereference(mrp->s_port);
1037	if (!s_port)
1038		return 0;
1039	s_dst = s_port;
1040
1041	/* If the frame is a ring frame then it is not required to check the
1042	 * interconnect role and ports to process or forward the frame
1043	 */
1044	if (br_mrp_ring_frame(skb)) {
1045		/* If the role is MRM then don't forward the frames */
1046		if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
1047			br_mrp_mrm_process(mrp, p, skb);
1048			goto no_forward;
1049		}
1050
1051		/* If the role is MRA then don't forward the frames if it
 1052		 * behaves as an MRM node
1053		 */
1054		if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
1055			if (!mrp->test_monitor) {
1056				br_mrp_mrm_process(mrp, p, skb);
1057				goto no_forward;
1058			}
1059
1060			br_mrp_mra_process(mrp, br, p, skb);
1061		}
1062
1063		goto forward;
1064	}
1065
1066	if (br_mrp_in_frame(skb)) {
1067		u8 in_type = br_mrp_get_frame_type(skb);
1068
1069		i_port = rcu_dereference(mrp->i_port);
1070		i_dst = i_port;
1071
 1072		/* If the ring port is in the blocking state it should not
 1073		 * forward In_Test frames
1074		 */
1075		if (br_mrp_is_ring_port(p_port, s_port, p) &&
1076		    p->state == BR_STATE_BLOCKING &&
1077		    in_type == BR_MRP_TLV_HEADER_IN_TEST)
1078			goto no_forward;
1079
 1080		/* Nodes that behave as MRM need to stop forwarding the frames
 1081		 * when the ring is closed, otherwise there will be a loop. In
 1082		 * this case the frame is not forwarded between the ring ports.
1083		 */
1084		if (br_mrp_mrm_behaviour(mrp) &&
1085		    br_mrp_is_ring_port(p_port, s_port, p) &&
1086		    (s_port->state != BR_STATE_FORWARDING ||
1087		     p_port->state != BR_STATE_FORWARDING)) {
1088			p_dst = NULL;
1089			s_dst = NULL;
1090		}
1091
 1092		/* A node that behaves as MRC and doesn't have an interconnect
 1093		 * role should forward all frames between the ring ports
1094		 * because it doesn't have an interconnect port
1095		 */
1096		if (br_mrp_mrc_behaviour(mrp) &&
1097		    mrp->in_role == BR_MRP_IN_ROLE_DISABLED)
1098			goto forward;
1099
1100		if (mrp->in_role == BR_MRP_IN_ROLE_MIM) {
1101			if (in_type == BR_MRP_TLV_HEADER_IN_TEST) {
 1102				/* MIM should not forward its own InTest
1103				 * frames
1104				 */
1105				if (br_mrp_mim_process(mrp, p, skb)) {
1106					goto no_forward;
1107				} else {
1108					if (br_mrp_is_ring_port(p_port, s_port,
1109								p))
1110						i_dst = NULL;
1111
1112					if (br_mrp_is_in_port(i_port, p))
1113						goto no_forward;
1114				}
1115			} else {
1116				/* MIM should forward IntLinkChange and
1117				 * IntTopoChange between ring ports but MIM
1118				 * should not forward IntLinkChange and
1119				 * IntTopoChange if the frame was received at
1120				 * the interconnect port
1121				 */
1122				if (br_mrp_is_ring_port(p_port, s_port, p))
1123					i_dst = NULL;
1124
1125				if (br_mrp_is_in_port(i_port, p))
1126					goto no_forward;
1127			}
1128		}
1129
1130		if (mrp->in_role == BR_MRP_IN_ROLE_MIC) {
1131			/* MIC should forward InTest frames on all ports
 1132			 * regardless of the receiving port
1133			 */
1134			if (in_type == BR_MRP_TLV_HEADER_IN_TEST)
1135				goto forward;
1136
 1137			/* MIC should forward IntLinkChange frames to all the
 1138			 * ports, but only if they are received on ring ports
1139			 */
1140			if (br_mrp_is_ring_port(p_port, s_port, p) &&
1141			    (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
1142			     in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
1143				goto forward;
1144
1145			/* Should forward the InTopo frames only between the
1146			 * ring ports
1147			 */
1148			if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) {
1149				i_dst = NULL;
1150				goto forward;
1151			}
1152
1153			/* In all the other cases don't forward the frames */
1154			goto no_forward;
1155		}
1156	}
1157
1158forward:
1159	if (p_dst)
1160		br_forward(p_dst, skb, true, false);
1161	if (s_dst)
1162		br_forward(s_dst, skb, true, false);
1163	if (i_dst)
1164		br_forward(i_dst, skb, true, false);
1165
1166no_forward:
1167	return 1;
1168}
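/* A compact, deliberately simplified summary of br_mrp_rcv() above,
 * derived from the code: ring frames are consumed (never forwarded) by
 * an MRM node and by an MRA behaving as MRM, and are forwarded between
 * the ring ports otherwise; interconnect frames additionally honour the
 * MIM/MIC rules above and are not forwarded between the ring ports of an
 * MRM-behaving node while one of those ports is not forwarding.
 */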
1169
 1170/* Check if the frame was received on a port that is part of an MRP ring
 1171 * and if the frame has the MRP EtherType. In that case process the frame,
 1172 * otherwise do normal forwarding.
1173 * note: already called with rcu_read_lock
1174 */
1175int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
1176{
1177	/* If there is no MRP instance do normal forwarding */
1178	if (likely(!(p->flags & BR_MRP_AWARE)))
1179		goto out;
1180
1181	if (unlikely(skb->protocol == htons(ETH_P_MRP)))
1182		return br_mrp_rcv(p, skb, p->dev);
1183
1184out:
1185	return 0;
1186}
1187
1188bool br_mrp_enabled(struct net_bridge *br)
1189{
1190	return !list_empty(&br->mrp_list);
1191}
v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2
   3#include <linux/mrp_bridge.h>
   4#include "br_private_mrp.h"
   5
   6static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
   7static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 };
   8
   9static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb);
  10
  11static struct br_frame_type mrp_frame_type __read_mostly = {
  12	.type = cpu_to_be16(ETH_P_MRP),
  13	.frame_handler = br_mrp_process,
  14};
  15
  16static bool br_mrp_is_ring_port(struct net_bridge_port *p_port,
  17				struct net_bridge_port *s_port,
  18				struct net_bridge_port *port)
  19{
  20	if (port == p_port ||
  21	    port == s_port)
  22		return true;
  23
  24	return false;
  25}
  26
  27static bool br_mrp_is_in_port(struct net_bridge_port *i_port,
  28			      struct net_bridge_port *port)
  29{
  30	if (port == i_port)
  31		return true;
  32
  33	return false;
  34}
  35
  36static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
  37					       u32 ifindex)
  38{
  39	struct net_bridge_port *res = NULL;
  40	struct net_bridge_port *port;
  41
  42	list_for_each_entry(port, &br->port_list, list) {
  43		if (port->dev->ifindex == ifindex) {
  44			res = port;
  45			break;
  46		}
  47	}
  48
  49	return res;
  50}
  51
  52static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
  53{
  54	struct br_mrp *res = NULL;
  55	struct br_mrp *mrp;
  56
  57	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
  58				 lockdep_rtnl_is_held()) {
  59		if (mrp->ring_id == ring_id) {
  60			res = mrp;
  61			break;
  62		}
  63	}
  64
  65	return res;
  66}
  67
  68static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id)
  69{
  70	struct br_mrp *res = NULL;
  71	struct br_mrp *mrp;
  72
  73	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
  74				 lockdep_rtnl_is_held()) {
  75		if (mrp->in_id == in_id) {
  76			res = mrp;
  77			break;
  78		}
  79	}
  80
  81	return res;
  82}
  83
  84static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
  85{
  86	struct br_mrp *mrp;
  87
  88	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
  89				 lockdep_rtnl_is_held()) {
  90		struct net_bridge_port *p;
  91
  92		p = rtnl_dereference(mrp->p_port);
  93		if (p && p->dev->ifindex == ifindex)
  94			return false;
  95
  96		p = rtnl_dereference(mrp->s_port);
  97		if (p && p->dev->ifindex == ifindex)
  98			return false;
  99
 100		p = rtnl_dereference(mrp->i_port);
 101		if (p && p->dev->ifindex == ifindex)
 102			return false;
 103	}
 104
 105	return true;
 106}
 107
 108static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
 109				       struct net_bridge_port *p)
 110{
 111	struct br_mrp *res = NULL;
 112	struct br_mrp *mrp;
 113
 114	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
 115				 lockdep_rtnl_is_held()) {
 116		if (rcu_access_pointer(mrp->p_port) == p ||
 117		    rcu_access_pointer(mrp->s_port) == p ||
 118		    rcu_access_pointer(mrp->i_port) == p) {
 119			res = mrp;
 120			break;
 121		}
 122	}
 123
 124	return res;
 125}
 126
 127static int br_mrp_next_seq(struct br_mrp *mrp)
 128{
 129	mrp->seq_id++;
 130	return mrp->seq_id;
 131}
 132
 133static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
 134					const u8 *src, const u8 *dst)
 135{
 136	struct ethhdr *eth_hdr;
 137	struct sk_buff *skb;
 138	__be16 *version;
 139
 140	skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
 141	if (!skb)
 142		return NULL;
 143
 144	skb->dev = p->dev;
 145	skb->protocol = htons(ETH_P_MRP);
 146	skb->priority = MRP_FRAME_PRIO;
 147	skb_reserve(skb, sizeof(*eth_hdr));
 148
 149	eth_hdr = skb_push(skb, sizeof(*eth_hdr));
 150	ether_addr_copy(eth_hdr->h_dest, dst);
 151	ether_addr_copy(eth_hdr->h_source, src);
 152	eth_hdr->h_proto = htons(ETH_P_MRP);
 153
 154	version = skb_put(skb, sizeof(*version));
 155	*version = cpu_to_be16(MRP_VERSION);
 156
 157	return skb;
 158}
 159
 160static void br_mrp_skb_tlv(struct sk_buff *skb,
 161			   enum br_mrp_tlv_header_type type,
 162			   u8 length)
 163{
 164	struct br_mrp_tlv_hdr *hdr;
 165
 166	hdr = skb_put(skb, sizeof(*hdr));
 167	hdr->type = type;
 168	hdr->length = length;
 169}
 170
 171static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp)
 172{
 173	struct br_mrp_common_hdr *hdr;
 174
 175	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr));
 176
 177	hdr = skb_put(skb, sizeof(*hdr));
 178	hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp));
 179	memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH);
 180}
 181
 182static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
 183					     struct net_bridge_port *p,
 184					     enum br_mrp_port_role_type port_role)
 185{
 186	struct br_mrp_ring_test_hdr *hdr = NULL;
 187	struct sk_buff *skb = NULL;
 188
 189	if (!p)
 190		return NULL;
 191
 192	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
 193	if (!skb)
 194		return NULL;
 195
 196	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr));
 197	hdr = skb_put(skb, sizeof(*hdr));
 198
 199	hdr->prio = cpu_to_be16(mrp->prio);
 200	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
 201	hdr->port_role = cpu_to_be16(port_role);
 202	hdr->state = cpu_to_be16(mrp->ring_state);
 203	hdr->transitions = cpu_to_be16(mrp->ring_transitions);
 204	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));
 205
 206	br_mrp_skb_common(skb, mrp);
 207
  208	/* In case the node behaves as MRA, the Test frame needs to have an
  209	 * Option TLV which includes a sub-option TLV that has the type
  210	 * AUTO_MGR
 211	 */
 212	if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
 213		struct br_mrp_sub_option1_hdr *sub_opt = NULL;
 214		struct br_mrp_tlv_hdr *sub_tlv = NULL;
 215		struct br_mrp_oui_hdr *oui = NULL;
 216		u8 length;
 217
 218		length = sizeof(*sub_opt) + sizeof(*sub_tlv) + sizeof(oui) +
 219			MRP_OPT_PADDING;
 220		br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_OPTION, length);
 221
 222		oui = skb_put(skb, sizeof(*oui));
 223		memset(oui, 0x0, sizeof(*oui));
 224		sub_opt = skb_put(skb, sizeof(*sub_opt));
 225		memset(sub_opt, 0x0, sizeof(*sub_opt));
 226
 227		sub_tlv = skb_put(skb, sizeof(*sub_tlv));
 228		sub_tlv->type = BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR;
 229
  230		/* 32 bit alignment shall be ensured, therefore add 2 bytes */
 231		skb_put(skb, MRP_OPT_PADDING);
 232	}
 233
 234	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);
 235
 236	return skb;
 237}
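/* A minimal sketch of the Option TLV appended above for MRA nodes,
 * assuming the header definitions from br_private_mrp.h; the payload is
 * laid out in the order the code puts it:
 *
 *   OPTION TLV hdr | oui_hdr (zeroed) | sub_option1_hdr (zeroed) |
 *   sub_tlv of type BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR |
 *   MRP_OPT_PADDING bytes for 32-bit alignment
 */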
 238
 239static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp,
 240						struct net_bridge_port *p,
 241						enum br_mrp_port_role_type port_role)
 242{
 243	struct br_mrp_in_test_hdr *hdr = NULL;
 244	struct sk_buff *skb = NULL;
 245
 246	if (!p)
 247		return NULL;
 248
 249	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac);
 250	if (!skb)
 251		return NULL;
 252
 253	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr));
 254	hdr = skb_put(skb, sizeof(*hdr));
 255
 256	hdr->id = cpu_to_be16(mrp->in_id);
 257	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
 258	hdr->port_role = cpu_to_be16(port_role);
 259	hdr->state = cpu_to_be16(mrp->in_state);
 260	hdr->transitions = cpu_to_be16(mrp->in_transitions);
 261	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));
 262
 263	br_mrp_skb_common(skb, mrp);
 264	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);
 265
 266	return skb;
 267}
 268
  269/* This function is continuously called in the following cases:
  270 * - when the node role is MRM, in this case test_monitor is always set to
  271 *   false because it needs to notify userspace that the ring is open and
  272 *   needs to send MRP_Test frames
  273 * - when the node role is MRA, there are 2 subcases:
  274 *     - when the MRA behaves as MRM, this case is similar to the MRM role
  275 *     - when the MRA behaves as MRC, test_monitor is set to true because
  276 *       it needs to detect when it stops seeing MRP_Test frames from the
  277 *       MRM node, but it doesn't need to send MRP_Test frames itself.
  278 */
 279static void br_mrp_test_work_expired(struct work_struct *work)
 280{
 281	struct delayed_work *del_work = to_delayed_work(work);
 282	struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work);
 283	struct net_bridge_port *p;
 284	bool notify_open = false;
 285	struct sk_buff *skb;
 286
 287	if (time_before_eq(mrp->test_end, jiffies))
 288		return;
 289
 290	if (mrp->test_count_miss < mrp->test_max_miss) {
 291		mrp->test_count_miss++;
 292	} else {
 293		/* Notify that the ring is open only if the ring state is
 294		 * closed, otherwise it would continue to notify at every
 295		 * interval.
 296		 * Also notify that the ring is open when the node has the
  297		 * role MRA and behaves as MRC. The reason is that userspace
  298		 * needs to know when the MRM stopped sending MRP_Test frames
  299		 * so that the current node can try to take over the MRM
  300		 * role.
 301		 */
 302		if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED ||
 303		    mrp->test_monitor)
 304			notify_open = true;
 305	}
 306
 307	rcu_read_lock();
 308
 309	p = rcu_dereference(mrp->p_port);
 310	if (p) {
 311		if (!mrp->test_monitor) {
 312			skb = br_mrp_alloc_test_skb(mrp, p,
 313						    BR_MRP_PORT_ROLE_PRIMARY);
 314			if (!skb)
 315				goto out;
 316
 317			skb_reset_network_header(skb);
 318			dev_queue_xmit(skb);
 319		}
 320
 321		if (notify_open && !mrp->ring_role_offloaded)
 322			br_mrp_ring_port_open(p->dev, true);
 323	}
 324
 325	p = rcu_dereference(mrp->s_port);
 326	if (p) {
 327		if (!mrp->test_monitor) {
 328			skb = br_mrp_alloc_test_skb(mrp, p,
 329						    BR_MRP_PORT_ROLE_SECONDARY);
 330			if (!skb)
 331				goto out;
 332
 333			skb_reset_network_header(skb);
 334			dev_queue_xmit(skb);
 335		}
 336
 337		if (notify_open && !mrp->ring_role_offloaded)
 338			br_mrp_ring_port_open(p->dev, true);
 339	}
 340
 341out:
 342	rcu_read_unlock();
 343
 344	queue_delayed_work(system_wq, &mrp->test_work,
 345			   usecs_to_jiffies(mrp->test_interval));
 346}
 347
  348/* This function is continuously called when the node has the interconnect role
  349 * MIM. It generates interconnect test frames, sends them on all 3 ports and
  350 * also checks whether it has stopped receiving interconnect test frames.
 351 */
 352static void br_mrp_in_test_work_expired(struct work_struct *work)
 353{
 354	struct delayed_work *del_work = to_delayed_work(work);
 355	struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work);
 356	struct net_bridge_port *p;
 357	bool notify_open = false;
 358	struct sk_buff *skb;
 359
 360	if (time_before_eq(mrp->in_test_end, jiffies))
 361		return;
 362
 363	if (mrp->in_test_count_miss < mrp->in_test_max_miss) {
 364		mrp->in_test_count_miss++;
 365	} else {
 366		/* Notify that the interconnect ring is open only if the
 367		 * interconnect ring state is closed, otherwise it would
 368		 * continue to notify at every interval.
 369		 */
 370		if (mrp->in_state == BR_MRP_IN_STATE_CLOSED)
 371			notify_open = true;
 372	}
 373
 374	rcu_read_lock();
 375
 376	p = rcu_dereference(mrp->p_port);
 377	if (p) {
 378		skb = br_mrp_alloc_in_test_skb(mrp, p,
 379					       BR_MRP_PORT_ROLE_PRIMARY);
 380		if (!skb)
 381			goto out;
 382
 383		skb_reset_network_header(skb);
 384		dev_queue_xmit(skb);
 385
 386		if (notify_open && !mrp->in_role_offloaded)
 387			br_mrp_in_port_open(p->dev, true);
 388	}
 389
 390	p = rcu_dereference(mrp->s_port);
 391	if (p) {
 392		skb = br_mrp_alloc_in_test_skb(mrp, p,
 393					       BR_MRP_PORT_ROLE_SECONDARY);
 394		if (!skb)
 395			goto out;
 396
 397		skb_reset_network_header(skb);
 398		dev_queue_xmit(skb);
 399
 400		if (notify_open && !mrp->in_role_offloaded)
 401			br_mrp_in_port_open(p->dev, true);
 402	}
 403
 404	p = rcu_dereference(mrp->i_port);
 405	if (p) {
 406		skb = br_mrp_alloc_in_test_skb(mrp, p,
 407					       BR_MRP_PORT_ROLE_INTER);
 408		if (!skb)
 409			goto out;
 410
 411		skb_reset_network_header(skb);
 412		dev_queue_xmit(skb);
 413
 414		if (notify_open && !mrp->in_role_offloaded)
 415			br_mrp_in_port_open(p->dev, true);
 416	}
 417
 418out:
 419	rcu_read_unlock();
 420
 421	queue_delayed_work(system_wq, &mrp->in_test_work,
 422			   usecs_to_jiffies(mrp->in_test_interval));
 423}
 424
 425/* Deletes the MRP instance.
 426 * note: called under rtnl_lock
 427 */
 428static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
 429{
 430	struct net_bridge_port *p;
 431	u8 state;
 432
 433	/* Stop sending MRP_Test frames */
 434	cancel_delayed_work_sync(&mrp->test_work);
 435	br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0);
 436
  437	/* Stop sending MRP_InTest frames if it has an interconnect role */
 438	cancel_delayed_work_sync(&mrp->in_test_work);
 439	br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);
 440
 441	/* Disable the roles */
 442	br_mrp_switchdev_set_ring_role(br, mrp, BR_MRP_RING_ROLE_DISABLED);
 443	p = rtnl_dereference(mrp->i_port);
 444	if (p)
 445		br_mrp_switchdev_set_in_role(br, mrp, mrp->in_id, mrp->ring_id,
 446					     BR_MRP_IN_ROLE_DISABLED);
 447
 448	br_mrp_switchdev_del(br, mrp);
 449
 450	/* Reset the ports */
 451	p = rtnl_dereference(mrp->p_port);
 452	if (p) {
 453		spin_lock_bh(&br->lock);
 454		state = netif_running(br->dev) ?
 455				BR_STATE_FORWARDING : BR_STATE_DISABLED;
 456		p->state = state;
 457		p->flags &= ~BR_MRP_AWARE;
 458		spin_unlock_bh(&br->lock);
 459		br_mrp_port_switchdev_set_state(p, state);
 460		rcu_assign_pointer(mrp->p_port, NULL);
 461	}
 462
 463	p = rtnl_dereference(mrp->s_port);
 464	if (p) {
 465		spin_lock_bh(&br->lock);
 466		state = netif_running(br->dev) ?
 467				BR_STATE_FORWARDING : BR_STATE_DISABLED;
 468		p->state = state;
 469		p->flags &= ~BR_MRP_AWARE;
 470		spin_unlock_bh(&br->lock);
 471		br_mrp_port_switchdev_set_state(p, state);
 472		rcu_assign_pointer(mrp->s_port, NULL);
 473	}
 474
 475	p = rtnl_dereference(mrp->i_port);
 476	if (p) {
 477		spin_lock_bh(&br->lock);
 478		state = netif_running(br->dev) ?
 479				BR_STATE_FORWARDING : BR_STATE_DISABLED;
 480		p->state = state;
 481		p->flags &= ~BR_MRP_AWARE;
 482		spin_unlock_bh(&br->lock);
 483		br_mrp_port_switchdev_set_state(p, state);
 484		rcu_assign_pointer(mrp->i_port, NULL);
 485	}
 486
 487	hlist_del_rcu(&mrp->list);
 488	kfree_rcu(mrp, rcu);
 489
 490	if (hlist_empty(&br->mrp_list))
 491		br_del_frame(br, &mrp_frame_type);
 492}
 493
 494/* Adds a new MRP instance.
 495 * note: called under rtnl_lock
 496 */
 497int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
 498{
 499	struct net_bridge_port *p;
 500	struct br_mrp *mrp;
 501	int err;
 502
 503	/* If the ring exists, it is not possible to create another one with the
 504	 * same ring_id
 505	 */
 506	mrp = br_mrp_find_id(br, instance->ring_id);
 507	if (mrp)
 508		return -EINVAL;
 509
 510	if (!br_mrp_get_port(br, instance->p_ifindex) ||
 511	    !br_mrp_get_port(br, instance->s_ifindex))
 512		return -EINVAL;
 513
  514	/* It is not possible to have the same port be part of multiple rings */
 515	if (!br_mrp_unique_ifindex(br, instance->p_ifindex) ||
 516	    !br_mrp_unique_ifindex(br, instance->s_ifindex))
 517		return -EINVAL;
 518
 519	mrp = kzalloc(sizeof(*mrp), GFP_KERNEL);
 520	if (!mrp)
 521		return -ENOMEM;
 522
 523	mrp->ring_id = instance->ring_id;
 524	mrp->prio = instance->prio;
 525
 526	p = br_mrp_get_port(br, instance->p_ifindex);
 527	spin_lock_bh(&br->lock);
 528	p->state = BR_STATE_FORWARDING;
 529	p->flags |= BR_MRP_AWARE;
 530	spin_unlock_bh(&br->lock);
 531	rcu_assign_pointer(mrp->p_port, p);
 532
 533	p = br_mrp_get_port(br, instance->s_ifindex);
 534	spin_lock_bh(&br->lock);
 535	p->state = BR_STATE_FORWARDING;
 536	p->flags |= BR_MRP_AWARE;
 537	spin_unlock_bh(&br->lock);
 538	rcu_assign_pointer(mrp->s_port, p);
 539
 540	if (hlist_empty(&br->mrp_list))
 541		br_add_frame(br, &mrp_frame_type);
 542
 543	INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
 544	INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
 545	hlist_add_tail_rcu(&mrp->list, &br->mrp_list);
 546
 547	err = br_mrp_switchdev_add(br, mrp);
 548	if (err)
 549		goto delete_mrp;
 550
 551	return 0;
 552
 553delete_mrp:
 554	br_mrp_del_impl(br, mrp);
 555
 556	return err;
 557}
 558
  559/* Deletes the MRP instance that the port is part of
 560 * note: called under rtnl_lock
 561 */
 562void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
 563{
 564	struct br_mrp *mrp = br_mrp_find_port(br, p);
 565
  566	/* If the port is not part of an MRP instance just bail out */
 567	if (!mrp)
 568		return;
 569
 570	br_mrp_del_impl(br, mrp);
 571}
 572
 573/* Deletes existing MRP instance based on ring_id
 574 * note: called under rtnl_lock
 575 */
 576int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
 577{
 578	struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id);
 579
 580	if (!mrp)
 581		return -EINVAL;
 582
 583	br_mrp_del_impl(br, mrp);
 584
 585	return 0;
 586}
 587
 588/* Set port state, port state can be forwarding, blocked or disabled
 589 * note: already called with rtnl_lock
 590 */
 591int br_mrp_set_port_state(struct net_bridge_port *p,
 592			  enum br_mrp_port_state_type state)
 593{
 594	u32 port_state;
 595
 596	if (!p || !(p->flags & BR_MRP_AWARE))
 597		return -EINVAL;
 598
 599	spin_lock_bh(&p->br->lock);
 600
 601	if (state == BR_MRP_PORT_STATE_FORWARDING)
 602		port_state = BR_STATE_FORWARDING;
 603	else
 604		port_state = BR_STATE_BLOCKING;
 605
 606	p->state = port_state;
 607	spin_unlock_bh(&p->br->lock);
 608
 609	br_mrp_port_switchdev_set_state(p, port_state);
 610
 611	return 0;
 612}
 613
 614/* Set port role, port role can be primary or secondary
 615 * note: already called with rtnl_lock
 616 */
 617int br_mrp_set_port_role(struct net_bridge_port *p,
 618			 enum br_mrp_port_role_type role)
 619{
 620	struct br_mrp *mrp;
 621
 622	if (!p || !(p->flags & BR_MRP_AWARE))
 623		return -EINVAL;
 624
 625	mrp = br_mrp_find_port(p->br, p);
 626
 627	if (!mrp)
 628		return -EINVAL;
 629
 630	switch (role) {
 631	case BR_MRP_PORT_ROLE_PRIMARY:
 632		rcu_assign_pointer(mrp->p_port, p);
 633		break;
 634	case BR_MRP_PORT_ROLE_SECONDARY:
 635		rcu_assign_pointer(mrp->s_port, p);
 636		break;
 637	default:
 638		return -EINVAL;
 639	}
 640
 641	br_mrp_port_switchdev_set_role(p, role);
 642
 643	return 0;
 644}
 645
  646/* Set ring state, ring state can only be Open or Closed
 647 * note: already called with rtnl_lock
 648 */
 649int br_mrp_set_ring_state(struct net_bridge *br,
 650			  struct br_mrp_ring_state *state)
 651{
 652	struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id);
 653
 654	if (!mrp)
 655		return -EINVAL;
 656
 657	if (mrp->ring_state != state->ring_state)
 658		mrp->ring_transitions++;
 659
 660	mrp->ring_state = state->ring_state;
 661
 662	br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state);
 663
 664	return 0;
 665}
 666
  667/* Set ring role, ring role can only be MRM (Media Redundancy Manager) or
  668 * MRC (Media Redundancy Client).
 669 * note: already called with rtnl_lock
 670 */
 671int br_mrp_set_ring_role(struct net_bridge *br,
 672			 struct br_mrp_ring_role *role)
 673{
 674	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
 675	enum br_mrp_hw_support support;
 676
 677	if (!mrp)
 678		return -EINVAL;
 679
 680	mrp->ring_role = role->ring_role;
 681
  682	/* If there is an error just bail out */
 683	support = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role);
 684	if (support == BR_MRP_NONE)
 685		return -EOPNOTSUPP;
 686
  687	/* Now detect if the HW actually applied the role or not. If the HW
  688	 * applied the role it means that the SW will not need to do those
  689	 * operations anymore. For example if the role is MRM then the HW will
  690	 * notify the SW when the ring is open; if it is not pushed to the HW,
  691	 * the SW will need to detect when the ring is open.
  692	 */
 693	mrp->ring_role_offloaded = support == BR_MRP_SW ? 0 : 1;
 694
 695	return 0;
 696}
 697
  698/* Start to generate or monitor MRP test frames; the frames are generated by
  699 * the HW and, if that fails, by the SW.
 700 * note: already called with rtnl_lock
 701 */
 702int br_mrp_start_test(struct net_bridge *br,
 703		      struct br_mrp_start_test *test)
 704{
 705	struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id);
 706	enum br_mrp_hw_support support;
 707
 708	if (!mrp)
 709		return -EINVAL;
 710
  711	/* Try to push it to the HW; if that fails then continue with the SW
  712	 * implementation, and if that also fails then return an error.
  713	 */
 714	support = br_mrp_switchdev_send_ring_test(br, mrp, test->interval,
 715						  test->max_miss, test->period,
 716						  test->monitor);
 717	if (support == BR_MRP_NONE)
 718		return -EOPNOTSUPP;
 719
 720	if (support == BR_MRP_HW)
 721		return 0;
 722
 723	mrp->test_interval = test->interval;
 724	mrp->test_end = jiffies + usecs_to_jiffies(test->period);
 725	mrp->test_max_miss = test->max_miss;
 726	mrp->test_monitor = test->monitor;
 727	mrp->test_count_miss = 0;
 728	queue_delayed_work(system_wq, &mrp->test_work,
 729			   usecs_to_jiffies(test->interval));
 730
 731	return 0;
 732}
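/* In the SW fallback above, test_end is the absolute jiffies value at which
 * frame generation stops, while test_work is expected to re-arm itself at
 * test_interval granularity until that deadline (the work handler itself is
 * outside this excerpt). interval, max_miss and period all come from the
 * userspace request in struct br_mrp_start_test.
 */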
 733
 734/* Set the in state. The in state can only be Open or Closed.
 735 * note: already called with rtnl_lock
 736 */
 737int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
 738{
 739	struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id);
 740
 741	if (!mrp)
 742		return -EINVAL;
 743
 744	if (mrp->in_state != state->in_state)
 745		mrp->in_transitions++;
 746
 747	mrp->in_state = state->in_state;
 748
 749	br_mrp_switchdev_set_in_state(br, mrp, state->in_state);
 750
 751	return 0;
 752}
 753
 754/* Set the in role. The in role can only be MIM (Media Interconnection
 755 * Manager) or MIC (Media Interconnection Client).
 756 * note: already called with rtnl_lock
 757 */
 758int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role)
 759{
 760	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
 761	enum br_mrp_hw_support support;
 762	struct net_bridge_port *p;
 763
 764	if (!mrp)
 765		return -EINVAL;
 766
 767	if (!br_mrp_get_port(br, role->i_ifindex))
 768		return -EINVAL;
 769
 770	if (role->in_role == BR_MRP_IN_ROLE_DISABLED) {
 771		u8 state;
 772
 773		/* It is not allowed to disable a port that doesn't exist */
 774		p = rtnl_dereference(mrp->i_port);
 775		if (!p)
 776			return -EINVAL;
 777
 778		/* Stop generating MRP_InTest frames */
 779		cancel_delayed_work_sync(&mrp->in_test_work);
 780		br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);
 781
 782		/* Remove the port */
 783		spin_lock_bh(&br->lock);
 784		state = netif_running(br->dev) ?
 785				BR_STATE_FORWARDING : BR_STATE_DISABLED;
 786		p->state = state;
 787		p->flags &= ~BR_MRP_AWARE;
 788		spin_unlock_bh(&br->lock);
 789		br_mrp_port_switchdev_set_state(p, state);
 790		rcu_assign_pointer(mrp->i_port, NULL);
 791
 792		mrp->in_role = role->in_role;
 793		mrp->in_id = 0;
 794
 795		return 0;
 796	}
 797
 798	/* It is not possible to have the same port be part of multiple rings */
 799	if (!br_mrp_unique_ifindex(br, role->i_ifindex))
 800		return -EINVAL;
 801
 802	/* It is not allowed to set a different interconnect port if the mrp
 803	 * instance already has one. First the existing port needs to be
 804	 * disabled and only after that can the new one be set.
 805	 */
 806	if (rcu_access_pointer(mrp->i_port))
 807		return -EINVAL;
 808
 809	p = br_mrp_get_port(br, role->i_ifindex);
 810	spin_lock_bh(&br->lock);
 811	p->state = BR_STATE_FORWARDING;
 812	p->flags |= BR_MRP_AWARE;
 813	spin_unlock_bh(&br->lock);
 814	rcu_assign_pointer(mrp->i_port, p);
 815
 816	mrp->in_role = role->in_role;
 817	mrp->in_id = role->in_id;
 818
 819	/* If there is an error just bail out */
 820	support = br_mrp_switchdev_set_in_role(br, mrp, role->in_id,
 821					       role->ring_id, role->in_role);
 822	if (support == BR_MRP_NONE)
 823		return -EOPNOTSUPP;
 824
 825	/* Now detect if the HW actually applied the role or not. If the HW
 826	 * applied the role it means that the SW will not need to do those
 827	 * operations anymore. For example if the role is MIM then the HW will
 828	 * notify the SW when the interconnect ring is open, but if the role is
 829	 * not pushed to the HW the SW will need to detect when it is open.
 830	 */
 831	mrp->in_role_offloaded = support == BR_MRP_SW ? 0 : 1;
 832
 833	return 0;
 834}
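/* Rough shape of br_mrp_set_in_role(): the BR_MRP_IN_ROLE_DISABLED case
 * tears the interconnect port down (stop InTest generation, reset the port
 * state, drop the RCU pointer), while every other role first checks that
 * the ifindex is not used by another ring and that no interconnect port is
 * already set, then publishes the port and offers the role to the HW.
 */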
 835
 836/* Start to generate MRP_InTest frames. The frames are generated by the
 837 * HW and, if that fails, they are generated by the SW.
 838 * note: already called with rtnl_lock
 839 */
 840int br_mrp_start_in_test(struct net_bridge *br,
 841			 struct br_mrp_start_in_test *in_test)
 842{
 843	struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id);
 844	enum br_mrp_hw_support support;
 845
 846	if (!mrp)
 847		return -EINVAL;
 848
 849	if (mrp->in_role != BR_MRP_IN_ROLE_MIM)
 850		return -EINVAL;
 851
 852	/* Try to push it to the HW and if that fails then continue with the
 853	 * SW implementation, and if that also fails then return an error.
 854	 */
 855	support = br_mrp_switchdev_send_in_test(br, mrp, in_test->interval,
 856						 in_test->max_miss,
 857						 in_test->period);
 858	if (support == BR_MRP_NONE)
 859		return -EOPNOTSUPP;
 860
 861	if (support == BR_MRP_HW)
 862		return 0;
 863
 864	mrp->in_test_interval = in_test->interval;
 865	mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
 866	mrp->in_test_max_miss = in_test->max_miss;
 867	mrp->in_test_count_miss = 0;
 868	queue_delayed_work(system_wq, &mrp->in_test_work,
 869			   usecs_to_jiffies(in_test->interval));
 870
 871	return 0;
 872}
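/* A minimal usage sketch (hypothetical values; the real request is parsed
 * from netlink into struct br_mrp_start_in_test). interval is in
 * microseconds; max_miss is the number of lost MRP_InTest frames tolerated
 * before the interconnect ring is considered open:
 *
 *	struct br_mrp_start_in_test t = {
 *		.in_id    = 2,
 *		.interval = 1000,
 *		.max_miss = 3,
 *		.period   = 10000,
 *	};
 *	br_mrp_start_in_test(br, &t);
 */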
 873
 874/* Determine if the frame type is a ring frame */
 875static bool br_mrp_ring_frame(struct sk_buff *skb)
 876{
 877	const struct br_mrp_tlv_hdr *hdr;
 878	struct br_mrp_tlv_hdr _hdr;
 879
 880	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
 881	if (!hdr)
 882		return false;
 883
 884	if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST ||
 885	    hdr->type == BR_MRP_TLV_HEADER_RING_TOPO ||
 886	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN ||
 887	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP ||
 888	    hdr->type == BR_MRP_TLV_HEADER_OPTION)
 889		return true;
 890
 891	return false;
 892}
 893
 894/* Determine if the frame type is an interconnect frame */
 895static bool br_mrp_in_frame(struct sk_buff *skb)
 896{
 897	const struct br_mrp_tlv_hdr *hdr;
 898	struct br_mrp_tlv_hdr _hdr;
 899
 900	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
 901	if (!hdr)
 902		return false;
 903
 904	if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST ||
 905	    hdr->type == BR_MRP_TLV_HEADER_IN_TOPO ||
 906	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN ||
 907	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
 908	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_STATUS)
 909		return true;
 910
 911	return false;
 912}
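/* Both classifiers above rely on the MRP frame layout: a 16-bit version
 * field directly after the Ethernet header, followed by a sequence of TLVs,
 * so the first TLV header (read at offset sizeof(uint16_t)) is what
 * identifies the frame type.
 */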
 913
 914/* Process only MRP_Test frames. All the other MRP frames are processed
 915 * by the userspace application.
 916 * note: already called with rcu_read_lock
 917 */
 918static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
 919			       struct sk_buff *skb)
 920{
 921	const struct br_mrp_tlv_hdr *hdr;
 922	struct br_mrp_tlv_hdr _hdr;
 923
 924	/* Each MRP header starts with a version field which is 16 bits.
 925	 * Therefore skip the version and read the TLV header directly.
 926	 */
 927	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
 928	if (!hdr)
 929		return;
 930
 931	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
 932		return;
 933
 934	mrp->test_count_miss = 0;
 935
 936	/* Notify the userspace that the ring closed, but only if it was not
 937	 * already closed
 938	 */
 939	if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
 940		br_mrp_ring_port_open(port->dev, false);
 941}
 942
 943/* Determine if the test hdr has a better priority than the node */
 944static bool br_mrp_test_better_than_own(struct br_mrp *mrp,
 945					struct net_bridge *br,
 946					const struct br_mrp_ring_test_hdr *hdr)
 947{
 948	u16 prio = be16_to_cpu(hdr->prio);
 949
 950	if (prio < mrp->prio ||
 951	    (prio == mrp->prio &&
 952	    ether_addr_to_u64(hdr->sa) < ether_addr_to_u64(br->dev->dev_addr)))
 953		return true;
 954
 955	return false;
 956}
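/* Worked example: with mrp->prio == 0x8000, a test frame advertising prio
 * 0x7000 is better regardless of MAC, while a frame with the same prio
 * 0x8000 is better only if its source MAC is numerically smaller than the
 * bridge address; lower prio always wins, the MAC is just the tie-breaker.
 */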
 957
 958/* Process only MRP_Test frames. All the other MRP frames are processed
 959 * by the userspace application.
 960 * note: already called with rcu_read_lock
 961 */
 962static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br,
 963			       struct net_bridge_port *port,
 964			       struct sk_buff *skb)
 965{
 966	const struct br_mrp_ring_test_hdr *test_hdr;
 967	struct br_mrp_ring_test_hdr _test_hdr;
 968	const struct br_mrp_tlv_hdr *hdr;
 969	struct br_mrp_tlv_hdr _hdr;
 970
 971	/* Each MRP header starts with a version field which is 16 bits.
 972	 * Therefore skip the version and read the TLV header directly.
 973	 */
 974	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
 975	if (!hdr)
 976		return;
 977
 978	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
 979		return;
 980
 981	test_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
 982				      sizeof(_test_hdr), &_test_hdr);
 983	if (!test_hdr)
 984		return;
 985
 986	/* Only frames that have a better priority than the node will
 987	 * clear the miss counter because otherwise the node will need to behave
 988	 * as MRM.
 989	 */
 990	if (br_mrp_test_better_than_own(mrp, br, test_hdr))
 991		mrp->test_count_miss = 0;
 992}
 993
 994/* Process only MRP_InTest frames. All the other MRP frames are processed
 995 * by the userspace application.
 996 * note: already called with rcu_read_lock
 997 */
 998static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port,
 999			       struct sk_buff *skb)
1000{
1001	const struct br_mrp_in_test_hdr *in_hdr;
1002	struct br_mrp_in_test_hdr _in_hdr;
1003	const struct br_mrp_tlv_hdr *hdr;
1004	struct br_mrp_tlv_hdr _hdr;
1005
 1006	/* Each MRP header starts with a version field which is 16 bits.
 1007	 * Therefore skip the version and read the TLV header directly.
 1008	 */
1009	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
1010	if (!hdr)
1011		return false;
1012
1013	/* The check for InTest frame type was already done */
1014	in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
1015				    sizeof(_in_hdr), &_in_hdr);
1016	if (!in_hdr)
1017		return false;
1018
 1019	/* It needs to process only its own InTest frames. */
1020	if (mrp->in_id != ntohs(in_hdr->id))
1021		return false;
1022
1023	mrp->in_test_count_miss = 0;
1024
 1025	/* Notify the userspace that the interconnect ring closed, but only
 1026	 * if it was not already closed
 1027	 */
1028	if (mrp->in_state != BR_MRP_IN_STATE_CLOSED)
1029		br_mrp_in_port_open(port->dev, false);
1030
1031	return true;
1032}
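/* Returning true above means "this was our own MRP_InTest frame"; the
 * caller in br_mrp_rcv() then drops it instead of forwarding it. InTest
 * frames carrying a different in_id return false and follow the generic
 * MIM forwarding rules in br_mrp_rcv().
 */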
1033
1034/* Get the MRP frame type
1035 * note: already called with rcu_read_lock
1036 */
1037static u8 br_mrp_get_frame_type(struct sk_buff *skb)
1038{
1039	const struct br_mrp_tlv_hdr *hdr;
1040	struct br_mrp_tlv_hdr _hdr;
1041
 1042	/* Each MRP header starts with a version field which is 16 bits.
 1043	 * Therefore skip the version and read the TLV header directly.
 1044	 */
1045	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
1046	if (!hdr)
1047		return 0xff;
1048
1049	return hdr->type;
1050}
1051
1052static bool br_mrp_mrm_behaviour(struct br_mrp *mrp)
1053{
1054	if (mrp->ring_role == BR_MRP_RING_ROLE_MRM ||
1055	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor))
1056		return true;
1057
1058	return false;
1059}
1060
1061static bool br_mrp_mrc_behaviour(struct br_mrp *mrp)
1062{
1063	if (mrp->ring_role == BR_MRP_RING_ROLE_MRC ||
1064	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor))
1065		return true;
1066
1067	return false;
1068}
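/* These two helpers capture the dual nature of the MRA role: an MRA that is
 * not monitoring behaves as the ring manager (MRM), while a monitoring MRA
 * behaves as a client (MRC) and only watches the MRP_Test frames of the
 * elected manager.
 */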
1069
 1070/* This will just forward the frame to the other MRP ring ports, depending
 1071 * on the frame type, the ring role and the interconnect role
1072 * note: already called with rcu_read_lock
1073 */
1074static int br_mrp_rcv(struct net_bridge_port *p,
1075		      struct sk_buff *skb, struct net_device *dev)
1076{
1077	struct net_bridge_port *p_port, *s_port, *i_port = NULL;
1078	struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL;
1079	struct net_bridge *br;
1080	struct br_mrp *mrp;
1081
1082	/* If port is disabled don't accept any frames */
1083	if (p->state == BR_STATE_DISABLED)
1084		return 0;
1085
1086	br = p->br;
 1087	mrp = br_mrp_find_port(br, p);
1088	if (unlikely(!mrp))
1089		return 0;
1090
1091	p_port = rcu_dereference(mrp->p_port);
1092	if (!p_port)
1093		return 0;
1094	p_dst = p_port;
1095
1096	s_port = rcu_dereference(mrp->s_port);
1097	if (!s_port)
1098		return 0;
1099	s_dst = s_port;
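	/* p_dst/s_dst (and i_dst for interconnect frames) start out meaning
	 * "forward on this port"; the checks below selectively clear them,
	 * and jumping to no_forward drops the frame on all three.
	 */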
1100
1101	/* If the frame is a ring frame then it is not required to check the
1102	 * interconnect role and ports to process or forward the frame
1103	 */
1104	if (br_mrp_ring_frame(skb)) {
1105		/* If the role is MRM then don't forward the frames */
1106		if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
1107			br_mrp_mrm_process(mrp, p, skb);
1108			goto no_forward;
1109		}
1110
1111		/* If the role is MRA then don't forward the frames if it
1112		 * behaves as MRM node
1113		 */
1114		if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
1115			if (!mrp->test_monitor) {
1116				br_mrp_mrm_process(mrp, p, skb);
1117				goto no_forward;
1118			}
1119
1120			br_mrp_mra_process(mrp, br, p, skb);
1121		}
1122
1123		goto forward;
1124	}
1125
1126	if (br_mrp_in_frame(skb)) {
1127		u8 in_type = br_mrp_get_frame_type(skb);
1128
1129		i_port = rcu_dereference(mrp->i_port);
1130		i_dst = i_port;
1131
 1132		/* If the ring port is in blocking state it should not forward
 1133		 * InTest frames
 1134		 */
1135		if (br_mrp_is_ring_port(p_port, s_port, p) &&
1136		    p->state == BR_STATE_BLOCKING &&
1137		    in_type == BR_MRP_TLV_HEADER_IN_TEST)
1138			goto no_forward;
1139
 1140		/* Nodes that behave as MRM need to stop forwarding the frames
 1141		 * in case the ring is closed, otherwise there will be a loop.
 1142		 * In this case the frame is not forwarded between the ring ports.
 1143		 */
1144		if (br_mrp_mrm_behaviour(mrp) &&
1145		    br_mrp_is_ring_port(p_port, s_port, p) &&
1146		    (s_port->state != BR_STATE_FORWARDING ||
1147		     p_port->state != BR_STATE_FORWARDING)) {
1148			p_dst = NULL;
1149			s_dst = NULL;
1150		}
1151
 1152		/* A node that behaves as MRC and doesn't have an interconnect
 1153		 * role should forward all frames between the ring ports
 1154		 * because it doesn't have an interconnect port
 1155		 */
1156		if (br_mrp_mrc_behaviour(mrp) &&
1157		    mrp->in_role == BR_MRP_IN_ROLE_DISABLED)
1158			goto forward;
1159
1160		if (mrp->in_role == BR_MRP_IN_ROLE_MIM) {
1161			if (in_type == BR_MRP_TLV_HEADER_IN_TEST) {
 1162				/* MIM should not forward its own InTest
 1163				 * frames
 1164				 */
1165				if (br_mrp_mim_process(mrp, p, skb)) {
1166					goto no_forward;
1167				} else {
1168					if (br_mrp_is_ring_port(p_port, s_port,
1169								p))
1170						i_dst = NULL;
1171
1172					if (br_mrp_is_in_port(i_port, p))
1173						goto no_forward;
1174				}
1175			} else {
1176				/* MIM should forward IntLinkChange/Status and
1177				 * IntTopoChange between ring ports but MIM
1178				 * should not forward IntLinkChange/Status and
1179				 * IntTopoChange if the frame was received at
1180				 * the interconnect port
1181				 */
1182				if (br_mrp_is_ring_port(p_port, s_port, p))
1183					i_dst = NULL;
1184
1185				if (br_mrp_is_in_port(i_port, p))
1186					goto no_forward;
1187			}
1188		}
1189
1190		if (mrp->in_role == BR_MRP_IN_ROLE_MIC) {
1191			/* MIC should forward InTest frames on all ports
1192			 * regardless of the received port
1193			 */
1194			if (in_type == BR_MRP_TLV_HEADER_IN_TEST)
1195				goto forward;
1196
1197			/* MIC should forward IntLinkChange frames only if they
1198			 * are received on ring ports to all the ports
1199			 */
1200			if (br_mrp_is_ring_port(p_port, s_port, p) &&
1201			    (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
1202			     in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
1203				goto forward;
1204
 1205			/* MIC should forward IntLinkStatus frames only to the
 1206			 * interconnect port if they were received on a ring
 1207			 * port. If they are received on the interconnect port
 1208			 * then they should be forwarded on both ring ports
 1209			 */
1210			if (br_mrp_is_ring_port(p_port, s_port, p) &&
1211			    in_type == BR_MRP_TLV_HEADER_IN_LINK_STATUS) {
1212				p_dst = NULL;
1213				s_dst = NULL;
1214			}
1215
1216			/* Should forward the InTopo frames only between the
1217			 * ring ports
1218			 */
1219			if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) {
1220				i_dst = NULL;
1221				goto forward;
1222			}
1223
1224			/* In all the other cases don't forward the frames */
1225			goto no_forward;
1226		}
1227	}
1228
1229forward:
1230	if (p_dst)
1231		br_forward(p_dst, skb, true, false);
1232	if (s_dst)
1233		br_forward(s_dst, skb, true, false);
1234	if (i_dst)
1235		br_forward(i_dst, skb, true, false);
1236
1237no_forward:
1238	return 1;
1239}
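/* Summary of the decisions implemented above (derived from this code, not
 * quoted from the IEC 62439-2 text):
 *
 *	frame type    role/behaviour        action
 *	ring          MRM, MRA-as-manager   process locally, never forward
 *	ring          MRC, MRA-monitor      forward between ring ports
 *	interconnect  MIM                   consume own InTest, keep ring
 *	                                    and interconnect traffic apart
 *	interconnect  MIC                   forward depending on sub-type
 *	                                    and receiving port
 */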
1240
 1241/* Check if the frame was received on a port that is part of an MRP ring
 1242 * and if the frame has the MRP ethertype. In that case process the frame,
 1243 * otherwise do normal forwarding.
 1244 * note: already called with rcu_read_lock
1245 */
1246static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
1247{
1248	/* If there is no MRP instance do normal forwarding */
1249	if (likely(!(p->flags & BR_MRP_AWARE)))
1250		goto out;
1251
 1252	return br_mrp_rcv(p, skb, p->dev);
 1253out:
1254	return 0;
1255}
1256
1257bool br_mrp_enabled(struct net_bridge *br)
1258{
 1259	return !list_empty(&br->mrp_list);
1260}
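/* br_mrp_enabled() reports whether this bridge has at least one MRP
 * instance; since it only checks list emptiness, callers can afford to use
 * it as a cheap per-frame gate before doing any MRP-specific work.
 */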