v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright 2011-2014 Autronica Fire and Security AS
  3 *
  4 * Author(s):
  5 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
  6 *
  7 * The HSR spec says never to forward the same frame twice on the same
  8 * interface. A frame is identified by its source MAC address and its HSR
  9 * sequence number. This code keeps track of senders and their sequence numbers
 10 * to allow filtering of duplicate frames, and to detect HSR ring errors.
 11 * Same code handles filtering of duplicates for PRP as well.
 12 */
 13
 14#include <linux/if_ether.h>
 15#include <linux/etherdevice.h>
 16#include <linux/slab.h>
 17#include <linux/rculist.h>
 18#include "hsr_main.h"
 19#include "hsr_framereg.h"
 20#include "hsr_netlink.h"
 21
 22/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
 23 * false otherwise.
 24 */
 25static bool seq_nr_after(u16 a, u16 b)
 26{
 27	/* Remove inconsistency where
 28	 * seq_nr_after(a, b) == seq_nr_before(a, b)
 29	 */
 30	if ((int)b - a == 32768)
 31		return false;
 32
 33	return (((s16)(b - a)) < 0);
 34}
 35
 36#define seq_nr_before(a, b)		seq_nr_after((b), (a))
 37#define seq_nr_before_or_eq(a, b)	(!seq_nr_after((a), (b)))
 38
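The helpers above implement serial-number arithmetic on the 16-bit sequence space; the explicit check makes the comparison consistent when the two values are exactly half the space apart. A minimal stand-alone sketch (userspace C, illustrative names, not part of the kernel source) that exercises the same comparison around the wrap point:

/* Build: cc -o seqdemo seqdemo.c && ./seqdemo */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool demo_seq_nr_after(uint16_t a, uint16_t b)
{
	/* Exactly half the sequence space apart: force "not after" so that
	 * after(a, b) and after(b, a) can never both be true. */
	if ((int)b - a == 32768)
		return false;

	/* Relies on the usual two's-complement truncation, like the helper above. */
	return (int16_t)(b - a) < 0;
}

int main(void)
{
	printf("after(2, 1)     = %d\n", demo_seq_nr_after(2, 1));	/* 1 */
	printf("after(1, 65535) = %d\n", demo_seq_nr_after(1, 65535));	/* 1: wraps */
	printf("after(65535, 1) = %d\n", demo_seq_nr_after(65535, 1));	/* 0 */
	printf("after(32768, 0) = %d\n", demo_seq_nr_after(32768, 0));	/* 1 */
	printf("after(0, 32768) = %d\n", demo_seq_nr_after(0, 32768));	/* 0: tie-break */
	return 0;
}
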
 39bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
 40{
 41	struct hsr_self_node *sn;
 42	bool ret = false;
 43
 44	rcu_read_lock();
 45	sn = rcu_dereference(hsr->self_node);
 46	if (!sn) {
 47		WARN_ONCE(1, "HSR: No self node\n");
 48		goto out;
 49	}
 50
 51	if (ether_addr_equal(addr, sn->macaddress_A) ||
 52	    ether_addr_equal(addr, sn->macaddress_B))
 53		ret = true;
 54out:
 55	rcu_read_unlock();
 56	return ret;
 57}
 58
 59/* Search for mac entry. Caller must hold rcu read lock.
 60 */
 61static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
 62					    const unsigned char addr[ETH_ALEN])
 63{
 64	struct hsr_node *node;
 65
 66	list_for_each_entry_rcu(node, node_db, mac_list) {
 67		if (ether_addr_equal(node->macaddress_A, addr))
 68			return node;
 69	}
 70
 71	return NULL;
 72}
 73
 74/* Helper for device init; the self_node is used in hsr_rcv() to recognize
 75 * frames from self that have been looped over the HSR ring.
 76 */
 77int hsr_create_self_node(struct hsr_priv *hsr,
 78			 const unsigned char addr_a[ETH_ALEN],
 79			 const unsigned char addr_b[ETH_ALEN])
 80{
 81	struct hsr_self_node *sn, *old;
 82
 83	sn = kmalloc(sizeof(*sn), GFP_KERNEL);
 84	if (!sn)
 85		return -ENOMEM;
 86
 87	ether_addr_copy(sn->macaddress_A, addr_a);
 88	ether_addr_copy(sn->macaddress_B, addr_b);
 89
 90	spin_lock_bh(&hsr->list_lock);
 91	old = rcu_replace_pointer(hsr->self_node, sn,
 92				  lockdep_is_held(&hsr->list_lock));
 93	spin_unlock_bh(&hsr->list_lock);
 94
 95	if (old)
 96		kfree_rcu(old, rcu_head);
 97	return 0;
 98}
 99
100void hsr_del_self_node(struct hsr_priv *hsr)
101{
102	struct hsr_self_node *old;
103
104	spin_lock_bh(&hsr->list_lock);
105	old = rcu_replace_pointer(hsr->self_node, NULL,
106				  lockdep_is_held(&hsr->list_lock));
107	spin_unlock_bh(&hsr->list_lock);
108	if (old)
109		kfree_rcu(old, rcu_head);
110}
111
112void hsr_del_nodes(struct list_head *node_db)
113{
114	struct hsr_node *node;
115	struct hsr_node *tmp;
116
117	list_for_each_entry_safe(node, tmp, node_db, mac_list)
118		kfree(node);
119}
120
121void prp_handle_san_frame(bool san, enum hsr_port_type port,
122			  struct hsr_node *node)
123{
124	/* Mark if the SAN node is over LAN_A or LAN_B */
125	if (port == HSR_PT_SLAVE_A) {
126		node->san_a = true;
127		return;
128	}
129
130	if (port == HSR_PT_SLAVE_B)
131		node->san_b = true;
132}
133
134/* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
135 * seq_out is used to initialize filtering of outgoing duplicate frames
136 * originating from the newly added node.
137 */
138static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
139				     struct list_head *node_db,
140				     unsigned char addr[],
141				     u16 seq_out, bool san,
142				     enum hsr_port_type rx_port)
143{
144	struct hsr_node *new_node, *node;
145	unsigned long now;
146	int i;
147
148	new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
149	if (!new_node)
150		return NULL;
151
152	ether_addr_copy(new_node->macaddress_A, addr);
153	spin_lock_init(&new_node->seq_out_lock);
154
155	/* We are only interested in time diffs here, so use current jiffies
156	 * as initialization. (0 could trigger a spurious ring error warning).
157	 */
158	now = jiffies;
159	for (i = 0; i < HSR_PT_PORTS; i++) {
160		new_node->time_in[i] = now;
161		new_node->time_out[i] = now;
162	}
163	for (i = 0; i < HSR_PT_PORTS; i++)
164		new_node->seq_out[i] = seq_out;
165
166	if (san && hsr->proto_ops->handle_san_frame)
167		hsr->proto_ops->handle_san_frame(san, rx_port, new_node);
168
169	spin_lock_bh(&hsr->list_lock);
170	list_for_each_entry_rcu(node, node_db, mac_list,
171				lockdep_is_held(&hsr->list_lock)) {
172		if (ether_addr_equal(node->macaddress_A, addr))
173			goto out;
174		if (ether_addr_equal(node->macaddress_B, addr))
175			goto out;
176	}
177	list_add_tail_rcu(&new_node->mac_list, node_db);
178	spin_unlock_bh(&hsr->list_lock);
179	return new_node;
180out:
181	spin_unlock_bh(&hsr->list_lock);
182	kfree(new_node);
183	return node;
184}
185
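The locking pattern in hsr_add_node() above (allocate before taking the lock, re-scan the list under the lock, and discard the fresh allocation if another context won the race) can be sketched in plain userspace C. This is an illustration of the pattern only: a pthread mutex stands in for hsr->list_lock and a plain singly linked list for the RCU-protected list.

/* Build: cc -pthread -o nodedemo nodedemo.c && ./nodedemo */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_node {
	unsigned char mac[6];
	struct demo_node *next;
};

static struct demo_node *node_db;	/* head of the node list */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Add an entry for 'mac' unless one already exists; return the entry in use. */
static struct demo_node *demo_add_node(const unsigned char mac[6])
{
	struct demo_node *new_node, *node;

	/* Allocate outside the lock, as hsr_add_node() does. */
	new_node = calloc(1, sizeof(*new_node));
	if (!new_node)
		return NULL;
	memcpy(new_node->mac, mac, 6);

	pthread_mutex_lock(&list_lock);
	for (node = node_db; node; node = node->next) {
		if (!memcmp(node->mac, mac, 6)) {
			/* Lost the race: drop our copy, reuse the existing entry. */
			pthread_mutex_unlock(&list_lock);
			free(new_node);
			return node;
		}
	}
	new_node->next = node_db;	/* (the kernel code adds to the tail instead) */
	node_db = new_node;
	pthread_mutex_unlock(&list_lock);
	return new_node;
}

int main(void)
{
	const unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 0x01 };
	struct demo_node *a = demo_add_node(mac);
	struct demo_node *b = demo_add_node(mac);

	printf("second add reused the first entry: %s\n", a && a == b ? "yes" : "no");
	return 0;
}
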
186void prp_update_san_info(struct hsr_node *node, bool is_sup)
187{
188	if (!is_sup)
189		return;
190
191	node->san_a = false;
192	node->san_b = false;
193}
194
195/* Get the hsr_node from which 'skb' was sent.
196 */
197struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
198			      struct sk_buff *skb, bool is_sup,
199			      enum hsr_port_type rx_port)
200{
201	struct hsr_priv *hsr = port->hsr;
202	struct hsr_node *node;
203	struct ethhdr *ethhdr;
204	struct prp_rct *rct;
205	bool san = false;
206	u16 seq_out;
207
208	if (!skb_mac_header_was_set(skb))
209		return NULL;
210
211	ethhdr = (struct ethhdr *)skb_mac_header(skb);
212
213	list_for_each_entry_rcu(node, node_db, mac_list) {
214		if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
215			if (hsr->proto_ops->update_san_info)
216				hsr->proto_ops->update_san_info(node, is_sup);
217			return node;
218		}
219		if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) {
220			if (hsr->proto_ops->update_san_info)
221				hsr->proto_ops->update_san_info(node, is_sup);
222			return node;
223		}
224	}
225
226	/* A node entry may be created for any node connected to an
227	 * HSR/PRP device.
228	 */
229	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
230	    ethhdr->h_proto == htons(ETH_P_HSR)) {
231		/* Use the existing sequence_nr from the tag as starting point
232		 * for filtering duplicate frames.
233		 */
234		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
235	} else {
236		rct = skb_get_PRP_rct(skb);
237		if (rct && prp_check_lsdu_size(skb, rct, is_sup)) {
238			seq_out = prp_get_skb_sequence_nr(rct);
239		} else {
240			if (rx_port != HSR_PT_MASTER)
241				san = true;
242			seq_out = HSR_SEQNR_START;
243		}
244	}
245
246	return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out,
247			    san, rx_port);
248}
249
250/* Use the Supervision frame's info about a possible macaddress_B for merging
251 * nodes that have previously had their macaddress_B registered as a separate
252 * node.
253 */
254void hsr_handle_sup_frame(struct hsr_frame_info *frame)
255{
256	struct hsr_node *node_curr = frame->node_src;
257	struct hsr_port *port_rcv = frame->port_rcv;
258	struct hsr_priv *hsr = port_rcv->hsr;
259	struct hsr_sup_payload *hsr_sp;
260	struct hsr_sup_tlv *hsr_sup_tlv;
261	struct hsr_node *node_real;
262	struct sk_buff *skb = NULL;
263	struct list_head *node_db;
264	struct ethhdr *ethhdr;
265	int i;
266	unsigned int pull_size = 0;
267	unsigned int total_pull_size = 0;
268
269	/* Here either frame->skb_hsr or frame->skb_prp should be
270	 * valid, as a supervision frame will always have protocol
271	 * header info.
272	 */
273	if (frame->skb_hsr)
274		skb = frame->skb_hsr;
275	else if (frame->skb_prp)
276		skb = frame->skb_prp;
277	else if (frame->skb_std)
278		skb = frame->skb_std;
279	if (!skb)
280		return;
281
282	/* Leave the ethernet header. */
283	pull_size = sizeof(struct ethhdr);
284	skb_pull(skb, pull_size);
285	total_pull_size += pull_size;
286
287	ethhdr = (struct ethhdr *)skb_mac_header(skb);
288
289	/* And leave the HSR tag. */
290	if (ethhdr->h_proto == htons(ETH_P_HSR)) {
291		pull_size = sizeof(struct ethhdr);
292		skb_pull(skb, pull_size);
293		total_pull_size += pull_size;
294	}
295
296	/* And leave the HSR sup tag. */
297	pull_size = sizeof(struct hsr_tag);
298	skb_pull(skb, pull_size);
299	total_pull_size += pull_size;
300
301	/* get HSR sup payload */
302	hsr_sp = (struct hsr_sup_payload *)skb->data;
303
304	/* Merge node_curr (registered on macaddress_B) into node_real */
305	node_db = &port_rcv->hsr->node_db;
306	node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
307	if (!node_real)
308		/* No frame received from AddrA of this node yet */
309		node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
310					 HSR_SEQNR_START - 1, true,
311					 port_rcv->type);
312	if (!node_real)
313		goto done; /* No mem */
314	if (node_real == node_curr)
315		/* Node has already been merged */
316		goto done;
317
318	/* Leave the first HSR sup payload. */
319	pull_size = sizeof(struct hsr_sup_payload);
320	skb_pull(skb, pull_size);
321	total_pull_size += pull_size;
322
323	/* Get second supervision tlv */
324	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
325	/* And check if it is a redbox mac TLV */
326	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
327		/* We could stop here after pushing hsr_sup_payload,
328		 * or proceed and allow macaddress_B for redboxes as well.
329		 */
330		/* Sanity check length */
331		if (hsr_sup_tlv->HSR_TLV_length != 6)
332			goto done;
333
334		/* Leave the second HSR sup tlv. */
335		pull_size = sizeof(struct hsr_sup_tlv);
336		skb_pull(skb, pull_size);
337		total_pull_size += pull_size;
338
339		/* Get redbox mac address. */
340		hsr_sp = (struct hsr_sup_payload *)skb->data;
341
342		/* Check if redbox mac and node mac are equal. */
343		if (!ether_addr_equal(node_real->macaddress_A, hsr_sp->macaddress_A)) {
344			/* This is a redbox supervision frame for a VDAN! */
345			goto done;
346		}
347	}
348
349	ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
350	spin_lock_bh(&node_real->seq_out_lock);
351	for (i = 0; i < HSR_PT_PORTS; i++) {
352		if (!node_curr->time_in_stale[i] &&
353		    time_after(node_curr->time_in[i], node_real->time_in[i])) {
354			node_real->time_in[i] = node_curr->time_in[i];
355			node_real->time_in_stale[i] =
356						node_curr->time_in_stale[i];
357		}
358		if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
359			node_real->seq_out[i] = node_curr->seq_out[i];
360	}
361	spin_unlock_bh(&node_real->seq_out_lock);
362	node_real->addr_B_port = port_rcv->type;
363
364	spin_lock_bh(&hsr->list_lock);
365	if (!node_curr->removed) {
366		list_del_rcu(&node_curr->mac_list);
367		node_curr->removed = true;
368		kfree_rcu(node_curr, rcu_head);
369	}
370	spin_unlock_bh(&hsr->list_lock);
371
372done:
373	/* Push back here */
374	skb_push(skb, total_pull_size);
375}
376
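hsr_handle_sup_frame() above walks the supervision frame by pulling one header at a time off the front of the skb, accumulating total_pull_size so the skb can be restored with a single skb_push() before returning. A minimal stand-alone sketch of that bookkeeping (userspace C; the byte counts are illustrative, the real code pulls sizeof() of the HSR header structs):

/* Build: cc -o pulldemo pulldemo.c && ./pulldemo */
#include <stddef.h>
#include <stdio.h>

struct demo_buf {
	unsigned char *data;	/* current start of the data */
	size_t len;		/* bytes remaining from 'data' */
};

static void demo_pull(struct demo_buf *b, size_t n)
{
	b->data += n;
	b->len -= n;
}

static void demo_push(struct demo_buf *b, size_t n)
{
	b->data -= n;
	b->len += n;
}

int main(void)
{
	unsigned char frame[64] = { 0 };
	struct demo_buf buf = { frame, sizeof(frame) };
	size_t pull_size, total_pull_size = 0;

	/* Illustrative sizes only; the real code pulls sizeof(struct ethhdr),
	 * sizeof(struct hsr_tag) and so on. */
	pull_size = 14;			/* "Ethernet header" */
	demo_pull(&buf, pull_size);
	total_pull_size += pull_size;

	pull_size = 6;			/* "HSR tag" */
	demo_pull(&buf, pull_size);
	total_pull_size += pull_size;

	printf("payload now starts %zu bytes into the frame\n", total_pull_size);

	/* Restore the buffer in one step, as the 'done:' label above does. */
	demo_push(&buf, total_pull_size);
	printf("data pointer back at the start: %s\n", buf.data == frame ? "yes" : "no");
	return 0;
}
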
377/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
378 *
379 * If the frame was sent by a node's B interface, replace the source
380 * address with that node's "official" address (macaddress_A) so that upper
381 * layers recognize where it came from.
382 */
383void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
384{
385	if (!skb_mac_header_was_set(skb)) {
386		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
387		return;
388	}
389
390	memcpy(&eth_hdr(skb)->h_source, node->macaddress_A, ETH_ALEN);
391}
392
393/* 'skb' is a frame meant for another host.
394 * 'port' is the outgoing interface
395 *
396 * Substitute the target (dest) MAC address if necessary, so that it matches the
397 * recipient interface MAC address, regardless of whether that is the
398 * recipient's A or B interface.
399 * This is needed to keep the packets flowing through switches that learn on
400 * which "side" the different interfaces are.
401 */
402void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
403			 struct hsr_port *port)
404{
405	struct hsr_node *node_dst;
406
407	if (!skb_mac_header_was_set(skb)) {
408		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
409		return;
410	}
411
412	if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
413		return;
414
415	node_dst = find_node_by_addr_A(&port->hsr->node_db,
416				       eth_hdr(skb)->h_dest);
417	if (!node_dst) {
418		if (net_ratelimit())
419			netdev_err(skb->dev, "%s: Unknown node\n", __func__);
420		return;
421	}
422	if (port->type != node_dst->addr_B_port)
423		return;
424
425	if (is_valid_ether_addr(node_dst->macaddress_B))
426		ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
427}
428
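A stand-alone sketch of the substitution performed by hsr_addr_subst_dest() above (userspace C, illustrative types): the destination is rewritten to the peer's B address only when the frame goes out on the port where that B address was learned.

/* Build: cc -o substdemo substdemo.c && ./substdemo */
#include <stdio.h>
#include <string.h>

struct demo_node {
	unsigned char addr_a[6];
	unsigned char addr_b[6];
	int addr_b_port;	/* port where addr_b was learned, -1 if none */
};

static void demo_subst_dest(unsigned char dest[6],
			    const struct demo_node *peer, int out_port)
{
	/* Only rewrite when sending out on the side where B lives. */
	if (out_port != peer->addr_b_port)
		return;
	memcpy(dest, peer->addr_b, 6);
}

int main(void)
{
	struct demo_node peer = {
		.addr_a = { 2, 0, 0, 0, 0, 0xaa },
		.addr_b = { 2, 0, 0, 0, 0, 0xbb },
		.addr_b_port = 1,
	};
	unsigned char dest[6];

	memcpy(dest, peer.addr_a, 6);
	demo_subst_dest(dest, &peer, 0);	/* A side: unchanged */
	printf("via port 0: dest ends in 0x%02x\n", dest[5]);

	demo_subst_dest(dest, &peer, 1);	/* B side: rewritten */
	printf("via port 1: dest ends in 0x%02x\n", dest[5]);
	return 0;
}
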
429void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
430			   u16 sequence_nr)
431{
432	/* Don't register incoming frames without a valid sequence number. This
433	 * ensures entries of restarted nodes get pruned so that they can
434	 * re-register and resume communications.
435	 */
436	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
437	    seq_nr_before(sequence_nr, node->seq_out[port->type]))
438		return;
439
440	node->time_in[port->type] = jiffies;
441	node->time_in_stale[port->type] = false;
442}
443
444/* 'skb' is a HSR Ethernet frame (with a HSR tag inserted), with a valid
445 * ethhdr->h_source address and skb->mac_header set.
446 *
447 * Return:
448 *	 1 if frame can be shown to have been sent recently on this interface,
449 *	 0 otherwise, or
450 *	 negative error code on error
451 */
452int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
453			   u16 sequence_nr)
454{
455	spin_lock_bh(&node->seq_out_lock);
456	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) &&
457	    time_is_after_jiffies(node->time_out[port->type] +
458	    msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) {
459		spin_unlock_bh(&node->seq_out_lock);
460		return 1;
461	}
462
463	node->time_out[port->type] = jiffies;
464	node->seq_out[port->type] = sequence_nr;
465	spin_unlock_bh(&node->seq_out_lock);
466	return 0;
467}
468
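A stand-alone sketch of the duplicate filter that hsr_register_frame_out() above implements (userspace C, illustrative names; wall-clock milliseconds stand in for jiffies and the locking is omitted). It also shows why hsr_get_node() initializes seq_out to one less than the first observed sequence number: the first real frame then passes the filter.

/* Build: cc -o dupdemo dupdemo.c && ./dupdemo */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_FORGET_TIME_MS 400	/* stand-in for HSR_ENTRY_FORGET_TIME */

struct demo_port_state {
	uint16_t seq_out;	/* last sequence number sent on this port */
	uint64_t time_out_ms;	/* when it was sent */
};

static bool demo_seq_nr_after(uint16_t a, uint16_t b)
{
	if ((int)b - a == 32768)
		return false;
	return (int16_t)(b - a) < 0;
}

#define demo_seq_nr_before_or_eq(a, b) (!demo_seq_nr_after((a), (b)))

/* Return 1 if 'seq' was already sent recently on this port, else record it. */
static int demo_register_frame_out(struct demo_port_state *p,
				   uint16_t seq, uint64_t now_ms)
{
	if (demo_seq_nr_before_or_eq(seq, p->seq_out) &&
	    now_ms < p->time_out_ms + DEMO_FORGET_TIME_MS)
		return 1;	/* duplicate: drop */

	p->time_out_ms = now_ms;
	p->seq_out = seq;
	return 0;		/* not seen recently: forward */
}

int main(void)
{
	/* Initializing seq_out to "first expected - 1" lets the very first
	 * frame through the filter. */
	struct demo_port_state port = { .seq_out = 99, .time_out_ms = 0 };

	printf("seq 100 at t=10ms  -> %d (forward)\n",
	       demo_register_frame_out(&port, 100, 10));
	printf("seq 100 at t=20ms  -> %d (duplicate)\n",
	       demo_register_frame_out(&port, 100, 20));
	printf("seq 101 at t=30ms  -> %d (forward)\n",
	       demo_register_frame_out(&port, 101, 30));
	printf("seq 100 at t=600ms -> %d (stale entry forgotten)\n",
	       demo_register_frame_out(&port, 100, 600));
	return 0;
}
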
469static struct hsr_port *get_late_port(struct hsr_priv *hsr,
470				      struct hsr_node *node)
471{
472	if (node->time_in_stale[HSR_PT_SLAVE_A])
473		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
474	if (node->time_in_stale[HSR_PT_SLAVE_B])
475		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
476
477	if (time_after(node->time_in[HSR_PT_SLAVE_B],
478		       node->time_in[HSR_PT_SLAVE_A] +
479					msecs_to_jiffies(MAX_SLAVE_DIFF)))
480		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
481	if (time_after(node->time_in[HSR_PT_SLAVE_A],
482		       node->time_in[HSR_PT_SLAVE_B] +
483					msecs_to_jiffies(MAX_SLAVE_DIFF)))
484		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
485
486	return NULL;
487}
488
489/* Remove stale sequence_nr records. Called by timer every
490 * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
491 */
492void hsr_prune_nodes(struct timer_list *t)
493{
494	struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
495	struct hsr_node *node;
496	struct hsr_node *tmp;
497	struct hsr_port *port;
498	unsigned long timestamp;
499	unsigned long time_a, time_b;
500
501	spin_lock_bh(&hsr->list_lock);
502	list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
503		/* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
504		 * nor time_in[HSR_PT_SLAVE_B] will ever be updated for
505		 * the master port. The master node would otherwise be
506		 * repeatedly pruned, leading to packet loss.
507		 */
508		if (hsr_addr_is_self(hsr, node->macaddress_A))
509			continue;
510
511		/* Shorthand */
512		time_a = node->time_in[HSR_PT_SLAVE_A];
513		time_b = node->time_in[HSR_PT_SLAVE_B];
514
515		/* Check for timestamps old enough to risk wrap-around */
516		if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
517			node->time_in_stale[HSR_PT_SLAVE_A] = true;
518		if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
519			node->time_in_stale[HSR_PT_SLAVE_B] = true;
520
521		/* Get age of newest frame from node.
522		 * At least one time_in is OK here; nodes get pruned long
523		 * before both time_ins can get stale
524		 */
525		timestamp = time_a;
526		if (node->time_in_stale[HSR_PT_SLAVE_A] ||
527		    (!node->time_in_stale[HSR_PT_SLAVE_B] &&
528		    time_after(time_b, time_a)))
529			timestamp = time_b;
530
531		/* Warn of ring error only as long as we get frames at all */
532		if (time_is_after_jiffies(timestamp +
533				msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
534			rcu_read_lock();
535			port = get_late_port(hsr, node);
536			if (port)
537				hsr_nl_ringerror(hsr, node->macaddress_A, port);
538			rcu_read_unlock();
539		}
540
541		/* Prune old entries */
542		if (time_is_before_jiffies(timestamp +
543				msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
544			hsr_nl_nodedown(hsr, node->macaddress_A);
545			if (!node->removed) {
546				list_del_rcu(&node->mac_list);
547				node->removed = true;
548				/* Note that we need to free this entry later: */
549				kfree_rcu(node, rcu_head);
550			}
551		}
552	}
553	spin_unlock_bh(&hsr->list_lock);
554
555	/* Restart timer */
556	mod_timer(&hsr->prune_timer,
557		  jiffies + msecs_to_jiffies(PRUNE_PERIOD));
558}
559
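The pruning code above relies on the kernel's wrap-safe jiffies comparisons (time_after(), time_is_before_jiffies()). A minimal userspace sketch of the underlying idea:

/* Build: cc -o timedemo timedemo.c && ./timedemo */
#include <stdbool.h>
#include <stdio.h>

/* Same idea as the kernel's time_after(a, b): true if a is later than b,
 * correct even if the tick counter wrapped in between. */
static bool demo_time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long near_wrap = (unsigned long)-10;	/* 10 ticks before the wrap */
	unsigned long later = 5;			/* 15 ticks after that */

	printf("plain '>' compare: %d\n", later > near_wrap);			/* 0: wrong */
	printf("wrap-safe compare: %d\n", demo_time_after(later, near_wrap));	/* 1 */
	return 0;
}
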
560void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
561			unsigned char addr[ETH_ALEN])
562{
563	struct hsr_node *node;
564
565	if (!_pos) {
566		node = list_first_or_null_rcu(&hsr->node_db,
567					      struct hsr_node, mac_list);
568		if (node)
569			ether_addr_copy(addr, node->macaddress_A);
570		return node;
571	}
572
573	node = _pos;
574	list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
575		ether_addr_copy(addr, node->macaddress_A);
576		return node;
577	}
578
579	return NULL;
580}
581
582int hsr_get_node_data(struct hsr_priv *hsr,
583		      const unsigned char *addr,
584		      unsigned char addr_b[ETH_ALEN],
585		      unsigned int *addr_b_ifindex,
586		      int *if1_age,
587		      u16 *if1_seq,
588		      int *if2_age,
589		      u16 *if2_seq)
590{
591	struct hsr_node *node;
592	struct hsr_port *port;
593	unsigned long tdiff;
594
595	node = find_node_by_addr_A(&hsr->node_db, addr);
596	if (!node)
597		return -ENOENT;
598
599	ether_addr_copy(addr_b, node->macaddress_B);
600
601	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
602	if (node->time_in_stale[HSR_PT_SLAVE_A])
603		*if1_age = INT_MAX;
604#if HZ <= MSEC_PER_SEC
605	else if (tdiff > msecs_to_jiffies(INT_MAX))
606		*if1_age = INT_MAX;
607#endif
608	else
609		*if1_age = jiffies_to_msecs(tdiff);
610
611	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
612	if (node->time_in_stale[HSR_PT_SLAVE_B])
613		*if2_age = INT_MAX;
614#if HZ <= MSEC_PER_SEC
615	else if (tdiff > msecs_to_jiffies(INT_MAX))
616		*if2_age = INT_MAX;
617#endif
618	else
619		*if2_age = jiffies_to_msecs(tdiff);
620
621	/* Present sequence numbers as if they were incoming on interface */
622	*if1_seq = node->seq_out[HSR_PT_SLAVE_B];
623	*if2_seq = node->seq_out[HSR_PT_SLAVE_A];
624
625	if (node->addr_B_port != HSR_PT_NONE) {
626		port = hsr_port_get_hsr(hsr, node->addr_B_port);
627		*addr_b_ifindex = port->dev->ifindex;
628	} else {
629		*addr_b_ifindex = -1;
630	}
631
632	return 0;
633}
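A small userspace sketch of the age reporting done by hsr_get_node_data() above: convert a tick delta to milliseconds, clamping to INT_MAX for stale or very old entries. DEMO_HZ is an illustrative tick rate, not the kernel's HZ.

/* Build: cc -o agedemo agedemo.c && ./agedemo */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_HZ 1000ULL	/* illustrative tick rate */

static int demo_node_age_ms(unsigned long tdiff_ticks, bool stale)
{
	unsigned long long ms;

	if (stale)
		return INT_MAX;

	ms = (unsigned long long)tdiff_ticks * 1000ULL / DEMO_HZ;
	return ms > INT_MAX ? INT_MAX : (int)ms;
}

int main(void)
{
	printf("fresh entry, 2500 ticks old: %d ms\n", demo_node_age_ms(2500, false));
	printf("stale entry:                 %d ms (INT_MAX)\n", demo_node_age_ms(0, true));
	return 0;
}
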
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright 2011-2014 Autronica Fire and Security AS
  3 *
  4 * Author(s):
  5 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
  6 *
  7 * The HSR spec says never to forward the same frame twice on the same
  8 * interface. A frame is identified by its source MAC address and its HSR
  9 * sequence number. This code keeps track of senders and their sequence numbers
 10 * to allow filtering of duplicate frames, and to detect HSR ring errors.
 11 * Same code handles filtering of duplicates for PRP as well.
 12 */
 13
 14#include <linux/if_ether.h>
 15#include <linux/etherdevice.h>
 16#include <linux/slab.h>
 17#include <linux/rculist.h>
 18#include "hsr_main.h"
 19#include "hsr_framereg.h"
 20#include "hsr_netlink.h"
 21
 22/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
 23 * false otherwise.
 24 */
 25static bool seq_nr_after(u16 a, u16 b)
 26{
 27	/* Remove inconsistency where
 28	 * seq_nr_after(a, b) == seq_nr_before(a, b)
 29	 */
 30	if ((int)b - a == 32768)
 31		return false;
 32
 33	return (((s16)(b - a)) < 0);
 34}
 35
 36#define seq_nr_before(a, b)		seq_nr_after((b), (a))
 37#define seq_nr_before_or_eq(a, b)	(!seq_nr_after((a), (b)))
 38
 39bool hsr_addr_is_redbox(struct hsr_priv *hsr, unsigned char *addr)
 40{
 41	if (!hsr->redbox || !is_valid_ether_addr(hsr->macaddress_redbox))
 42		return false;
 43
 44	return ether_addr_equal(addr, hsr->macaddress_redbox);
 45}
 46
 47bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
 48{
 49	struct hsr_self_node *sn;
 50	bool ret = false;
 51
 52	rcu_read_lock();
 53	sn = rcu_dereference(hsr->self_node);
 54	if (!sn) {
 55		WARN_ONCE(1, "HSR: No self node\n");
 56		goto out;
 57	}
 58
 59	if (ether_addr_equal(addr, sn->macaddress_A) ||
 60	    ether_addr_equal(addr, sn->macaddress_B))
 61		ret = true;
 62out:
 63	rcu_read_unlock();
 64	return ret;
 65}
 66
 67/* Search for mac entry. Caller must hold rcu read lock.
 68 */
 69static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
 70					    const unsigned char addr[ETH_ALEN])
 71{
 72	struct hsr_node *node;
 73
 74	list_for_each_entry_rcu(node, node_db, mac_list) {
 75		if (ether_addr_equal(node->macaddress_A, addr))
 76			return node;
 77	}
 78
 79	return NULL;
 80}
 81
 82/* Check if a node for a given MAC address is already present in the database
 83 */
 84bool hsr_is_node_in_db(struct list_head *node_db,
 85		       const unsigned char addr[ETH_ALEN])
 86{
 87	return !!find_node_by_addr_A(node_db, addr);
 88}
 89
 90/* Helper for device init; the self_node is used in hsr_rcv() to recognize
 91 * frames from self that have been looped over the HSR ring.
 92 */
 93int hsr_create_self_node(struct hsr_priv *hsr,
 94			 const unsigned char addr_a[ETH_ALEN],
 95			 const unsigned char addr_b[ETH_ALEN])
 96{
 97	struct hsr_self_node *sn, *old;
 98
 99	sn = kmalloc(sizeof(*sn), GFP_KERNEL);
100	if (!sn)
101		return -ENOMEM;
102
103	ether_addr_copy(sn->macaddress_A, addr_a);
104	ether_addr_copy(sn->macaddress_B, addr_b);
105
106	spin_lock_bh(&hsr->list_lock);
107	old = rcu_replace_pointer(hsr->self_node, sn,
108				  lockdep_is_held(&hsr->list_lock));
109	spin_unlock_bh(&hsr->list_lock);
110
111	if (old)
112		kfree_rcu(old, rcu_head);
113	return 0;
114}
115
116void hsr_del_self_node(struct hsr_priv *hsr)
117{
118	struct hsr_self_node *old;
119
120	spin_lock_bh(&hsr->list_lock);
121	old = rcu_replace_pointer(hsr->self_node, NULL,
122				  lockdep_is_held(&hsr->list_lock));
123	spin_unlock_bh(&hsr->list_lock);
124	if (old)
125		kfree_rcu(old, rcu_head);
126}
127
128void hsr_del_nodes(struct list_head *node_db)
129{
130	struct hsr_node *node;
131	struct hsr_node *tmp;
132
133	list_for_each_entry_safe(node, tmp, node_db, mac_list)
134		kfree(node);
135}
136
137void prp_handle_san_frame(bool san, enum hsr_port_type port,
138			  struct hsr_node *node)
139{
140	/* Mark if the SAN node is over LAN_A or LAN_B */
141	if (port == HSR_PT_SLAVE_A) {
142		node->san_a = true;
143		return;
144	}
145
146	if (port == HSR_PT_SLAVE_B)
147		node->san_b = true;
148}
149
150/* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
151 * seq_out is used to initialize filtering of outgoing duplicate frames
152 * originating from the newly added node.
153 */
154static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
155				     struct list_head *node_db,
156				     unsigned char addr[],
157				     u16 seq_out, bool san,
158				     enum hsr_port_type rx_port)
159{
160	struct hsr_node *new_node, *node;
161	unsigned long now;
162	int i;
163
164	new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
165	if (!new_node)
166		return NULL;
167
168	ether_addr_copy(new_node->macaddress_A, addr);
169	spin_lock_init(&new_node->seq_out_lock);
170
171	/* We are only interested in time diffs here, so use current jiffies
172	 * as initialization. (0 could trigger a spurious ring error warning).
173	 */
174	now = jiffies;
175	for (i = 0; i < HSR_PT_PORTS; i++) {
176		new_node->time_in[i] = now;
177		new_node->time_out[i] = now;
178	}
179	for (i = 0; i < HSR_PT_PORTS; i++)
180		new_node->seq_out[i] = seq_out;
181
182	if (san && hsr->proto_ops->handle_san_frame)
183		hsr->proto_ops->handle_san_frame(san, rx_port, new_node);
184
185	spin_lock_bh(&hsr->list_lock);
186	list_for_each_entry_rcu(node, node_db, mac_list,
187				lockdep_is_held(&hsr->list_lock)) {
188		if (ether_addr_equal(node->macaddress_A, addr))
189			goto out;
190		if (ether_addr_equal(node->macaddress_B, addr))
191			goto out;
192	}
193	list_add_tail_rcu(&new_node->mac_list, node_db);
194	spin_unlock_bh(&hsr->list_lock);
195	return new_node;
196out:
197	spin_unlock_bh(&hsr->list_lock);
198	kfree(new_node);
199	return node;
200}
201
202void prp_update_san_info(struct hsr_node *node, bool is_sup)
203{
204	if (!is_sup)
205		return;
206
207	node->san_a = false;
208	node->san_b = false;
209}
210
211/* Get the hsr_node from which 'skb' was sent.
212 */
213struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
214			      struct sk_buff *skb, bool is_sup,
215			      enum hsr_port_type rx_port)
216{
217	struct hsr_priv *hsr = port->hsr;
218	struct hsr_node *node;
219	struct ethhdr *ethhdr;
220	struct prp_rct *rct;
221	bool san = false;
222	u16 seq_out;
223
224	if (!skb_mac_header_was_set(skb))
225		return NULL;
226
227	ethhdr = (struct ethhdr *)skb_mac_header(skb);
228
229	list_for_each_entry_rcu(node, node_db, mac_list) {
230		if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
231			if (hsr->proto_ops->update_san_info)
232				hsr->proto_ops->update_san_info(node, is_sup);
233			return node;
234		}
235		if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) {
236			if (hsr->proto_ops->update_san_info)
237				hsr->proto_ops->update_san_info(node, is_sup);
238			return node;
239		}
240	}
241
242	/* Check if the node is already in the proxy node table */
243	list_for_each_entry_rcu(node, &hsr->proxy_node_db, mac_list) {
244		if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
245			if (hsr->proto_ops->update_san_info)
246				hsr->proto_ops->update_san_info(node, is_sup);
247			return node;
248		}
249	}
250
251	/* A node entry may be created for any node connected to an
252	 * HSR/PRP device.
253	 */
254	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
255	    ethhdr->h_proto == htons(ETH_P_HSR)) {
256		/* Check if skb contains hsr_ethhdr */
257		if (skb->mac_len < sizeof(struct hsr_ethhdr))
258			return NULL;
259
260		/* Use the existing sequence_nr from the tag as starting point
261		 * for filtering duplicate frames.
262		 */
263		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
264	} else {
265		rct = skb_get_PRP_rct(skb);
266		if (rct && prp_check_lsdu_size(skb, rct, is_sup)) {
267			seq_out = prp_get_skb_sequence_nr(rct);
268		} else {
269			if (rx_port != HSR_PT_MASTER)
270				san = true;
271			seq_out = HSR_SEQNR_START;
272		}
273	}
274
275	return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out,
276			    san, rx_port);
277}
278
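The skb->mac_len check in hsr_get_node() above refuses to read a sequence number from a frame too short to carry the full tagged header. A stand-alone sketch of the same bounds check (userspace C; the sizes are illustrative, not the kernel's struct sizes):

/* Build: cc -o lendemo lendemo.c && ./lendemo */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_TAGGED_HDR_LEN 20	/* stand-in for sizeof(struct hsr_ethhdr) */
#define DEMO_SEQ_OFFSET     16	/* illustrative offset of the sequence field */

/* Return 0 and fill *seq on success, -1 if the header is truncated. */
static int demo_get_sequence_nr(const unsigned char *buf, size_t len, uint16_t *seq)
{
	if (len < DEMO_TAGGED_HDR_LEN)
		return -1;	/* too short: would read past the buffer */

	memcpy(seq, buf + DEMO_SEQ_OFFSET, sizeof(*seq));
	return 0;
}

int main(void)
{
	unsigned char frame[DEMO_TAGGED_HDR_LEN] = { 0 };
	uint16_t seq = 0;

	printf("full header:      %d\n",
	       demo_get_sequence_nr(frame, sizeof(frame), &seq));	/* 0 */
	printf("truncated header: %d\n",
	       demo_get_sequence_nr(frame, 10, &seq));			/* -1 */
	return 0;
}
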
279/* Use the Supervision frame's info about a possible macaddress_B for merging
280 * nodes that have previously had their macaddress_B registered as a separate
281 * node.
282 */
283void hsr_handle_sup_frame(struct hsr_frame_info *frame)
284{
285	struct hsr_node *node_curr = frame->node_src;
286	struct hsr_port *port_rcv = frame->port_rcv;
287	struct hsr_priv *hsr = port_rcv->hsr;
288	struct hsr_sup_payload *hsr_sp;
289	struct hsr_sup_tlv *hsr_sup_tlv;
290	struct hsr_node *node_real;
291	struct sk_buff *skb = NULL;
292	struct list_head *node_db;
293	struct ethhdr *ethhdr;
294	int i;
295	unsigned int pull_size = 0;
296	unsigned int total_pull_size = 0;
297
298	/* Here either frame->skb_hsr or frame->skb_prp should be
299	 * valid, as a supervision frame will always have protocol
300	 * header info.
301	 */
302	if (frame->skb_hsr)
303		skb = frame->skb_hsr;
304	else if (frame->skb_prp)
305		skb = frame->skb_prp;
306	else if (frame->skb_std)
307		skb = frame->skb_std;
308	if (!skb)
309		return;
310
311	/* Leave the ethernet header. */
312	pull_size = sizeof(struct ethhdr);
313	skb_pull(skb, pull_size);
314	total_pull_size += pull_size;
315
316	ethhdr = (struct ethhdr *)skb_mac_header(skb);
317
318	/* And leave the HSR tag. */
319	if (ethhdr->h_proto == htons(ETH_P_HSR)) {
320		pull_size = sizeof(struct hsr_tag);
321		skb_pull(skb, pull_size);
322		total_pull_size += pull_size;
323	}
324
325	/* And leave the HSR sup tag. */
326	pull_size = sizeof(struct hsr_sup_tag);
327	skb_pull(skb, pull_size);
328	total_pull_size += pull_size;
329
330	/* get HSR sup payload */
331	hsr_sp = (struct hsr_sup_payload *)skb->data;
332
333	/* Merge node_curr (registered on macaddress_B) into node_real */
334	node_db = &port_rcv->hsr->node_db;
335	node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
336	if (!node_real)
337		/* No frame received from AddrA of this node yet */
338		node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
339					 HSR_SEQNR_START - 1, true,
340					 port_rcv->type);
341	if (!node_real)
342		goto done; /* No mem */
343	if (node_real == node_curr)
344		/* Node has already been merged */
345		goto done;
346
347	/* Leave the first HSR sup payload. */
348	pull_size = sizeof(struct hsr_sup_payload);
349	skb_pull(skb, pull_size);
350	total_pull_size += pull_size;
351
352	/* Get second supervision tlv */
353	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
354	/* And check if it is a redbox mac TLV */
355	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
356		/* We could stop here after pushing hsr_sup_payload,
357		 * or proceed and allow macaddress_B for redboxes as well.
358		 */
359		/* Sanity check length */
360		if (hsr_sup_tlv->HSR_TLV_length != 6)
361			goto done;
362
363		/* Leave the second HSR sup tlv. */
364		pull_size = sizeof(struct hsr_sup_tlv);
365		skb_pull(skb, pull_size);
366		total_pull_size += pull_size;
367
368		/* Get redbox mac address. */
369		hsr_sp = (struct hsr_sup_payload *)skb->data;
370
371		/* Check if redbox mac and node mac are equal. */
372		if (!ether_addr_equal(node_real->macaddress_A, hsr_sp->macaddress_A)) {
373			/* This is a redbox supervision frame for a VDAN! */
374			goto done;
375		}
376	}
377
378	ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
379	spin_lock_bh(&node_real->seq_out_lock);
380	for (i = 0; i < HSR_PT_PORTS; i++) {
381		if (!node_curr->time_in_stale[i] &&
382		    time_after(node_curr->time_in[i], node_real->time_in[i])) {
383			node_real->time_in[i] = node_curr->time_in[i];
384			node_real->time_in_stale[i] =
385						node_curr->time_in_stale[i];
386		}
387		if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
388			node_real->seq_out[i] = node_curr->seq_out[i];
389	}
390	spin_unlock_bh(&node_real->seq_out_lock);
391	node_real->addr_B_port = port_rcv->type;
392
393	spin_lock_bh(&hsr->list_lock);
394	if (!node_curr->removed) {
395		list_del_rcu(&node_curr->mac_list);
396		node_curr->removed = true;
397		kfree_rcu(node_curr, rcu_head);
398	}
399	spin_unlock_bh(&hsr->list_lock);
400
401done:
402	/* Push back here */
403	skb_push(skb, total_pull_size);
404}
405
406/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
407 *
408 * If the frame was sent by a node's B interface, replace the source
409 * address with that node's "official" address (macaddress_A) so that upper
410 * layers recognize where it came from.
411 */
412void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
413{
414	if (!skb_mac_header_was_set(skb)) {
415		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
416		return;
417	}
418
419	memcpy(&eth_hdr(skb)->h_source, node->macaddress_A, ETH_ALEN);
420}
421
422/* 'skb' is a frame meant for another host.
423 * 'port' is the outgoing interface
424 *
425 * Substitute the target (dest) MAC address if necessary, so that it matches the
426 * recipient interface MAC address, regardless of whether that is the
427 * recipient's A or B interface.
428 * This is needed to keep the packets flowing through switches that learn on
429 * which "side" the different interfaces are.
430 */
431void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
432			 struct hsr_port *port)
433{
434	struct hsr_node *node_dst;
435
436	if (!skb_mac_header_was_set(skb)) {
437		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
438		return;
439	}
440
441	if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
442		return;
443
444	node_dst = find_node_by_addr_A(&port->hsr->node_db,
445				       eth_hdr(skb)->h_dest);
446	if (!node_dst && port->hsr->redbox)
447		node_dst = find_node_by_addr_A(&port->hsr->proxy_node_db,
448					       eth_hdr(skb)->h_dest);
449
450	if (!node_dst) {
451		if (port->hsr->prot_version != PRP_V1 && net_ratelimit())
452			netdev_err(skb->dev, "%s: Unknown node\n", __func__);
453		return;
454	}
455	if (port->type != node_dst->addr_B_port)
456		return;
457
458	if (is_valid_ether_addr(node_dst->macaddress_B))
459		ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
460}
461
462void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
463			   u16 sequence_nr)
464{
465	/* Don't register incoming frames without a valid sequence number. This
466	 * ensures entries of restarted nodes get pruned so that they can
467	 * re-register and resume communications.
468	 */
469	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
470	    seq_nr_before(sequence_nr, node->seq_out[port->type]))
471		return;
472
473	node->time_in[port->type] = jiffies;
474	node->time_in_stale[port->type] = false;
475}
476
477/* 'skb' is a HSR Ethernet frame (with a HSR tag inserted), with a valid
478 * ethhdr->h_source address and skb->mac_header set.
479 *
480 * Return:
481 *	 1 if frame can be shown to have been sent recently on this interface,
482 *	 0 otherwise, or
483 *	 negative error code on error
484 */
485int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
486			   u16 sequence_nr)
487{
488	spin_lock_bh(&node->seq_out_lock);
489	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) &&
490	    time_is_after_jiffies(node->time_out[port->type] +
491	    msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) {
492		spin_unlock_bh(&node->seq_out_lock);
493		return 1;
494	}
495
496	node->time_out[port->type] = jiffies;
497	node->seq_out[port->type] = sequence_nr;
498	spin_unlock_bh(&node->seq_out_lock);
499	return 0;
500}
501
502static struct hsr_port *get_late_port(struct hsr_priv *hsr,
503				      struct hsr_node *node)
504{
505	if (node->time_in_stale[HSR_PT_SLAVE_A])
506		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
507	if (node->time_in_stale[HSR_PT_SLAVE_B])
508		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
509
510	if (time_after(node->time_in[HSR_PT_SLAVE_B],
511		       node->time_in[HSR_PT_SLAVE_A] +
512					msecs_to_jiffies(MAX_SLAVE_DIFF)))
513		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
514	if (time_after(node->time_in[HSR_PT_SLAVE_A],
515		       node->time_in[HSR_PT_SLAVE_B] +
516					msecs_to_jiffies(MAX_SLAVE_DIFF)))
517		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
518
519	return NULL;
520}
521
522/* Remove stale sequence_nr records. Called by timer every
523 * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
524 */
525void hsr_prune_nodes(struct timer_list *t)
526{
527	struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
528	struct hsr_node *node;
529	struct hsr_node *tmp;
530	struct hsr_port *port;
531	unsigned long timestamp;
532	unsigned long time_a, time_b;
533
534	spin_lock_bh(&hsr->list_lock);
535	list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
536		/* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
537		 * nor time_in[HSR_PT_SLAVE_B] will ever be updated for
538		 * the master port. The master node would otherwise be
539		 * repeatedly pruned, leading to packet loss.
540		 */
541		if (hsr_addr_is_self(hsr, node->macaddress_A))
542			continue;
543
544		/* Shorthand */
545		time_a = node->time_in[HSR_PT_SLAVE_A];
546		time_b = node->time_in[HSR_PT_SLAVE_B];
547
548		/* Check for timestamps old enough to risk wrap-around */
549		if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
550			node->time_in_stale[HSR_PT_SLAVE_A] = true;
551		if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
552			node->time_in_stale[HSR_PT_SLAVE_B] = true;
553
554		/* Get age of newest frame from node.
555		 * At least one time_in is OK here; nodes get pruned long
556		 * before both time_ins can get stale
557		 */
558		timestamp = time_a;
559		if (node->time_in_stale[HSR_PT_SLAVE_A] ||
560		    (!node->time_in_stale[HSR_PT_SLAVE_B] &&
561		    time_after(time_b, time_a)))
562			timestamp = time_b;
563
564		/* Warn of ring error only as long as we get frames at all */
565		if (time_is_after_jiffies(timestamp +
566				msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
567			rcu_read_lock();
568			port = get_late_port(hsr, node);
569			if (port)
570				hsr_nl_ringerror(hsr, node->macaddress_A, port);
571			rcu_read_unlock();
572		}
573
574		/* Prune old entries */
575		if (time_is_before_jiffies(timestamp +
576				msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
577			hsr_nl_nodedown(hsr, node->macaddress_A);
578			if (!node->removed) {
579				list_del_rcu(&node->mac_list);
580				node->removed = true;
581				/* Note that we need to free this entry later: */
582				kfree_rcu(node, rcu_head);
583			}
584		}
585	}
586	spin_unlock_bh(&hsr->list_lock);
587
588	/* Restart timer */
589	mod_timer(&hsr->prune_timer,
590		  jiffies + msecs_to_jiffies(PRUNE_PERIOD));
591}
592
593void hsr_prune_proxy_nodes(struct timer_list *t)
594{
595	struct hsr_priv *hsr = from_timer(hsr, t, prune_proxy_timer);
596	unsigned long timestamp;
597	struct hsr_node *node;
598	struct hsr_node *tmp;
599
600	spin_lock_bh(&hsr->list_lock);
601	list_for_each_entry_safe(node, tmp, &hsr->proxy_node_db, mac_list) {
602		/* Don't prune RedBox node. */
603		if (hsr_addr_is_redbox(hsr, node->macaddress_A))
604			continue;
605
606		timestamp = node->time_in[HSR_PT_INTERLINK];
607
608		/* Prune old entries */
609		if (time_is_before_jiffies(timestamp +
610				msecs_to_jiffies(HSR_PROXY_NODE_FORGET_TIME))) {
611			hsr_nl_nodedown(hsr, node->macaddress_A);
612			if (!node->removed) {
613				list_del_rcu(&node->mac_list);
614				node->removed = true;
615				/* Note that we need to free this entry later: */
616				kfree_rcu(node, rcu_head);
617			}
618		}
619	}
620
621	spin_unlock_bh(&hsr->list_lock);
622
623	/* Restart timer */
624	mod_timer(&hsr->prune_proxy_timer,
625		  jiffies + msecs_to_jiffies(PRUNE_PROXY_PERIOD));
626}
627
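A stand-alone sketch of the forget-time pruning performed above (userspace C, illustrative types; milliseconds stand in for jiffies, and entries are freed immediately instead of via kfree_rcu()):

/* Build: cc -o prunedemo prunedemo.c && ./prunedemo */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_FORGET_TIME_MS 60000	/* stand-in for the forget time */

struct demo_node {
	const char *name;
	unsigned long last_seen_ms;
	struct demo_node *next;
};

static void demo_prune(struct demo_node **head, unsigned long now_ms)
{
	struct demo_node **pp = head;

	while (*pp) {
		struct demo_node *n = *pp;

		if (now_ms - n->last_seen_ms > DEMO_FORGET_TIME_MS) {
			printf("pruning %s\n", n->name);
			*pp = n->next;	/* unlink, like list_del_rcu() */
			free(n);	/* the kernel defers this via kfree_rcu() */
			continue;
		}
		pp = &n->next;
	}
}

int main(void)
{
	struct demo_node *head = NULL;
	const char *names[] = { "fresh", "stale" };
	unsigned long seen[] = { 95000, 10000 };

	for (int i = 0; i < 2; i++) {
		struct demo_node *n = calloc(1, sizeof(*n));
		if (!n)
			return 1;
		n->name = names[i];
		n->last_seen_ms = seen[i];
		n->next = head;
		head = n;
	}

	demo_prune(&head, 100000);	/* "now": only 'stale' exceeds the limit */
	printf("remaining: %s\n", head ? head->name : "(none)");
	return 0;
}
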
628void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
629			unsigned char addr[ETH_ALEN])
630{
631	struct hsr_node *node;
632
633	if (!_pos) {
634		node = list_first_or_null_rcu(&hsr->node_db,
635					      struct hsr_node, mac_list);
636		if (node)
637			ether_addr_copy(addr, node->macaddress_A);
638		return node;
639	}
640
641	node = _pos;
642	list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
643		ether_addr_copy(addr, node->macaddress_A);
644		return node;
645	}
646
647	return NULL;
648}
649
650int hsr_get_node_data(struct hsr_priv *hsr,
651		      const unsigned char *addr,
652		      unsigned char addr_b[ETH_ALEN],
653		      unsigned int *addr_b_ifindex,
654		      int *if1_age,
655		      u16 *if1_seq,
656		      int *if2_age,
657		      u16 *if2_seq)
658{
659	struct hsr_node *node;
660	struct hsr_port *port;
661	unsigned long tdiff;
662
663	node = find_node_by_addr_A(&hsr->node_db, addr);
664	if (!node)
665		return -ENOENT;
666
667	ether_addr_copy(addr_b, node->macaddress_B);
668
669	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
670	if (node->time_in_stale[HSR_PT_SLAVE_A])
671		*if1_age = INT_MAX;
672#if HZ <= MSEC_PER_SEC
673	else if (tdiff > msecs_to_jiffies(INT_MAX))
674		*if1_age = INT_MAX;
675#endif
676	else
677		*if1_age = jiffies_to_msecs(tdiff);
678
679	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
680	if (node->time_in_stale[HSR_PT_SLAVE_B])
681		*if2_age = INT_MAX;
682#if HZ <= MSEC_PER_SEC
683	else if (tdiff > msecs_to_jiffies(INT_MAX))
684		*if2_age = INT_MAX;
685#endif
686	else
687		*if2_age = jiffies_to_msecs(tdiff);
688
689	/* Present sequence numbers as if they were incoming on interface */
690	*if1_seq = node->seq_out[HSR_PT_SLAVE_B];
691	*if2_seq = node->seq_out[HSR_PT_SLAVE_A];
692
693	if (node->addr_B_port != HSR_PT_NONE) {
694		port = hsr_port_get_hsr(hsr, node->addr_B_port);
695		*addr_b_ifindex = port->dev->ifindex;
696	} else {
697		*addr_b_ifindex = -1;
698	}
699
700	return 0;
701}