Linux Audio

Check our new training course

Loading...
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright 2011-2014 Autronica Fire and Security AS
  3 *
  4 * Author(s):
  5 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
  6 *
  7 * Frame router for HSR and PRP.
  8 */
  9
 10#include "hsr_forward.h"
 11#include <linux/types.h>
 12#include <linux/skbuff.h>
 13#include <linux/etherdevice.h>
 14#include <linux/if_vlan.h>
 15#include "hsr_main.h"
 16#include "hsr_framereg.h"
 17
 18struct hsr_node;
 19
 20/* The uses I can see for these HSR supervision frames are:
 21 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 22 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 23 *    the other node's counter has been reset for some reason.
 24 *    --
 25 *    Or not - resetting the counter and bridging the frame would create a
 26 *    loop, unfortunately.
 27 *
 28 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 29 *    frame is received from a particular node, we know something is wrong.
 30 *    We just register these (as with normal frames) and throw them away.
 31 *
 32 * 3) Allow different MAC addresses for the two slave interfaces, using the
 33 *    MacAddressA field.
 34 */
 35static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
 36{
 37	struct ethhdr *eth_hdr;
 38	struct hsr_sup_tag *hsr_sup_tag;
 39	struct hsrv1_ethhdr_sp *hsr_V1_hdr;
 40	struct hsr_sup_tlv *hsr_sup_tlv;
 41	u16 total_length = 0;
 42
 43	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
 44	eth_hdr = (struct ethhdr *)skb_mac_header(skb);
 45
 46	/* Correct addr? */
 47	if (!ether_addr_equal(eth_hdr->h_dest,
 48			      hsr->sup_multicast_addr))
 49		return false;
 50
 51	/* Correct ether type?. */
 52	if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
 53	      eth_hdr->h_proto == htons(ETH_P_HSR)))
 54		return false;
 55
 56	/* Get the supervision header from correct location. */
 57	if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
 58		total_length = sizeof(struct hsrv1_ethhdr_sp);
 59		if (!pskb_may_pull(skb, total_length))
 60			return false;
 61
 62		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
 63		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
 64			return false;
 65
 66		hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
 67	} else {
 68		total_length = sizeof(struct hsrv0_ethhdr_sp);
 69		if (!pskb_may_pull(skb, total_length))
 70			return false;
 71
 72		hsr_sup_tag =
 73		     &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
 74	}
 75
 76	if (hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_ANNOUNCE &&
 77	    hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
 78	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
 79	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
 80		return false;
 81	if (hsr_sup_tag->tlv.HSR_TLV_length != 12 &&
 82	    hsr_sup_tag->tlv.HSR_TLV_length != sizeof(struct hsr_sup_payload))
 83		return false;
 84
 85	/* Get next tlv */
 86	total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tag->tlv.HSR_TLV_length;
 87	if (!pskb_may_pull(skb, total_length))
 88		return false;
 89	skb_pull(skb, total_length);
 90	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
 91	skb_push(skb, total_length);
 92
 93	/* if this is a redbox supervision frame we need to verify
 94	 * that more data is available
 95	 */
 96	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
 97		/* tlv length must be a length of a mac address */
 98		if (hsr_sup_tlv->HSR_TLV_length != sizeof(struct hsr_sup_payload))
 99			return false;
100
101		/* make sure another tlv follows */
102		total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tlv->HSR_TLV_length;
103		if (!pskb_may_pull(skb, total_length))
104			return false;
105
106		/* get next tlv */
107		skb_pull(skb, total_length);
108		hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
109		skb_push(skb, total_length);
110	}
111
112	/* end of tlvs must follow at the end */
113	if (hsr_sup_tlv->HSR_TLV_type == HSR_TLV_EOT &&
114	    hsr_sup_tlv->HSR_TLV_length != 0)
115		return false;
116
117	return true;
118}
119
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Return a copy of 'skb_in' with the HSR tag stripped, or NULL on
 * allocation failure.  'skb_in' itself is left unchanged.
 */
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
					       struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	/* Temporarily advance past the HSR tag so the copy excludes it,
	 * then restore skb_in's original layout.
	 */
	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	/* Re-copy the MAC addresses (and the VLAN tag, when present) in
	 * front of the now-shifted payload.
	 */
	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}
148
149struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
150				       struct hsr_port *port)
151{
152	if (!frame->skb_std) {
153		if (frame->skb_hsr)
154			frame->skb_std =
155				create_stripped_skb_hsr(frame->skb_hsr, frame);
156		else
157			netdev_warn_once(port->dev,
158					 "Unexpected frame received in hsr_get_untagged_frame()\n");
159
160		if (!frame->skb_std)
161			return NULL;
162	}
163
164	return skb_clone(frame->skb_std, GFP_ATOMIC);
165}
166
167struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
168				       struct hsr_port *port)
169{
170	if (!frame->skb_std) {
171		if (frame->skb_prp) {
172			/* trim the skb by len - HSR_HLEN to exclude RCT */
173			skb_trim(frame->skb_prp,
174				 frame->skb_prp->len - HSR_HLEN);
175			frame->skb_std =
176				__pskb_copy(frame->skb_prp,
177					    skb_headroom(frame->skb_prp),
178					    GFP_ATOMIC);
179		} else {
180			/* Unexpected */
181			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
182				  __FILE__, __LINE__, port->dev->name);
183			return NULL;
184		}
185	}
186
187	return skb_clone(frame->skb_std, GFP_ATOMIC);
188}
189
190static void prp_set_lan_id(struct prp_rct *trailer,
191			   struct hsr_port *port)
192{
193	int lane_id;
194
195	if (port->type == HSR_PT_SLAVE_A)
196		lane_id = 0;
197	else
198		lane_id = 1;
199
200	/* Add net_id in the upper 3 bits of lane_id */
201	lane_id |= port->hsr->net_id;
202	set_prp_lan_id(trailer, lane_id);
203}
204
/* Tailroom for PRP rct should have been created before calling this.
 * Pads the frame to the minimum Ethernet size and appends the PRP
 * Redundancy Control Trailer.  Returns the skb, or NULL on padding
 * failure (in which case skb_put_padto() has already freed the skb).
 */
static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port)
{
	struct prp_rct *trailer;
	int min_size = ETH_ZLEN;
	int lsdu_size;

	if (!skb)
		return skb;

	if (frame->is_vlan)
		min_size = VLAN_ETH_ZLEN;

	if (skb_put_padto(skb, min_size))
		return NULL;

	trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
	/* LSDU size excludes the 14-byte Ethernet header (and the 4-byte
	 * VLAN tag when present).
	 */
	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;
	prp_set_lan_id(trailer, port);
	set_prp_LSDU_size(trailer, lsdu_size);
	trailer->sequence_nr = htons(frame->sequence_nr);
	trailer->PRP_suffix = htons(ETH_P_PRP);
	skb->protocol = eth_hdr(skb)->h_proto;

	return skb;
}
235
236static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
237			    struct hsr_port *port)
238{
239	int path_id;
240
241	if (port->type == HSR_PT_SLAVE_A)
242		path_id = 0;
243	else
244		path_id = 1;
245
246	set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
247}
248
249static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
250				    struct hsr_frame_info *frame,
251				    struct hsr_port *port, u8 proto_version)
252{
253	struct hsr_ethhdr *hsr_ethhdr;
 
254	int lsdu_size;
255
256	/* pad to minimum packet size which is 60 + 6 (HSR tag) */
257	if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
258		return NULL;
259
260	lsdu_size = skb->len - 14;
261	if (frame->is_vlan)
262		lsdu_size -= 4;
263
264	hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
 
 
 
 
 
 
 
 
 
 
 
265
266	hsr_set_path_id(hsr_ethhdr, port);
267	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
268	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
269	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
270	hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
271			ETH_P_HSR : ETH_P_PRP);
272	skb->protocol = hsr_ethhdr->ethhdr.h_proto;
273
274	return skb;
275}
276
/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 * Returns NULL on allocation/padding failure.
 */
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	unsigned char *dst, *src;
	struct sk_buff *skb;
	int movelen;

	if (frame->skb_hsr) {
		struct hsr_ethhdr *hsr_ethhdr =
			(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

		/* set the lane id properly */
		hsr_set_path_id(hsr_ethhdr, port);
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);
	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
		/* Hardware inserts the tag; send the standard frame as-is. */
		return skb_clone(frame->skb_std, GFP_ATOMIC);
	}

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(frame->skb_std,
			  skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	/* Move the Ethernet (and VLAN, if any) header HSR_HLEN bytes
	 * forward to open a gap for the HSR tag.
	 */
	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	/* skb_put_padto free skb on error and hsr_fill_tag returns NULL in
	 * that case
	 */
	return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}
322
323struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
324					struct hsr_port *port)
325{
326	struct sk_buff *skb;
327
328	if (frame->skb_prp) {
329		struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);
330
331		if (trailer) {
332			prp_set_lan_id(trailer, port);
333		} else {
334			WARN_ONCE(!trailer, "errored PRP skb");
335			return NULL;
336		}
337		return skb_clone(frame->skb_prp, GFP_ATOMIC);
338	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
339		return skb_clone(frame->skb_std, GFP_ATOMIC);
340	}
341
342	skb = skb_copy_expand(frame->skb_std, 0,
343			      skb_tailroom(frame->skb_std) + HSR_HLEN,
344			      GFP_ATOMIC);
345	prp_fill_rct(skb, frame, port);
346
347	return skb;
348}
349
350static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
351			       struct hsr_node *node_src)
352{
353	bool was_multicast_frame;
354	int res, recv_len;
355
356	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
357	hsr_addr_subst_source(node_src, skb);
358	skb_pull(skb, ETH_HLEN);
359	recv_len = skb->len;
360	res = netif_rx(skb);
361	if (res == NET_RX_DROP) {
362		dev->stats.rx_dropped++;
363	} else {
364		dev->stats.rx_packets++;
365		dev->stats.rx_bytes += recv_len;
366		if (was_multicast_frame)
367			dev->stats.multicast++;
368	}
369}
370
/* Transmit 'skb' on 'port'.  Locally originated frames (received on the
 * master port) get the IEC 62439-3 address substitutions applied first.
 * Returns the dev_queue_xmit() result.
 */
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace mac
		 * address of outgoing frame with that of the outgoing slave's.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}

	return dev_queue_xmit(skb);
}
384
385bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
386{
387	return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
388		 port->type ==  HSR_PT_SLAVE_B) ||
389		(frame->port_rcv->type == HSR_PT_SLAVE_B &&
390		 port->type ==  HSR_PT_SLAVE_A));
391}
392
393bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
394{
 
 
395	if (port->dev->features & NETIF_F_HW_HSR_FWD)
396		return prp_drop_frame(frame, port);
397
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
398	return false;
399}
400
/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's a HSR frame: through a device where it has passed before
 * - if it's a PRP frame: through another PRP slave device (no bridge)
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert a HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;
	bool sent = false;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		struct hsr_priv *hsr = port->hsr;
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* If hardware duplicate generation is enabled, only send out
		 * one port.
		 */
		if ((port->dev->features & NETIF_F_HW_HSR_DUP) && sent)
			continue;

		/* Don't send frame over port where it has been sent before.
		 * Also for SAN, this shouldn't be done.
		 */
		if (!frame->is_from_san &&
		    hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		/* Supervision frames addressed to us are consumed here, not
		 * delivered up the stack.
		 */
		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame);
			continue;
		}

		/* Check if frame is to be dropped. Eg. for PRP no forward
		 * between ports.
		 */
		if (hsr->proto_ops->drop_frame &&
		    hsr->proto_ops->drop_frame(frame, port))
			continue;

		/* Slaves get a tagged copy, the master an untagged one. */
		if (port->type != HSR_PT_MASTER)
			skb = hsr->proto_ops->create_tagged_frame(frame, port);
		else
			skb = hsr->proto_ops->get_untagged_frame(frame, port);

		if (!skb) {
			frame->port_rcv->dev->stats.rx_dropped++;
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER) {
			hsr_deliver_master(skb, port->dev, frame->node_src);
		} else {
			if (!hsr_xmit(skb, port, frame))
				sent = true;
		}
	}
}
478
479static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
480			     struct hsr_frame_info *frame)
481{
482	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
483		frame->is_local_exclusive = true;
484		skb->pkt_type = PACKET_HOST;
485	} else {
486		frame->is_local_exclusive = false;
487	}
488
489	if (skb->pkt_type == PACKET_HOST ||
490	    skb->pkt_type == PACKET_MULTICAST ||
491	    skb->pkt_type == PACKET_BROADCAST) {
492		frame->is_local_dest = true;
493	} else {
494		frame->is_local_dest = false;
495	}
496}
497
/* Record an untagged (standard) frame in 'frame'.  Frames arriving on a
 * slave port come from a SAN; frames sent by our master get the next
 * local sequence number.
 */
static void handle_std_frame(struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;

	frame->skb_hsr = NULL;
	frame->skb_prp = NULL;
	frame->skb_std = skb;

	if (port->type != HSR_PT_MASTER) {
		frame->is_from_san = true;
	} else {
		/* Sequence nr for the master node */
		lockdep_assert_held(&hsr->seqnr_lock);
		frame->sequence_nr = hsr->sequence_nr;
		hsr->sequence_nr++;
	}
}
517
/* HSR-specific part of frame parsing: classify the skb as HSR-tagged or
 * standard and record it in 'frame'.  Returns 0 on success, -EINVAL if
 * a supposedly tagged frame is too short for an HSR header.
 */
int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
			struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;

	/* HSRv0 supervisory frames double as a tag so treat them as tagged. */
	if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
	    proto == htons(ETH_P_HSR)) {
		/* Check if skb contains hsr_ethhdr */
		if (skb->mac_len < sizeof(struct hsr_ethhdr))
			return -EINVAL;

		/* HSR tagged frame :- Data or Supervision */
		frame->skb_std = NULL;
		frame->skb_prp = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
		return 0;
	}

	/* Standard frame or PRP from master port */
	handle_std_frame(skb, frame);

	return 0;
}
544
545int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
546			struct hsr_frame_info *frame)
547{
548	/* Supervision frame */
549	struct prp_rct *rct = skb_get_PRP_rct(skb);
550
551	if (rct &&
552	    prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
553		frame->skb_hsr = NULL;
554		frame->skb_std = NULL;
555		frame->skb_prp = skb;
556		frame->sequence_nr = prp_get_skb_sequence_nr(rct);
557		return 0;
558	}
559	handle_std_frame(skb, frame);
560
561	return 0;
562}
563
/* Parse 'skb' into 'frame': resolve the source node, detect supervision
 * and VLAN frames, and dispatch to the protocol-specific parser.
 * Returns 0 on success or a negative value when the frame cannot be
 * handled.
 */
static int fill_frame_info(struct hsr_frame_info *frame,
			   struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_vlan_ethhdr *vlan_hdr;
	struct ethhdr *ethhdr;
	__be16 proto;
	int ret;

	/* Check if skb contains ethhdr */
	if (skb->mac_len < sizeof(struct ethhdr))
		return -EINVAL;

	memset(frame, 0, sizeof(*frame));
	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
				       frame->is_supervision,
				       port->type);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	frame->is_vlan = false;
	proto = ethhdr->h_proto;

	if (proto == htons(ETH_P_8021Q))
		frame->is_vlan = true;

	if (frame->is_vlan) {
		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
		/* Classify by the encapsulated protocol, not the VLAN tag. */
		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
		/* FIXME: */
		netdev_warn_once(skb->dev, "VLAN not yet supported");
	}

	frame->is_from_san = false;
	frame->port_rcv = port;
	ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
	if (ret)
		return ret;

	check_local_dest(port->hsr, skb, frame);

	return 0;
}
609
/* Must be called holding rcu read lock (because of the port parameter).
 * Entry point for forwarding: parses the skb, registers it at its
 * source node, and fans it out to all eligible ports.  Consumes the skb
 * in all cases.
 */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	rcu_read_lock();
	if (fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;

	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	rcu_read_unlock();
	/* Gets called for ingress frames as well as egress from master port.
	 * So check and increment stats for master port only here.
	 */
	if (port->type == HSR_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	/* The frame still owns the original skb through exactly one of
	 * skb_hsr/skb_prp/skb_std; kfree_skb(NULL) is a no-op for the rest.
	 */
	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_prp);
	kfree_skb(frame.skb_std);
	return;

out_drop:
	rcu_read_unlock();
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright 2011-2014 Autronica Fire and Security AS
  3 *
  4 * Author(s):
  5 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
  6 *
  7 * Frame router for HSR and PRP.
  8 */
  9
 10#include "hsr_forward.h"
 11#include <linux/types.h>
 12#include <linux/skbuff.h>
 13#include <linux/etherdevice.h>
 14#include <linux/if_vlan.h>
 15#include "hsr_main.h"
 16#include "hsr_framereg.h"
 17
 18struct hsr_node;
 19
 20/* The uses I can see for these HSR supervision frames are:
 21 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 22 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 23 *    the other node's counter has been reset for some reason.
 24 *    --
 25 *    Or not - resetting the counter and bridging the frame would create a
 26 *    loop, unfortunately.
 27 *
 28 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 29 *    frame is received from a particular node, we know something is wrong.
 30 *    We just register these (as with normal frames) and throw them away.
 31 *
 32 * 3) Allow different MAC addresses for the two slave interfaces, using the
 33 *    MacAddressA field.
 34 */
/* Return true if 'skb' is a well-formed HSR/PRP supervision frame
 * addressed to our supervision multicast address.  Validates the first
 * TLV (and, for RedBox frames, the following MAC-address TLV) without
 * permanently altering the skb's data pointers.
 */
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	struct hsr_sup_tag *hsr_sup_tag;
	struct hsrv1_ethhdr_sp *hsr_V1_hdr;
	struct hsr_sup_tlv *hsr_sup_tlv;
	u16 total_length = 0;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(eth_hdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type?. */
	if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
	      eth_hdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
		total_length = sizeof(struct hsrv1_ethhdr_sp);
		if (!pskb_may_pull(skb, total_length))
			return false;

		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
	} else {
		total_length = sizeof(struct hsrv0_ethhdr_sp);
		if (!pskb_may_pull(skb, total_length))
			return false;

		hsr_sup_tag =
		     &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
	}

	if (hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_ANNOUNCE &&
	    hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
		return false;
	if (hsr_sup_tag->tlv.HSR_TLV_length != 12 &&
	    hsr_sup_tag->tlv.HSR_TLV_length != sizeof(struct hsr_sup_payload))
		return false;

	/* Get next tlv.  total_length already covers the first TLV header
	 * (embedded in struct hsrv0/v1_ethhdr_sp), so only the payload
	 * length is added here.
	 */
	total_length += hsr_sup_tag->tlv.HSR_TLV_length;
	if (!pskb_may_pull(skb, total_length))
		return false;
	skb_pull(skb, total_length);
	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
	skb_push(skb, total_length);

	/* if this is a redbox supervision frame we need to verify
	 * that more data is available
	 */
	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
		/* tlv length must be a length of a mac address */
		if (hsr_sup_tlv->HSR_TLV_length != sizeof(struct hsr_sup_payload))
			return false;

		/* make sure another tlv follows */
		total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tlv->HSR_TLV_length;
		if (!pskb_may_pull(skb, total_length))
			return false;

		/* get next tlv */
		skb_pull(skb, total_length);
		hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
		skb_push(skb, total_length);
	}

	/* end of tlvs must follow at the end */
	if (hsr_sup_tlv->HSR_TLV_type == HSR_TLV_EOT &&
	    hsr_sup_tlv->HSR_TLV_length != 0)
		return false;

	return true;
}
119
120static bool is_proxy_supervision_frame(struct hsr_priv *hsr,
121				       struct sk_buff *skb)
122{
123	struct hsr_sup_payload *payload;
124	struct ethhdr *eth_hdr;
125	u16 total_length = 0;
126
127	eth_hdr = (struct ethhdr *)skb_mac_header(skb);
128
129	/* Get the HSR protocol revision. */
130	if (eth_hdr->h_proto == htons(ETH_P_HSR))
131		total_length = sizeof(struct hsrv1_ethhdr_sp);
132	else
133		total_length = sizeof(struct hsrv0_ethhdr_sp);
134
135	if (!pskb_may_pull(skb, total_length + sizeof(struct hsr_sup_payload)))
136		return false;
137
138	skb_pull(skb, total_length);
139	payload = (struct hsr_sup_payload *)skb->data;
140	skb_push(skb, total_length);
141
142	/* For RedBox (HSR-SAN) check if we have received the supervision
143	 * frame with MAC addresses from own ProxyNodeTable.
144	 */
145	return hsr_is_node_in_db(&hsr->proxy_node_db,
146				 payload->macaddress_A);
147}
148
/* Return a copy of 'skb_in' with the HSR tag stripped, or NULL on
 * allocation failure.  'skb_in' itself is left unchanged.
 */
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
					       struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	/* Temporarily advance past the HSR tag so the copy excludes it,
	 * then restore skb_in's original layout.
	 */
	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	/* Re-copy the MAC addresses (and the VLAN tag, when present) in
	 * front of the now-shifted payload.
	 */
	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}
177
/* Hand out a clone of the untagged representation of the frame,
 * creating (and caching) it from the HSR-tagged skb on first use.
 * Returns NULL when no untagged copy can be produced.
 */
struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_hsr)
			frame->skb_std =
				create_stripped_skb_hsr(frame->skb_hsr, frame);
		else
			netdev_warn_once(port->dev,
					 "Unexpected frame received in hsr_get_untagged_frame()\n");

		if (!frame->skb_std)
			return NULL;
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}
195
/* Hand out a clone of the untagged representation of a PRP frame,
 * creating (and caching) it by cutting the RCT off the PRP skb on
 * first use.  Returns NULL for frames we cannot handle.
 */
struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_prp) {
			/* trim the skb by len - HSR_HLEN to exclude RCT */
			skb_trim(frame->skb_prp,
				 frame->skb_prp->len - HSR_HLEN);
			frame->skb_std =
				__pskb_copy(frame->skb_prp,
					    skb_headroom(frame->skb_prp),
					    GFP_ATOMIC);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}
218
219static void prp_set_lan_id(struct prp_rct *trailer,
220			   struct hsr_port *port)
221{
222	int lane_id;
223
224	if (port->type == HSR_PT_SLAVE_A)
225		lane_id = 0;
226	else
227		lane_id = 1;
228
229	/* Add net_id in the upper 3 bits of lane_id */
230	lane_id |= port->hsr->net_id;
231	set_prp_lan_id(trailer, lane_id);
232}
233
/* Tailroom for PRP rct should have been created before calling this.
 * Pads the frame to the minimum Ethernet size and appends the PRP
 * Redundancy Control Trailer.  Returns the skb, or NULL on padding
 * failure (in which case skb_put_padto() has already freed the skb).
 */
static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port)
{
	struct prp_rct *trailer;
	int min_size = ETH_ZLEN;
	int lsdu_size;

	if (!skb)
		return skb;

	if (frame->is_vlan)
		min_size = VLAN_ETH_ZLEN;

	if (skb_put_padto(skb, min_size))
		return NULL;

	trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
	/* LSDU size excludes the 14-byte Ethernet header (and the 4-byte
	 * VLAN tag when present).
	 */
	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;
	prp_set_lan_id(trailer, port);
	set_prp_LSDU_size(trailer, lsdu_size);
	trailer->sequence_nr = htons(frame->sequence_nr);
	trailer->PRP_suffix = htons(ETH_P_PRP);
	skb->protocol = eth_hdr(skb)->h_proto;

	return skb;
}
264
265static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
266			    struct hsr_port *port)
267{
268	int path_id;
269
270	if (port->type == HSR_PT_SLAVE_A)
271		path_id = 0;
272	else
273		path_id = 1;
274
275	set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
276}
277
/* Fill in the HSR tag of a frame whose headers have already been moved
 * HSR_HLEN bytes forward by the caller.  Returns the skb, or NULL on
 * padding failure (skb already freed by skb_put_padto() then).
 */
static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port, u8 proto_version)
{
	struct hsr_ethhdr *hsr_ethhdr;
	unsigned char *pc;
	int lsdu_size;

	/* pad to minimum packet size which is 60 + 6 (HSR tag) */
	if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
		return NULL;

	/* LSDU size excludes the 14-byte Ethernet header (and the 4-byte
	 * VLAN tag when present).
	 */
	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;

	pc = skb_mac_header(skb);
	if (frame->is_vlan)
		/* This 4-byte shift (size of a vlan tag) does not
		 * mean that the ethhdr starts there. But rather it
		 * provides the proper environment for accessing
		 * the fields, such as hsr_tag etc., just like
		 * when the vlan tag is not there. This is because
		 * the hsr tag is after the vlan tag.
		 */
		hsr_ethhdr = (struct hsr_ethhdr *)(pc + VLAN_HLEN);
	else
		hsr_ethhdr = (struct hsr_ethhdr *)pc;

	hsr_set_path_id(hsr_ethhdr, port);
	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
	hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
			ETH_P_HSR : ETH_P_PRP);
	skb->protocol = hsr_ethhdr->ethhdr.h_proto;

	return skb;
}
317
/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 * Returns NULL on allocation/padding failure.
 */
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	unsigned char *dst, *src;
	struct sk_buff *skb;
	int movelen;

	if (frame->skb_hsr) {
		struct hsr_ethhdr *hsr_ethhdr =
			(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

		/* set the lane id properly */
		hsr_set_path_id(hsr_ethhdr, port);
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);
	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
		/* Hardware inserts the tag; send the standard frame as-is. */
		return skb_clone(frame->skb_std, GFP_ATOMIC);
	}

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(frame->skb_std,
			  skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	/* Move the Ethernet (and VLAN, if any) header HSR_HLEN bytes
	 * forward to open a gap for the HSR tag.
	 */
	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	/* skb_put_padto free skb on error and hsr_fill_tag returns NULL in
	 * that case
	 */
	return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}
363
/* Return a PRP-tagged copy of the frame for transmission on 'port',
 * or NULL on failure.
 */
struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	struct sk_buff *skb;

	if (frame->skb_prp) {
		struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);

		if (trailer) {
			prp_set_lan_id(trailer, port);
		} else {
			WARN_ONCE(!trailer, "errored PRP skb");
			return NULL;
		}
		return skb_clone(frame->skb_prp, GFP_ATOMIC);
	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
		/* Hardware inserts the RCT; send the standard frame as-is. */
		return skb_clone(frame->skb_std, GFP_ATOMIC);
	}

	/* Preserve the original headroom and add tailroom for the RCT;
	 * prp_fill_rct() returns NULL if padding fails (skb freed there).
	 */
	skb = skb_copy_expand(frame->skb_std, skb_headroom(frame->skb_std),
			      skb_tailroom(frame->skb_std) + HSR_HLEN,
			      GFP_ATOMIC);
	return prp_fill_rct(skb, frame, port);
}
388
389static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
390			       struct hsr_node *node_src)
391{
392	bool was_multicast_frame;
393	int res, recv_len;
394
395	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
396	hsr_addr_subst_source(node_src, skb);
397	skb_pull(skb, ETH_HLEN);
398	recv_len = skb->len;
399	res = netif_rx(skb);
400	if (res == NET_RX_DROP) {
401		dev->stats.rx_dropped++;
402	} else {
403		dev->stats.rx_packets++;
404		dev->stats.rx_bytes += recv_len;
405		if (was_multicast_frame)
406			dev->stats.multicast++;
407	}
408}
409
/* Transmit 'skb' on 'port'.  Locally originated frames (received on the
 * master port) get the IEC 62439-3 address substitutions applied first;
 * frames leaving through the interlink get the RedBox source address.
 * Returns the dev_queue_xmit() result.
 */
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace mac
		 * address of outgoing frame with that of the outgoing slave's.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}

	/* When HSR node is used as RedBox - the frame received from HSR ring
	 * requires source MAC address (SA) replacement to one which can be
	 * recognized by SAN devices (otherwise, frames are dropped by switch)
	 */
	if (port->type == HSR_PT_INTERLINK)
		ether_addr_copy(eth_hdr(skb)->h_source,
				port->hsr->macaddress_redbox);

	return dev_queue_xmit(skb);
}
432
433bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
434{
435	return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
436		 port->type == HSR_PT_SLAVE_B) ||
437		(frame->port_rcv->type == HSR_PT_SLAVE_B &&
438		 port->type == HSR_PT_SLAVE_A));
439}
440
/* Decide whether @frame must not be forwarded out of @port.
 *
 * With hardware forwarding offload the PRP no-bridging rule is applied
 * instead; otherwise this implements the RedBox filtering policies:
 * supervision frames never leave through the interlink, and unicast
 * traffic is kept on the side (ring vs. SAN) where its destination is
 * known to live, based on the node and proxy-node tables.
 *
 * Returns true when the frame should be dropped for this port.
 */
bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
	struct sk_buff *skb;

	if (port->dev->features & NETIF_F_HW_HSR_FWD)
		return prp_drop_frame(frame, port);

	/* RedBox specific frames dropping policies
	 *
	 * Do not send HSR supervisory frames to SAN devices
	 */
	if (frame->is_supervision && port->type == HSR_PT_INTERLINK)
		return true;

	/* Do not forward to other HSR port (A or B) unicast frames which
	 * are addressed to interlink port (and are in the ProxyNodeTable).
	 */
	skb = frame->skb_hsr;
	if (skb && prp_drop_frame(frame, port) &&
	    is_unicast_ether_addr(eth_hdr(skb)->h_dest) &&
	    hsr_is_node_in_db(&port->hsr->proxy_node_db,
			      eth_hdr(skb)->h_dest)) {
		return true;
	}

	/* Do not forward to port C (Interlink) frames from nodes A and B
	 * if DA is in NodeTable.
	 */
	if ((frame->port_rcv->type == HSR_PT_SLAVE_A ||
	     frame->port_rcv->type == HSR_PT_SLAVE_B) &&
	    port->type == HSR_PT_INTERLINK) {
		/* Ring traffic carries the HSR-tagged skb. */
		skb = frame->skb_hsr;
		if (skb && is_unicast_ether_addr(eth_hdr(skb)->h_dest) &&
		    hsr_is_node_in_db(&port->hsr->node_db,
				      eth_hdr(skb)->h_dest)) {
			return true;
		}
	}

	/* Do not forward to port A and B unicast frames received on the
	 * interlink port if it is addressed to one of nodes registered in
	 * the ProxyNodeTable.
	 */
	if ((port->type == HSR_PT_SLAVE_A || port->type == HSR_PT_SLAVE_B) &&
	    frame->port_rcv->type == HSR_PT_INTERLINK) {
		/* Frames from the interlink are untagged (skb_std). */
		skb = frame->skb_std;
		if (skb && is_unicast_ether_addr(eth_hdr(skb)->h_dest) &&
		    hsr_is_node_in_db(&port->hsr->proxy_node_db,
				      eth_hdr(skb)->h_dest)) {
			return true;
		}
	}

	return false;
}
496
497/* Forward the frame through all devices except:
498 * - Back through the receiving device
499 * - If it's a HSR frame: through a device where it has passed before
500 * - if it's a PRP frame: through another PRP slave device (no bridge)
501 * - To the local HSR master only if the frame is directly addressed to it, or
502 *   a non-supervision multicast or broadcast frame.
503 *
504 * HSR slave devices should insert a HSR tag into the frame, or forward the
505 * frame unchanged if it's already tagged. Interlink devices should strip HSR
506 * tags if they're of the non-HSR type (but only after duplicate discard). The
507 * master device always strips HSR tags.
508 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;
	/* Tracks whether a slave port already transmitted the frame, for
	 * the hardware duplicate-generation (HW_HSR_DUP) case below.
	 */
	bool sent = false;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		struct hsr_priv *hsr = port->hsr;
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* If hardware duplicate generation is enabled, only send out
		 * one port.
		 */
		if ((port->dev->features & NETIF_F_HW_HSR_DUP) && sent)
			continue;

		/* Don't send frame over port where it has been sent before.
		 * Also for SAN, this shouldn't be done.
		 */
		if (!frame->is_from_san &&
		    hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		/* Supervision frames for ourselves terminate here; proxy
		 * supervision must still be forwarded on behalf of the
		 * proxied nodes.
		 */
		if (frame->is_supervision && port->type == HSR_PT_MASTER &&
		    !frame->is_proxy_supervision) {
			hsr_handle_sup_frame(frame);
			continue;
		}

		/* Check if frame is to be dropped. Eg. for PRP no forward
		 * between ports, or sending HSR supervision to RedBox.
		 */
		if (hsr->proto_ops->drop_frame &&
		    hsr->proto_ops->drop_frame(frame, port))
			continue;

		/* Slave ports transmit tagged frames; master/interlink get
		 * the untagged variant.
		 */
		if (port->type == HSR_PT_SLAVE_A ||
		    port->type == HSR_PT_SLAVE_B)
			skb = hsr->proto_ops->create_tagged_frame(frame, port);
		else
			skb = hsr->proto_ops->get_untagged_frame(frame, port);

		if (!skb) {
			/* Tag/untag failed (e.g. no memory): count the drop
			 * against the receiving device.
			 */
			frame->port_rcv->dev->stats.rx_dropped++;
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER) {
			hsr_deliver_master(skb, port->dev, frame->node_src);
		} else {
			/* dev_queue_xmit() returned success: remember that a
			 * slave port carried the frame (see HW_HSR_DUP above).
			 */
			if (!hsr_xmit(skb, port, frame))
				if (port->type == HSR_PT_SLAVE_A ||
				    port->type == HSR_PT_SLAVE_B)
					sent = true;
		}
	}
}
578
579static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
580			     struct hsr_frame_info *frame)
581{
582	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
583		frame->is_local_exclusive = true;
584		skb->pkt_type = PACKET_HOST;
585	} else {
586		frame->is_local_exclusive = false;
587	}
588
589	if (skb->pkt_type == PACKET_HOST ||
590	    skb->pkt_type == PACKET_MULTICAST ||
591	    skb->pkt_type == PACKET_BROADCAST) {
592		frame->is_local_dest = true;
593	} else {
594		frame->is_local_dest = false;
595	}
596}
597
598static void handle_std_frame(struct sk_buff *skb,
599			     struct hsr_frame_info *frame)
600{
601	struct hsr_port *port = frame->port_rcv;
602	struct hsr_priv *hsr = port->hsr;
603
604	frame->skb_hsr = NULL;
605	frame->skb_prp = NULL;
606	frame->skb_std = skb;
607
608	if (port->type != HSR_PT_MASTER)
609		frame->is_from_san = true;
610
611	if (port->type == HSR_PT_MASTER ||
612	    port->type == HSR_PT_INTERLINK) {
613		/* Sequence nr for the master/interlink node */
614		lockdep_assert_held(&hsr->seqnr_lock);
615		frame->sequence_nr = hsr->sequence_nr;
616		hsr->sequence_nr++;
617	}
618}
619
620int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
621			struct hsr_frame_info *frame)
622{
623	struct hsr_port *port = frame->port_rcv;
624	struct hsr_priv *hsr = port->hsr;
625
626	/* HSRv0 supervisory frames double as a tag so treat them as tagged. */
627	if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
628	    proto == htons(ETH_P_HSR)) {
629		/* Check if skb contains hsr_ethhdr */
630		if (skb->mac_len < sizeof(struct hsr_ethhdr))
631			return -EINVAL;
632
633		/* HSR tagged frame :- Data or Supervision */
634		frame->skb_std = NULL;
635		frame->skb_prp = NULL;
636		frame->skb_hsr = skb;
637		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
638		return 0;
639	}
640
641	/* Standard frame or PRP from master port */
642	handle_std_frame(skb, frame);
643
644	return 0;
645}
646
647int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
648			struct hsr_frame_info *frame)
649{
650	/* Supervision frame */
651	struct prp_rct *rct = skb_get_PRP_rct(skb);
652
653	if (rct &&
654	    prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
655		frame->skb_hsr = NULL;
656		frame->skb_std = NULL;
657		frame->skb_prp = skb;
658		frame->sequence_nr = prp_get_skb_sequence_nr(rct);
659		return 0;
660	}
661	handle_std_frame(skb, frame);
662
663	return 0;
664}
665
666static int fill_frame_info(struct hsr_frame_info *frame,
667			   struct sk_buff *skb, struct hsr_port *port)
668{
669	struct hsr_priv *hsr = port->hsr;
670	struct hsr_vlan_ethhdr *vlan_hdr;
671	struct list_head *n_db;
672	struct ethhdr *ethhdr;
673	__be16 proto;
674	int ret;
675
676	/* Check if skb contains ethhdr */
677	if (skb->mac_len < sizeof(struct ethhdr))
678		return -EINVAL;
679
680	memset(frame, 0, sizeof(*frame));
681	frame->is_supervision = is_supervision_frame(port->hsr, skb);
682	if (frame->is_supervision && hsr->redbox)
683		frame->is_proxy_supervision =
684			is_proxy_supervision_frame(port->hsr, skb);
685
686	n_db = &hsr->node_db;
687	if (port->type == HSR_PT_INTERLINK)
688		n_db = &hsr->proxy_node_db;
689
690	frame->node_src = hsr_get_node(port, n_db, skb,
691				       frame->is_supervision, port->type);
692	if (!frame->node_src)
693		return -1; /* Unknown node and !is_supervision, or no mem */
694
695	ethhdr = (struct ethhdr *)skb_mac_header(skb);
696	frame->is_vlan = false;
697	proto = ethhdr->h_proto;
698
699	if (proto == htons(ETH_P_8021Q))
700		frame->is_vlan = true;
701
702	if (frame->is_vlan) {
703		/* Note: skb->mac_len might be wrong here. */
704		if (!pskb_may_pull(skb,
705				   skb_mac_offset(skb) +
706				   offsetofend(struct hsr_vlan_ethhdr, vlanhdr)))
707			return -EINVAL;
708		vlan_hdr = (struct hsr_vlan_ethhdr *)skb_mac_header(skb);
709		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
 
 
710	}
711
712	frame->is_from_san = false;
713	frame->port_rcv = port;
714	ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
715	if (ret)
716		return ret;
717
718	check_local_dest(port->hsr, skb, frame);
719
720	return 0;
721}
722
/* Must be called holding rcu read lock (because of the port parameter) */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	rcu_read_lock();
	if (fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;

	/* Remember this sequence number from this node/port pair. */
	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	rcu_read_unlock();
	/* Gets called for ingress frames as well as egress from master port.
	 * So check and increment stats for master port only here.
	 */
	if (port->type == HSR_PT_MASTER || port->type == HSR_PT_INTERLINK) {
		port->dev->stats.tx_packets++;
		/* skb is still valid here: hsr_forward_do() works on clones
		 * and the original is only freed below.
		 */
		port->dev->stats.tx_bytes += skb->len;
	}

	/* Exactly one of skb_hsr/skb_prp/skb_std aliases @skb; the others
	 * are NULL (kfree_skb(NULL) is a no-op).
	 */
	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_prp);
	kfree_skb(frame.skb_std);
	return;

out_drop:
	rcu_read_unlock();
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}