/* Copyright 2011-2013 Autronica Fire and Security AS
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Author(s):
 *	2011-2013 Arvid Brodin, arvid.brodin@xdin.com
 *
 * In addition to routines for registering and unregistering HSR support, this
 * file also contains the receive routine that handles all incoming frames with
 * Ethertype (protocol) ETH_P_PRP (HSRv0), and network device event handling.
 */

#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/timer.h>
#include <linux/etherdevice.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_netlink.h"
#include "hsr_framereg.h"


/* List of all registered virtual HSR devices */
static LIST_HEAD(hsr_list);

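/* register_hsr_master()/unregister_hsr_master() add and remove a virtual HSR
 * device's private data on the global list above. Readers (e.g. the receive
 * path below) walk the list under RCU; writers are assumed to be serialized
 * by the caller (the netlink link create/delete paths run under RTNL).
 */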
void register_hsr_master(struct hsr_priv *hsr_priv)
{
	list_add_tail_rcu(&hsr_priv->hsr_list, &hsr_list);
}

void unregister_hsr_master(struct hsr_priv *hsr_priv)
{
	struct hsr_priv *hsr_priv_it;

	list_for_each_entry(hsr_priv_it, &hsr_list, hsr_list)
		if (hsr_priv_it == hsr_priv) {
			list_del_rcu(&hsr_priv_it->hsr_list);
			return;
		}
}

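/* Return true if dev is currently used as a slave port by any registered HSR
 * device.
 */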
bool is_hsr_slave(struct net_device *dev)
{
	struct hsr_priv *hsr_priv_it;

	list_for_each_entry_rcu(hsr_priv_it, &hsr_list, hsr_list) {
		if (dev == hsr_priv_it->slave[0])
			return true;
		if (dev == hsr_priv_it->slave[1])
			return true;
	}

	return false;
}


/* If dev is an HSR slave device, return the private data of its virtual
 * master device. Return NULL otherwise.
 */
static struct hsr_priv *get_hsr_master(struct net_device *dev)
{
	struct hsr_priv *hsr_priv;

	rcu_read_lock();
	list_for_each_entry_rcu(hsr_priv, &hsr_list, hsr_list)
		if ((dev == hsr_priv->slave[0]) ||
		    (dev == hsr_priv->slave[1])) {
			rcu_read_unlock();
			return hsr_priv;
		}

	rcu_read_unlock();
	return NULL;
}


/* If dev is an HSR slave device, return the other slave device. Return NULL
 * otherwise.
 */
static struct net_device *get_other_slave(struct hsr_priv *hsr_priv,
					  struct net_device *dev)
{
	if (dev == hsr_priv->slave[0])
		return hsr_priv->slave[1];
	if (dev == hsr_priv->slave[1])
		return hsr_priv->slave[0];

	return NULL;
}


static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	struct net_device *slave, *other_slave;
	struct hsr_priv *hsr_priv;
	int old_operstate;
	int mtu_max;
	int res;
	struct net_device *dev;

	dev = netdev_notifier_info_to_dev(ptr);

	hsr_priv = get_hsr_master(dev);
	if (hsr_priv) {
		/* dev is a slave device */
		slave = dev;
		other_slave = get_other_slave(hsr_priv, slave);
	} else {
		if (!is_hsr_master(dev))
			return NOTIFY_DONE;
		hsr_priv = netdev_priv(dev);
		slave = hsr_priv->slave[0];
		other_slave = hsr_priv->slave[1];
	}

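	/* At this point hsr_priv is the virtual device's private data, and
	 * slave/other_slave are its two ring ports (either may already be
	 * NULL if a slave has been unregistered).
	 */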
	switch (event) {
	case NETDEV_UP:		/* Administrative state UP */
	case NETDEV_DOWN:	/* Administrative state DOWN */
	case NETDEV_CHANGE:	/* Link (carrier) state changes */
		old_operstate = hsr_priv->dev->operstate;
		hsr_set_carrier(hsr_priv->dev, slave, other_slave);
		/* netif_stacked_transfer_operstate() cannot be used here since
		 * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
		 */
		hsr_set_operstate(hsr_priv->dev, slave, other_slave);
		hsr_check_announce(hsr_priv->dev, old_operstate);
		break;
	case NETDEV_CHANGEADDR:

		/* This should not happen since there's no ndo_set_mac_address()
		 * for HSR devices - i.e. not supported.
		 */
		if (dev == hsr_priv->dev)
			break;

		if (dev == hsr_priv->slave[0])
			ether_addr_copy(hsr_priv->dev->dev_addr,
					hsr_priv->slave[0]->dev_addr);

		/* Make sure we recognize frames from ourselves in hsr_rcv() */
		res = hsr_create_self_node(&hsr_priv->self_node_db,
					   hsr_priv->dev->dev_addr,
					   hsr_priv->slave[1] ?
						hsr_priv->slave[1]->dev_addr :
						hsr_priv->dev->dev_addr);
		if (res)
			netdev_warn(hsr_priv->dev,
				    "Could not update HSR node address.\n");

		if (dev == hsr_priv->slave[0])
			call_netdevice_notifiers(NETDEV_CHANGEADDR, hsr_priv->dev);
		break;
	case NETDEV_CHANGEMTU:
		if (dev == hsr_priv->dev)
			break; /* Handled in ndo_change_mtu() */
		mtu_max = hsr_get_max_mtu(hsr_priv);
		if (hsr_priv->dev->mtu > mtu_max)
			dev_set_mtu(hsr_priv->dev, mtu_max);
		break;
	case NETDEV_UNREGISTER:
		if (dev == hsr_priv->slave[0])
			hsr_priv->slave[0] = NULL;
		if (dev == hsr_priv->slave[1])
			hsr_priv->slave[1] = NULL;

		/* There should really be a way to set a new slave device... */

		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* HSR works only on Ethernet devices. Refuse to let a slave
		 * change its type.
		 */
		return NOTIFY_BAD;
	}

	return NOTIFY_DONE;
}


static struct timer_list prune_timer;

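/* Timer callback: let every registered HSR instance prune stale entries from
 * its node table, then re-arm the timer for the next PRUNE_PERIOD.
 */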
static void prune_nodes_all(unsigned long data)
{
	struct hsr_priv *hsr_priv;

	rcu_read_lock();
	list_for_each_entry_rcu(hsr_priv, &hsr_list, hsr_list)
		hsr_prune_nodes(hsr_priv);
	rcu_read_unlock();

	prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
	add_timer(&prune_timer);
}


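/* Strip the HSR tag from the start of the frame data and restore
 * skb->protocol from the encapsulated Ethertype. Returns the (possibly
 * reallocated) skb, or NULL with the skb freed on error.
 */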
static struct sk_buff *hsr_pull_tag(struct sk_buff *skb)
{
	struct hsr_tag *hsr_tag;
	struct sk_buff *skb2;

	skb2 = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb2))
		goto err_free;
	skb = skb2;

	if (unlikely(!pskb_may_pull(skb, HSR_TAGLEN)))
		goto err_free;

	hsr_tag = (struct hsr_tag *) skb->data;
	skb->protocol = hsr_tag->encap_proto;
	skb_pull(skb, HSR_TAGLEN);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}


/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 *    the other node's counter has been reset for some reason.
 *    --
 *    Or not - resetting the counter and bridging the frame would create a
 *    loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 *    frame is received from a particular node, we know something is wrong.
 *    We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 *    MacAddressA field.
 */
static bool is_supervision_frame(struct hsr_priv *hsr_priv, struct sk_buff *skb)
{
	struct hsr_sup_tag *hsr_stag;

	if (!ether_addr_equal(eth_hdr(skb)->h_dest,
			      hsr_priv->sup_multicast_addr))
		return false;

	hsr_stag = (struct hsr_sup_tag *) skb->data;
	if (get_hsr_stag_path(hsr_stag) != 0x0f)
		return false;
	if ((hsr_stag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
	    (hsr_stag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
		return false;
	if (hsr_stag->HSR_TLV_Length != 12)
		return false;

	return true;
}


/* Implementation somewhat according to IEC-62439-3, p. 43
 */
static int hsr_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct hsr_priv *hsr_priv;
	struct net_device *other_slave;
	struct node_entry *node;
	bool deliver_to_self;
	struct sk_buff *skb_deliver;
	enum hsr_dev_idx dev_in_idx, dev_other_idx;
	bool dup_out;
	int ret;

	hsr_priv = get_hsr_master(dev);

	if (!hsr_priv) {
		/* Non-HSR-slave device 'dev' is connected to an HSR network */
		kfree_skb(skb);
		dev->stats.rx_errors++;
		return NET_RX_SUCCESS;
	}

	if (dev == hsr_priv->slave[0]) {
		dev_in_idx = HSR_DEV_SLAVE_A;
		dev_other_idx = HSR_DEV_SLAVE_B;
	} else {
		dev_in_idx = HSR_DEV_SLAVE_B;
		dev_other_idx = HSR_DEV_SLAVE_A;
	}

	node = hsr_find_node(&hsr_priv->self_node_db, skb);
	if (node) {
		/* Always kill frames sent by ourselves */
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* Is this frame a candidate for local reception? */
	deliver_to_self = false;
	if ((skb->pkt_type == PACKET_HOST) ||
	    (skb->pkt_type == PACKET_MULTICAST) ||
	    (skb->pkt_type == PACKET_BROADCAST))
		deliver_to_self = true;
	else if (ether_addr_equal(eth_hdr(skb)->h_dest,
				     hsr_priv->dev->dev_addr)) {
		skb->pkt_type = PACKET_HOST;
		deliver_to_self = true;
	}

	rcu_read_lock(); /* node_db */
	node = hsr_find_node(&hsr_priv->node_db, skb);

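	/* Supervision frames only update the sender's entry in the node table;
	 * they are never delivered to the upper layers, but may still be
	 * forwarded on the other slave further down.
	 */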
	if (is_supervision_frame(hsr_priv, skb)) {
		skb_pull(skb, sizeof(struct hsr_sup_tag));
		node = hsr_merge_node(hsr_priv, node, skb, dev_in_idx);
		if (!node) {
			rcu_read_unlock(); /* node_db */
			kfree_skb(skb);
			hsr_priv->dev->stats.rx_dropped++;
			return NET_RX_DROP;
		}
		skb_push(skb, sizeof(struct hsr_sup_tag));
		deliver_to_self = false;
	}

	if (!node) {
		/* Source node unknown; this might be an HSR frame from
		 * another net (different multicast address). Ignore it.
		 */
		rcu_read_unlock(); /* node_db */
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* Register ALL incoming frames as outgoing through the other interface.
	 * This allows us to register frames as incoming only if they are valid
	 * for the receiving interface, without using a specific counter for
	 * incoming frames.
	 */
	dup_out = hsr_register_frame_out(node, dev_other_idx, skb);
	if (!dup_out)
		hsr_register_frame_in(node, dev_in_idx);

	/* Forward this frame? */
	if (!dup_out && (skb->pkt_type != PACKET_HOST))
		other_slave = get_other_slave(hsr_priv, dev);
	else
		other_slave = NULL;

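	/* Deliver a frame to the upper layers at most once: registering it
	 * against the virtual master device flags duplicates that were
	 * already accepted for local delivery via the other slave.
	 */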
	if (hsr_register_frame_out(node, HSR_DEV_MASTER, skb))
		deliver_to_self = false;

	rcu_read_unlock(); /* node_db */

	if (!deliver_to_self && !other_slave) {
		kfree_skb(skb);
		/* Circulated frame; silently remove it. */
		return NET_RX_SUCCESS;
	}

	skb_deliver = skb;
	if (deliver_to_self && other_slave) {
		/* skb_clone() is not enough since we will strip the hsr tag
		 * and do address substitution below
		 */
		skb_deliver = pskb_copy(skb, GFP_ATOMIC);
		if (!skb_deliver) {
			deliver_to_self = false;
			hsr_priv->dev->stats.rx_dropped++;
		}
	}

	if (deliver_to_self) {
		bool multicast_frame;

		skb_deliver = hsr_pull_tag(skb_deliver);
		if (!skb_deliver) {
			hsr_priv->dev->stats.rx_dropped++;
			goto forward;
		}
#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
		/* Move everything in the header that is after the HSR tag,
		 * to work around alignment problems caused by the 6-byte HSR
		 * tag. In practice, this removes/overwrites the HSR tag in
		 * the header and restores a "standard" packet.
		 */
		memmove(skb_deliver->data - HSR_TAGLEN, skb_deliver->data,
			skb_headlen(skb_deliver));

		/* Adjust skb members so they correspond with the move above.
		 * This cannot possibly underflow skb->data since hsr_pull_tag()
		 * above succeeded.
		 * At this point in the protocol stack, the transport and
		 * network headers have not been set yet, and we haven't touched
		 * the mac header nor the head. So we only need to adjust data
		 * and tail:
		 */
		skb_deliver->data -= HSR_TAGLEN;
		skb_deliver->tail -= HSR_TAGLEN;
#endif
		skb_deliver->dev = hsr_priv->dev;
		hsr_addr_subst_source(hsr_priv, skb_deliver);
		multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST);
		ret = netif_rx(skb_deliver);
		if (ret == NET_RX_DROP) {
			hsr_priv->dev->stats.rx_dropped++;
		} else {
			hsr_priv->dev->stats.rx_packets++;
			hsr_priv->dev->stats.rx_bytes += skb->len;
			if (multicast_frame)
				hsr_priv->dev->stats.multicast++;
		}
	}

forward:
	if (other_slave) {
		skb_push(skb, ETH_HLEN);
		skb->dev = other_slave;
		dev_queue_xmit(skb);
	}

	return NET_RX_SUCCESS;
}


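/* All incoming frames with Ethertype ETH_P_PRP (i.e. HSR-tagged frames) are
 * handed to hsr_rcv() via this packet handler.
 */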
static struct packet_type hsr_pt __read_mostly = {
	.type = htons(ETH_P_PRP),
	.func = hsr_rcv,
};

static struct notifier_block hsr_nb = {
	.notifier_call = hsr_netdev_notify,	/* Slave event notifications */
};


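/* Module init: register the ETH_P_PRP packet handler, start the periodic
 * node-pruning timer, subscribe to netdevice events for the slave interfaces,
 * and set up the netlink interface.
 */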
static int __init hsr_init(void)
{
	int res;

	BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_TAGLEN);

	dev_add_pack(&hsr_pt);

	init_timer(&prune_timer);
	prune_timer.function = prune_nodes_all;
	prune_timer.data = 0;
	prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
	add_timer(&prune_timer);

	register_netdevice_notifier(&hsr_nb);

	res = hsr_netlink_init();

	return res;
}

static void __exit hsr_exit(void)
{
	unregister_netdevice_notifier(&hsr_nb);
	del_timer_sync(&prune_timer);
	hsr_netlink_exit();
	dev_remove_pack(&hsr_pt);
}

module_init(hsr_init);
module_exit(hsr_exit);
MODULE_LICENSE("GPL");