v5.4: net/hsr/hsr_netlink.c
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright 2011-2014 Autronica Fire and Security AS
  3 *
  4 * Author(s):
  5 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
  6 *
  7 * Routines for handling Netlink messages for HSR.
  8 */
  9
 10#include "hsr_netlink.h"
 11#include <linux/kernel.h>
 12#include <net/rtnetlink.h>
 13#include <net/genetlink.h>
 14#include "hsr_main.h"
 15#include "hsr_device.h"
 16#include "hsr_framereg.h"
 17
 18static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
 19	[IFLA_HSR_SLAVE1]		= { .type = NLA_U32 },
 20	[IFLA_HSR_SLAVE2]		= { .type = NLA_U32 },
 21	[IFLA_HSR_MULTICAST_SPEC]	= { .type = NLA_U8 },
 22	[IFLA_HSR_VERSION]	= { .type = NLA_U8 },
 23	[IFLA_HSR_SUPERVISION_ADDR]	= { .len = ETH_ALEN },
 24	[IFLA_HSR_SEQ_NR]		= { .type = NLA_U16 },
 25};
 26
 27/* Here, it seems a netdevice has already been allocated for us, and the
 28 * hsr_dev_setup routine has been executed. Nice!
 29 */
 30static int hsr_newlink(struct net *src_net, struct net_device *dev,
 31		       struct nlattr *tb[], struct nlattr *data[],
 32		       struct netlink_ext_ack *extack)
 33{
 34	struct net_device *link[2];
 35	unsigned char multicast_spec, hsr_version;
 36
 37	if (!data) {
 38		netdev_info(dev, "HSR: No slave devices specified\n");
 39		return -EINVAL;
 40	}
 41	if (!data[IFLA_HSR_SLAVE1]) {
 42		netdev_info(dev, "HSR: Slave1 device not specified\n");
 43		return -EINVAL;
 44	}
 45	link[0] = __dev_get_by_index(src_net,
 46				     nla_get_u32(data[IFLA_HSR_SLAVE1]));
 47	if (!data[IFLA_HSR_SLAVE2]) {
 48		netdev_info(dev, "HSR: Slave2 device not specified\n");
 49		return -EINVAL;
 50	}
 51	link[1] = __dev_get_by_index(src_net,
 52				     nla_get_u32(data[IFLA_HSR_SLAVE2]));
 53
 54	if (!link[0] || !link[1])
 55		return -ENODEV;
 56	if (link[0] == link[1])
 57		return -EINVAL;
 58
 59	if (!data[IFLA_HSR_MULTICAST_SPEC])
 60		multicast_spec = 0;
 61	else
 62		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
 63
 64	if (!data[IFLA_HSR_VERSION])
 65		hsr_version = 0;
 66	else
 67		hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
 68
 69	return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
 70}
 71
 72static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
 73{
 74	struct hsr_priv *hsr;
 75	struct hsr_port *port;
 76	int res;
 77
 78	hsr = netdev_priv(dev);
 79
 80	res = 0;
 81
 82	rcu_read_lock();
 83	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
 84	if (port)
 85		res = nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex);
 86	rcu_read_unlock();
 87	if (res)
 88		goto nla_put_failure;
 89
 90	rcu_read_lock();
 91	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
 92	if (port)
 93		res = nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex);
 94	rcu_read_unlock();
 95	if (res)
 96		goto nla_put_failure;
 97
 98	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
 99		    hsr->sup_multicast_addr) ||
100	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
101		goto nla_put_failure;
102
103	return 0;
104
105nla_put_failure:
106	return -EMSGSIZE;
107}
108
109static struct rtnl_link_ops hsr_link_ops __read_mostly = {
110	.kind		= "hsr",
111	.maxtype	= IFLA_HSR_MAX,
112	.policy		= hsr_policy,
113	.priv_size	= sizeof(struct hsr_priv),
114	.setup		= hsr_dev_setup,
115	.newlink	= hsr_newlink,
116	.fill_info	= hsr_fill_info,
117};
118
119/* attribute policy */
120static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
121	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
122	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
123	[HSR_A_IFINDEX] = { .type = NLA_U32 },
124	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
125	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
126	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
127	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
128};
129
130static struct genl_family hsr_genl_family;
131
132static const struct genl_multicast_group hsr_mcgrps[] = {
133	{ .name = "hsr-network", },
134};
135
136/* This is called if for some node with MAC address addr, we only get frames
137 * over one of the slave interfaces. This would indicate an open network ring
138 * (i.e. a link has failed somewhere).
139 */
140void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
141		      struct hsr_port *port)
142{
143	struct sk_buff *skb;
144	void *msg_head;
145	struct hsr_port *master;
146	int res;
147
148	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
149	if (!skb)
150		goto fail;
151
152	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
153			       HSR_C_RING_ERROR);
154	if (!msg_head)
155		goto nla_put_failure;
156
157	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
158	if (res < 0)
159		goto nla_put_failure;
160
161	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
162	if (res < 0)
163		goto nla_put_failure;
164
165	genlmsg_end(skb, msg_head);
166	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
167
168	return;
169
170nla_put_failure:
171	kfree_skb(skb);
172
173fail:
174	rcu_read_lock();
175	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
176	netdev_warn(master->dev, "Could not send HSR ring error message\n");
177	rcu_read_unlock();
178}
179
180/* This is called when we haven't heard from the node with MAC address addr for
181 * some time (just before the node is removed from the node table/list).
182 */
183void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
184{
185	struct sk_buff *skb;
186	void *msg_head;
187	struct hsr_port *master;
188	int res;
189
190	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
191	if (!skb)
192		goto fail;
193
194	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
195	if (!msg_head)
196		goto nla_put_failure;
197
198	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
199	if (res < 0)
200		goto nla_put_failure;
201
202	genlmsg_end(skb, msg_head);
203	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
204
205	return;
206
207nla_put_failure:
208	kfree_skb(skb);
209
210fail:
211	rcu_read_lock();
212	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
213	netdev_warn(master->dev, "Could not send HSR node down\n");
214	rcu_read_unlock();
215}
216
217/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
218 * about the status of a specific node in the network, defined by its MAC
219 * address.
220 *
221 * Input: hsr ifindex, node mac address
222 * Output: hsr ifindex, node mac address (copied from request),
223 *	   age of latest frame from node over slave 1, slave 2 [ms]
224 */
225static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
226{
227	/* For receiving */
228	struct nlattr *na;
229	struct net_device *hsr_dev;
230
231	/* For sending */
232	struct sk_buff *skb_out;
233	void *msg_head;
234	struct hsr_priv *hsr;
235	struct hsr_port *port;
236	unsigned char hsr_node_addr_b[ETH_ALEN];
237	int hsr_node_if1_age;
238	u16 hsr_node_if1_seq;
239	int hsr_node_if2_age;
240	u16 hsr_node_if2_seq;
241	int addr_b_ifindex;
242	int res;
243
244	if (!info)
245		goto invalid;
246
247	na = info->attrs[HSR_A_IFINDEX];
248	if (!na)
249		goto invalid;
250	na = info->attrs[HSR_A_NODE_ADDR];
251	if (!na)
252		goto invalid;
253
254	hsr_dev = __dev_get_by_index(genl_info_net(info),
255				     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
256	if (!hsr_dev)
257		goto invalid;
258	if (!is_hsr_master(hsr_dev))
259		goto invalid;
260
261	/* Send reply */
262	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
263	if (!skb_out) {
264		res = -ENOMEM;
265		goto fail;
266	}
267
268	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
269			       info->snd_seq, &hsr_genl_family, 0,
270			       HSR_C_SET_NODE_STATUS);
271	if (!msg_head) {
272		res = -ENOMEM;
273		goto nla_put_failure;
274	}
275
276	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
277	if (res < 0)
278		goto nla_put_failure;
279
280	hsr = netdev_priv(hsr_dev);
281	res = hsr_get_node_data(hsr,
282				(unsigned char *)
283				nla_data(info->attrs[HSR_A_NODE_ADDR]),
284					 hsr_node_addr_b,
285					 &addr_b_ifindex,
286					 &hsr_node_if1_age,
287					 &hsr_node_if1_seq,
288					 &hsr_node_if2_age,
289					 &hsr_node_if2_seq);
290	if (res < 0)
291		goto nla_put_failure;
292
293	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
294		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
295	if (res < 0)
296		goto nla_put_failure;
297
298	if (addr_b_ifindex > -1) {
299		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
300			      hsr_node_addr_b);
301		if (res < 0)
302			goto nla_put_failure;
303
304		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
305				  addr_b_ifindex);
306		if (res < 0)
307			goto nla_put_failure;
308	}
309
310	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
311	if (res < 0)
312		goto nla_put_failure;
313	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
314	if (res < 0)
315		goto nla_put_failure;
316	rcu_read_lock();
317	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
318	if (port)
319		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
320				  port->dev->ifindex);
321	rcu_read_unlock();
322	if (res < 0)
323		goto nla_put_failure;
324
325	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
326	if (res < 0)
327		goto nla_put_failure;
328	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
329	if (res < 0)
330		goto nla_put_failure;
331	rcu_read_lock();
332	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
333	if (port)
334		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
335				  port->dev->ifindex);
336	rcu_read_unlock();
337	if (res < 0)
338		goto nla_put_failure;
339
340	genlmsg_end(skb_out, msg_head);
341	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
342
343	return 0;
344
345invalid:
346	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
347	return 0;
348
349nla_put_failure:
350	kfree_skb(skb_out);
351	/* Fall through */
352
353fail:
354	return res;
355}
356
357/* Get a list of MacAddressA of all nodes known to this node (including self).
358 */
359static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
360{
361	/* For receiving */
362	struct nlattr *na;
363	struct net_device *hsr_dev;
364
365	/* For sending */
366	struct sk_buff *skb_out;
367	void *msg_head;
368	struct hsr_priv *hsr;
369	void *pos;
370	unsigned char addr[ETH_ALEN];
371	int res;
372
373	if (!info)
374		goto invalid;
375
376	na = info->attrs[HSR_A_IFINDEX];
377	if (!na)
378		goto invalid;
379
380	hsr_dev = __dev_get_by_index(genl_info_net(info),
381				     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
382	if (!hsr_dev)
383		goto invalid;
384	if (!is_hsr_master(hsr_dev))
385		goto invalid;
386
387	/* Send reply */
388	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
389	if (!skb_out) {
390		res = -ENOMEM;
391		goto fail;
392	}
393
394	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
395			       info->snd_seq, &hsr_genl_family, 0,
396			       HSR_C_SET_NODE_LIST);
397	if (!msg_head) {
398		res = -ENOMEM;
399		goto nla_put_failure;
400	}
401
402	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
403	if (res < 0)
404		goto nla_put_failure;
405
406	hsr = netdev_priv(hsr_dev);
407
408	rcu_read_lock();
409	pos = hsr_get_next_node(hsr, NULL, addr);
410	while (pos) {
411		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
412		if (res < 0) {
413			rcu_read_unlock();
414			goto nla_put_failure;
415		}
416		pos = hsr_get_next_node(hsr, pos, addr);
417	}
418	rcu_read_unlock();
419
420	genlmsg_end(skb_out, msg_head);
421	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
422
423	return 0;
424
425invalid:
426	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
427	return 0;
428
429nla_put_failure:
430	kfree_skb(skb_out);
431	/* Fall through */
432
433fail:
434	return res;
435}
436
437static const struct genl_ops hsr_ops[] = {
438	{
439		.cmd = HSR_C_GET_NODE_STATUS,
440		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
441		.flags = 0,
442		.doit = hsr_get_node_status,
443		.dumpit = NULL,
444	},
445	{
446		.cmd = HSR_C_GET_NODE_LIST,
447		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
448		.flags = 0,
449		.doit = hsr_get_node_list,
450		.dumpit = NULL,
451	},
452};
453
454static struct genl_family hsr_genl_family __ro_after_init = {
455	.hdrsize = 0,
456	.name = "HSR",
457	.version = 1,
458	.maxattr = HSR_A_MAX,
459	.policy = hsr_genl_policy,
460	.module = THIS_MODULE,
461	.ops = hsr_ops,
462	.n_ops = ARRAY_SIZE(hsr_ops),
463	.mcgrps = hsr_mcgrps,
464	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
465};
466
467int __init hsr_netlink_init(void)
468{
469	int rc;
470
471	rc = rtnl_link_register(&hsr_link_ops);
472	if (rc)
473		goto fail_rtnl_link_register;
474
475	rc = genl_register_family(&hsr_genl_family);
476	if (rc)
477		goto fail_genl_register_family;
478
479	return 0;
480
481fail_genl_register_family:
482	rtnl_link_unregister(&hsr_link_ops);
483fail_rtnl_link_register:
484
485	return rc;
486}
487
488void __exit hsr_netlink_exit(void)
489{
490	genl_unregister_family(&hsr_genl_family);
491	rtnl_link_unregister(&hsr_link_ops);
492}
493
494MODULE_ALIAS_RTNL_LINK("hsr");
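
The v5.4 listing above registers a generic netlink family named "HSR" whose HSR_C_GET_NODE_LIST command returns the node table as repeated HSR_A_NODE_ADDR attributes in an HSR_C_SET_NODE_LIST reply. Before the v6.2 version of the same file, here is a minimal userspace sketch of how such a query might be issued. It is an illustration only, not part of the kernel source: it assumes libnl-genl-3 for the netlink plumbing, assumes the kernel uapi header <linux/hsr_netlink.h> provides the HSR_C_*/HSR_A_* constants, and uses the placeholder interface name "hsr0". Error handling is kept minimal.

#include <stdio.h>
#include <net/if.h>              /* if_nametoindex() */
#include <linux/hsr_netlink.h>   /* HSR_C_*, HSR_A_* (assumed uapi header) */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

/* Print every HSR_A_NODE_ADDR attribute in the HSR_C_SET_NODE_LIST reply.
 * The reply repeats the attribute once per node, so iterate over the raw
 * attribute stream rather than using nla_parse(), which keeps only one.
 */
static int list_cb(struct nl_msg *msg, void *arg)
{
	struct genlmsghdr *ghdr = nlmsg_data(nlmsg_hdr(msg));
	struct nlattr *nla;
	int rem;

	nla_for_each_attr(nla, genlmsg_attrdata(ghdr, 0),
			  genlmsg_attrlen(ghdr, 0), rem) {
		if (nla_type(nla) == HSR_A_NODE_ADDR && nla_len(nla) >= 6) {
			unsigned char *a = nla_data(nla);

			printf("node %02x:%02x:%02x:%02x:%02x:%02x\n",
			       a[0], a[1], a[2], a[3], a[4], a[5]);
		}
	}
	return NL_OK;
}

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "hsr0";  /* placeholder */
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	if (!sk || genl_connect(sk))
		return 1;

	family = genl_ctrl_resolve(sk, "HSR");	/* family name from the listing */
	if (family < 0)
		return 1;

	nl_socket_disable_auto_ack(sk);		/* we only want the data reply */
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, list_cb, NULL);

	msg = nlmsg_alloc();
	if (!msg)
		return 1;
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    HSR_C_GET_NODE_LIST, 1);
	nla_put_u32(msg, HSR_A_IFINDEX, if_nametoindex(ifname));

	nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_recvmsgs_default(sk);	/* processes the HSR_C_SET_NODE_LIST reply */

	nl_socket_free(sk);
	return 0;
}

A build along the lines of gcc hsr_node_list.c $(pkg-config --cflags --libs libnl-genl-3.0) should work if the libnl-genl-3 development files are installed; hsr_node_list.c is just a placeholder file name.
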
v6.2: net/hsr/hsr_netlink.c
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright 2011-2014 Autronica Fire and Security AS
  3 *
  4 * Author(s):
  5 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
  6 *
  7 * Routines for handling Netlink messages for HSR and PRP.
  8 */
  9
 10#include "hsr_netlink.h"
 11#include <linux/kernel.h>
 12#include <net/rtnetlink.h>
 13#include <net/genetlink.h>
 14#include "hsr_main.h"
 15#include "hsr_device.h"
 16#include "hsr_framereg.h"
 17
 18static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
 19	[IFLA_HSR_SLAVE1]		= { .type = NLA_U32 },
 20	[IFLA_HSR_SLAVE2]		= { .type = NLA_U32 },
 21	[IFLA_HSR_MULTICAST_SPEC]	= { .type = NLA_U8 },
 22	[IFLA_HSR_VERSION]	= { .type = NLA_U8 },
 23	[IFLA_HSR_SUPERVISION_ADDR]	= { .len = ETH_ALEN },
 24	[IFLA_HSR_SEQ_NR]		= { .type = NLA_U16 },
 25	[IFLA_HSR_PROTOCOL]		= { .type = NLA_U8 },
 26};
 27
 28/* Here, it seems a netdevice has already been allocated for us, and the
 29 * hsr_dev_setup routine has been executed. Nice!
 30 */
 31static int hsr_newlink(struct net *src_net, struct net_device *dev,
 32		       struct nlattr *tb[], struct nlattr *data[],
 33		       struct netlink_ext_ack *extack)
 34{
 35	enum hsr_version proto_version;
 36	unsigned char multicast_spec;
 37	u8 proto = HSR_PROTOCOL_HSR;
 38	struct net_device *link[2];
 39
 40	if (!data) {
 41		NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
 42		return -EINVAL;
 43	}
 44	if (!data[IFLA_HSR_SLAVE1]) {
 45		NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
 46		return -EINVAL;
 47	}
 48	link[0] = __dev_get_by_index(src_net,
 49				     nla_get_u32(data[IFLA_HSR_SLAVE1]));
 50	if (!link[0]) {
 51		NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
 52		return -EINVAL;
 53	}
 54	if (!data[IFLA_HSR_SLAVE2]) {
 55		NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
 56		return -EINVAL;
 57	}
 58	link[1] = __dev_get_by_index(src_net,
 59				     nla_get_u32(data[IFLA_HSR_SLAVE2]));
 60	if (!link[1]) {
 61		NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
 62		return -EINVAL;
 63	}
 64
 65	if (link[0] == link[1]) {
 66		NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
 67		return -EINVAL;
 68	}
 69
 70	if (!data[IFLA_HSR_MULTICAST_SPEC])
 71		multicast_spec = 0;
 72	else
 73		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
 74
 75	if (data[IFLA_HSR_PROTOCOL])
 76		proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);
 77
 78	if (proto >= HSR_PROTOCOL_MAX) {
 79		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
 80		return -EINVAL;
 81	}
 82
 83	if (!data[IFLA_HSR_VERSION]) {
 84		proto_version = HSR_V0;
 85	} else {
 86		if (proto == HSR_PROTOCOL_PRP) {
 87			NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
 88			return -EINVAL;
 89		}
 90
 91		proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
 92		if (proto_version > HSR_V1) {
 93			NL_SET_ERR_MSG_MOD(extack,
 94					   "Only HSR version 0/1 supported");
 95			return -EINVAL;
 96		}
 97	}
 98
 99	if (proto == HSR_PROTOCOL_PRP)
100		proto_version = PRP_V1;
101
102	return hsr_dev_finalize(dev, link, multicast_spec, proto_version, extack);
103}
104
105static void hsr_dellink(struct net_device *dev, struct list_head *head)
106{
107	struct hsr_priv *hsr = netdev_priv(dev);
108
109	del_timer_sync(&hsr->prune_timer);
110	del_timer_sync(&hsr->announce_timer);
111
112	hsr_debugfs_term(hsr);
113	hsr_del_ports(hsr);
114
115	hsr_del_self_node(hsr);
116	hsr_del_nodes(&hsr->node_db);
117
118	unregister_netdevice_queue(dev, head);
119}
120
121static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
122{
123	struct hsr_priv *hsr = netdev_priv(dev);
124	u8 proto = HSR_PROTOCOL_HSR;
125	struct hsr_port *port;
126
127	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
128	if (port) {
129		if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
130			goto nla_put_failure;
131	}
132
133	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
134	if (port) {
135		if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
136			goto nla_put_failure;
137	}
138
139	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
140		    hsr->sup_multicast_addr) ||
141	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
142		goto nla_put_failure;
143	if (hsr->prot_version == PRP_V1)
144		proto = HSR_PROTOCOL_PRP;
145	if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
146		goto nla_put_failure;
147
148	return 0;
149
150nla_put_failure:
151	return -EMSGSIZE;
152}
153
154static struct rtnl_link_ops hsr_link_ops __read_mostly = {
155	.kind		= "hsr",
156	.maxtype	= IFLA_HSR_MAX,
157	.policy		= hsr_policy,
158	.priv_size	= sizeof(struct hsr_priv),
159	.setup		= hsr_dev_setup,
160	.newlink	= hsr_newlink,
161	.dellink	= hsr_dellink,
162	.fill_info	= hsr_fill_info,
163};
164
165/* attribute policy */
166static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
167	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
168	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
169	[HSR_A_IFINDEX] = { .type = NLA_U32 },
170	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
171	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
172	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
173	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
174};
175
176static struct genl_family hsr_genl_family;
177
178static const struct genl_multicast_group hsr_mcgrps[] = {
179	{ .name = "hsr-network", },
180};
181
182/* This is called if for some node with MAC address addr, we only get frames
183 * over one of the slave interfaces. This would indicate an open network ring
184 * (i.e. a link has failed somewhere).
185 */
186void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
187		      struct hsr_port *port)
188{
189	struct sk_buff *skb;
190	void *msg_head;
191	struct hsr_port *master;
192	int res;
193
194	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
195	if (!skb)
196		goto fail;
197
198	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
199			       HSR_C_RING_ERROR);
200	if (!msg_head)
201		goto nla_put_failure;
202
203	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
204	if (res < 0)
205		goto nla_put_failure;
206
207	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
208	if (res < 0)
209		goto nla_put_failure;
210
211	genlmsg_end(skb, msg_head);
212	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
213
214	return;
215
216nla_put_failure:
217	kfree_skb(skb);
218
219fail:
220	rcu_read_lock();
221	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
222	netdev_warn(master->dev, "Could not send HSR ring error message\n");
223	rcu_read_unlock();
224}
225
226/* This is called when we haven't heard from the node with MAC address addr for
227 * some time (just before the node is removed from the node table/list).
228 */
229void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
230{
231	struct sk_buff *skb;
232	void *msg_head;
233	struct hsr_port *master;
234	int res;
235
236	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
237	if (!skb)
238		goto fail;
239
240	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
241	if (!msg_head)
242		goto nla_put_failure;
243
244	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
245	if (res < 0)
246		goto nla_put_failure;
247
248	genlmsg_end(skb, msg_head);
249	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
250
251	return;
252
253nla_put_failure:
254	kfree_skb(skb);
255
256fail:
257	rcu_read_lock();
258	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
259	netdev_warn(master->dev, "Could not send HSR node down\n");
260	rcu_read_unlock();
261}
262
263/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
264 * about the status of a specific node in the network, defined by its MAC
265 * address.
266 *
267 * Input: hsr ifindex, node mac address
268 * Output: hsr ifindex, node mac address (copied from request),
269 *	   age of latest frame from node over slave 1, slave 2 [ms]
270 */
271static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
272{
273	/* For receiving */
274	struct nlattr *na;
275	struct net_device *hsr_dev;
276
277	/* For sending */
278	struct sk_buff *skb_out;
279	void *msg_head;
280	struct hsr_priv *hsr;
281	struct hsr_port *port;
282	unsigned char hsr_node_addr_b[ETH_ALEN];
283	int hsr_node_if1_age;
284	u16 hsr_node_if1_seq;
285	int hsr_node_if2_age;
286	u16 hsr_node_if2_seq;
287	int addr_b_ifindex;
288	int res;
289
290	if (!info)
291		goto invalid;
292
293	na = info->attrs[HSR_A_IFINDEX];
294	if (!na)
295		goto invalid;
296	na = info->attrs[HSR_A_NODE_ADDR];
297	if (!na)
298		goto invalid;
299
300	rcu_read_lock();
301	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
302				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
303	if (!hsr_dev)
304		goto rcu_unlock;
305	if (!is_hsr_master(hsr_dev))
306		goto rcu_unlock;
307
308	/* Send reply */
309	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
310	if (!skb_out) {
311		res = -ENOMEM;
312		goto fail;
313	}
314
315	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
316			       info->snd_seq, &hsr_genl_family, 0,
317			       HSR_C_SET_NODE_STATUS);
318	if (!msg_head) {
319		res = -ENOMEM;
320		goto nla_put_failure;
321	}
322
323	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
324	if (res < 0)
325		goto nla_put_failure;
326
327	hsr = netdev_priv(hsr_dev);
328	res = hsr_get_node_data(hsr,
329				(unsigned char *)
330				nla_data(info->attrs[HSR_A_NODE_ADDR]),
331					 hsr_node_addr_b,
332					 &addr_b_ifindex,
333					 &hsr_node_if1_age,
334					 &hsr_node_if1_seq,
335					 &hsr_node_if2_age,
336					 &hsr_node_if2_seq);
337	if (res < 0)
338		goto nla_put_failure;
339
340	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
341		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
342	if (res < 0)
343		goto nla_put_failure;
344
345	if (addr_b_ifindex > -1) {
346		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
347			      hsr_node_addr_b);
348		if (res < 0)
349			goto nla_put_failure;
350
351		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
352				  addr_b_ifindex);
353		if (res < 0)
354			goto nla_put_failure;
355	}
356
357	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
358	if (res < 0)
359		goto nla_put_failure;
360	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
361	if (res < 0)
362		goto nla_put_failure;
363	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
364	if (port)
365		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
366				  port->dev->ifindex);
367	if (res < 0)
368		goto nla_put_failure;
369
370	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
371	if (res < 0)
372		goto nla_put_failure;
373	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
374	if (res < 0)
375		goto nla_put_failure;
376	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
377	if (port)
378		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
379				  port->dev->ifindex);
380	if (res < 0)
381		goto nla_put_failure;
382
383	rcu_read_unlock();
384
385	genlmsg_end(skb_out, msg_head);
386	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
387
388	return 0;
389
390rcu_unlock:
391	rcu_read_unlock();
392invalid:
393	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
394	return 0;
395
396nla_put_failure:
397	kfree_skb(skb_out);
398	/* Fall through */
399
400fail:
401	rcu_read_unlock();
402	return res;
403}
404
405/* Get a list of MacAddressA of all nodes known to this node (including self).
406 */
407static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
408{
409	unsigned char addr[ETH_ALEN];
410	struct net_device *hsr_dev;
411	struct sk_buff *skb_out;
412	struct hsr_priv *hsr;
413	bool restart = false;
414	struct nlattr *na;
415	void *pos = NULL;
416	void *msg_head;
417	int res;
418
419	if (!info)
420		goto invalid;
421
422	na = info->attrs[HSR_A_IFINDEX];
423	if (!na)
424		goto invalid;
425
426	rcu_read_lock();
427	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
428				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
429	if (!hsr_dev)
430		goto rcu_unlock;
431	if (!is_hsr_master(hsr_dev))
432		goto rcu_unlock;
433
434restart:
435	/* Send reply */
436	skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
437	if (!skb_out) {
438		res = -ENOMEM;
439		goto fail;
440	}
441
442	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
443			       info->snd_seq, &hsr_genl_family, 0,
444			       HSR_C_SET_NODE_LIST);
445	if (!msg_head) {
446		res = -ENOMEM;
447		goto nla_put_failure;
448	}
449
450	if (!restart) {
451		res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
452		if (res < 0)
453			goto nla_put_failure;
454	}
455
456	hsr = netdev_priv(hsr_dev);
457
458	if (!pos)
459		pos = hsr_get_next_node(hsr, NULL, addr);
460	while (pos) {
461		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
462		if (res < 0) {
463			if (res == -EMSGSIZE) {
464				genlmsg_end(skb_out, msg_head);
465				genlmsg_unicast(genl_info_net(info), skb_out,
466						info->snd_portid);
467				restart = true;
468				goto restart;
469			}
470			goto nla_put_failure;
471		}
472		pos = hsr_get_next_node(hsr, pos, addr);
473	}
474	rcu_read_unlock();
475
476	genlmsg_end(skb_out, msg_head);
477	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
478
479	return 0;
480
481rcu_unlock:
482	rcu_read_unlock();
483invalid:
484	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
485	return 0;
486
487nla_put_failure:
488	nlmsg_free(skb_out);
489	/* Fall through */
490
491fail:
492	rcu_read_unlock();
493	return res;
494}
495
496static const struct genl_small_ops hsr_ops[] = {
497	{
498		.cmd = HSR_C_GET_NODE_STATUS,
499		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
500		.flags = 0,
501		.doit = hsr_get_node_status,
502		.dumpit = NULL,
503	},
504	{
505		.cmd = HSR_C_GET_NODE_LIST,
506		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
507		.flags = 0,
508		.doit = hsr_get_node_list,
509		.dumpit = NULL,
510	},
511};
512
513static struct genl_family hsr_genl_family __ro_after_init = {
514	.hdrsize = 0,
515	.name = "HSR",
516	.version = 1,
517	.maxattr = HSR_A_MAX,
518	.policy = hsr_genl_policy,
519	.netnsok = true,
520	.module = THIS_MODULE,
521	.small_ops = hsr_ops,
522	.n_small_ops = ARRAY_SIZE(hsr_ops),
523	.resv_start_op = HSR_C_SET_NODE_LIST + 1,
524	.mcgrps = hsr_mcgrps,
525	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
526};
527
528int __init hsr_netlink_init(void)
529{
530	int rc;
531
532	rc = rtnl_link_register(&hsr_link_ops);
533	if (rc)
534		goto fail_rtnl_link_register;
535
536	rc = genl_register_family(&hsr_genl_family);
537	if (rc)
538		goto fail_genl_register_family;
539
540	hsr_debugfs_create_root();
541	return 0;
542
543fail_genl_register_family:
544	rtnl_link_unregister(&hsr_link_ops);
545fail_rtnl_link_register:
546
547	return rc;
548}
549
550void __exit hsr_netlink_exit(void)
551{
552	genl_unregister_family(&hsr_genl_family);
553	rtnl_link_unregister(&hsr_link_ops);
554}
555
556MODULE_ALIAS_RTNL_LINK("hsr");
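
Both versions also multicast unsolicited HSR_C_RING_ERROR and HSR_C_NODE_DOWN notifications on the "hsr-network" group (see hsr_nl_ringerror() and hsr_nl_nodedown() above). The sketch below, again an illustration rather than part of the kernel source, shows how a monitor could subscribe to that group; it makes the same assumptions as the previous example (libnl-genl-3 and the uapi header <linux/hsr_netlink.h>).

#include <stdio.h>
#include <linux/hsr_netlink.h>   /* HSR_C_*, HSR_A_* (assumed uapi header) */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

/* Print the node MAC carried by an HSR_C_RING_ERROR or HSR_C_NODE_DOWN
 * notification. The family's .hdrsize is 0 in the listing above, so
 * genlmsg_parse() is called with hdrlen 0.
 */
static int event_cb(struct nl_msg *msg, void *arg)
{
	struct genlmsghdr *ghdr = nlmsg_data(nlmsg_hdr(msg));
	struct nlattr *attrs[HSR_A_MAX + 1];

	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, HSR_A_MAX, NULL) < 0)
		return NL_SKIP;

	if (attrs[HSR_A_NODE_ADDR]) {
		unsigned char *a = nla_data(attrs[HSR_A_NODE_ADDR]);

		printf("%s: node %02x:%02x:%02x:%02x:%02x:%02x\n",
		       ghdr->cmd == HSR_C_RING_ERROR ? "ring error" :
		       ghdr->cmd == HSR_C_NODE_DOWN ? "node down" : "event",
		       a[0], a[1], a[2], a[3], a[4], a[5]);
	}
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	if (!sk || genl_connect(sk))
		return 1;

	/* Notifications are unsolicited, so disable sequence checking. */
	nl_socket_disable_seq_check(sk);
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, event_cb, NULL);

	grp = genl_ctrl_resolve_grp(sk, "HSR", "hsr-network");
	if (grp < 0 || nl_socket_add_membership(sk, grp))
		return 1;

	for (;;)
		nl_recvmsgs_default(sk);	/* blocks waiting for events */

	return 0;	/* not reached */
}

The family name "HSR", the group name "hsr-network", and the command and attribute values come straight from the listings; everything else in the sketch is ordinary userspace convention.
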