// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *      2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Routines for handling Netlink messages for HSR and PRP.
 */

#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
        [IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
        [IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
        [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
        [IFLA_HSR_VERSION] = { .type = NLA_U8 },
        [IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
        [IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
        [IFLA_HSR_PROTOCOL] = { .type = NLA_U8 },
        [IFLA_HSR_INTERLINK] = { .type = NLA_U32 },
};
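
/* The IFLA_HSR_* attributes above arrive nested inside IFLA_LINKINFO /
 * IFLA_INFO_DATA of an RTM_NEWLINK request for a link of kind "hsr";
 * rtnetlink validates them against this policy before hsr_newlink() is
 * called.
 */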

/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
 */
static int hsr_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[],
                       struct netlink_ext_ack *extack)
{
        struct net_device *link[2], *interlink = NULL;
        enum hsr_version proto_version;
        unsigned char multicast_spec;
        u8 proto = HSR_PROTOCOL_HSR;

        if (!data) {
                NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
                return -EINVAL;
        }
        if (!data[IFLA_HSR_SLAVE1]) {
                NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
                return -EINVAL;
        }
        link[0] = __dev_get_by_index(src_net,
                                     nla_get_u32(data[IFLA_HSR_SLAVE1]));
        if (!link[0]) {
                NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
                return -EINVAL;
        }
        if (!data[IFLA_HSR_SLAVE2]) {
                NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
                return -EINVAL;
        }
        link[1] = __dev_get_by_index(src_net,
                                     nla_get_u32(data[IFLA_HSR_SLAVE2]));
        if (!link[1]) {
                NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
                return -EINVAL;
        }

        if (link[0] == link[1]) {
                NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
                return -EINVAL;
        }

        if (data[IFLA_HSR_INTERLINK])
                interlink = __dev_get_by_index(src_net,
                                               nla_get_u32(data[IFLA_HSR_INTERLINK]));

        if (interlink && interlink == link[0]) {
                NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave1 are the same");
                return -EINVAL;
        }

        if (interlink && interlink == link[1]) {
                NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave2 are the same");
                return -EINVAL;
        }

        multicast_spec = nla_get_u8_default(data[IFLA_HSR_MULTICAST_SPEC], 0);

        if (data[IFLA_HSR_PROTOCOL])
                proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);

        if (proto >= HSR_PROTOCOL_MAX) {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
                return -EINVAL;
        }

        if (!data[IFLA_HSR_VERSION]) {
                proto_version = HSR_V0;
        } else {
                if (proto == HSR_PROTOCOL_PRP) {
                        NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
                        return -EINVAL;
                }

                proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
                if (proto_version > HSR_V1) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Only HSR version 0/1 supported");
                        return -EINVAL;
                }
        }

        if (proto == HSR_PROTOCOL_PRP) {
                proto_version = PRP_V1;
                if (interlink) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Interlink only works with HSR");
                        return -EINVAL;
                }
        }

        return hsr_dev_finalize(dev, link, interlink, multicast_spec,
                                proto_version, extack);
}
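
/* Usage note (illustrative, not part of the upstream file): this newlink path
 * is what iproute2 exercises when an HSR or PRP device is created. With a
 * reasonably recent iproute2 the commands look roughly like:
 *
 *   ip link add name hsr0 type hsr slave1 eth0 slave2 eth1 supervision 45 version 1
 *   ip link add name prp0 type hsr slave1 eth0 slave2 eth1 proto 1
 *
 * where "supervision" maps to IFLA_HSR_MULTICAST_SPEC, "version" to
 * IFLA_HSR_VERSION and "proto" to IFLA_HSR_PROTOCOL (0 = HSR, 1 = PRP).
 * Exact keyword support depends on the iproute2 version.
 */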

static void hsr_dellink(struct net_device *dev, struct list_head *head)
{
        struct hsr_priv *hsr = netdev_priv(dev);

        timer_delete_sync(&hsr->prune_timer);
        timer_delete_sync(&hsr->prune_proxy_timer);
        timer_delete_sync(&hsr->announce_timer);
        timer_delete_sync(&hsr->announce_proxy_timer);

        hsr_debugfs_term(hsr);
        hsr_del_ports(hsr);

        hsr_del_self_node(hsr);
        hsr_del_nodes(&hsr->node_db);
        hsr_del_nodes(&hsr->proxy_node_db);

        unregister_netdevice_queue(dev, head);
}
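
/* Note on the teardown order above: the periodic timers are stopped first so
 * that no prune/announce work runs against a device that is going away, then
 * the slave/interlink ports and node tables are released, and only then is
 * the master device queued for unregistration.
 */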

static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct hsr_priv *hsr = netdev_priv(dev);
        u8 proto = HSR_PROTOCOL_HSR;
        struct hsr_port *port;

        port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
        if (port) {
                if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
                        goto nla_put_failure;
        }

        port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
        if (port) {
                if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
                        goto nla_put_failure;
        }

        if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
                    hsr->sup_multicast_addr) ||
            nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
                goto nla_put_failure;
        if (hsr->prot_version == PRP_V1)
                proto = HSR_PROTOCOL_PRP;
        if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
                goto nla_put_failure;

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
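
/* The attributes emitted by hsr_fill_info() end up in the IFLA_INFO_DATA part
 * of RTM_NEWLINK dumps, i.e. they are what "ip -d link show hsr0" decodes and
 * prints for an HSR/PRP device (the exact output format is up to iproute2).
 */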

static struct rtnl_link_ops hsr_link_ops __read_mostly = {
        .kind           = "hsr",
        .maxtype        = IFLA_HSR_MAX,
        .policy         = hsr_policy,
        .priv_size      = sizeof(struct hsr_priv),
        .setup          = hsr_dev_setup,
        .newlink        = hsr_newlink,
        .dellink        = hsr_dellink,
        .fill_info      = hsr_fill_info,
};

/* attribute policy */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
        [HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
        [HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
        [HSR_A_IFINDEX] = { .type = NLA_U32 },
        [HSR_A_IF1_AGE] = { .type = NLA_U32 },
        [HSR_A_IF2_AGE] = { .type = NLA_U32 },
        [HSR_A_IF1_SEQ] = { .type = NLA_U16 },
        [HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};

static struct genl_family hsr_genl_family;

static const struct genl_multicast_group hsr_mcgrps[] = {
        { .name = "hsr-network", },
};
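
/* Illustrative only (not part of the upstream file): a userspace process can
 * subscribe to the "hsr-network" multicast group to receive the
 * HSR_C_RING_ERROR and HSR_C_NODE_DOWN notifications sent below. A minimal
 * sketch using libnl-3, error handling omitted:
 *
 *   struct nl_sock *sk = nl_socket_alloc();
 *   genl_connect(sk);
 *   int grp = genl_ctrl_resolve_grp(sk, "HSR", "hsr-network");
 *   nl_socket_add_membership(sk, grp);
 *   nl_recvmsgs_default(sk);   -- blocks until a notification arrives
 */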

/* This is called when, for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
                      struct hsr_port *port)
{
        struct sk_buff *skb;
        void *msg_head;
        struct hsr_port *master;
        int res;

        skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
        if (!skb)
                goto fail;

        msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
                               HSR_C_RING_ERROR);
        if (!msg_head)
                goto nla_put_failure;

        res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
        if (res < 0)
                goto nla_put_failure;

        res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
        if (res < 0)
                goto nla_put_failure;

        genlmsg_end(skb, msg_head);
        genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

        return;

nla_put_failure:
        kfree_skb(skb);

fail:
        rcu_read_lock();
        master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
        netdev_warn(master->dev, "Could not send HSR ring error message\n");
        rcu_read_unlock();
}

/* This is called when we haven't heard from the node with MAC address addr for
 * some time (just before the node is removed from the node table/list).
 */
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
        struct sk_buff *skb;
        void *msg_head;
        struct hsr_port *master;
        int res;

        skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
        if (!skb)
                goto fail;

        msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
        if (!msg_head)
                goto nla_put_failure;

        res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
        if (res < 0)
                goto nla_put_failure;

        genlmsg_end(skb, msg_head);
        genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

        return;

nla_put_failure:
        kfree_skb(skb);

fail:
        rcu_read_lock();
        master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
        netdev_warn(master->dev, "Could not send HSR node down\n");
        rcu_read_unlock();
}

/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *         age of latest frame from node over slave 1, slave 2 [ms]
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
        /* For receiving */
        struct nlattr *na;
        struct net_device *hsr_dev;

        /* For sending */
        struct sk_buff *skb_out;
        void *msg_head;
        struct hsr_priv *hsr;
        struct hsr_port *port;
        unsigned char hsr_node_addr_b[ETH_ALEN];
        int hsr_node_if1_age;
        u16 hsr_node_if1_seq;
        int hsr_node_if2_age;
        u16 hsr_node_if2_seq;
        int addr_b_ifindex;
        int res;

        if (!info)
                goto invalid;

        na = info->attrs[HSR_A_IFINDEX];
        if (!na)
                goto invalid;
        na = info->attrs[HSR_A_NODE_ADDR];
        if (!na)
                goto invalid;

        rcu_read_lock();
        hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
                                       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
        if (!hsr_dev)
                goto rcu_unlock;
        if (!is_hsr_master(hsr_dev))
                goto rcu_unlock;

        /* Send reply */
        skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
        if (!skb_out) {
                res = -ENOMEM;
                goto fail;
        }

        msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
                               info->snd_seq, &hsr_genl_family, 0,
                               HSR_C_SET_NODE_STATUS);
        if (!msg_head) {
                res = -ENOMEM;
                goto nla_put_failure;
        }

        res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
        if (res < 0)
                goto nla_put_failure;

        hsr = netdev_priv(hsr_dev);
        res = hsr_get_node_data(hsr,
                                (unsigned char *)
                                nla_data(info->attrs[HSR_A_NODE_ADDR]),
                                hsr_node_addr_b,
                                &addr_b_ifindex,
                                &hsr_node_if1_age,
                                &hsr_node_if1_seq,
                                &hsr_node_if2_age,
                                &hsr_node_if2_seq);
        if (res < 0)
                goto nla_put_failure;

        res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
                      nla_data(info->attrs[HSR_A_NODE_ADDR]));
        if (res < 0)
                goto nla_put_failure;

        if (addr_b_ifindex > -1) {
                res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
                              hsr_node_addr_b);
                if (res < 0)
                        goto nla_put_failure;

                res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
                                  addr_b_ifindex);
                if (res < 0)
                        goto nla_put_failure;
        }

        res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
        if (res < 0)
                goto nla_put_failure;
        res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
        if (res < 0)
                goto nla_put_failure;
        port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
        if (port)
                res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
                                  port->dev->ifindex);
        if (res < 0)
                goto nla_put_failure;

        res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
        if (res < 0)
                goto nla_put_failure;
        res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
        if (res < 0)
                goto nla_put_failure;
        port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
        if (port)
                res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
                                  port->dev->ifindex);
        if (res < 0)
                goto nla_put_failure;

        rcu_read_unlock();

        genlmsg_end(skb_out, msg_head);
        genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

        return 0;

rcu_unlock:
        rcu_read_unlock();
invalid:
        netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
        return 0;

nla_put_failure:
        kfree_skb(skb_out);
        /* Fall through */

fail:
        rcu_read_unlock();
        return res;
}
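
/* Illustrative only (not part of the upstream file): a userspace request for
 * the command above could be built with libnl-3 roughly as follows; "fam" is
 * the numeric family id resolved with genl_ctrl_resolve(sk, "HSR"), "ifindex"
 * the hsr device and "mac" the 6-byte MacAddressA of the queried node. The
 * reply arrives as an HSR_C_SET_NODE_STATUS message.
 *
 *   struct nl_msg *msg = nlmsg_alloc();
 *   genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *               HSR_C_GET_NODE_STATUS, 1);
 *   nla_put_u32(msg, HSR_A_IFINDEX, ifindex);
 *   nla_put(msg, HSR_A_NODE_ADDR, ETH_ALEN, mac);
 *   nl_send_auto(sk, msg);
 */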

/* Get a list of MacAddressA of all nodes known to this node (including self).
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
        unsigned char addr[ETH_ALEN];
        struct net_device *hsr_dev;
        struct sk_buff *skb_out;
        struct hsr_priv *hsr;
        bool restart = false;
        struct nlattr *na;
        void *pos = NULL;
        void *msg_head;
        int res;

        if (!info)
                goto invalid;

        na = info->attrs[HSR_A_IFINDEX];
        if (!na)
                goto invalid;

        rcu_read_lock();
        hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
                                       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
        if (!hsr_dev)
                goto rcu_unlock;
        if (!is_hsr_master(hsr_dev))
                goto rcu_unlock;

restart:
        /* Send reply */
        skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb_out) {
                res = -ENOMEM;
                goto fail;
        }

        msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
                               info->snd_seq, &hsr_genl_family, 0,
                               HSR_C_SET_NODE_LIST);
        if (!msg_head) {
                res = -ENOMEM;
                goto nla_put_failure;
        }

        if (!restart) {
                res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
                if (res < 0)
                        goto nla_put_failure;
        }

        hsr = netdev_priv(hsr_dev);

        if (!pos)
                pos = hsr_get_next_node(hsr, NULL, addr);
        while (pos) {
                res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
                if (res < 0) {
                        if (res == -EMSGSIZE) {
                                genlmsg_end(skb_out, msg_head);
                                genlmsg_unicast(genl_info_net(info), skb_out,
                                                info->snd_portid);
                                restart = true;
                                goto restart;
                        }
                        goto nla_put_failure;
                }
                pos = hsr_get_next_node(hsr, pos, addr);
        }
        rcu_read_unlock();

        genlmsg_end(skb_out, msg_head);
        genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

        return 0;

rcu_unlock:
        rcu_read_unlock();
invalid:
        netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
        return 0;

nla_put_failure:
        nlmsg_free(skb_out);
        /* Fall through */

fail:
        rcu_read_unlock();
        return res;
}
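
/* Note on the restart handling above: when the node table no longer fits in a
 * single GENLMSG_DEFAULT_SIZE buffer, the partially filled message is ended
 * and unicast to the requester, and the dump continues in a fresh
 * HSR_C_SET_NODE_LIST message from the current position. Userspace should
 * therefore be prepared to receive the node list split across several
 * messages; only the first one carries HSR_A_IFINDEX.
 */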

static const struct genl_small_ops hsr_ops[] = {
        {
                .cmd = HSR_C_GET_NODE_STATUS,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .flags = 0,
                .doit = hsr_get_node_status,
                .dumpit = NULL,
        },
        {
                .cmd = HSR_C_GET_NODE_LIST,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .flags = 0,
                .doit = hsr_get_node_list,
                .dumpit = NULL,
        },
};

static struct genl_family hsr_genl_family __ro_after_init = {
        .hdrsize = 0,
        .name = "HSR",
        .version = 1,
        .maxattr = HSR_A_MAX,
        .policy = hsr_genl_policy,
        .netnsok = true,
        .module = THIS_MODULE,
        .small_ops = hsr_ops,
        .n_small_ops = ARRAY_SIZE(hsr_ops),
        .resv_start_op = HSR_C_SET_NODE_LIST + 1,
        .mcgrps = hsr_mcgrps,
        .n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};

int __init hsr_netlink_init(void)
{
        int rc;

        rc = rtnl_link_register(&hsr_link_ops);
        if (rc)
                goto fail_rtnl_link_register;

        rc = genl_register_family(&hsr_genl_family);
        if (rc)
                goto fail_genl_register_family;

        hsr_debugfs_create_root();
        return 0;

fail_genl_register_family:
        rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:

        return rc;
}

void __exit hsr_netlink_exit(void)
{
        genl_unregister_family(&hsr_genl_family);
        rtnl_link_unregister(&hsr_link_ops);
}

MODULE_ALIAS_RTNL_LINK("hsr");