Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * IEEE 802.1Q Multiple Registration Protocol (MRP)
4 *
5 * Copyright (c) 2012 Massachusetts Institute of Technology
6 *
7 * Adapted from code in net/802/garp.c
8 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
9 */
10#include <linux/kernel.h>
11#include <linux/timer.h>
12#include <linux/skbuff.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/rtnetlink.h>
16#include <linux/slab.h>
17#include <linux/module.h>
18#include <net/mrp.h>
19#include <asm/unaligned.h>
20
21static unsigned int mrp_join_time __read_mostly = 200;
22module_param(mrp_join_time, uint, 0644);
23MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
24
25static unsigned int mrp_periodic_time __read_mostly = 1000;
26module_param(mrp_periodic_time, uint, 0644);
27MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");
28
29MODULE_LICENSE("GPL");
30
/* Applicant state machine transition table, indexed by
 * [current state][event], per IEEE 802.1Q-2011 Table 10-3.
 * State/event combinations not listed are zero-initialized, which
 * mrp_attr_event() treats as MRP_APPLICANT_INVALID (assumes
 * MRP_APPLICANT_INVALID == 0 in net/mrp.h -- confirm) and rejects
 * with a WARN.
 */
static const u8
mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
	[MRP_APPLICANT_VO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VO,
	},
	[MRP_APPLICANT_VP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VP,
	},
	[MRP_APPLICANT_VN] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AN,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VN,
	},
	[MRP_APPLICANT_AN] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_AN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AN,
	},
	[MRP_APPLICANT_AA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_QA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_LA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_LA,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_LA,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_LA,
	},
	[MRP_APPLICANT_AO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AO,
	},
	[MRP_APPLICANT_QO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_QO,
	},
	[MRP_APPLICANT_AP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
	},
	[MRP_APPLICANT_QP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QP,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
	},
};
199
/* Transmit action to perform for an attribute in a given applicant
 * state when a transmit opportunity (TX event) occurs.  The
 * "optional" actions are treated as no-ops by mrp_attr_event().
 */
static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
	[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
	[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
	[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};
214
215static void mrp_attrvalue_inc(void *value, u8 len)
216{
217 u8 *v = (u8 *)value;
218
219 /* Add 1 to the last byte. If it becomes zero,
220 * go to the previous byte and repeat.
221 */
222 while (len > 0 && !++v[--len])
223 ;
224}
225
226static int mrp_attr_cmp(const struct mrp_attr *attr,
227 const void *value, u8 len, u8 type)
228{
229 if (attr->type != type)
230 return attr->type - type;
231 if (attr->len != len)
232 return attr->len - len;
233 return memcmp(attr->value, value, len);
234}
235
236static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
237 const void *value, u8 len, u8 type)
238{
239 struct rb_node *parent = app->mad.rb_node;
240 struct mrp_attr *attr;
241 int d;
242
243 while (parent) {
244 attr = rb_entry(parent, struct mrp_attr, node);
245 d = mrp_attr_cmp(attr, value, len, type);
246 if (d > 0)
247 parent = parent->rb_left;
248 else if (d < 0)
249 parent = parent->rb_right;
250 else
251 return attr;
252 }
253 return NULL;
254}
255
/* Insert a new attribute into the applicant's MAD rb-tree, keyed by
 * (type, len, value) via mrp_attr_cmp().  If an equal attribute is
 * already present it is returned instead of allocating a duplicate.
 * Returns NULL on allocation failure (GFP_ATOMIC: callers hold
 * app->lock).  New attributes start in the Very anxious Observer
 * (VO) state.
 */
static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = NULL, **p = &app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (*p) {
		parent = *p;
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			p = &parent->rb_left;
		else if (d < 0)
			p = &parent->rb_right;
		else {
			/* The attribute already exists; re-use it. */
			return attr;
		}
	}
	attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
	if (!attr)
		return attr;
	attr->state = MRP_APPLICANT_VO;
	attr->type = type;
	attr->len = len;
	memcpy(attr->value, value, len);

	rb_link_node(&attr->node, parent, p);
	rb_insert_color(&attr->node, &app->mad);
	return attr;
}
288
/* Unlink an attribute from the MAD rb-tree and free it.  Caller must
 * hold app->lock.
 */
static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
	rb_erase(&attr->node, &app->mad);
	kfree(attr);
}
294
295static void mrp_attr_destroy_all(struct mrp_applicant *app)
296{
297 struct rb_node *node, *next;
298 struct mrp_attr *attr;
299
300 for (node = rb_first(&app->mad);
301 next = node ? rb_next(node) : NULL, node != NULL;
302 node = next) {
303 attr = rb_entry(node, struct mrp_attr, node);
304 mrp_attr_destroy(app, attr);
305 }
306}
307
/* Allocate and initialize a fresh, empty MRPDU for this applicant.
 * The skb is sized to the device MTU (plus link-layer headroom) and
 * seeded with just the protocol version header; it is stored in
 * app->pdu for the append helpers to fill in.  Returns 0 or -ENOMEM.
 */
static int mrp_pdu_init(struct mrp_applicant *app)
{
	struct sk_buff *skb;
	struct mrp_pdu_hdr *ph;

	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
			GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->dev = app->dev;
	skb->protocol = app->app->pkttype.type;
	skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	/* An MRPDU begins with the application's ProtocolVersion. */
	ph = __skb_put(skb, sizeof(*ph));
	ph->version = app->app->version;

	app->pdu = skb;
	return 0;
}
330
331static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
332{
333 __be16 *endmark;
334
335 if (skb_tailroom(app->pdu) < sizeof(*endmark))
336 return -1;
337 endmark = __skb_put(app->pdu, sizeof(*endmark));
338 put_unaligned(MRP_END_MARK, endmark);
339 return 0;
340}
341
/* Finalize the PDU under construction and move it to the transmit
 * queue.  If a Message is still open (cb->mh set) it is first closed
 * with an EndMark; a second EndMark then terminates the PDU itself,
 * so two consecutive EndMarks may legitimately be appended here.
 * The link-layer header is built towards the application's group
 * destination address.  Caller must hold app->lock.
 */
static void mrp_pdu_queue(struct mrp_applicant *app)
{
	if (!app->pdu)
		return;

	if (mrp_cb(app->pdu)->mh)
		mrp_pdu_append_end_mark(app);
	mrp_pdu_append_end_mark(app);

	dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
			app->app->group_address, app->dev->dev_addr,
			app->pdu->len);

	skb_queue_tail(&app->queue, app->pdu);
	app->pdu = NULL;
}
358
359static void mrp_queue_xmit(struct mrp_applicant *app)
360{
361 struct sk_buff *skb;
362
363 while ((skb = skb_dequeue(&app->queue)))
364 dev_queue_xmit(skb);
365}
366
/* Open a new Message in the PDU for attributes of the given type and
 * length.  Any previously open Message is first closed with an
 * EndMark and its cached header pointers are dropped.  Returns 0 on
 * success, -1 when the PDU is out of room (the caller queues the PDU
 * and retries on a fresh one).
 */
static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
				  u8 attrtype, u8 attrlen)
{
	struct mrp_msg_hdr *mh;

	if (mrp_cb(app->pdu)->mh) {
		if (mrp_pdu_append_end_mark(app) < 0)
			return -1;
		mrp_cb(app->pdu)->mh = NULL;
		mrp_cb(app->pdu)->vah = NULL;
	}

	if (skb_tailroom(app->pdu) < sizeof(*mh))
		return -1;
	mh = __skb_put(app->pdu, sizeof(*mh));
	mh->attrtype = attrtype;
	mh->attrlen = attrlen;
	mrp_cb(app->pdu)->mh = mh;
	return 0;
}
387
/* Open a new VectorAttribute in the current Message, recording the
 * first attribute value it covers.  lenflags starts at zero and is
 * incremented as events are packed in; a running copy of the next
 * expected attribute value is kept in the skb control block so
 * consecutive values can share one vector.  Returns -1 when the PDU
 * has no room.
 */
static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
				      const void *firstattrvalue, u8 attrlen)
{
	struct mrp_vecattr_hdr *vah;

	if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
		return -1;
	vah = __skb_put(app->pdu, sizeof(*vah) + attrlen);
	put_unaligned(0, &vah->lenflags);
	memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
	mrp_cb(app->pdu)->vah = vah;
	memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
	return 0;
}
402
/* Encode one attribute event into the PDU under construction, opening
 * Message and VectorAttribute structures as needed.  Whenever a step
 * runs out of room, the current PDU is queued for transmission and
 * encoding restarts from scratch on a fresh PDU (the "goto queue /
 * goto again" loop).  Returns 0, or a negative errno if a new PDU
 * cannot be allocated.  Caller must hold app->lock.
 */
static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
					const struct mrp_attr *attr,
					enum mrp_vecattr_event vaevent)
{
	u16 len, pos;
	u8 *vaevents;
	int err;
again:
	if (!app->pdu) {
		err = mrp_pdu_init(app);
		if (err < 0)
			return err;
	}

	/* If there is no Message header in the PDU, or the Message header is
	 * for a different attribute type, add an EndMark (if necessary) and a
	 * new Message header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->mh ||
	    mrp_cb(app->pdu)->mh->attrtype != attr->type ||
	    mrp_cb(app->pdu)->mh->attrlen != attr->len) {
		if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
			goto queue;
	}

	/* If there is no VectorAttribute header for this Message in the PDU,
	 * or this attribute's value does not sequentially follow the previous
	 * attribute's value, add a new VectorAttribute header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->vah ||
	    memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
		if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
			goto queue;
	}

	len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
	pos = len % 3;

	/* Events are packed into Vectors in the PDU, three to a byte. Add a
	 * byte to the end of the Vector if necessary.
	 */
	if (!pos) {
		if (skb_tailroom(app->pdu) < sizeof(u8))
			goto queue;
		vaevents = __skb_put(app->pdu, sizeof(u8));
	} else {
		vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
	}

	/* Pack the event into its slot: first event scaled by 6*6, second
	 * by 6, third added directly (base-6 packing, 6 == event count).
	 */
	switch (pos) {
	case 0:
		*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
				       __MRP_VECATTR_EVENT_MAX);
		break;
	case 1:
		*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
		break;
	case 2:
		*vaevents += vaevent;
		break;
	default:
		WARN_ON(1);
	}

	/* Increment the length of the VectorAttribute in the PDU, as well as
	 * the value of the next attribute that would continue its Vector.
	 */
	put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
	mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);

	return 0;

queue:
	mrp_pdu_queue(app);
	goto again;
}
479
/* Drive one attribute through the applicant state machine.  For TX
 * events the transmit action of the current state is performed
 * before the transition; if appending to the PDU fails the state is
 * deliberately left unchanged so the attribute is retried at the
 * next TX opportunity.  Sending a leave destroys the attribute
 * outright: a pure applicant keeps no registrar state, so after the
 * leave message there is nothing left to track.  Caller must hold
 * app->lock.
 */
static void mrp_attr_event(struct mrp_applicant *app,
			   struct mrp_attr *attr, enum mrp_event event)
{
	enum mrp_applicant_state state;

	state = mrp_applicant_state_table[attr->state][event];
	if (state == MRP_APPLICANT_INVALID) {
		WARN_ON(1);
		return;
	}

	if (event == MRP_EVENT_TX) {
		/* When appending the attribute fails, don't update its state
		 * in order to retry at the next TX event.
		 */

		switch (mrp_tx_action_table[attr->state]) {
		case MRP_TX_ACTION_NONE:
		case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
		case MRP_TX_ACTION_S_IN_OPTIONAL:
			break;
		case MRP_TX_ACTION_S_NEW:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_NEW) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_JOIN_IN:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_LV:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_LV) < 0)
				return;
			/* As a pure applicant, sending a leave message
			 * implies that the attribute was unregistered and
			 * can be destroyed.
			 */
			mrp_attr_destroy(app, attr);
			return;
		default:
			WARN_ON(1);
		}
	}

	attr->state = state;
}
528
/* Declare (join) an attribute on behalf of an application, creating
 * it in the MAD tree if it does not exist yet.  The attribute value
 * must fit in the skb control buffer alongside struct mrp_skb_cb,
 * since PDU encoding keeps a running value copy there.  Returns 0 or
 * -ENOMEM.  Must be called with RTNL held (uses rtnl_dereference()).
 */
int mrp_request_join(const struct net_device *dev,
		     const struct mrp_application *appl,
		     const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    sizeof_field(struct sk_buff, cb))
		return -ENOMEM;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_create(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return -ENOMEM;
	}
	mrp_attr_event(app, attr, MRP_EVENT_JOIN);
	spin_unlock_bh(&app->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_request_join);
553
/* Withdraw (leave) a previously joined attribute.  Silently does
 * nothing if the value is oversized or the attribute was never
 * declared.  Must be called with RTNL held.
 */
void mrp_request_leave(const struct net_device *dev,
		       const struct mrp_application *appl,
		       const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    sizeof_field(struct sk_buff, cb))
		return;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_lookup(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return;
	}
	mrp_attr_event(app, attr, MRP_EVENT_LV);
	spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(mrp_request_leave);
577
/* Deliver one event to every attribute in the MAD tree.  The
 * successor node is fetched before the event is delivered, because
 * mrp_attr_event() may destroy the current attribute (TX of a
 * leave).  Caller must hold app->lock.
 */
static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
{
	struct rb_node *node, *next;
	struct mrp_attr *attr;

	for (node = rb_first(&app->mad);
	     next = node ? rb_next(node) : NULL, node != NULL;
	     node = next) {
		attr = rb_entry(node, struct mrp_attr, node);
		mrp_attr_event(app, attr, event);
	}
}
590
591static void mrp_join_timer_arm(struct mrp_applicant *app)
592{
593 unsigned long delay;
594
595 delay = get_random_u32_below(msecs_to_jiffies(mrp_join_time));
596 mod_timer(&app->join_timer, jiffies + delay);
597}
598
/* Join timer handler: give every attribute a transmit opportunity,
 * queue the resulting PDU, and transmit outside the lock (because
 * dev_queue_xmit() must not run under it).  The timer is only
 * re-armed while the applicant is still active, so that
 * mrp_uninit_applicant() can shut it down without it re-arming
 * underneath.
 */
static void mrp_join_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, join_timer);

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_queue_xmit(app);
	spin_lock(&app->lock);
	if (likely(app->active))
		mrp_join_timer_arm(app);
	spin_unlock(&app->lock);
}
614
/* (Re)arm the periodic timer, mrp_periodic_time ms from now. */
static void mrp_periodic_timer_arm(struct mrp_applicant *app)
{
	mod_timer(&app->periodic_timer,
		  jiffies + msecs_to_jiffies(mrp_periodic_time));
}
620
/* Periodic timer handler: deliver a PERIODIC event to all attributes
 * and queue any resulting PDU.  Note the PDU is only queued here,
 * not transmitted; the join timer's mrp_queue_xmit() sends it.  The
 * timer re-arms only while the applicant is active (see
 * mrp_uninit_applicant()).
 */
static void mrp_periodic_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, periodic_timer);

	spin_lock(&app->lock);
	if (likely(app->active)) {
		mrp_mad_event(app, MRP_EVENT_PERIODIC);
		mrp_pdu_queue(app);
		mrp_periodic_timer_arm(app);
	}
	spin_unlock(&app->lock);
}
633
/* Check for an EndMark at *offset.  Returns -1 both when the PDU is
 * exhausted (short copy) and when an EndMark is present -- consuming
 * it in the latter case -- so either way the caller stops parsing
 * the current structure.  Returns 0 when ordinary data follows.
 */
static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
	__be16 endmark;

	if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
		return -1;
	if (endmark == MRP_END_MARK) {
		*offset += sizeof(endmark);
		return -1;
	}
	return 0;
}
646
/* Translate one received vector event -- for the attribute value
 * currently held in the skb control block -- into the corresponding
 * R_* event and feed it to the applicant state machine.  Events for
 * attributes we never declared are ignored, as are unknown event
 * codes (including LeaveAll, which is handled separately via the
 * vector header flag).
 */
static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
					struct sk_buff *skb,
					enum mrp_vecattr_event vaevent)
{
	struct mrp_attr *attr;
	enum mrp_event event;

	attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
			       mrp_cb(skb)->mh->attrlen,
			       mrp_cb(skb)->mh->attrtype);
	if (attr == NULL)
		return;

	switch (vaevent) {
	case MRP_VECATTR_EVENT_NEW:
		event = MRP_EVENT_R_NEW;
		break;
	case MRP_VECATTR_EVENT_JOIN_IN:
		event = MRP_EVENT_R_JOIN_IN;
		break;
	case MRP_VECATTR_EVENT_IN:
		event = MRP_EVENT_R_IN;
		break;
	case MRP_VECATTR_EVENT_JOIN_MT:
		event = MRP_EVENT_R_JOIN_MT;
		break;
	case MRP_VECATTR_EVENT_MT:
		event = MRP_EVENT_R_MT;
		break;
	case MRP_VECATTR_EVENT_LV:
		event = MRP_EVENT_R_LV;
		break;
	default:
		return;
	}

	mrp_attr_event(app, attr, event);
}
685
/* Parse one VectorAttribute structure at *offset.  A set LeaveAll
 * flag delivers R_LA to every declared attribute before the vector
 * is walked.  Returns -1 on truncated or malformed input, which
 * aborts parsing of the enclosing Message.
 */
static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
				 struct sk_buff *skb, int *offset)
{
	struct mrp_vecattr_hdr _vah;
	u16 valen;
	u8 vaevents, vaevent;

	mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
					      &_vah);
	if (!mrp_cb(skb)->vah)
		return -1;
	*offset += sizeof(_vah);

	if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
	    MRP_VECATTR_HDR_FLAG_LA)
		mrp_mad_event(app, MRP_EVENT_R_LA);
	valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
			    MRP_VECATTR_HDR_LEN_MASK);

	/* The VectorAttribute structure in a PDU carries event information
	 * about one or more attributes having consecutive values. Only the
	 * value for the first attribute is contained in the structure. So
	 * we make a copy of that value, and then increment it each time we
	 * advance to the next event in its Vector.
	 */
	if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
	    sizeof_field(struct sk_buff, cb))
		return -1;
	if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
			  mrp_cb(skb)->mh->attrlen) < 0)
		return -1;
	*offset += mrp_cb(skb)->mh->attrlen;

	/* In a VectorAttribute, the Vector contains events which are packed
	 * three to a byte. We process one byte of the Vector at a time.
	 */
	while (valen > 0) {
		if (skb_copy_bits(skb, *offset, &vaevents,
				  sizeof(vaevents)) < 0)
			return -1;
		*offset += sizeof(vaevents);

		/* Extract and process the first event. */
		vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
				      __MRP_VECATTR_EVENT_MAX);
		if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
			/* The byte is malformed; stop processing. */
			return -1;
		}
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the second event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= (__MRP_VECATTR_EVENT_MAX *
			     __MRP_VECATTR_EVENT_MAX);
		vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the third event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= __MRP_VECATTR_EVENT_MAX;
		vaevent = vaevents;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
	}
	return 0;
}
758
/* Parse one Message: validate its header (attribute type within the
 * application's range, non-zero attribute length), then consume
 * VectorAttributes until an EndMark or the end of the PDU.  Returns
 * -1 on malformed input.
 */
static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
			     int *offset)
{
	struct mrp_msg_hdr _mh;

	mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
	if (!mrp_cb(skb)->mh)
		return -1;
	*offset += sizeof(_mh);

	if (mrp_cb(skb)->mh->attrtype == 0 ||
	    mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
	    mrp_cb(skb)->mh->attrlen == 0)
		return -1;

	while (skb->len > *offset) {
		if (mrp_pdu_parse_end_mark(skb, offset) < 0)
			break;
		if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
			return -1;
	}
	return 0;
}
782
/* Receive handler for MRP PDUs (registered via the application's
 * packet_type).  Locates the applicant for this device/application,
 * checks the protocol version, and walks the Messages in the PDU
 * under app->lock.  The skb is always consumed.  Runs under RCU
 * (uses rcu_dereference() on the port and applicant).
 */
static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct mrp_application *appl = container_of(pt, struct mrp_application,
						    pkttype);
	struct mrp_port *port;
	struct mrp_applicant *app;
	struct mrp_pdu_hdr _ph;
	const struct mrp_pdu_hdr *ph;
	int offset = skb_network_offset(skb);

	/* If the interface is in promiscuous mode, drop the packet if
	 * it was unicast to another host.
	 */
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
		goto out;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto out;
	port = rcu_dereference(dev->mrp_port);
	if (unlikely(!port))
		goto out;
	app = rcu_dereference(port->applicants[appl->type]);
	if (unlikely(!app))
		goto out;

	ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
	if (!ph)
		goto out;
	offset += sizeof(_ph);

	if (ph->version != app->app->version)
		goto out;

	spin_lock(&app->lock);
	while (skb->len > offset) {
		if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
			break;
		if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
			break;
	}
	spin_unlock(&app->lock);
out:
	kfree_skb(skb);
	return 0;
}
829
830static int mrp_init_port(struct net_device *dev)
831{
832 struct mrp_port *port;
833
834 port = kzalloc(sizeof(*port), GFP_KERNEL);
835 if (!port)
836 return -ENOMEM;
837 rcu_assign_pointer(dev->mrp_port, port);
838 return 0;
839}
840
/* Drop the per-device MRP port, but only once no applicants remain
 * attached to it.  Freed via RCU so concurrent readers in mrp_rcv()
 * stay safe.  Called with RTNL held.
 */
static void mrp_release_port(struct net_device *dev)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	unsigned int i;

	for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
		if (rtnl_dereference(port->applicants[i]))
			return;
	}
	RCU_INIT_POINTER(dev->mrp_port, NULL);
	kfree_rcu(port, rcu);
}
853
/* Attach an MRP application instance to a device: lazily create the
 * per-device port, allocate the applicant, subscribe to the group
 * multicast address and start the join and periodic timers.  On
 * failure the port is released again if this applicant was its only
 * prospective user.  Must be called with RTNL held.
 */
int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_applicant *app;
	int err;

	ASSERT_RTNL();

	if (!rtnl_dereference(dev->mrp_port)) {
		err = mrp_init_port(dev);
		if (err < 0)
			goto err1;
	}

	err = -ENOMEM;
	app = kzalloc(sizeof(*app), GFP_KERNEL);
	if (!app)
		goto err2;

	err = dev_mc_add(dev, appl->group_address);
	if (err < 0)
		goto err3;

	app->dev = dev;
	app->app = appl;
	app->mad = RB_ROOT;
	app->active = true;
	spin_lock_init(&app->lock);
	skb_queue_head_init(&app->queue);
	rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
	timer_setup(&app->join_timer, mrp_join_timer, 0);
	mrp_join_timer_arm(app);
	timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
	mrp_periodic_timer_arm(app);
	return 0;

err3:
	kfree(app);
err2:
	mrp_release_port(dev);
err1:
	return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);
897
/* Detach an application instance from a device.  The applicant is
 * unpublished first and marked inactive so the timers stop
 * re-arming themselves, then the timers are shut down, and a final
 * TX event flushes pending (leave) messages before all state is
 * freed -- RCU-deferred for readers in mrp_rcv().  Must be called
 * with RTNL held.
 */
void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);

	ASSERT_RTNL();

	RCU_INIT_POINTER(port->applicants[appl->type], NULL);

	spin_lock_bh(&app->lock);
	app->active = false;
	spin_unlock_bh(&app->lock);
	/* Delete timer and generate a final TX event to flush out
	 * all pending messages before the applicant is gone.
	 */
	timer_shutdown_sync(&app->join_timer);
	timer_shutdown_sync(&app->periodic_timer);

	spin_lock_bh(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_attr_destroy_all(app);
	mrp_pdu_queue(app);
	spin_unlock_bh(&app->lock);

	mrp_queue_xmit(app);

	dev_mc_del(dev, appl->group_address);
	kfree_rcu(app, rcu);
	mrp_release_port(dev);
}
EXPORT_SYMBOL_GPL(mrp_uninit_applicant);
930
/* Hook the application's packet type into the receive path.  Always
 * succeeds; the int return is kept for the exported API.
 */
int mrp_register_application(struct mrp_application *appl)
{
	appl->pkttype.func = mrp_rcv;
	dev_add_pack(&appl->pkttype);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);
938
/* Remove the application's packet type from the receive path. */
void mrp_unregister_application(struct mrp_application *appl)
{
	dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * IEEE 802.1Q Multiple Registration Protocol (MRP)
4 *
5 * Copyright (c) 2012 Massachusetts Institute of Technology
6 *
7 * Adapted from code in net/802/garp.c
8 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
9 */
10#include <linux/kernel.h>
11#include <linux/timer.h>
12#include <linux/skbuff.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/rtnetlink.h>
16#include <linux/slab.h>
17#include <linux/module.h>
18#include <net/mrp.h>
19#include <linux/unaligned.h>
20
21static unsigned int mrp_join_time __read_mostly = 200;
22module_param(mrp_join_time, uint, 0644);
23MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
24
25static unsigned int mrp_periodic_time __read_mostly = 1000;
26module_param(mrp_periodic_time, uint, 0644);
27MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");
28
29MODULE_DESCRIPTION("IEEE 802.1Q Multiple Registration Protocol (MRP)");
30MODULE_LICENSE("GPL");
31
32static const u8
33mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
34 [MRP_APPLICANT_VO] = {
35 [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
36 [MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
37 [MRP_EVENT_LV] = MRP_APPLICANT_VO,
38 [MRP_EVENT_TX] = MRP_APPLICANT_VO,
39 [MRP_EVENT_R_NEW] = MRP_APPLICANT_VO,
40 [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AO,
41 [MRP_EVENT_R_IN] = MRP_APPLICANT_VO,
42 [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VO,
43 [MRP_EVENT_R_MT] = MRP_APPLICANT_VO,
44 [MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
45 [MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
46 [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
47 [MRP_EVENT_PERIODIC] = MRP_APPLICANT_VO,
48 },
49 [MRP_APPLICANT_VP] = {
50 [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
51 [MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
52 [MRP_EVENT_LV] = MRP_APPLICANT_VO,
53 [MRP_EVENT_TX] = MRP_APPLICANT_AA,
54 [MRP_EVENT_R_NEW] = MRP_APPLICANT_VP,
55 [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AP,
56 [MRP_EVENT_R_IN] = MRP_APPLICANT_VP,
57 [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VP,
58 [MRP_EVENT_R_MT] = MRP_APPLICANT_VP,
59 [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
60 [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
61 [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
62 [MRP_EVENT_PERIODIC] = MRP_APPLICANT_VP,
63 },
64 [MRP_APPLICANT_VN] = {
65 [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
66 [MRP_EVENT_JOIN] = MRP_APPLICANT_VN,
67 [MRP_EVENT_LV] = MRP_APPLICANT_LA,
68 [MRP_EVENT_TX] = MRP_APPLICANT_AN,
69 [MRP_EVENT_R_NEW] = MRP_APPLICANT_VN,
70 [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_VN,
71 [MRP_EVENT_R_IN] = MRP_APPLICANT_VN,
72 [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VN,
73 [MRP_EVENT_R_MT] = MRP_APPLICANT_VN,
74 [MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
75 [MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
76 [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
77 [MRP_EVENT_PERIODIC] = MRP_APPLICANT_VN,
78 },
79 [MRP_APPLICANT_AN] = {
80 [MRP_EVENT_NEW] = MRP_APPLICANT_AN,
81 [MRP_EVENT_JOIN] = MRP_APPLICANT_AN,
82 [MRP_EVENT_LV] = MRP_APPLICANT_LA,
83 [MRP_EVENT_TX] = MRP_APPLICANT_QA,
84 [MRP_EVENT_R_NEW] = MRP_APPLICANT_AN,
85 [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AN,
86 [MRP_EVENT_R_IN] = MRP_APPLICANT_AN,
87 [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AN,
88 [MRP_EVENT_R_MT] = MRP_APPLICANT_AN,
89 [MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
90 [MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
91 [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
92 [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AN,
93 },
94 [MRP_APPLICANT_AA] = {
95 [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
96 [MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
97 [MRP_EVENT_LV] = MRP_APPLICANT_LA,
98 [MRP_EVENT_TX] = MRP_APPLICANT_QA,
99 [MRP_EVENT_R_NEW] = MRP_APPLICANT_AA,
100 [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
101 [MRP_EVENT_R_IN] = MRP_APPLICANT_AA,
102 [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
103 [MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
104 [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
105 [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
106 [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
107 [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
108 },
109 [MRP_APPLICANT_QA] = {
110 [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
111 [MRP_EVENT_JOIN] = MRP_APPLICANT_QA,
112 [MRP_EVENT_LV] = MRP_APPLICANT_LA,
113 [MRP_EVENT_TX] = MRP_APPLICANT_QA,
114 [MRP_EVENT_R_NEW] = MRP_APPLICANT_QA,
115 [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
116 [MRP_EVENT_R_IN] = MRP_APPLICANT_QA,
117 [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
118 [MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
119 [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
120 [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
121 [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
122 [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
123 },
124 [MRP_APPLICANT_LA] = {
125 [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
126 [MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
127 [MRP_EVENT_LV] = MRP_APPLICANT_LA,
128 [MRP_EVENT_TX] = MRP_APPLICANT_VO,
129 [MRP_EVENT_R_NEW] = MRP_APPLICANT_LA,
130 [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_LA,
131 [MRP_EVENT_R_IN] = MRP_APPLICANT_LA,
132 [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_LA,
133 [MRP_EVENT_R_MT] = MRP_APPLICANT_LA,
134 [MRP_EVENT_R_LV] = MRP_APPLICANT_LA,
135 [MRP_EVENT_R_LA] = MRP_APPLICANT_LA,
136 [MRP_EVENT_REDECLARE] = MRP_APPLICANT_LA,
137 [MRP_EVENT_PERIODIC] = MRP_APPLICANT_LA,
138 },
139 [MRP_APPLICANT_AO] = {
140 [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
141 [MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
142 [MRP_EVENT_LV] = MRP_APPLICANT_AO,
143 [MRP_EVENT_TX] = MRP_APPLICANT_AO,
144 [MRP_EVENT_R_NEW] = MRP_APPLICANT_AO,
145 [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
146 [MRP_EVENT_R_IN] = MRP_APPLICANT_AO,
147 [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
148 [MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
149 [MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
150 [MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
151 [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
152 [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AO,
153 },
154 [MRP_APPLICANT_QO] = {
155 [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
156 [MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
157 [MRP_EVENT_LV] = MRP_APPLICANT_QO,
158 [MRP_EVENT_TX] = MRP_APPLICANT_QO,
159 [MRP_EVENT_R_NEW] = MRP_APPLICANT_QO,
160 [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
161 [MRP_EVENT_R_IN] = MRP_APPLICANT_QO,
162 [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
163 [MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
164 [MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
165 [MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
166 [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
167 [MRP_EVENT_PERIODIC] = MRP_APPLICANT_QO,
168 },
169 [MRP_APPLICANT_AP] = {
170 [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
171 [MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
172 [MRP_EVENT_LV] = MRP_APPLICANT_AO,
173 [MRP_EVENT_TX] = MRP_APPLICANT_QA,
174 [MRP_EVENT_R_NEW] = MRP_APPLICANT_AP,
175 [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
176 [MRP_EVENT_R_IN] = MRP_APPLICANT_AP,
177 [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
178 [MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
179 [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
180 [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
181 [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
182 [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
183 },
184 [MRP_APPLICANT_QP] = {
185 [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
186 [MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
187 [MRP_EVENT_LV] = MRP_APPLICANT_QO,
188 [MRP_EVENT_TX] = MRP_APPLICANT_QP,
189 [MRP_EVENT_R_NEW] = MRP_APPLICANT_QP,
190 [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
191 [MRP_EVENT_R_IN] = MRP_APPLICANT_QP,
192 [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
193 [MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
194 [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
195 [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
196 [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
197 [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
198 },
199};
200
/* Transmit action to take for each applicant state when a TX
 * opportunity occurs; consulted by mrp_attr_event() to decide which
 * vector event (if any) to encode for an attribute.
 */
static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
	[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
	[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
	[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};
215
216static void mrp_attrvalue_inc(void *value, u8 len)
217{
218 u8 *v = (u8 *)value;
219
220 /* Add 1 to the last byte. If it becomes zero,
221 * go to the previous byte and repeat.
222 */
223 while (len > 0 && !++v[--len])
224 ;
225}
226
227static int mrp_attr_cmp(const struct mrp_attr *attr,
228 const void *value, u8 len, u8 type)
229{
230 if (attr->type != type)
231 return attr->type - type;
232 if (attr->len != len)
233 return attr->len - len;
234 return memcmp(attr->value, value, len);
235}
236
237static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
238 const void *value, u8 len, u8 type)
239{
240 struct rb_node *parent = app->mad.rb_node;
241 struct mrp_attr *attr;
242 int d;
243
244 while (parent) {
245 attr = rb_entry(parent, struct mrp_attr, node);
246 d = mrp_attr_cmp(attr, value, len, type);
247 if (d > 0)
248 parent = parent->rb_left;
249 else if (d < 0)
250 parent = parent->rb_right;
251 else
252 return attr;
253 }
254 return NULL;
255}
256
257static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
258 const void *value, u8 len, u8 type)
259{
260 struct rb_node *parent = NULL, **p = &app->mad.rb_node;
261 struct mrp_attr *attr;
262 int d;
263
264 while (*p) {
265 parent = *p;
266 attr = rb_entry(parent, struct mrp_attr, node);
267 d = mrp_attr_cmp(attr, value, len, type);
268 if (d > 0)
269 p = &parent->rb_left;
270 else if (d < 0)
271 p = &parent->rb_right;
272 else {
273 /* The attribute already exists; re-use it. */
274 return attr;
275 }
276 }
277 attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
278 if (!attr)
279 return attr;
280 attr->state = MRP_APPLICANT_VO;
281 attr->type = type;
282 attr->len = len;
283 memcpy(attr->value, value, len);
284
285 rb_link_node(&attr->node, parent, p);
286 rb_insert_color(&attr->node, &app->mad);
287 return attr;
288}
289
/* Unlink @attr from the applicant's MAD database and free it. */
static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
	rb_erase(&attr->node, &app->mad);
	kfree(attr);
}
295
296static void mrp_attr_destroy_all(struct mrp_applicant *app)
297{
298 struct rb_node *node, *next;
299 struct mrp_attr *attr;
300
301 for (node = rb_first(&app->mad);
302 next = node ? rb_next(node) : NULL, node != NULL;
303 node = next) {
304 attr = rb_entry(node, struct mrp_attr, node);
305 mrp_attr_destroy(app, attr);
306 }
307}
308
/* Allocate a fresh PDU skb for @app, sized to the device MTU plus link
 * layer headroom, and write the MRP protocol version header.  The skb
 * is stored in app->pdu for the append helpers to fill in.
 * Returns -ENOMEM when allocation fails.
 */
static int mrp_pdu_init(struct mrp_applicant *app)
{
	struct sk_buff *skb;
	struct mrp_pdu_hdr *ph;

	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
			GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->dev = app->dev;
	skb->protocol = app->app->pkttype.type;
	/* Leave headroom for the link layer header added at queue time. */
	skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	ph = __skb_put(skb, sizeof(*ph));
	ph->version = app->app->version;

	app->pdu = skb;
	return 0;
}
331
332static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
333{
334 __be16 *endmark;
335
336 if (skb_tailroom(app->pdu) < sizeof(*endmark))
337 return -1;
338 endmark = __skb_put(app->pdu, sizeof(*endmark));
339 put_unaligned(MRP_END_MARK, endmark);
340 return 0;
341}
342
/* Finalize the PDU under construction (if any), prepend the link layer
 * header and move it to the transmit queue; actual transmission happens
 * later in mrp_queue_xmit().
 */
static void mrp_pdu_queue(struct mrp_applicant *app)
{
	if (!app->pdu)
		return;

	/* If a Message is still open, close it with an EndMark first; a
	 * second EndMark then terminates the PDU itself.  Append failures
	 * are ignored: when no tailroom is left the marks are simply
	 * omitted.
	 */
	if (mrp_cb(app->pdu)->mh)
		mrp_pdu_append_end_mark(app);
	mrp_pdu_append_end_mark(app);

	dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
			app->app->group_address, app->dev->dev_addr,
			app->pdu->len);

	skb_queue_tail(&app->queue, app->pdu);
	app->pdu = NULL;
}
359
360static void mrp_queue_xmit(struct mrp_applicant *app)
361{
362 struct sk_buff *skb;
363
364 while ((skb = skb_dequeue(&app->queue)))
365 dev_queue_xmit(skb);
366}
367
/* Start a new Message in the PDU for attributes of @attrtype/@attrlen.
 * Any previously open Message is closed with an EndMark first.
 * Returns -1 when the PDU has no room left, 0 on success.
 */
static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
				  u8 attrtype, u8 attrlen)
{
	struct mrp_msg_hdr *mh;

	if (mrp_cb(app->pdu)->mh) {
		if (mrp_pdu_append_end_mark(app) < 0)
			return -1;
		/* Forget the previous Message and VectorAttribute state. */
		mrp_cb(app->pdu)->mh = NULL;
		mrp_cb(app->pdu)->vah = NULL;
	}

	if (skb_tailroom(app->pdu) < sizeof(*mh))
		return -1;
	mh = __skb_put(app->pdu, sizeof(*mh));
	mh->attrtype = attrtype;
	mh->attrlen = attrlen;
	mrp_cb(app->pdu)->mh = mh;
	return 0;
}
388
/* Start a new VectorAttribute in the current Message, seeded with the
 * value of its first attribute.  A copy of that value is kept in the
 * skb control buffer so it can be incremented as further events are
 * packed into the Vector.  Returns -1 when the PDU has no room left.
 */
static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
				      const void *firstattrvalue, u8 attrlen)
{
	struct mrp_vecattr_hdr *vah;

	if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
		return -1;
	vah = __skb_put(app->pdu, sizeof(*vah) + attrlen);
	/* Length starts at zero and no flags (e.g. LeaveAll) are set. */
	put_unaligned(0, &vah->lenflags);
	memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
	mrp_cb(app->pdu)->vah = vah;
	memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
	return 0;
}
403
/* Pack one event for @attr into the PDU under construction, creating
 * the PDU, Message and VectorAttribute structures on demand.  Whenever
 * the current PDU runs out of room, it is queued for transmission and
 * the whole operation restarts on a fresh PDU.  Returns a negative
 * errno only when a new PDU cannot be allocated, 0 on success.
 */
static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
					const struct mrp_attr *attr,
					enum mrp_vecattr_event vaevent)
{
	u16 len, pos;
	u8 *vaevents;
	int err;
again:
	if (!app->pdu) {
		err = mrp_pdu_init(app);
		if (err < 0)
			return err;
	}

	/* If there is no Message header in the PDU, or the Message header is
	 * for a different attribute type, add an EndMark (if necessary) and a
	 * new Message header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->mh ||
	    mrp_cb(app->pdu)->mh->attrtype != attr->type ||
	    mrp_cb(app->pdu)->mh->attrlen != attr->len) {
		if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
			goto queue;
	}

	/* If there is no VectorAttribute header for this Message in the PDU,
	 * or this attribute's value does not sequentially follow the previous
	 * attribute's value, add a new VectorAttribute header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->vah ||
	    memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
		if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
			goto queue;
	}

	/* The current number of packed events determines where in the
	 * three-events-per-byte encoding this event lands.
	 */
	len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
	pos = len % 3;

	/* Events are packed into Vectors in the PDU, three to a byte. Add a
	 * byte to the end of the Vector if necessary.
	 */
	if (!pos) {
		if (skb_tailroom(app->pdu) < sizeof(u8))
			goto queue;
		vaevents = __skb_put(app->pdu, sizeof(u8));
	} else {
		vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
	}

	switch (pos) {
	case 0:
		*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
				       __MRP_VECATTR_EVENT_MAX);
		break;
	case 1:
		*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
		break;
	case 2:
		*vaevents += vaevent;
		break;
	default:
		WARN_ON(1);
	}

	/* Increment the length of the VectorAttribute in the PDU, as well as
	 * the value of the next attribute that would continue its Vector.
	 */
	put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
	mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);

	return 0;

	/* Out of room: flush this PDU and retry on a fresh one. */
queue:
	mrp_pdu_queue(app);
	goto again;
}
480
/* Run the applicant state machine for @attr on @event.  For a TX
 * event the attribute is first encoded into the pending PDU according
 * to the transmit action of its *current* state; on encoding failure
 * the state is left unchanged so the attribute is retried at the next
 * TX event.  Note: a TX in a state whose action is S_LV destroys the
 * attribute, so callers must not touch @attr after this returns.
 */
static void mrp_attr_event(struct mrp_applicant *app,
			   struct mrp_attr *attr, enum mrp_event event)
{
	enum mrp_applicant_state state;

	state = mrp_applicant_state_table[attr->state][event];
	if (state == MRP_APPLICANT_INVALID) {
		WARN_ON(1);
		return;
	}

	if (event == MRP_EVENT_TX) {
		/* When appending the attribute fails, don't update its state
		 * in order to retry at the next TX event.
		 */

		switch (mrp_tx_action_table[attr->state]) {
		case MRP_TX_ACTION_NONE:
		case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
		case MRP_TX_ACTION_S_IN_OPTIONAL:
			break;
		case MRP_TX_ACTION_S_NEW:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_NEW) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_JOIN_IN:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_LV:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_LV) < 0)
				return;
			/* As a pure applicant, sending a leave message
			 * implies that the attribute was unregistered and
			 * can be destroyed.
			 */
			mrp_attr_destroy(app, attr);
			return;
		default:
			WARN_ON(1);
		}
	}

	attr->state = state;
}
529
530int mrp_request_join(const struct net_device *dev,
531 const struct mrp_application *appl,
532 const void *value, u8 len, u8 type)
533{
534 struct mrp_port *port = rtnl_dereference(dev->mrp_port);
535 struct mrp_applicant *app = rtnl_dereference(
536 port->applicants[appl->type]);
537 struct mrp_attr *attr;
538
539 if (sizeof(struct mrp_skb_cb) + len >
540 sizeof_field(struct sk_buff, cb))
541 return -ENOMEM;
542
543 spin_lock_bh(&app->lock);
544 attr = mrp_attr_create(app, value, len, type);
545 if (!attr) {
546 spin_unlock_bh(&app->lock);
547 return -ENOMEM;
548 }
549 mrp_attr_event(app, attr, MRP_EVENT_JOIN);
550 spin_unlock_bh(&app->lock);
551 return 0;
552}
553EXPORT_SYMBOL_GPL(mrp_request_join);
554
555void mrp_request_leave(const struct net_device *dev,
556 const struct mrp_application *appl,
557 const void *value, u8 len, u8 type)
558{
559 struct mrp_port *port = rtnl_dereference(dev->mrp_port);
560 struct mrp_applicant *app = rtnl_dereference(
561 port->applicants[appl->type]);
562 struct mrp_attr *attr;
563
564 if (sizeof(struct mrp_skb_cb) + len >
565 sizeof_field(struct sk_buff, cb))
566 return;
567
568 spin_lock_bh(&app->lock);
569 attr = mrp_attr_lookup(app, value, len, type);
570 if (!attr) {
571 spin_unlock_bh(&app->lock);
572 return;
573 }
574 mrp_attr_event(app, attr, MRP_EVENT_LV);
575 spin_unlock_bh(&app->lock);
576}
577EXPORT_SYMBOL_GPL(mrp_request_leave);
578
579static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
580{
581 struct rb_node *node, *next;
582 struct mrp_attr *attr;
583
584 for (node = rb_first(&app->mad);
585 next = node ? rb_next(node) : NULL, node != NULL;
586 node = next) {
587 attr = rb_entry(node, struct mrp_attr, node);
588 mrp_attr_event(app, attr, event);
589 }
590}
591
/* (Re)arm the join timer with a random delay of up to mrp_join_time
 * milliseconds.
 */
static void mrp_join_timer_arm(struct mrp_applicant *app)
{
	unsigned long delay;

	delay = get_random_u32_below(msecs_to_jiffies(mrp_join_time));
	mod_timer(&app->join_timer, jiffies + delay);
}
599
/* Join timer handler: generate a TX event for all attributes, queue
 * the resulting PDU and transmit the queue.  Transmission happens
 * outside app->lock; the timer only re-arms itself while the
 * applicant is still active (cleared in mrp_uninit_applicant()).
 */
static void mrp_join_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, join_timer);

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_queue_xmit(app);
	spin_lock(&app->lock);
	if (likely(app->active))
		mrp_join_timer_arm(app);
	spin_unlock(&app->lock);
}
615
/* (Re)arm the periodic timer mrp_periodic_time milliseconds ahead. */
static void mrp_periodic_timer_arm(struct mrp_applicant *app)
{
	mod_timer(&app->periodic_timer,
		  jiffies + msecs_to_jiffies(mrp_periodic_time));
}
621
/* Periodic timer handler: deliver a PERIODIC event to all attributes
 * and queue the resulting PDU.  Both the event delivery and the
 * re-arm are skipped once the applicant has been deactivated.
 */
static void mrp_periodic_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, periodic_timer);

	spin_lock(&app->lock);
	if (likely(app->active)) {
		mrp_mad_event(app, MRP_EVENT_PERIODIC);
		mrp_pdu_queue(app);
		mrp_periodic_timer_arm(app);
	}
	spin_unlock(&app->lock);
}
634
/* Check for an EndMark at *offset.  Returns -1 both when an EndMark
 * is found (consuming it by advancing *offset) and when the skb is
 * too short to read one — in either case the caller should stop
 * parsing.  Returns 0 when ordinary data follows instead.
 */
static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
	__be16 endmark;

	if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
		return -1;
	if (endmark == MRP_END_MARK) {
		*offset += sizeof(endmark);
		return -1;
	}
	return 0;
}
647
648static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
649 struct sk_buff *skb,
650 enum mrp_vecattr_event vaevent)
651{
652 struct mrp_attr *attr;
653 enum mrp_event event;
654
655 attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
656 mrp_cb(skb)->mh->attrlen,
657 mrp_cb(skb)->mh->attrtype);
658 if (attr == NULL)
659 return;
660
661 switch (vaevent) {
662 case MRP_VECATTR_EVENT_NEW:
663 event = MRP_EVENT_R_NEW;
664 break;
665 case MRP_VECATTR_EVENT_JOIN_IN:
666 event = MRP_EVENT_R_JOIN_IN;
667 break;
668 case MRP_VECATTR_EVENT_IN:
669 event = MRP_EVENT_R_IN;
670 break;
671 case MRP_VECATTR_EVENT_JOIN_MT:
672 event = MRP_EVENT_R_JOIN_MT;
673 break;
674 case MRP_VECATTR_EVENT_MT:
675 event = MRP_EVENT_R_MT;
676 break;
677 case MRP_VECATTR_EVENT_LV:
678 event = MRP_EVENT_R_LV;
679 break;
680 default:
681 return;
682 }
683
684 mrp_attr_event(app, attr, event);
685}
686
/* Parse one VectorAttribute of the current Message, delivering each
 * decoded event to the applicant state machine.  *offset tracks the
 * parse position in the skb.  Returns -1 on malformed or truncated
 * input, 0 on success.
 */
static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
				 struct sk_buff *skb, int *offset)
{
	struct mrp_vecattr_hdr _vah;
	u16 valen;
	u8 vaevents, vaevent;

	mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
					      &_vah);
	if (!mrp_cb(skb)->vah)
		return -1;
	*offset += sizeof(_vah);

	/* A set LeaveAll flag delivers R_LA to every tracked attribute. */
	if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
	    MRP_VECATTR_HDR_FLAG_LA)
		mrp_mad_event(app, MRP_EVENT_R_LA);
	valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
			    MRP_VECATTR_HDR_LEN_MASK);

	/* The VectorAttribute structure in a PDU carries event information
	 * about one or more attributes having consecutive values. Only the
	 * value for the first attribute is contained in the structure. So
	 * we make a copy of that value, and then increment it each time we
	 * advance to the next event in its Vector.
	 */
	if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
	    sizeof_field(struct sk_buff, cb))
		return -1;
	if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
			  mrp_cb(skb)->mh->attrlen) < 0)
		return -1;
	*offset += mrp_cb(skb)->mh->attrlen;

	/* In a VectorAttribute, the Vector contains events which are packed
	 * three to a byte. We process one byte of the Vector at a time.
	 */
	while (valen > 0) {
		if (skb_copy_bits(skb, *offset, &vaevents,
				  sizeof(vaevents)) < 0)
			return -1;
		*offset += sizeof(vaevents);

		/* Extract and process the first event. */
		vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
				      __MRP_VECATTR_EVENT_MAX);
		if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
			/* The byte is malformed; stop processing. */
			return -1;
		}
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the second event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= (__MRP_VECATTR_EVENT_MAX *
			     __MRP_VECATTR_EVENT_MAX);
		vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the third event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= __MRP_VECATTR_EVENT_MAX;
		vaevent = vaevents;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
	}
	return 0;
}
759
/* Parse one Message: validate its header, then consume its
 * VectorAttributes until an EndMark or the end of the PDU.
 * Returns -1 on malformed input, 0 otherwise.
 */
static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
			     int *offset)
{
	struct mrp_msg_hdr _mh;

	mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
	if (!mrp_cb(skb)->mh)
		return -1;
	*offset += sizeof(_mh);

	/* Reject attribute types outside the application's declared range
	 * and zero-length attribute values.
	 */
	if (mrp_cb(skb)->mh->attrtype == 0 ||
	    mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
	    mrp_cb(skb)->mh->attrlen == 0)
		return -1;

	while (skb->len > *offset) {
		if (mrp_pdu_parse_end_mark(skb, offset) < 0)
			break;
		if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
			return -1;
	}
	return 0;
}
783
/* Packet-type handler for incoming MRP PDUs: locate the applicant for
 * this device/application, validate the PDU header, then parse all
 * Messages under the applicant lock.  The skb is always consumed.
 */
static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct mrp_application *appl = container_of(pt, struct mrp_application,
						    pkttype);
	struct mrp_port *port;
	struct mrp_applicant *app;
	struct mrp_pdu_hdr _ph;
	const struct mrp_pdu_hdr *ph;
	int offset = skb_network_offset(skb);

	/* If the interface is in promiscuous mode, drop the packet if
	 * it was unicast to another host.
	 */
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
		goto out;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto out;
	port = rcu_dereference(dev->mrp_port);
	if (unlikely(!port))
		goto out;
	app = rcu_dereference(port->applicants[appl->type]);
	if (unlikely(!app))
		goto out;

	ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
	if (!ph)
		goto out;
	offset += sizeof(_ph);

	/* Only PDUs matching the application's protocol version are parsed. */
	if (ph->version != app->app->version)
		goto out;

	spin_lock(&app->lock);
	while (skb->len > offset) {
		if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
			break;
		if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
			break;
	}
	spin_unlock(&app->lock);
out:
	kfree_skb(skb);
	return 0;
}
830
831static int mrp_init_port(struct net_device *dev)
832{
833 struct mrp_port *port;
834
835 port = kzalloc(sizeof(*port), GFP_KERNEL);
836 if (!port)
837 return -ENOMEM;
838 rcu_assign_pointer(dev->mrp_port, port);
839 return 0;
840}
841
842static void mrp_release_port(struct net_device *dev)
843{
844 struct mrp_port *port = rtnl_dereference(dev->mrp_port);
845 unsigned int i;
846
847 for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
848 if (rtnl_dereference(port->applicants[i]))
849 return;
850 }
851 RCU_INIT_POINTER(dev->mrp_port, NULL);
852 kfree_rcu(port, rcu);
853}
854
/* Attach application @appl to @dev: create the per-device port on
 * first use, allocate and publish the applicant, subscribe to the
 * application's group address and start the join/periodic timers.
 * Caller holds RTNL.  Returns 0 on success or a negative errno.
 */
int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_applicant *app;
	int err;

	ASSERT_RTNL();

	if (!rtnl_dereference(dev->mrp_port)) {
		err = mrp_init_port(dev);
		if (err < 0)
			goto err1;
	}

	err = -ENOMEM;
	app = kzalloc(sizeof(*app), GFP_KERNEL);
	if (!app)
		goto err2;

	err = dev_mc_add(dev, appl->group_address);
	if (err < 0)
		goto err3;

	app->dev = dev;
	app->app = appl;
	app->mad = RB_ROOT;
	app->active = true;
	spin_lock_init(&app->lock);
	skb_queue_head_init(&app->queue);
	rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
	timer_setup(&app->join_timer, mrp_join_timer, 0);
	mrp_join_timer_arm(app);
	timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
	mrp_periodic_timer_arm(app);
	return 0;

err3:
	kfree(app);
err2:
	/* Drops the port again only if no other applicant is using it. */
	mrp_release_port(dev);
err1:
	return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);
898
/* Detach application @appl from @dev.  The applicant pointer is
 * cleared first so mrp_rcv() stops delivering PDUs to it, the timers
 * are deactivated and shut down, and a final TX flushes pending
 * messages before the applicant and (possibly) the port are freed.
 * Caller holds RTNL.
 */
void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);

	ASSERT_RTNL();

	RCU_INIT_POINTER(port->applicants[appl->type], NULL);

	spin_lock_bh(&app->lock);
	app->active = false;
	spin_unlock_bh(&app->lock);
	/* Delete timer and generate a final TX event to flush out
	 * all pending messages before the applicant is gone.
	 */
	timer_shutdown_sync(&app->join_timer);
	timer_shutdown_sync(&app->periodic_timer);

	spin_lock_bh(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_attr_destroy_all(app);
	mrp_pdu_queue(app);
	spin_unlock_bh(&app->lock);

	mrp_queue_xmit(app);

	dev_mc_del(dev, appl->group_address);
	kfree_rcu(app, rcu);
	mrp_release_port(dev);
}
EXPORT_SYMBOL_GPL(mrp_uninit_applicant);
931
/* Register the application's packet type with the stack so mrp_rcv()
 * receives its PDUs.  Always returns 0.
 */
int mrp_register_application(struct mrp_application *appl)
{
	appl->pkttype.func = mrp_rcv;
	dev_add_pack(&appl->pkttype);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);
939
/* Remove the application's packet type; PDUs are no longer delivered. */
void mrp_unregister_application(struct mrp_application *appl)
{
	dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);