/*
 * IEEE 802.1Q Multiple Registration Protocol (MRP)
 *
 * Copyright (c) 2012 Massachusetts Institute of Technology
 *
 * Adapted from code in net/802/garp.c
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/mrp.h>
#include <asm/unaligned.h>

static unsigned int mrp_join_time __read_mostly = 200;
module_param(mrp_join_time, uint, 0644);
MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");

static unsigned int mrp_periodic_time __read_mostly = 1000;
module_param(mrp_periodic_time, uint, 0644);
MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");

MODULE_LICENSE("GPL");

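/* Applicant state machine, indexed by [current state][event]. The state
 * names follow the IEEE 802.1Q applicant naming (a reading of the
 * standard, not something defined in this file): the first letter is the
 * urge to transmit (V = very anxious, A = anxious, Q = quiet), the second
 * the membership role (O = observer, P = passive member, N = new member,
 * A = active member), and LA is an applicant that is leaving. Entries
 * left unset default to 0, which is assumed to be MRP_APPLICANT_INVALID
 * in <net/mrp.h> and is rejected in mrp_attr_event().
 */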
static const u8
mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
	[MRP_APPLICANT_VO] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
		[MRP_EVENT_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_TX] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VO,
	},
	[MRP_APPLICANT_VP] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
		[MRP_EVENT_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_TX] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VP,
	},
	[MRP_APPLICANT_VN] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_VN,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VN,
	},
	[MRP_APPLICANT_AN] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_AN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AN,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AN,
	},
	[MRP_APPLICANT_AA] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_QA] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_QA,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_LA] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_LA,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_LA,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_LA,
	},
	[MRP_APPLICANT_AO] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
		[MRP_EVENT_LV] = MRP_APPLICANT_AO,
		[MRP_EVENT_TX] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AO,
	},
	[MRP_APPLICANT_QO] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
		[MRP_EVENT_LV] = MRP_APPLICANT_QO,
		[MRP_EVENT_TX] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_QO,
	},
	[MRP_APPLICANT_AP] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
		[MRP_EVENT_LV] = MRP_APPLICANT_AO,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
	},
	[MRP_APPLICANT_QP] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
		[MRP_EVENT_LV] = MRP_APPLICANT_QO,
		[MRP_EVENT_TX] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
	},
};

static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
	[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
	[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
	[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};

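/* Treat the attribute value as a big-endian multi-byte integer and add
 * one to it. Illustrative example: with len == 2, the value {0x00, 0xff}
 * becomes {0x01, 0x00} -- the last byte wraps to zero and the carry
 * propagates into the preceding byte.
 */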
static void mrp_attrvalue_inc(void *value, u8 len)
{
	u8 *v = (u8 *)value;

	/* Add 1 to the last byte. If it becomes zero,
	 * go to the previous byte and repeat.
	 */
	while (len > 0 && !++v[--len])
		;
}

static int mrp_attr_cmp(const struct mrp_attr *attr,
			const void *value, u8 len, u8 type)
{
	if (attr->type != type)
		return attr->type - type;
	if (attr->len != len)
		return attr->len - len;
	return memcmp(attr->value, value, len);
}

static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (parent) {
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			parent = parent->rb_left;
		else if (d < 0)
			parent = parent->rb_right;
		else
			return attr;
	}
	return NULL;
}

static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = NULL, **p = &app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (*p) {
		parent = *p;
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			p = &parent->rb_left;
		else if (d < 0)
			p = &parent->rb_right;
		else {
			/* The attribute already exists; re-use it. */
			return attr;
		}
	}
	attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
	if (!attr)
		return attr;
	attr->state = MRP_APPLICANT_VO;
	attr->type = type;
	attr->len = len;
	memcpy(attr->value, value, len);

	rb_link_node(&attr->node, parent, p);
	rb_insert_color(&attr->node, &app->mad);
	return attr;
}

static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
	rb_erase(&attr->node, &app->mad);
	kfree(attr);
}

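/* PDU construction. The MRPDU layout assumed here (per IEEE 802.1Q and
 * the structures in <net/mrp.h>, not spelled out elsewhere in this file):
 * a one-byte ProtocolVersion, followed by one or more Messages; each
 * Message is a header (AttributeType, AttributeLength) followed by a list
 * of VectorAttributes; each VectorAttribute is a two-byte
 * LeaveAll/NumberOfValues field, the FirstValue, and a Vector of packed
 * events. Lists are terminated by two-byte EndMarks.
 */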
static int mrp_pdu_init(struct mrp_applicant *app)
{
	struct sk_buff *skb;
	struct mrp_pdu_hdr *ph;

	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
			GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->dev = app->dev;
	skb->protocol = app->app->pkttype.type;
	skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	ph = (struct mrp_pdu_hdr *)__skb_put(skb, sizeof(*ph));
	ph->version = app->app->version;

	app->pdu = skb;
	return 0;
}

static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
{
	__be16 *endmark;

	if (skb_tailroom(app->pdu) < sizeof(*endmark))
		return -1;
	endmark = (__be16 *)__skb_put(app->pdu, sizeof(*endmark));
	put_unaligned(MRP_END_MARK, endmark);
	return 0;
}

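/* Close the PDU under construction and move it to the transmit queue.
 * Two EndMarks may be appended: one to terminate the VectorAttribute
 * list of a still-open Message (only when a Message header was written),
 * and one to terminate the Message list of the MRPDU itself.
 */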
static void mrp_pdu_queue(struct mrp_applicant *app)
{
	if (!app->pdu)
		return;

	if (mrp_cb(app->pdu)->mh)
		mrp_pdu_append_end_mark(app);
	mrp_pdu_append_end_mark(app);

	dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
			app->app->group_address, app->dev->dev_addr,
			app->pdu->len);

	skb_queue_tail(&app->queue, app->pdu);
	app->pdu = NULL;
}

static void mrp_queue_xmit(struct mrp_applicant *app)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&app->queue)))
		dev_queue_xmit(skb);
}

static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
				  u8 attrtype, u8 attrlen)
{
	struct mrp_msg_hdr *mh;

	if (mrp_cb(app->pdu)->mh) {
		if (mrp_pdu_append_end_mark(app) < 0)
			return -1;
		mrp_cb(app->pdu)->mh = NULL;
		mrp_cb(app->pdu)->vah = NULL;
	}

	if (skb_tailroom(app->pdu) < sizeof(*mh))
		return -1;
	mh = (struct mrp_msg_hdr *)__skb_put(app->pdu, sizeof(*mh));
	mh->attrtype = attrtype;
	mh->attrlen = attrlen;
	mrp_cb(app->pdu)->mh = mh;
	return 0;
}

static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
				      const void *firstattrvalue, u8 attrlen)
{
	struct mrp_vecattr_hdr *vah;

	if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
		return -1;
	vah = (struct mrp_vecattr_hdr *)__skb_put(app->pdu,
						  sizeof(*vah) + attrlen);
	put_unaligned(0, &vah->lenflags);
	memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
	mrp_cb(app->pdu)->vah = vah;
	memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
	return 0;
}

static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
					const struct mrp_attr *attr,
					enum mrp_vecattr_event vaevent)
{
	u16 len, pos;
	u8 *vaevents;
	int err;
again:
	if (!app->pdu) {
		err = mrp_pdu_init(app);
		if (err < 0)
			return err;
	}

	/* If there is no Message header in the PDU, or the Message header is
	 * for a different attribute type, add an EndMark (if necessary) and a
	 * new Message header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->mh ||
	    mrp_cb(app->pdu)->mh->attrtype != attr->type ||
	    mrp_cb(app->pdu)->mh->attrlen != attr->len) {
		if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
			goto queue;
	}

	/* If there is no VectorAttribute header for this Message in the PDU,
	 * or this attribute's value does not sequentially follow the previous
	 * attribute's value, add a new VectorAttribute header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->vah ||
	    memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
		if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
			goto queue;
	}

	len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
	pos = len % 3;

	/* Events are packed into Vectors in the PDU, three to a byte. Add a
	 * byte to the end of the Vector if necessary.
	 */
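	/* Worked example (illustrative, assuming the six AttributeEvent
	 * values defined in <net/mrp.h>, i.e. __MRP_VECATTR_EVENT_MAX == 6):
	 * packing JoinIn (1), New (0) and Mt (4) into one Vector byte
	 * yields 1 * 36 + 0 * 6 + 4 = 40.
	 */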
	if (!pos) {
		if (skb_tailroom(app->pdu) < sizeof(u8))
			goto queue;
		vaevents = (u8 *)__skb_put(app->pdu, sizeof(u8));
	} else {
		vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
	}

	switch (pos) {
	case 0:
		*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
				       __MRP_VECATTR_EVENT_MAX);
		break;
	case 1:
		*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
		break;
	case 2:
		*vaevents += vaevent;
		break;
	default:
		WARN_ON(1);
	}

	/* Increment the length of the VectorAttribute in the PDU, as well as
	 * the value of the next attribute that would continue its Vector.
	 */
	put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
	mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);

	return 0;

queue:
	mrp_pdu_queue(app);
	goto again;
}

static void mrp_attr_event(struct mrp_applicant *app,
			   struct mrp_attr *attr, enum mrp_event event)
{
	enum mrp_applicant_state state;

	state = mrp_applicant_state_table[attr->state][event];
	if (state == MRP_APPLICANT_INVALID) {
		WARN_ON(1);
		return;
	}

	if (event == MRP_EVENT_TX) {
		/* When appending the attribute fails, don't update its state
		 * in order to retry at the next TX event.
		 */

		switch (mrp_tx_action_table[attr->state]) {
		case MRP_TX_ACTION_NONE:
		case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
		case MRP_TX_ACTION_S_IN_OPTIONAL:
			break;
		case MRP_TX_ACTION_S_NEW:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_NEW) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_JOIN_IN:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_LV:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_LV) < 0)
				return;
			/* As a pure applicant, sending a leave message
			 * implies that the attribute was unregistered and
			 * can be destroyed.
			 */
			mrp_attr_destroy(app, attr);
			return;
		default:
			WARN_ON(1);
		}
	}

	attr->state = state;
}

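/* Public request API. A hypothetical caller -- an MRP application such as
 * MVRP -- registers its struct mrp_application, attaches it to a device
 * with mrp_init_applicant(), and then declares an attribute roughly as:
 *
 *	err = mrp_request_join(dev, &my_app, &value, sizeof(value),
 *			       MY_ATTR_TYPE);
 *
 * my_app, value and MY_ATTR_TYPE are placeholders for the caller's
 * application, attribute value and attribute type; they are not defined
 * here.
 */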
int mrp_request_join(const struct net_device *dev,
		     const struct mrp_application *appl,
		     const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    FIELD_SIZEOF(struct sk_buff, cb))
		return -ENOMEM;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_create(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return -ENOMEM;
	}
	mrp_attr_event(app, attr, MRP_EVENT_JOIN);
	spin_unlock_bh(&app->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_request_join);

void mrp_request_leave(const struct net_device *dev,
		       const struct mrp_application *appl,
		       const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    FIELD_SIZEOF(struct sk_buff, cb))
		return;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_lookup(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return;
	}
	mrp_attr_event(app, attr, MRP_EVENT_LV);
	spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(mrp_request_leave);

static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
{
	struct rb_node *node, *next;
	struct mrp_attr *attr;

	for (node = rb_first(&app->mad);
	     next = node ? rb_next(node) : NULL, node != NULL;
	     node = next) {
		attr = rb_entry(node, struct mrp_attr, node);
		mrp_attr_event(app, attr, event);
	}
}

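/* Arm the join timer with a pseudo-random delay uniformly distributed in
 * [0, mrp_join_time) jiffies: multiplying the jiffies value by a 32-bit
 * random number and shifting right by 32 scales the delay into that
 * range, which spreads join transmissions from different applicants over
 * time.
 */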
static void mrp_join_timer_arm(struct mrp_applicant *app)
{
	unsigned long delay;

	delay = (u64)msecs_to_jiffies(mrp_join_time) * prandom_u32() >> 32;
	mod_timer(&app->join_timer, jiffies + delay);
}

static void mrp_join_timer(unsigned long data)
{
	struct mrp_applicant *app = (struct mrp_applicant *)data;

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_queue_xmit(app);
	mrp_join_timer_arm(app);
}

static void mrp_periodic_timer_arm(struct mrp_applicant *app)
{
	mod_timer(&app->periodic_timer,
		  jiffies + msecs_to_jiffies(mrp_periodic_time));
}

static void mrp_periodic_timer(unsigned long data)
{
	struct mrp_applicant *app = (struct mrp_applicant *)data;

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_PERIODIC);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_periodic_timer_arm(app);
}

static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
	__be16 endmark;

	if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
		return -1;
	if (endmark == MRP_END_MARK) {
		*offset += sizeof(endmark);
		return -1;
	}
	return 0;
}

static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
					struct sk_buff *skb,
					enum mrp_vecattr_event vaevent)
{
	struct mrp_attr *attr;
	enum mrp_event event;

	attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
			       mrp_cb(skb)->mh->attrlen,
			       mrp_cb(skb)->mh->attrtype);
	if (attr == NULL)
		return;

	switch (vaevent) {
	case MRP_VECATTR_EVENT_NEW:
		event = MRP_EVENT_R_NEW;
		break;
	case MRP_VECATTR_EVENT_JOIN_IN:
		event = MRP_EVENT_R_JOIN_IN;
		break;
	case MRP_VECATTR_EVENT_IN:
		event = MRP_EVENT_R_IN;
		break;
	case MRP_VECATTR_EVENT_JOIN_MT:
		event = MRP_EVENT_R_JOIN_MT;
		break;
	case MRP_VECATTR_EVENT_MT:
		event = MRP_EVENT_R_MT;
		break;
	case MRP_VECATTR_EVENT_LV:
		event = MRP_EVENT_R_LV;
		break;
	default:
		return;
	}

	mrp_attr_event(app, attr, event);
}

static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
				 struct sk_buff *skb, int *offset)
{
	struct mrp_vecattr_hdr _vah;
	u16 valen;
	u8 vaevents, vaevent;

	mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
					      &_vah);
	if (!mrp_cb(skb)->vah)
		return -1;
	*offset += sizeof(_vah);

	if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
	    MRP_VECATTR_HDR_FLAG_LA)
		mrp_mad_event(app, MRP_EVENT_R_LA);
	valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
			    MRP_VECATTR_HDR_LEN_MASK);

	/* The VectorAttribute structure in a PDU carries event information
	 * about one or more attributes having consecutive values. Only the
	 * value for the first attribute is contained in the structure. So
	 * we make a copy of that value, and then increment it each time we
	 * advance to the next event in its Vector.
	 */
	if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
	    FIELD_SIZEOF(struct sk_buff, cb))
		return -1;
	if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
			  mrp_cb(skb)->mh->attrlen) < 0)
		return -1;
	*offset += mrp_cb(skb)->mh->attrlen;

	/* In a VectorAttribute, the Vector contains events which are packed
	 * three to a byte. We process one byte of the Vector at a time.
	 */
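	/* For example (again assuming six AttributeEvent values), a Vector
	 * byte of 40 decodes as 40 / 36 = 1 (JoinIn), (40 % 36) / 6 = 0
	 * (New) and 40 % 6 = 4 (Mt), the reverse of the packing done on
	 * transmit.
	 */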
	while (valen > 0) {
		if (skb_copy_bits(skb, *offset, &vaevents,
				  sizeof(vaevents)) < 0)
			return -1;
		*offset += sizeof(vaevents);

		/* Extract and process the first event. */
		vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
				      __MRP_VECATTR_EVENT_MAX);
		if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
			/* The byte is malformed; stop processing. */
			return -1;
		}
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the second event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= (__MRP_VECATTR_EVENT_MAX *
			     __MRP_VECATTR_EVENT_MAX);
		vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the third event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= __MRP_VECATTR_EVENT_MAX;
		vaevent = vaevents;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
	}
	return 0;
}

static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
			     int *offset)
{
	struct mrp_msg_hdr _mh;

	mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
	if (!mrp_cb(skb)->mh)
		return -1;
	*offset += sizeof(_mh);

	if (mrp_cb(skb)->mh->attrtype == 0 ||
	    mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
	    mrp_cb(skb)->mh->attrlen == 0)
		return -1;

	while (skb->len > *offset) {
		if (mrp_pdu_parse_end_mark(skb, offset) < 0)
			break;
		if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
			return -1;
	}
	return 0;
}

static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct mrp_application *appl = container_of(pt, struct mrp_application,
						    pkttype);
	struct mrp_port *port;
	struct mrp_applicant *app;
	struct mrp_pdu_hdr _ph;
	const struct mrp_pdu_hdr *ph;
	int offset = skb_network_offset(skb);

	/* If the interface is in promiscuous mode, drop the packet if
	 * it was unicast to another host.
	 */
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
		goto out;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto out;
	port = rcu_dereference(dev->mrp_port);
	if (unlikely(!port))
		goto out;
	app = rcu_dereference(port->applicants[appl->type]);
	if (unlikely(!app))
		goto out;

	ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
	if (!ph)
		goto out;
	offset += sizeof(_ph);

	if (ph->version != app->app->version)
		goto out;

	spin_lock(&app->lock);
	while (skb->len > offset) {
		if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
			break;
		if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
			break;
	}
	spin_unlock(&app->lock);
out:
	kfree_skb(skb);
	return 0;
}

static int mrp_init_port(struct net_device *dev)
{
	struct mrp_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;
	rcu_assign_pointer(dev->mrp_port, port);
	return 0;
}

static void mrp_release_port(struct net_device *dev)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	unsigned int i;

	for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
		if (rtnl_dereference(port->applicants[i]))
			return;
	}
	RCU_INIT_POINTER(dev->mrp_port, NULL);
	kfree_rcu(port, rcu);
}

int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_applicant *app;
	int err;

	ASSERT_RTNL();

	if (!rtnl_dereference(dev->mrp_port)) {
		err = mrp_init_port(dev);
		if (err < 0)
			goto err1;
	}

	err = -ENOMEM;
	app = kzalloc(sizeof(*app), GFP_KERNEL);
	if (!app)
		goto err2;

	err = dev_mc_add(dev, appl->group_address);
	if (err < 0)
		goto err3;

	app->dev = dev;
	app->app = appl;
	app->mad = RB_ROOT;
	spin_lock_init(&app->lock);
	skb_queue_head_init(&app->queue);
	rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
	setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
	mrp_join_timer_arm(app);
	setup_timer(&app->periodic_timer, mrp_periodic_timer,
		    (unsigned long)app);
	mrp_periodic_timer_arm(app);
	return 0;

err3:
	kfree(app);
err2:
	mrp_release_port(dev);
err1:
	return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);

void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);

	ASSERT_RTNL();

	RCU_INIT_POINTER(port->applicants[appl->type], NULL);

	/* Delete timer and generate a final TX event to flush out
	 * all pending messages before the applicant is gone.
	 */
	del_timer_sync(&app->join_timer);
	del_timer_sync(&app->periodic_timer);

	spin_lock_bh(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock_bh(&app->lock);

	mrp_queue_xmit(app);

	dev_mc_del(dev, appl->group_address);
	kfree_rcu(app, rcu);
	mrp_release_port(dev);
}
EXPORT_SYMBOL_GPL(mrp_uninit_applicant);

int mrp_register_application(struct mrp_application *appl)
{
	appl->pkttype.func = mrp_rcv;
	dev_add_pack(&appl->pkttype);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);

void mrp_unregister_application(struct mrp_application *appl)
{
	dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);