/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_TAILROOM 16
#endif

static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
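
/* Worked example (illustrative): align() rounds a length up to the next
 * 4-byte boundary, so align(61) == 64 while align(64) stays 64. The
 * bundling code below relies on it to keep each bundled message
 * word-aligned inside the outer buffer.
 */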

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Returns a new buffer with data pointers set to the specified size.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;
#ifdef CONFIG_TIPC_CRYPTO
	unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
#else
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
#endif

	skb = alloc_skb_fclone(buf_size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}
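
/* Usage sketch (illustrative only; constants from msg.h and the uapi
 * header, error handling elided): allocate a buffer sized for header plus
 * payload, then stamp a basic header into it.
 *
 *	struct sk_buff *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen,
 *					       GFP_ATOMIC);
 *	if (skb)
 *		tipc_msg_init(own_node, buf_msg(skb), TIPC_LOW_IMPORTANCE,
 *			      TIPC_DIRECT_MSG, BASIC_H_SIZE, dnode);
 */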

void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}

struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf: in: the buffer to append. Always defined
 *        out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		frag = skb_unshare(frag, GFP_ATOMIC);
		if (unlikely(!frag))
			goto err;
		head = *headbuf = frag;
		*buf = NULL;
		TIPC_SKB_CB(head)->tail = NULL;
		if (skb_is_nonlinear(head)) {
			skb_walk_frags(head, tail) {
				TIPC_SKB_CB(head)->tail = tail;
			}
		} else {
			skb_frag_list_init(head);
		}
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = 0;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}
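
/* Reassembly loop sketch (illustrative; next_fragment() and deliver() are
 * hypothetical helpers). The partial head lives across calls; when the
 * function returns 1, 'frag' carries the complete message and 'head' has
 * already been reset to NULL by tipc_buf_append():
 *
 *	struct sk_buff *head = NULL, *frag;
 *
 *	while ((frag = next_fragment()) != NULL) {
 *		if (tipc_buf_append(&head, &frag))
 *			deliver(frag);
 *	}
 */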

/**
 * tipc_msg_append(): Append data to tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @dlen: size of data to be appended
 * @mss: max allowable size of buffer
 * @txq: queue to append to
 * Returns the number of 1k blocks appended or errno value
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
		    int mss, struct sk_buff_head *txq)
{
	struct sk_buff *skb;
	int accounted, total, curr;
	int mlen, cpy, rem = dlen;
	struct tipc_msg *hdr;

	skb = skb_peek_tail(txq);
	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
	total = accounted;

	do {
		if (!skb || skb->len >= mss) {
			skb = tipc_buf_acquire(mss, GFP_KERNEL);
			if (unlikely(!skb))
				return -ENOMEM;
			skb_orphan(skb);
			skb_trim(skb, MIN_H_SIZE);
			hdr = buf_msg(skb);
			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
			msg_set_hdr_sz(hdr, MIN_H_SIZE);
			msg_set_size(hdr, MIN_H_SIZE);
			__skb_queue_tail(txq, skb);
			total += 1;
		}
		hdr = buf_msg(skb);
		curr = msg_blocks(hdr);
		mlen = msg_size(hdr);
		cpy = min_t(size_t, rem, mss - mlen);
		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
			return -EFAULT;
		msg_set_size(hdr, mlen + cpy);
		skb_put(skb, cpy);
		rem -= cpy;
		total += msg_blocks(hdr) - curr;
	} while (rem > 0);
	return total - accounted;
}
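
/* Caller-side sketch (illustrative; 'hdr', 'txq', 'send', 'mss' and the
 * send-window accounting are assumptions modelled on the socket send
 * path): the return value is the number of send blocks added, which the
 * caller charges against its send window.
 *
 *	rc = tipc_msg_append(hdr, m, send, mss, txq);
 *	if (unlikely(rc < 0))
 *		return rc;
 *	blocks += rc;
 */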

/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;

	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = 1;
	return true;
}

/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Returns 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	/* Non-linear buffer? */
	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	dmax = pktmax - INT_H_SIZE;
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;
		/* Allocate a new fragment */
		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);
		/* Copy header & data to the fragment */
		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;
		/* Update the fragment's header */
		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}
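
/* Worked example (illustrative, INT_H_SIZE == 40): with pktmax = 1500 the
 * payload budget per fragment is dmax = 1460, so a 3000-byte message
 * (msg_size() of the original buffer) is split into
 * nof_fragms = 3000 / 1460 + 1 = 3 fragments, carrying 1460, 1460 and
 * 3000 % 1460 = 80 payload bytes respectively.
 */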

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	struct tipc_msg pkthdr;
	int msz = mhsz + dsz;
	int pktrem = pktmax;
	struct sk_buff *skb;
	int drem = dsz;
	int pktno = 1;
	char *pktpos;
	int pktsz;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);

		/* Fall back to smaller MTU if node local message */
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
			return -ENOMEM;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}

/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Returns "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
			    u32 max)
{
	struct tipc_msg *bmsg = buf_msg(bskb);
	u32 msz, bsz, offset, pad;

	msz = msg_size(msg);
	bsz = msg_size(bmsg);
	offset = align(bsz);
	pad = offset - bsz;

	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (offset + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
	msg_set_size(bmsg, offset + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}

/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb is suitable for bundling, either now
 * or later; if bundling was performed this time, the skb has been consumed
 * (the skb pointer is set to NULL).
 * Otherwise "false", i.e. the skb cannot be bundled at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
			 u32 dnode, bool *new_bundle)
{
	struct tipc_msg *msg, *inner, *outer;
	u32 tsz;

	/* First, check if the new buffer is suitable for bundling */
	msg = buf_msg(*skb);
	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (mss <= INT_H_SIZE + msg_size(msg))
		return false;

	/* Ok, but the last/target buffer can be empty? */
	if (unlikely(!tskb))
		return true;

	/* Is it a bundle already? Try to bundle the new message to it */
	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
		*new_bundle = false;
		goto bundle;
	}

	/* Make a new bundle of the two messages if possible */
	tsz = msg_size(buf_msg(tskb));
	if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
		return true;
	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
				      GFP_ATOMIC)))
		return true;
	inner = buf_msg(tskb);
	skb_push(tskb, INT_H_SIZE);
	outer = buf_msg(tskb);
	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
		      dnode);
	msg_set_importance(outer, msg_importance(inner));
	msg_set_size(outer, INT_H_SIZE + tsz);
	msg_set_msgcnt(outer, 1);
	*new_bundle = true;

bundle:
	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
		consume_skb(*skb);
		*skb = NULL;
	}
	return true;
}
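
/* Link-layer usage sketch (illustrative; 'backlogq' and the surrounding
 * logic are assumptions): try to piggyback a new message onto the current
 * tail of the backlog queue before queueing it as a standalone buffer.
 *
 *	bool new_bundle;
 *	struct sk_buff *tail = skb_peek_tail(backlogq);
 *
 *	if (tipc_msg_try_bundle(tail, &skb, mtu, dnode, &new_bundle) && !skb)
 *		return 0;			(consumed: bundled into tail)
 *	__skb_queue_tail(backlogq, skb);
 */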

/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted.
 *       Returns position of next msg
 * Consumes outer buffer when last packet extracted
 * Returns true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *hdr, *ihdr;
	int imsz;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	hdr = buf_msg(skb);
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
		goto none;

	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	if ((*pos + imsz) > msg_data_sz(hdr))
		goto none;

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;

	*pos += align(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *_hdr, *hdr;
	int hlen, dlen;

	if (skb_linearize(_skb))
		goto exit;
	_hdr = buf_msg(_skb);
	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
	hlen = msg_hdr_sz(_hdr);

	if (msg_dest_droppable(_hdr))
		goto exit;
	if (msg_errcode(_hdr))
		goto exit;

	/* Never return SHORT header */
	if (hlen == SHORT_H_SIZE)
		hlen = BASIC_H_SIZE;

	/* Don't return data along with SYN+; the sender has a clone */
	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
		dlen = 0;

	/* Allocate new buffer to return */
	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
	if (!*skb)
		goto exit;
	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

	/* Build reverse header in new buffer */
	hdr = buf_msg(*skb);
	msg_set_hdr_sz(hdr, hlen);
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(_hdr));
	msg_set_destport(hdr, msg_origport(_hdr));
	msg_set_destnode(hdr, msg_prevnode(_hdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, hlen + dlen);
	skb_orphan(_skb);
	kfree_skb(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}

bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			pr_err_ratelimited("Failed to clone buffer chain\n");
			return false;
		}
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: the applicable net namespace
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Returns true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport, dnode;
	u32 onode = tipc_own_addr(net);

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	msg = buf_msg(skb);
	if (msg_reroute_cnt(msg))
		return false;
	dnode = tipc_scope2node(net, msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(net, msg_nametype(msg),
				       msg_nameinst(msg), &dnode);
	if (!dport)
		return false;
	msg_incr_reroute_cnt(msg);
	if (dnode != onode)
		msg_set_prevnode(msg, onode);
	msg_set_destnode(msg, dnode);
	msg_set_destport(msg, dport);
	*err = TIPC_OK;

	return true;
}

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp = NULL;

	if (skb_queue_len(list) == 1)
		return true;

	while ((skb = __skb_dequeue(list))) {
		skb->next = NULL;
		if (tipc_buf_append(&tmp, &skb)) {
			__skb_queue_tail(list, skb);
			return true;
		}
		if (!tmp)
			break;
	}
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	pr_warn("Failed to assemble buffer\n");
	return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 * reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
	struct sk_buff *skb, *_skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_len;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	__skb_queue_tail(rcvq, frag);
	return true;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return false;
}

bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 * Returns true if queued, false if the buffer is a duplicate and was freed
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return true;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return true;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return true;
	}
	kfree_skb(skb);
	return false;
}
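
/* Example (illustrative; 'deferdq' and the counter are assumptions): with
 * a queue holding seqnos 3, 4 and 6, inserting seqno 5 lands before 6 and
 * returns true; inserting a second 5 hits the duplicate check, the buffer
 * is freed, and false is returned.
 *
 *	if (!__tipc_skb_queue_sorted(&deferdq, seqno, skb))
 *		stats.duplicates++;
 */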

void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}