Linux Audio

Check our new training course

Loading...
v6.13.7
  1/*
  2 * net/tipc/msg.c: TIPC message header routines
  3 *
  4 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
  5 * Copyright (c) 2005, 2010-2011, Wind River Systems
  6 * All rights reserved.
  7 *
  8 * Redistribution and use in source and binary forms, with or without
  9 * modification, are permitted provided that the following conditions are met:
 10 *
 11 * 1. Redistributions of source code must retain the above copyright
 12 *    notice, this list of conditions and the following disclaimer.
 13 * 2. Redistributions in binary form must reproduce the above copyright
 14 *    notice, this list of conditions and the following disclaimer in the
 15 *    documentation and/or other materials provided with the distribution.
 16 * 3. Neither the names of the copyright holders nor the names of its
 17 *    contributors may be used to endorse or promote products derived from
 18 *    this software without specific prior written permission.
 19 *
 20 * Alternatively, this software may be distributed under the terms of the
 21 * GNU General Public License ("GPL") version 2 as published by the Free
 22 * Software Foundation.
 23 *
 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 34 * POSSIBILITY OF SUCH DAMAGE.
 35 */
 36
 37#include <net/sock.h>
 38#include "core.h"
 39#include "msg.h"
 40#include "addr.h"
 41#include "name_table.h"
 42#include "crypto.h"
 43
 44#define BUF_ALIGN(x) ALIGN(x, 4)
 45#define MAX_FORWARD_SIZE 1024
 46#ifdef CONFIG_TIPC_CRYPTO
 47#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
 48#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
 49#else
 50#define BUF_HEADROOM (LL_MAX_HEADER + 48)
 51#define BUF_OVERHEAD BUF_HEADROOM
 52#endif
 53
 54const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
 55			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 
 56
 57/**
 58 * tipc_buf_acquire - creates a TIPC message buffer
 59 * @size: message size (including TIPC header)
 60 * @gfp: memory allocation flags
 61 *
 62 * Return: a new buffer with data pointers set to the specified size.
 63 *
 64 * NOTE:
 65 * Headroom is reserved to allow prepending of a data link header.
 66 * There may also be unrequested tailroom present at the buffer's end.
 67 */
 68struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
 69{
 70	struct sk_buff *skb;
 
 
 
 
 
 71
 72	skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
 73	if (skb) {
 74		skb_reserve(skb, BUF_HEADROOM);
 75		skb_put(skb, size);
 76		skb->next = NULL;
 77	}
 78	return skb;
 79}
 80
 81void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
 82		   u32 hsize, u32 dnode)
 83{
 84	memset(m, 0, hsize);
 85	msg_set_version(m);
 86	msg_set_user(m, user);
 87	msg_set_hdr_sz(m, hsize);
 88	msg_set_size(m, hsize);
 89	msg_set_prevnode(m, own_node);
 90	msg_set_type(m, type);
 91	if (hsize > SHORT_H_SIZE) {
 92		msg_set_orignode(m, own_node);
 93		msg_set_destnode(m, dnode);
 94	}
 95}
 96
 97struct sk_buff *tipc_msg_create(uint user, uint type,
 98				uint hdr_sz, uint data_sz, u32 dnode,
 99				u32 onode, u32 dport, u32 oport, int errcode)
100{
101	struct tipc_msg *msg;
102	struct sk_buff *buf;
103
104	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
105	if (unlikely(!buf))
106		return NULL;
107
108	msg = buf_msg(buf);
109	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
110	msg_set_size(msg, hdr_sz + data_sz);
111	msg_set_origport(msg, oport);
112	msg_set_destport(msg, dport);
113	msg_set_errcode(msg, errcode);
 
 
 
 
114	return buf;
115}
116
117/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
118 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
119 *            out: set when successful non-complete reassembly, otherwise NULL
120 * @*buf:     in:  the buffer to append. Always defined
121 *            out: head buf after successful complete reassembly, otherwise NULL
122 * Returns 1 when reassembly complete, otherwise 0
123 */
124int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
125{
126	struct sk_buff *head = *headbuf;
127	struct sk_buff *frag = *buf;
128	struct sk_buff *tail = NULL;
129	struct tipc_msg *msg;
130	u32 fragid;
131	int delta;
132	bool headstolen;
133
134	if (!frag)
135		goto err;
136
137	msg = buf_msg(frag);
138	fragid = msg_type(msg);
139	frag->next = NULL;
140	skb_pull(frag, msg_hdr_sz(msg));
141
142	if (fragid == FIRST_FRAGMENT) {
143		if (unlikely(head))
144			goto err;
145		if (skb_has_frag_list(frag) && __skb_linearize(frag))
146			goto err;
147		*buf = NULL;
148		frag = skb_unshare(frag, GFP_ATOMIC);
149		if (unlikely(!frag))
150			goto err;
151		head = *headbuf = frag;
 
152		TIPC_SKB_CB(head)->tail = NULL;
 
 
 
 
 
 
 
153		return 0;
154	}
155
156	if (!head)
157		goto err;
158
159	/* Either the input skb ownership is transferred to headskb
160	 * or the input skb is freed, clear the reference to avoid
161	 * bad access on error path.
162	 */
163	*buf = NULL;
164	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
165		kfree_skb_partial(frag, headstolen);
166	} else {
167		tail = TIPC_SKB_CB(head)->tail;
168		if (!skb_has_frag_list(head))
169			skb_shinfo(head)->frag_list = frag;
170		else
171			tail->next = frag;
172		head->truesize += frag->truesize;
173		head->data_len += frag->len;
174		head->len += frag->len;
175		TIPC_SKB_CB(head)->tail = frag;
176	}
177
178	if (fragid == LAST_FRAGMENT) {
179		TIPC_SKB_CB(head)->validated = 0;
180		if (unlikely(!tipc_msg_validate(&head)))
181			goto err;
182		*buf = head;
183		TIPC_SKB_CB(head)->tail = NULL;
184		*headbuf = NULL;
185		return 1;
186	}
 
187	return 0;
188err:
189	kfree_skb(*buf);
190	kfree_skb(*headbuf);
191	*buf = *headbuf = NULL;
192	return 0;
193}
194
195/**
196 * tipc_msg_append(): Append data to tail of an existing buffer queue
197 * @_hdr: header to be used
198 * @m: the data to be appended
199 * @mss: max allowable size of buffer
200 * @dlen: size of data to be appended
201 * @txq: queue to append to
202 *
203 * Return: the number of 1k blocks appended or errno value
204 */
205int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
206		    int mss, struct sk_buff_head *txq)
207{
208	struct sk_buff *skb;
209	int accounted, total, curr;
210	int mlen, cpy, rem = dlen;
211	struct tipc_msg *hdr;
212
213	skb = skb_peek_tail(txq);
214	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
215	total = accounted;
216
217	do {
218		if (!skb || skb->len >= mss) {
219			skb = tipc_buf_acquire(mss, GFP_KERNEL);
220			if (unlikely(!skb))
221				return -ENOMEM;
222			skb_orphan(skb);
223			skb_trim(skb, MIN_H_SIZE);
224			hdr = buf_msg(skb);
225			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
226			msg_set_hdr_sz(hdr, MIN_H_SIZE);
227			msg_set_size(hdr, MIN_H_SIZE);
228			__skb_queue_tail(txq, skb);
229			total += 1;
230		}
231		hdr = buf_msg(skb);
232		curr = msg_blocks(hdr);
233		mlen = msg_size(hdr);
234		cpy = min_t(size_t, rem, mss - mlen);
235		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
236			return -EFAULT;
237		msg_set_size(hdr, mlen + cpy);
238		skb_put(skb, cpy);
239		rem -= cpy;
240		total += msg_blocks(hdr) - curr;
241	} while (rem > 0);
242	return total - accounted;
243}
244
245/* tipc_msg_validate - validate basic format of received message
246 *
247 * This routine ensures a TIPC message has an acceptable header, and at least
248 * as much data as the header indicates it should.  The routine also ensures
249 * that the entire message header is stored in the main fragment of the message
250 * buffer, to simplify future access to message header fields.
251 *
252 * Note: Having extra info present in the message header or data areas is OK.
253 * TIPC will ignore the excess, under the assumption that it is optional info
254 * introduced by a later release of the protocol.
255 */
256bool tipc_msg_validate(struct sk_buff **_skb)
257{
258	struct sk_buff *skb = *_skb;
259	struct tipc_msg *hdr;
260	int msz, hsz;
261
262	/* Ensure that flow control ratio condition is satisfied */
263	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
264		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
265		if (!skb)
266			return false;
267		kfree_skb(*_skb);
268		*_skb = skb;
269	}
270
271	if (unlikely(TIPC_SKB_CB(skb)->validated))
272		return true;
273
274	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
275		return false;
276
277	hsz = msg_hdr_sz(buf_msg(skb));
278	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
279		return false;
280	if (unlikely(!pskb_may_pull(skb, hsz)))
281		return false;
282
283	hdr = buf_msg(skb);
284	if (unlikely(msg_version(hdr) != TIPC_VERSION))
285		return false;
286
287	msz = msg_size(hdr);
288	if (unlikely(msz < hsz))
289		return false;
290	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
291		return false;
292	if (unlikely(skb->len < msz))
293		return false;
294
295	TIPC_SKB_CB(skb)->validated = 1;
296	return true;
297}
298
299/**
300 * tipc_msg_fragment - build a fragment skb list for TIPC message
301 *
302 * @skb: TIPC message skb
303 * @hdr: internal msg header to be put on the top of the fragments
304 * @pktmax: max size of a fragment incl. the header
305 * @frags: returned fragment skb list
306 *
307 * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
308 * or -ENOMEM
309 */
310int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
311		      int pktmax, struct sk_buff_head *frags)
312{
313	int pktno, nof_fragms, dsz, dmax, eat;
314	struct tipc_msg *_hdr;
315	struct sk_buff *_skb;
316	u8 *data;
317
318	/* Non-linear buffer? */
319	if (skb_linearize(skb))
320		return -ENOMEM;
321
322	data = (u8 *)skb->data;
323	dsz = msg_size(buf_msg(skb));
324	dmax = pktmax - INT_H_SIZE;
325	if (dsz <= dmax || !dmax)
326		return -EINVAL;
327
328	nof_fragms = dsz / dmax + 1;
329	for (pktno = 1; pktno <= nof_fragms; pktno++) {
330		if (pktno < nof_fragms)
331			eat = dmax;
332		else
333			eat = dsz % dmax;
334		/* Allocate a new fragment */
335		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
336		if (!_skb)
337			goto error;
338		skb_orphan(_skb);
339		__skb_queue_tail(frags, _skb);
340		/* Copy header & data to the fragment */
341		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
342		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
343		data += eat;
344		/* Update the fragment's header */
345		_hdr = buf_msg(_skb);
346		msg_set_fragm_no(_hdr, pktno);
347		msg_set_nof_fragms(_hdr, nof_fragms);
348		msg_set_size(_hdr, INT_H_SIZE + eat);
349	}
350	return 0;
351
352error:
353	__skb_queue_purge(frags);
354	__skb_queue_head_init(frags);
355	return -ENOMEM;
356}
357
358/**
359 * tipc_msg_build - create buffer chain containing specified header and data
360 * @mhdr: Message header, to be prepended to data
361 * @m: User message
362 * @offset: buffer offset for fragmented messages (FIXME)
363 * @dsz: Total length of user data
364 * @pktmax: Max packet size that can be used
365 * @list: Buffer or chain of buffers to be returned to caller
366 *
367 * Note that the recursive call we are making here is safe, since it can
368 * logically go only one further level down.
369 *
370 * Return: message data size or errno: -ENOMEM, -EFAULT
371 */
372int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
373		   int dsz, int pktmax, struct sk_buff_head *list)
374{
375	int mhsz = msg_hdr_sz(mhdr);
376	struct tipc_msg pkthdr;
377	int msz = mhsz + dsz;
378	int pktrem = pktmax;
379	struct sk_buff *skb;
380	int drem = dsz;
381	int pktno = 1;
382	char *pktpos;
383	int pktsz;
384	int rc;
385
386	msg_set_size(mhdr, msz);
387
388	/* No fragmentation needed? */
389	if (likely(msz <= pktmax)) {
390		skb = tipc_buf_acquire(msz, GFP_KERNEL);
391
392		/* Fall back to smaller MTU if node local message */
393		if (unlikely(!skb)) {
394			if (pktmax != MAX_MSG_SIZE)
395				return -ENOMEM;
396			rc = tipc_msg_build(mhdr, m, offset, dsz,
397					    one_page_mtu, list);
398			if (rc != dsz)
399				return rc;
400			if (tipc_msg_assemble(list))
401				return dsz;
402			return -ENOMEM;
403		}
404		skb_orphan(skb);
405		__skb_queue_tail(list, skb);
406		skb_copy_to_linear_data(skb, mhdr, mhsz);
407		pktpos = skb->data + mhsz;
408		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
409			return dsz;
410		rc = -EFAULT;
411		goto error;
412	}
413
414	/* Prepare reusable fragment header */
415	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
416		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
417	msg_set_size(&pkthdr, pktmax);
418	msg_set_fragm_no(&pkthdr, pktno);
419	msg_set_importance(&pkthdr, msg_importance(mhdr));
420
421	/* Prepare first fragment */
422	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
423	if (!skb)
424		return -ENOMEM;
425	skb_orphan(skb);
426	__skb_queue_tail(list, skb);
427	pktpos = skb->data;
428	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
429	pktpos += INT_H_SIZE;
430	pktrem -= INT_H_SIZE;
431	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
432	pktpos += mhsz;
433	pktrem -= mhsz;
434
435	do {
436		if (drem < pktrem)
437			pktrem = drem;
438
439		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
440			rc = -EFAULT;
441			goto error;
442		}
443		drem -= pktrem;
444
445		if (!drem)
446			break;
447
448		/* Prepare new fragment: */
449		if (drem < (pktmax - INT_H_SIZE))
450			pktsz = drem + INT_H_SIZE;
451		else
452			pktsz = pktmax;
453		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
454		if (!skb) {
455			rc = -ENOMEM;
456			goto error;
457		}
458		skb_orphan(skb);
459		__skb_queue_tail(list, skb);
460		msg_set_type(&pkthdr, FRAGMENT);
461		msg_set_size(&pkthdr, pktsz);
462		msg_set_fragm_no(&pkthdr, ++pktno);
463		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
464		pktpos = skb->data + INT_H_SIZE;
465		pktrem = pktsz - INT_H_SIZE;
466
467	} while (1);
468	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
469	return dsz;
470error:
471	__skb_queue_purge(list);
472	__skb_queue_head_init(list);
473	return rc;
474}
475
476/**
477 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
478 * @bskb: the bundle buffer to append to
479 * @msg: message to be appended
480 * @max: max allowable size for the bundle buffer
481 *
482 * Return: "true" if bundling has been performed, otherwise "false"
483 */
484static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
485			    u32 max)
486{
487	struct tipc_msg *bmsg = buf_msg(bskb);
488	u32 msz, bsz, offset, pad;
489
490	msz = msg_size(msg);
491	bsz = msg_size(bmsg);
492	offset = BUF_ALIGN(bsz);
493	pad = offset - bsz;
494
495	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
496		return false;
497	if (unlikely(max < (offset + msz)))
498		return false;
499
500	skb_put(bskb, pad + msz);
501	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
502	msg_set_size(bmsg, offset + msz);
503	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
504	return true;
505}
506
507/**
508 * tipc_msg_try_bundle - Try to bundle a new message to the last one
509 * @tskb: the last/target message to which the new one will be appended
510 * @skb: the new message skb pointer
511 * @mss: max message size (header inclusive)
512 * @dnode: destination node for the message
513 * @new_bundle: if this call made a new bundle or not
514 *
515 * Return: "true" if the new message skb is potential for bundling this time or
516 * later, in the case a bundling has been done this time, the skb is consumed
517 * (the skb pointer = NULL).
518 * Otherwise, "false" if the skb cannot be bundled at all.
519 */
520bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
521			 u32 dnode, bool *new_bundle)
522{
523	struct tipc_msg *msg, *inner, *outer;
524	u32 tsz;
525
526	/* First, check if the new buffer is suitable for bundling */
527	msg = buf_msg(*skb);
528	if (msg_user(msg) == MSG_FRAGMENTER)
529		return false;
530	if (msg_user(msg) == TUNNEL_PROTOCOL)
531		return false;
532	if (msg_user(msg) == BCAST_PROTOCOL)
533		return false;
534	if (mss <= INT_H_SIZE + msg_size(msg))
535		return false;
536
537	/* Ok, but the last/target buffer can be empty? */
538	if (unlikely(!tskb))
539		return true;
540
541	/* Is it a bundle already? Try to bundle the new message to it */
542	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
543		*new_bundle = false;
544		goto bundle;
545	}
546
547	/* Make a new bundle of the two messages if possible */
548	tsz = msg_size(buf_msg(tskb));
549	if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))
550		return true;
551	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
552				      GFP_ATOMIC)))
553		return true;
554	inner = buf_msg(tskb);
555	skb_push(tskb, INT_H_SIZE);
556	outer = buf_msg(tskb);
557	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
558		      dnode);
559	msg_set_importance(outer, msg_importance(inner));
560	msg_set_size(outer, INT_H_SIZE + tsz);
561	msg_set_msgcnt(outer, 1);
562	*new_bundle = true;
563
564bundle:
565	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
566		consume_skb(*skb);
567		*skb = NULL;
568	}
569	return true;
570}
571
572/**
573 *  tipc_msg_extract(): extract bundled inner packet from buffer
574 *  @skb: buffer to be extracted from.
575 *  @iskb: extracted inner buffer, to be returned
576 *  @pos: position in outer message of msg to be extracted.
577 *  Returns position of next msg.
578 *  Consumes outer buffer when last packet extracted
579 *  Return: true when there is an extracted buffer, otherwise false
580 */
581bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
582{
583	struct tipc_msg *hdr, *ihdr;
584	int imsz;
585
586	*iskb = NULL;
587	if (unlikely(skb_linearize(skb)))
588		goto none;
589
590	hdr = buf_msg(skb);
591	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
592		goto none;
593
594	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
595	imsz = msg_size(ihdr);
596
597	if ((*pos + imsz) > msg_data_sz(hdr))
598		goto none;
599
600	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
601	if (!*iskb)
602		goto none;
603
604	skb_copy_to_linear_data(*iskb, ihdr, imsz);
605	if (unlikely(!tipc_msg_validate(iskb)))
606		goto none;
607
608	*pos += BUF_ALIGN(imsz);
609	return true;
610none:
611	kfree_skb(skb);
612	kfree_skb(*iskb);
613	*iskb = NULL;
614	return false;
615}
616
617/**
618 * tipc_msg_reverse(): swap source and destination addresses and add error code
619 * @own_node: originating node id for reversed message
620 * @skb:  buffer containing message to be reversed; will be consumed
621 * @err:  error code to be set in message, if any
622 * Replaces consumed buffer with new one when successful
623 * Return: true if success, otherwise false
624 */
625bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
626{
627	struct sk_buff *_skb = *skb;
628	struct tipc_msg *_hdr, *hdr;
629	int hlen, dlen;
630
631	if (skb_linearize(_skb))
632		goto exit;
633	_hdr = buf_msg(_skb);
634	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
635	hlen = msg_hdr_sz(_hdr);
636
637	if (msg_dest_droppable(_hdr))
638		goto exit;
639	if (msg_errcode(_hdr))
640		goto exit;
641
642	/* Never return SHORT header */
643	if (hlen == SHORT_H_SIZE)
644		hlen = BASIC_H_SIZE;
645
646	/* Don't return data along with SYN+, - sender has a clone */
647	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
648		dlen = 0;
649
650	/* Allocate new buffer to return */
651	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
652	if (!*skb)
653		goto exit;
654	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
655	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);
656
657	/* Build reverse header in new buffer */
658	hdr = buf_msg(*skb);
659	msg_set_hdr_sz(hdr, hlen);
660	msg_set_errcode(hdr, err);
661	msg_set_non_seq(hdr, 0);
662	msg_set_origport(hdr, msg_destport(_hdr));
663	msg_set_destport(hdr, msg_origport(_hdr));
664	msg_set_destnode(hdr, msg_prevnode(_hdr));
665	msg_set_prevnode(hdr, own_node);
666	msg_set_orignode(hdr, own_node);
667	msg_set_size(hdr, hlen + dlen);
668	skb_orphan(_skb);
669	kfree_skb(_skb);
670	return true;
671exit:
672	kfree_skb(_skb);
673	*skb = NULL;
674	return false;
675}
676
677bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
678{
679	struct sk_buff *skb, *_skb;
680
681	skb_queue_walk(msg, skb) {
682		_skb = skb_clone(skb, GFP_ATOMIC);
683		if (!_skb) {
684			__skb_queue_purge(cpy);
685			pr_err_ratelimited("Failed to clone buffer chain\n");
686			return false;
687		}
688		__skb_queue_tail(cpy, _skb);
689	}
690	return true;
691}
692
693/**
694 * tipc_msg_lookup_dest(): try to find new destination for named message
695 * @net: pointer to associated network namespace
696 * @skb: the buffer containing the message.
697 * @err: error code to be used by caller if lookup fails
698 * Does not consume buffer
699 * Return: true if a destination is found, false otherwise
700 */
701bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
702{
703	struct tipc_msg *msg = buf_msg(skb);
704	u32 scope = msg_lookup_scope(msg);
705	u32 self = tipc_own_addr(net);
706	u32 inst = msg_nameinst(msg);
707	struct tipc_socket_addr sk;
708	struct tipc_uaddr ua;
709
710	if (!msg_isdata(msg))
711		return false;
712	if (!msg_named(msg))
713		return false;
714	if (msg_errcode(msg))
715		return false;
716	*err = TIPC_ERR_NO_NAME;
717	if (skb_linearize(skb))
718		return false;
719	msg = buf_msg(skb);
720	if (msg_reroute_cnt(msg))
721		return false;
722	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope,
723		   msg_nametype(msg), inst, inst);
724	sk.node = tipc_scope2node(net, scope);
725	if (!tipc_nametbl_lookup_anycast(net, &ua, &sk))
726		return false;
727	msg_incr_reroute_cnt(msg);
728	if (sk.node != self)
729		msg_set_prevnode(msg, self);
730	msg_set_destnode(msg, sk.node);
731	msg_set_destport(msg, sk.ref);
732	*err = TIPC_OK;
733
734	return true;
735}
736
737/* tipc_msg_assemble() - assemble chain of fragments into one message
738 */
739bool tipc_msg_assemble(struct sk_buff_head *list)
740{
741	struct sk_buff *skb, *tmp = NULL;
742
743	if (skb_queue_len(list) == 1)
744		return true;
745
746	while ((skb = __skb_dequeue(list))) {
747		skb->next = NULL;
748		if (tipc_buf_append(&tmp, &skb)) {
749			__skb_queue_tail(list, skb);
750			return true;
751		}
752		if (!tmp)
753			break;
754	}
755	__skb_queue_purge(list);
756	__skb_queue_head_init(list);
757	pr_warn("Failed do assemble buffer\n");
758	return false;
759}
760
761/* tipc_msg_reassemble() - clone a buffer chain of fragments and
762 *                         reassemble the clones into one message
763 */
764bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
765{
766	struct sk_buff *skb, *_skb;
767	struct sk_buff *frag = NULL;
768	struct sk_buff *head = NULL;
769	int hdr_len;
770
771	/* Copy header if single buffer */
772	if (skb_queue_len(list) == 1) {
773		skb = skb_peek(list);
774		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
775		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
776		if (!_skb)
777			return false;
778		__skb_queue_tail(rcvq, _skb);
779		return true;
780	}
781
782	/* Clone all fragments and reassemble */
783	skb_queue_walk(list, skb) {
784		frag = skb_clone(skb, GFP_ATOMIC);
785		if (!frag)
786			goto error;
787		frag->next = NULL;
788		if (tipc_buf_append(&head, &frag))
789			break;
790		if (!head)
791			goto error;
792	}
793	__skb_queue_tail(rcvq, frag);
794	return true;
795error:
796	pr_warn("Failed do clone local mcast rcv buffer\n");
797	kfree_skb(head);
798	return false;
799}
800
801bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
802			struct sk_buff_head *cpy)
803{
804	struct sk_buff *skb, *_skb;
805
806	skb_queue_walk(msg, skb) {
807		_skb = pskb_copy(skb, GFP_ATOMIC);
808		if (!_skb) {
809			__skb_queue_purge(cpy);
810			return false;
811		}
812		msg_set_destnode(buf_msg(_skb), dst);
813		__skb_queue_tail(cpy, _skb);
814	}
815	return true;
816}
817
818/* tipc_skb_queue_sorted(); sort pkt into list according to sequence number
819 * @list: list to be appended to
820 * @seqno: sequence number of buffer to add
821 * @skb: buffer to add
822 */
823bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
824			     struct sk_buff *skb)
825{
826	struct sk_buff *_skb, *tmp;
827
828	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
829		__skb_queue_head(list, skb);
830		return true;
831	}
832
833	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
834		__skb_queue_tail(list, skb);
835		return true;
836	}
837
838	skb_queue_walk_safe(list, _skb, tmp) {
839		if (more(seqno, buf_seqno(_skb)))
840			continue;
841		if (seqno == buf_seqno(_skb))
842			break;
843		__skb_queue_before(list, _skb, skb);
844		return true;
845	}
846	kfree_skb(skb);
847	return false;
848}
849
850void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
851		     struct sk_buff_head *xmitq)
852{
853	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
854		__skb_queue_tail(xmitq, skb);
855}
v5.9
  1/*
  2 * net/tipc/msg.c: TIPC message header routines
  3 *
  4 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
  5 * Copyright (c) 2005, 2010-2011, Wind River Systems
  6 * All rights reserved.
  7 *
  8 * Redistribution and use in source and binary forms, with or without
  9 * modification, are permitted provided that the following conditions are met:
 10 *
 11 * 1. Redistributions of source code must retain the above copyright
 12 *    notice, this list of conditions and the following disclaimer.
 13 * 2. Redistributions in binary form must reproduce the above copyright
 14 *    notice, this list of conditions and the following disclaimer in the
 15 *    documentation and/or other materials provided with the distribution.
 16 * 3. Neither the names of the copyright holders nor the names of its
 17 *    contributors may be used to endorse or promote products derived from
 18 *    this software without specific prior written permission.
 19 *
 20 * Alternatively, this software may be distributed under the terms of the
 21 * GNU General Public License ("GPL") version 2 as published by the Free
 22 * Software Foundation.
 23 *
 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 34 * POSSIBILITY OF SUCH DAMAGE.
 35 */
 36
 37#include <net/sock.h>
 38#include "core.h"
 39#include "msg.h"
 40#include "addr.h"
 41#include "name_table.h"
 42#include "crypto.h"
 43
 
 44#define MAX_FORWARD_SIZE 1024
 45#ifdef CONFIG_TIPC_CRYPTO
 46#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
 47#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
 48#else
 49#define BUF_HEADROOM (LL_MAX_HEADER + 48)
 50#define BUF_TAILROOM 16
 51#endif
 52
 53static unsigned int align(unsigned int i)
 54{
 55	return (i + 3) & ~3u;
 56}
 57
 58/**
 59 * tipc_buf_acquire - creates a TIPC message buffer
 60 * @size: message size (including TIPC header)
 
 61 *
 62 * Returns a new buffer with data pointers set to the specified size.
 63 *
 64 * NOTE: Headroom is reserved to allow prepending of a data link header.
 65 *       There may also be unrequested tailroom present at the buffer's end.
 
 66 */
 67struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
 68{
 69	struct sk_buff *skb;
 70#ifdef CONFIG_TIPC_CRYPTO
 71	unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
 72#else
 73	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
 74#endif
 75
 76	skb = alloc_skb_fclone(buf_size, gfp);
 77	if (skb) {
 78		skb_reserve(skb, BUF_HEADROOM);
 79		skb_put(skb, size);
 80		skb->next = NULL;
 81	}
 82	return skb;
 83}
 84
 85void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
 86		   u32 hsize, u32 dnode)
 87{
 88	memset(m, 0, hsize);
 89	msg_set_version(m);
 90	msg_set_user(m, user);
 91	msg_set_hdr_sz(m, hsize);
 92	msg_set_size(m, hsize);
 93	msg_set_prevnode(m, own_node);
 94	msg_set_type(m, type);
 95	if (hsize > SHORT_H_SIZE) {
 96		msg_set_orignode(m, own_node);
 97		msg_set_destnode(m, dnode);
 98	}
 99}
100
101struct sk_buff *tipc_msg_create(uint user, uint type,
102				uint hdr_sz, uint data_sz, u32 dnode,
103				u32 onode, u32 dport, u32 oport, int errcode)
104{
105	struct tipc_msg *msg;
106	struct sk_buff *buf;
107
108	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
109	if (unlikely(!buf))
110		return NULL;
111
112	msg = buf_msg(buf);
113	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
114	msg_set_size(msg, hdr_sz + data_sz);
115	msg_set_origport(msg, oport);
116	msg_set_destport(msg, dport);
117	msg_set_errcode(msg, errcode);
118	if (hdr_sz > SHORT_H_SIZE) {
119		msg_set_orignode(msg, onode);
120		msg_set_destnode(msg, dnode);
121	}
122	return buf;
123}
124
125/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
126 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
127 *            out: set when successful non-complete reassembly, otherwise NULL
128 * @*buf:     in:  the buffer to append. Always defined
129 *            out: head buf after successful complete reassembly, otherwise NULL
130 * Returns 1 when reassembly complete, otherwise 0
131 */
132int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
133{
134	struct sk_buff *head = *headbuf;
135	struct sk_buff *frag = *buf;
136	struct sk_buff *tail = NULL;
137	struct tipc_msg *msg;
138	u32 fragid;
139	int delta;
140	bool headstolen;
141
142	if (!frag)
143		goto err;
144
145	msg = buf_msg(frag);
146	fragid = msg_type(msg);
147	frag->next = NULL;
148	skb_pull(frag, msg_hdr_sz(msg));
149
150	if (fragid == FIRST_FRAGMENT) {
151		if (unlikely(head))
152			goto err;
 
 
 
153		frag = skb_unshare(frag, GFP_ATOMIC);
154		if (unlikely(!frag))
155			goto err;
156		head = *headbuf = frag;
157		*buf = NULL;
158		TIPC_SKB_CB(head)->tail = NULL;
159		if (skb_is_nonlinear(head)) {
160			skb_walk_frags(head, tail) {
161				TIPC_SKB_CB(head)->tail = tail;
162			}
163		} else {
164			skb_frag_list_init(head);
165		}
166		return 0;
167	}
168
169	if (!head)
170		goto err;
171
 
 
 
 
 
172	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
173		kfree_skb_partial(frag, headstolen);
174	} else {
175		tail = TIPC_SKB_CB(head)->tail;
176		if (!skb_has_frag_list(head))
177			skb_shinfo(head)->frag_list = frag;
178		else
179			tail->next = frag;
180		head->truesize += frag->truesize;
181		head->data_len += frag->len;
182		head->len += frag->len;
183		TIPC_SKB_CB(head)->tail = frag;
184	}
185
186	if (fragid == LAST_FRAGMENT) {
187		TIPC_SKB_CB(head)->validated = 0;
188		if (unlikely(!tipc_msg_validate(&head)))
189			goto err;
190		*buf = head;
191		TIPC_SKB_CB(head)->tail = NULL;
192		*headbuf = NULL;
193		return 1;
194	}
195	*buf = NULL;
196	return 0;
197err:
198	kfree_skb(*buf);
199	kfree_skb(*headbuf);
200	*buf = *headbuf = NULL;
201	return 0;
202}
203
204/**
205 * tipc_msg_append(): Append data to tail of an existing buffer queue
206 * @_hdr: header to be used
207 * @m: the data to be appended
208 * @mss: max allowable size of buffer
209 * @dlen: size of data to be appended
210 * @txq: queue to appand to
211 * Returns the number og 1k blocks appended or errno value
 
212 */
213int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
214		    int mss, struct sk_buff_head *txq)
215{
216	struct sk_buff *skb;
217	int accounted, total, curr;
218	int mlen, cpy, rem = dlen;
219	struct tipc_msg *hdr;
220
221	skb = skb_peek_tail(txq);
222	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
223	total = accounted;
224
225	do {
226		if (!skb || skb->len >= mss) {
227			skb = tipc_buf_acquire(mss, GFP_KERNEL);
228			if (unlikely(!skb))
229				return -ENOMEM;
230			skb_orphan(skb);
231			skb_trim(skb, MIN_H_SIZE);
232			hdr = buf_msg(skb);
233			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
234			msg_set_hdr_sz(hdr, MIN_H_SIZE);
235			msg_set_size(hdr, MIN_H_SIZE);
236			__skb_queue_tail(txq, skb);
237			total += 1;
238		}
239		hdr = buf_msg(skb);
240		curr = msg_blocks(hdr);
241		mlen = msg_size(hdr);
242		cpy = min_t(size_t, rem, mss - mlen);
243		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
244			return -EFAULT;
245		msg_set_size(hdr, mlen + cpy);
246		skb_put(skb, cpy);
247		rem -= cpy;
248		total += msg_blocks(hdr) - curr;
249	} while (rem > 0);
250	return total - accounted;
251}
252
253/* tipc_msg_validate - validate basic format of received message
254 *
255 * This routine ensures a TIPC message has an acceptable header, and at least
256 * as much data as the header indicates it should.  The routine also ensures
257 * that the entire message header is stored in the main fragment of the message
258 * buffer, to simplify future access to message header fields.
259 *
260 * Note: Having extra info present in the message header or data areas is OK.
261 * TIPC will ignore the excess, under the assumption that it is optional info
262 * introduced by a later release of the protocol.
263 */
264bool tipc_msg_validate(struct sk_buff **_skb)
265{
266	struct sk_buff *skb = *_skb;
267	struct tipc_msg *hdr;
268	int msz, hsz;
269
270	/* Ensure that flow control ratio condition is satisfied */
271	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
272		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
273		if (!skb)
274			return false;
275		kfree_skb(*_skb);
276		*_skb = skb;
277	}
278
279	if (unlikely(TIPC_SKB_CB(skb)->validated))
280		return true;
281
282	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
283		return false;
284
285	hsz = msg_hdr_sz(buf_msg(skb));
286	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
287		return false;
288	if (unlikely(!pskb_may_pull(skb, hsz)))
289		return false;
290
291	hdr = buf_msg(skb);
292	if (unlikely(msg_version(hdr) != TIPC_VERSION))
293		return false;
294
295	msz = msg_size(hdr);
296	if (unlikely(msz < hsz))
297		return false;
298	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
299		return false;
300	if (unlikely(skb->len < msz))
301		return false;
302
303	TIPC_SKB_CB(skb)->validated = 1;
304	return true;
305}
306
307/**
308 * tipc_msg_fragment - build a fragment skb list for TIPC message
309 *
310 * @skb: TIPC message skb
311 * @hdr: internal msg header to be put on the top of the fragments
312 * @pktmax: max size of a fragment incl. the header
313 * @frags: returned fragment skb list
314 *
315 * Returns 0 if the fragmentation is successful, otherwise: -EINVAL
316 * or -ENOMEM
317 */
318int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
319		      int pktmax, struct sk_buff_head *frags)
320{
321	int pktno, nof_fragms, dsz, dmax, eat;
322	struct tipc_msg *_hdr;
323	struct sk_buff *_skb;
324	u8 *data;
325
326	/* Non-linear buffer? */
327	if (skb_linearize(skb))
328		return -ENOMEM;
329
330	data = (u8 *)skb->data;
331	dsz = msg_size(buf_msg(skb));
332	dmax = pktmax - INT_H_SIZE;
333	if (dsz <= dmax || !dmax)
334		return -EINVAL;
335
336	nof_fragms = dsz / dmax + 1;
337	for (pktno = 1; pktno <= nof_fragms; pktno++) {
338		if (pktno < nof_fragms)
339			eat = dmax;
340		else
341			eat = dsz % dmax;
342		/* Allocate a new fragment */
343		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
344		if (!_skb)
345			goto error;
346		skb_orphan(_skb);
347		__skb_queue_tail(frags, _skb);
348		/* Copy header & data to the fragment */
349		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
350		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
351		data += eat;
352		/* Update the fragment's header */
353		_hdr = buf_msg(_skb);
354		msg_set_fragm_no(_hdr, pktno);
355		msg_set_nof_fragms(_hdr, nof_fragms);
356		msg_set_size(_hdr, INT_H_SIZE + eat);
357	}
358	return 0;
359
360error:
361	__skb_queue_purge(frags);
362	__skb_queue_head_init(frags);
363	return -ENOMEM;
364}
365
366/**
367 * tipc_msg_build - create buffer chain containing specified header and data
368 * @mhdr: Message header, to be prepended to data
369 * @m: User message
 
370 * @dsz: Total length of user data
371 * @pktmax: Max packet size that can be used
372 * @list: Buffer or chain of buffers to be returned to caller
373 *
374 * Note that the recursive call we are making here is safe, since it can
375 * logically go only one further level down.
376 *
377 * Returns message data size or errno: -ENOMEM, -EFAULT
378 */
379int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
380		   int dsz, int pktmax, struct sk_buff_head *list)
381{
382	int mhsz = msg_hdr_sz(mhdr);
383	struct tipc_msg pkthdr;
384	int msz = mhsz + dsz;
385	int pktrem = pktmax;
386	struct sk_buff *skb;
387	int drem = dsz;
388	int pktno = 1;
389	char *pktpos;
390	int pktsz;
391	int rc;
392
393	msg_set_size(mhdr, msz);
394
395	/* No fragmentation needed? */
396	if (likely(msz <= pktmax)) {
397		skb = tipc_buf_acquire(msz, GFP_KERNEL);
398
399		/* Fall back to smaller MTU if node local message */
400		if (unlikely(!skb)) {
401			if (pktmax != MAX_MSG_SIZE)
402				return -ENOMEM;
403			rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
 
404			if (rc != dsz)
405				return rc;
406			if (tipc_msg_assemble(list))
407				return dsz;
408			return -ENOMEM;
409		}
410		skb_orphan(skb);
411		__skb_queue_tail(list, skb);
412		skb_copy_to_linear_data(skb, mhdr, mhsz);
413		pktpos = skb->data + mhsz;
414		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
415			return dsz;
416		rc = -EFAULT;
417		goto error;
418	}
419
420	/* Prepare reusable fragment header */
421	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
422		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
423	msg_set_size(&pkthdr, pktmax);
424	msg_set_fragm_no(&pkthdr, pktno);
425	msg_set_importance(&pkthdr, msg_importance(mhdr));
426
427	/* Prepare first fragment */
428	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
429	if (!skb)
430		return -ENOMEM;
431	skb_orphan(skb);
432	__skb_queue_tail(list, skb);
433	pktpos = skb->data;
434	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
435	pktpos += INT_H_SIZE;
436	pktrem -= INT_H_SIZE;
437	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
438	pktpos += mhsz;
439	pktrem -= mhsz;
440
441	do {
442		if (drem < pktrem)
443			pktrem = drem;
444
445		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
446			rc = -EFAULT;
447			goto error;
448		}
449		drem -= pktrem;
450
451		if (!drem)
452			break;
453
454		/* Prepare new fragment: */
455		if (drem < (pktmax - INT_H_SIZE))
456			pktsz = drem + INT_H_SIZE;
457		else
458			pktsz = pktmax;
459		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
460		if (!skb) {
461			rc = -ENOMEM;
462			goto error;
463		}
464		skb_orphan(skb);
465		__skb_queue_tail(list, skb);
466		msg_set_type(&pkthdr, FRAGMENT);
467		msg_set_size(&pkthdr, pktsz);
468		msg_set_fragm_no(&pkthdr, ++pktno);
469		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
470		pktpos = skb->data + INT_H_SIZE;
471		pktrem = pktsz - INT_H_SIZE;
472
473	} while (1);
474	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
475	return dsz;
476error:
477	__skb_queue_purge(list);
478	__skb_queue_head_init(list);
479	return rc;
480}
481
482/**
483 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
484 * @bskb: the bundle buffer to append to
485 * @msg: message to be appended
486 * @max: max allowable size for the bundle buffer
487 *
488 * Returns "true" if bundling has been performed, otherwise "false"
489 */
490static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
491			    u32 max)
492{
493	struct tipc_msg *bmsg = buf_msg(bskb);
494	u32 msz, bsz, offset, pad;
495
496	msz = msg_size(msg);
497	bsz = msg_size(bmsg);
498	offset = align(bsz);
499	pad = offset - bsz;
500
501	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
502		return false;
503	if (unlikely(max < (offset + msz)))
504		return false;
505
506	skb_put(bskb, pad + msz);
507	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
508	msg_set_size(bmsg, offset + msz);
509	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
510	return true;
511}
512
513/**
514 * tipc_msg_try_bundle - Try to bundle a new message to the last one
515 * @tskb: the last/target message to which the new one will be appended
516 * @skb: the new message skb pointer
517 * @mss: max message size (header inclusive)
518 * @dnode: destination node for the message
519 * @new_bundle: if this call made a new bundle or not
520 *
521 * Return: "true" if the new message skb is potential for bundling this time or
522 * later, in the case a bundling has been done this time, the skb is consumed
523 * (the skb pointer = NULL).
524 * Otherwise, "false" if the skb cannot be bundled at all.
525 */
526bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
527			 u32 dnode, bool *new_bundle)
528{
529	struct tipc_msg *msg, *inner, *outer;
530	u32 tsz;
531
532	/* First, check if the new buffer is suitable for bundling */
533	msg = buf_msg(*skb);
534	if (msg_user(msg) == MSG_FRAGMENTER)
535		return false;
536	if (msg_user(msg) == TUNNEL_PROTOCOL)
537		return false;
538	if (msg_user(msg) == BCAST_PROTOCOL)
539		return false;
540	if (mss <= INT_H_SIZE + msg_size(msg))
541		return false;
542
543	/* Ok, but the last/target buffer can be empty? */
544	if (unlikely(!tskb))
545		return true;
546
547	/* Is it a bundle already? Try to bundle the new message to it */
548	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
549		*new_bundle = false;
550		goto bundle;
551	}
552
553	/* Make a new bundle of the two messages if possible */
554	tsz = msg_size(buf_msg(tskb));
555	if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
556		return true;
557	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
558				      GFP_ATOMIC)))
559		return true;
560	inner = buf_msg(tskb);
561	skb_push(tskb, INT_H_SIZE);
562	outer = buf_msg(tskb);
563	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
564		      dnode);
565	msg_set_importance(outer, msg_importance(inner));
566	msg_set_size(outer, INT_H_SIZE + tsz);
567	msg_set_msgcnt(outer, 1);
568	*new_bundle = true;
569
570bundle:
571	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
572		consume_skb(*skb);
573		*skb = NULL;
574	}
575	return true;
576}
577
578/**
579 *  tipc_msg_extract(): extract bundled inner packet from buffer
580 *  @skb: buffer to be extracted from.
581 *  @iskb: extracted inner buffer, to be returned
582 *  @pos: position in outer message of msg to be extracted.
583 *        Returns position of next msg
584 *  Consumes outer buffer when last packet extracted
585 *  Returns true when when there is an extracted buffer, otherwise false
586 */
587bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
588{
589	struct tipc_msg *hdr, *ihdr;
590	int imsz;
591
592	*iskb = NULL;
593	if (unlikely(skb_linearize(skb)))
594		goto none;
595
596	hdr = buf_msg(skb);
597	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
598		goto none;
599
600	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
601	imsz = msg_size(ihdr);
602
603	if ((*pos + imsz) > msg_data_sz(hdr))
604		goto none;
605
606	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
607	if (!*iskb)
608		goto none;
609
610	skb_copy_to_linear_data(*iskb, ihdr, imsz);
611	if (unlikely(!tipc_msg_validate(iskb)))
612		goto none;
613
614	*pos += align(imsz);
615	return true;
616none:
617	kfree_skb(skb);
618	kfree_skb(*iskb);
619	*iskb = NULL;
620	return false;
621}
622
623/**
624 * tipc_msg_reverse(): swap source and destination addresses and add error code
625 * @own_node: originating node id for reversed message
626 * @skb:  buffer containing message to be reversed; will be consumed
627 * @err:  error code to be set in message, if any
628 * Replaces consumed buffer with new one when successful
629 * Returns true if success, otherwise false
630 */
631bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
632{
633	struct sk_buff *_skb = *skb;
634	struct tipc_msg *_hdr, *hdr;
635	int hlen, dlen;
636
637	if (skb_linearize(_skb))
638		goto exit;
639	_hdr = buf_msg(_skb);
640	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
641	hlen = msg_hdr_sz(_hdr);
642
643	if (msg_dest_droppable(_hdr))
644		goto exit;
645	if (msg_errcode(_hdr))
646		goto exit;
647
648	/* Never return SHORT header */
649	if (hlen == SHORT_H_SIZE)
650		hlen = BASIC_H_SIZE;
651
652	/* Don't return data along with SYN+, - sender has a clone */
653	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
654		dlen = 0;
655
656	/* Allocate new buffer to return */
657	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
658	if (!*skb)
659		goto exit;
660	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
661	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);
662
663	/* Build reverse header in new buffer */
664	hdr = buf_msg(*skb);
665	msg_set_hdr_sz(hdr, hlen);
666	msg_set_errcode(hdr, err);
667	msg_set_non_seq(hdr, 0);
668	msg_set_origport(hdr, msg_destport(_hdr));
669	msg_set_destport(hdr, msg_origport(_hdr));
670	msg_set_destnode(hdr, msg_prevnode(_hdr));
671	msg_set_prevnode(hdr, own_node);
672	msg_set_orignode(hdr, own_node);
673	msg_set_size(hdr, hlen + dlen);
674	skb_orphan(_skb);
675	kfree_skb(_skb);
676	return true;
677exit:
678	kfree_skb(_skb);
679	*skb = NULL;
680	return false;
681}
682
683bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
684{
685	struct sk_buff *skb, *_skb;
686
687	skb_queue_walk(msg, skb) {
688		_skb = skb_clone(skb, GFP_ATOMIC);
689		if (!_skb) {
690			__skb_queue_purge(cpy);
691			pr_err_ratelimited("Failed to clone buffer chain\n");
692			return false;
693		}
694		__skb_queue_tail(cpy, _skb);
695	}
696	return true;
697}
698
699/**
700 * tipc_msg_lookup_dest(): try to find new destination for named message
 
701 * @skb: the buffer containing the message.
702 * @err: error code to be used by caller if lookup fails
703 * Does not consume buffer
704 * Returns true if a destination is found, false otherwise
705 */
706bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
707{
708	struct tipc_msg *msg = buf_msg(skb);
709	u32 dport, dnode;
710	u32 onode = tipc_own_addr(net);
 
 
 
711
712	if (!msg_isdata(msg))
713		return false;
714	if (!msg_named(msg))
715		return false;
716	if (msg_errcode(msg))
717		return false;
718	*err = TIPC_ERR_NO_NAME;
719	if (skb_linearize(skb))
720		return false;
721	msg = buf_msg(skb);
722	if (msg_reroute_cnt(msg))
723		return false;
724	dnode = tipc_scope2node(net, msg_lookup_scope(msg));
725	dport = tipc_nametbl_translate(net, msg_nametype(msg),
726				       msg_nameinst(msg), &dnode);
727	if (!dport)
728		return false;
729	msg_incr_reroute_cnt(msg);
730	if (dnode != onode)
731		msg_set_prevnode(msg, onode);
732	msg_set_destnode(msg, dnode);
733	msg_set_destport(msg, dport);
734	*err = TIPC_OK;
735
736	return true;
737}
738
739/* tipc_msg_assemble() - assemble chain of fragments into one message
740 */
741bool tipc_msg_assemble(struct sk_buff_head *list)
742{
743	struct sk_buff *skb, *tmp = NULL;
744
745	if (skb_queue_len(list) == 1)
746		return true;
747
748	while ((skb = __skb_dequeue(list))) {
749		skb->next = NULL;
750		if (tipc_buf_append(&tmp, &skb)) {
751			__skb_queue_tail(list, skb);
752			return true;
753		}
754		if (!tmp)
755			break;
756	}
757	__skb_queue_purge(list);
758	__skb_queue_head_init(list);
759	pr_warn("Failed do assemble buffer\n");
760	return false;
761}
762
763/* tipc_msg_reassemble() - clone a buffer chain of fragments and
764 *                         reassemble the clones into one message
765 */
766bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
767{
768	struct sk_buff *skb, *_skb;
769	struct sk_buff *frag = NULL;
770	struct sk_buff *head = NULL;
771	int hdr_len;
772
773	/* Copy header if single buffer */
774	if (skb_queue_len(list) == 1) {
775		skb = skb_peek(list);
776		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
777		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
778		if (!_skb)
779			return false;
780		__skb_queue_tail(rcvq, _skb);
781		return true;
782	}
783
784	/* Clone all fragments and reassemble */
785	skb_queue_walk(list, skb) {
786		frag = skb_clone(skb, GFP_ATOMIC);
787		if (!frag)
788			goto error;
789		frag->next = NULL;
790		if (tipc_buf_append(&head, &frag))
791			break;
792		if (!head)
793			goto error;
794	}
795	__skb_queue_tail(rcvq, frag);
796	return true;
797error:
798	pr_warn("Failed do clone local mcast rcv buffer\n");
799	kfree_skb(head);
800	return false;
801}
802
803bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
804			struct sk_buff_head *cpy)
805{
806	struct sk_buff *skb, *_skb;
807
808	skb_queue_walk(msg, skb) {
809		_skb = pskb_copy(skb, GFP_ATOMIC);
810		if (!_skb) {
811			__skb_queue_purge(cpy);
812			return false;
813		}
814		msg_set_destnode(buf_msg(_skb), dst);
815		__skb_queue_tail(cpy, _skb);
816	}
817	return true;
818}
819
820/* tipc_skb_queue_sorted(); sort pkt into list according to sequence number
821 * @list: list to be appended to
822 * @seqno: sequence number of buffer to add
823 * @skb: buffer to add
824 */
825bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
826			     struct sk_buff *skb)
827{
828	struct sk_buff *_skb, *tmp;
829
830	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
831		__skb_queue_head(list, skb);
832		return true;
833	}
834
835	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
836		__skb_queue_tail(list, skb);
837		return true;
838	}
839
840	skb_queue_walk_safe(list, _skb, tmp) {
841		if (more(seqno, buf_seqno(_skb)))
842			continue;
843		if (seqno == buf_seqno(_skb))
844			break;
845		__skb_queue_before(list, _skb, skb);
846		return true;
847	}
848	kfree_skb(skb);
849	return false;
850}
851
852void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
853		     struct sk_buff_head *xmitq)
854{
855	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
856		__skb_queue_tail(xmitq, skb);
857}