v5.4
  1/*
  2 * net/tipc/bcast.c: TIPC broadcast code
  3 *
  4 * Copyright (c) 2004-2006, 2014-2017, Ericsson AB
  5 * Copyright (c) 2004, Intel Corporation.
  6 * Copyright (c) 2005, 2010-2011, Wind River Systems
  7 * All rights reserved.
  8 *
  9 * Redistribution and use in source and binary forms, with or without
 10 * modification, are permitted provided that the following conditions are met:
 11 *
 12 * 1. Redistributions of source code must retain the above copyright
 13 *    notice, this list of conditions and the following disclaimer.
 14 * 2. Redistributions in binary form must reproduce the above copyright
 15 *    notice, this list of conditions and the following disclaimer in the
 16 *    documentation and/or other materials provided with the distribution.
 17 * 3. Neither the names of the copyright holders nor the names of its
 18 *    contributors may be used to endorse or promote products derived from
 19 *    this software without specific prior written permission.
 20 *
 21 * Alternatively, this software may be distributed under the terms of the
 22 * GNU General Public License ("GPL") version 2 as published by the Free
 23 * Software Foundation.
 24 *
 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 35 * POSSIBILITY OF SUCH DAMAGE.
 36 */
 37
 38#include <linux/tipc_config.h>
 39#include "socket.h"
 40#include "msg.h"
 41#include "bcast.h"
 42#include "link.h"
 43#include "name_table.h"
 44
 45#define BCLINK_WIN_DEFAULT  50	/* bcast link window size (default) */
 46#define BCLINK_WIN_MIN      32	/* bcast minimum link window size */
 47
 48const char tipc_bclink_name[] = "broadcast-link";
 49
 50/**
 51 * struct tipc_bc_base - base structure for keeping broadcast send state
 52 * @link: broadcast send link structure
 53 * @inputq: data input queue; will only carry SOCK_WAKEUP messages
 54 * @dests: array keeping number of reachable destinations per bearer
 55 * @primary_bearer: a bearer having links to all broadcast destinations, if any
 56 * @bcast_support: indicates if primary bearer, if any, supports broadcast
 57 * @force_bcast: forces broadcast for multicast traffic
 58 * @rcast_support: indicates if all peer nodes support replicast
 59 * @force_rcast: forces replicast for multicast traffic
 60 * @rc_ratio: dest count as percentage of cluster size where send method changes
 61 * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
 62 */
 63struct tipc_bc_base {
 64	struct tipc_link *link;
 65	struct sk_buff_head inputq;
 66	int dests[MAX_BEARERS];
 67	int primary_bearer;
 68	bool bcast_support;
 69	bool force_bcast;
 70	bool rcast_support;
 71	bool force_rcast;
 72	int rc_ratio;
 73	int bc_threshold;
 74};
 75
 76static struct tipc_bc_base *tipc_bc_base(struct net *net)
 77{
 78	return tipc_net(net)->bcbase;
 79}
 80
 81/* tipc_bcast_get_mtu(): get the MTU currently used by broadcast link
 82 * Note: the MTU is decremented to give room for a tunnel header, in
 83 * case the message needs to be sent as replicast
 84 */
 85int tipc_bcast_get_mtu(struct net *net)
 86{
 87	return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
 88}
 89
 90void tipc_bcast_disable_rcast(struct net *net)
 91{
 92	tipc_bc_base(net)->rcast_support = false;
 93}
 94
 95static void tipc_bcbase_calc_bc_threshold(struct net *net)
 96{
 97	struct tipc_bc_base *bb = tipc_bc_base(net);
 98	int cluster_size = tipc_link_bc_peers(tipc_bc_sndlink(net));
 99
100	bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100);
101}
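
To make the integer arithmetic above concrete, here is a minimal userspace mirror of the same computation; the cluster size and ratio used below are illustrative inputs, not values read from a running cluster.

#include <stdio.h>

/* Same formula as tipc_bcbase_calc_bc_threshold(): integer percentage,
 * truncated, plus one so the threshold is never zero.
 */
static int calc_bc_threshold(int cluster_size, int rc_ratio)
{
	return 1 + (cluster_size * rc_ratio / 100);
}

int main(void)
{
	/* 40 peers with the default ratio of 10 gives 1 + 400/100 = 5, i.e.
	 * multicasts to 5 or fewer destinations are replicated, larger
	 * groups go out as a true broadcast.
	 */
	printf("threshold = %d\n", calc_bc_threshold(40, 10));
	return 0;
}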
102
103/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
104 *                               if any, and make it primary bearer
105 */
106static void tipc_bcbase_select_primary(struct net *net)
107{
108	struct tipc_bc_base *bb = tipc_bc_base(net);
109	int all_dests =  tipc_link_bc_peers(bb->link);
110	int i, mtu, prim;
111
112	bb->primary_bearer = INVALID_BEARER_ID;
113	bb->bcast_support = true;
114
115	if (!all_dests)
116		return;
117
118	for (i = 0; i < MAX_BEARERS; i++) {
119		if (!bb->dests[i])
120			continue;
121
122		mtu = tipc_bearer_mtu(net, i);
123		if (mtu < tipc_link_mtu(bb->link))
124			tipc_link_set_mtu(bb->link, mtu);
125		bb->bcast_support &= tipc_bearer_bcast_support(net, i);
126		if (bb->dests[i] < all_dests)
127			continue;
128
129		bb->primary_bearer = i;
130
131		/* Reduce risk that all nodes select same primary */
132		if ((i ^ tipc_own_addr(net)) & 1)
133			break;
134	}
135	prim = bb->primary_bearer;
136	if (prim != INVALID_BEARER_ID)
137		bb->bcast_support = tipc_bearer_bcast_support(net, prim);
138}
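
The XOR tie-break near the end of the loop above is easy to misread, so a standalone sketch follows; the bearer ids and node addresses are made up. It shows how, when more than one bearer reaches every peer, nodes with odd and even addresses tend to settle on different primaries.

#include <stdio.h>

/* Returns the bearer id a node keeps as primary, given candidate bearers
 * that all reach every destination. Mirrors the loop in
 * tipc_bcbase_select_primary(): the last candidate wins unless
 * (id ^ own_addr) is odd, in which case the search stops early.
 */
static int pick_primary(const int *cand, int n, unsigned int own_addr)
{
	int i, prim = -1;

	for (i = 0; i < n; i++) {
		prim = cand[i];
		if ((cand[i] ^ own_addr) & 1)
			break;
	}
	return prim;
}

int main(void)
{
	int cand[] = { 0, 1 };	/* two bearers that both reach all peers */

	/* An odd address stops on bearer 0, an even address on bearer 1,
	 * so the broadcast load spreads across both bearers.
	 */
	printf("odd-addressed node picks bearer %d\n", pick_primary(cand, 2, 0x1001001));
	printf("even-addressed node picks bearer %d\n", pick_primary(cand, 2, 0x1001002));
	return 0;
}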
139
140void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
141{
142	struct tipc_bc_base *bb = tipc_bc_base(net);
143
144	tipc_bcast_lock(net);
145	bb->dests[bearer_id]++;
146	tipc_bcbase_select_primary(net);
147	tipc_bcast_unlock(net);
148}
149
150void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id)
151{
152	struct tipc_bc_base *bb = tipc_bc_base(net);
153
154	tipc_bcast_lock(net);
155	bb->dests[bearer_id]--;
156	tipc_bcbase_select_primary(net);
157	tipc_bcast_unlock(net);
158}
159
160/* tipc_bcbase_xmit - broadcast a packet queue across one or more bearers
161 *
162 * Note that number of reachable destinations, as indicated in the dests[]
163 * array, may transitionally differ from the number of destinations indicated
164 * in each sent buffer. We can sustain this. Excess destination nodes will
165 * drop and never acknowledge the unexpected packets, and missing destinations
166 * will either require retransmission (if they are just about to be added to
167 * the bearer), or be removed from the buffer's 'ackers' counter (if they
168 * just went down)
169 */
170static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
171{
172	int bearer_id;
173	struct tipc_bc_base *bb = tipc_bc_base(net);
174	struct sk_buff *skb, *_skb;
175	struct sk_buff_head _xmitq;
176
177	if (skb_queue_empty(xmitq))
178		return;
179
180	/* The typical case: at least one bearer has links to all nodes */
181	bearer_id = bb->primary_bearer;
182	if (bearer_id >= 0) {
183		tipc_bearer_bc_xmit(net, bearer_id, xmitq);
184		return;
185	}
186
187	/* We have to transmit across all bearers */
188	__skb_queue_head_init(&_xmitq);
189	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
190		if (!bb->dests[bearer_id])
191			continue;
192
193		skb_queue_walk(xmitq, skb) {
194			_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
195			if (!_skb)
196				break;
197			__skb_queue_tail(&_xmitq, _skb);
198		}
199		tipc_bearer_bc_xmit(net, bearer_id, &_xmitq);
200	}
201	__skb_queue_purge(xmitq);
202	__skb_queue_purge(&_xmitq);
203}
204
205static void tipc_bcast_select_xmit_method(struct net *net, int dests,
206					  struct tipc_mc_method *method)
207{
208	struct tipc_bc_base *bb = tipc_bc_base(net);
209	unsigned long exp = method->expires;
210
211	/* Broadcast supported by used bearer/bearers? */
212	if (!bb->bcast_support) {
213		method->rcast = true;
214		return;
215	}
216	/* Any destinations which don't support replicast ? */
217	if (!bb->rcast_support) {
218		method->rcast = false;
219		return;
220	}
221	/* Can current method be changed ? */
222	method->expires = jiffies + TIPC_METHOD_EXPIRE;
223	if (method->mandatory)
224		return;
225
226	if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL) &&
227	    time_before(jiffies, exp))
228		return;
229
230	/* Configuration as force 'broadcast' method */
231	if (bb->force_bcast) {
232		method->rcast = false;
233		return;
234	}
235	/* Configuration as force 'replicast' method */
236	if (bb->force_rcast) {
237		method->rcast = true;
238		return;
239	}
240	/* Configuration as 'autoselect' or default method */
241	/* Determine method to use now */
242	method->rcast = dests <= bb->bc_threshold;
243}
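
The branches above encode a strict precedence that is easier to see in one place. The sketch below restates that precedence with plain parameters; it deliberately omits the mandatory/expiry freeze and the TIPC_MCAST_RBCTL capability check, and none of the names belong to the kernel API.

#include <stdbool.h>

enum xmit_method { XMIT_BROADCAST, XMIT_REPLICAST };

/* Precedence of tipc_bcast_select_xmit_method(): bearer capability first,
 * then peer capability, then forced configuration, and only then the
 * destination-count autoselect.
 */
static enum xmit_method pick_xmit_method(bool bcast_support, bool rcast_support,
					 bool force_bcast, bool force_rcast,
					 int dests, int bc_threshold)
{
	if (!bcast_support)
		return XMIT_REPLICAST;		/* bearers cannot broadcast */
	if (!rcast_support)
		return XMIT_BROADCAST;		/* some peer cannot replicast */
	if (force_bcast)
		return XMIT_BROADCAST;		/* administratively forced */
	if (force_rcast)
		return XMIT_REPLICAST;		/* administratively forced */
	return dests <= bc_threshold ? XMIT_REPLICAST : XMIT_BROADCAST;
}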
244
245/* tipc_bcast_xmit - broadcast the buffer chain to all external nodes
246 * @net: the applicable net namespace
247 * @pkts: chain of buffers containing message
248 * @cong_link_cnt: set to 1 if broadcast link is congested, otherwise 0
249 * Consumes the buffer chain.
250 * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE
251 */
252static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
253			   u16 *cong_link_cnt)
254{
255	struct tipc_link *l = tipc_bc_sndlink(net);
256	struct sk_buff_head xmitq;
257	int rc = 0;
258
259	__skb_queue_head_init(&xmitq);
260	tipc_bcast_lock(net);
261	if (tipc_link_bc_peers(l))
262		rc = tipc_link_xmit(l, pkts, &xmitq);
263	tipc_bcast_unlock(net);
264	tipc_bcbase_xmit(net, &xmitq);
265	__skb_queue_purge(pkts);
266	if (rc == -ELINKCONG) {
267		*cong_link_cnt = 1;
268		rc = 0;
269	}
270	return rc;
271}
272
273/* tipc_rcast_xmit - replicate and send a message to given destination nodes
274 * @net: the applicable net namespace
275 * @pkts: chain of buffers containing message
276 * @dests: list of destination nodes
277 * @cong_link_cnt: returns number of congested links
278 * @cong_links: returns identities of congested links
279 * Returns 0 if success, otherwise errno
280 */
281static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
282			   struct tipc_nlist *dests, u16 *cong_link_cnt)
283{
284	struct tipc_dest *dst, *tmp;
285	struct sk_buff_head _pkts;
286	u32 dnode, selector;
287
288	selector = msg_link_selector(buf_msg(skb_peek(pkts)));
289	__skb_queue_head_init(&_pkts);
290
291	list_for_each_entry_safe(dst, tmp, &dests->list, list) {
292		dnode = dst->node;
293		if (!tipc_msg_pskb_copy(dnode, pkts, &_pkts))
294			return -ENOMEM;
295
296		/* Any other return value than -ELINKCONG is ignored */
297		if (tipc_node_xmit(net, &_pkts, dnode, selector) == -ELINKCONG)
298			(*cong_link_cnt)++;
299	}
300	return 0;
301}
302
303/* tipc_mcast_send_sync - deliver a dummy message with SYN bit
304 * @net: the applicable net namespace
305 * @skb: socket buffer to copy
306 * @method: send method to be used
307 * @dests: destination nodes for message.
308 * @cong_link_cnt: returns number of encountered congested destination links
309 * Returns 0 if success, otherwise errno
310 */
311static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
312				struct tipc_mc_method *method,
313				struct tipc_nlist *dests,
314				u16 *cong_link_cnt)
315{
316	struct tipc_msg *hdr, *_hdr;
317	struct sk_buff_head tmpq;
318	struct sk_buff *_skb;
319
 320	/* Does the cluster support the new capabilities? */
321	if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL))
322		return 0;
323
324	hdr = buf_msg(skb);
325	if (msg_user(hdr) == MSG_FRAGMENTER)
326		hdr = msg_inner_hdr(hdr);
327	if (msg_type(hdr) != TIPC_MCAST_MSG)
328		return 0;
329
330	/* Allocate dummy message */
331	_skb = tipc_buf_acquire(MCAST_H_SIZE, GFP_KERNEL);
332	if (!_skb)
333		return -ENOMEM;
334
335	/* Preparing for 'synching' header */
336	msg_set_syn(hdr, 1);
337
338	/* Copy skb's header into a dummy header */
339	skb_copy_to_linear_data(_skb, hdr, MCAST_H_SIZE);
340	skb_orphan(_skb);
341
342	/* Reverse method for dummy message */
343	_hdr = buf_msg(_skb);
344	msg_set_size(_hdr, MCAST_H_SIZE);
345	msg_set_is_rcast(_hdr, !msg_is_rcast(hdr));
346
347	__skb_queue_head_init(&tmpq);
348	__skb_queue_tail(&tmpq, _skb);
349	if (method->rcast)
350		tipc_bcast_xmit(net, &tmpq, cong_link_cnt);
351	else
352		tipc_rcast_xmit(net, &tmpq, dests, cong_link_cnt);
353
354	/* This queue should normally be empty by now */
355	__skb_queue_purge(&tmpq);
356
357	return 0;
358}
359
360/* tipc_mcast_xmit - deliver message to indicated destination nodes
361 *                   and to identified node local sockets
362 * @net: the applicable net namespace
363 * @pkts: chain of buffers containing message
364 * @method: send method to be used
365 * @dests: destination nodes for message.
366 * @cong_link_cnt: returns number of encountered congested destination links
367 * Consumes buffer chain.
368 * Returns 0 if success, otherwise errno
369 */
370int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
371		    struct tipc_mc_method *method, struct tipc_nlist *dests,
372		    u16 *cong_link_cnt)
373{
374	struct sk_buff_head inputq, localq;
375	bool rcast = method->rcast;
376	struct tipc_msg *hdr;
377	struct sk_buff *skb;
378	int rc = 0;
379
380	skb_queue_head_init(&inputq);
381	__skb_queue_head_init(&localq);
382
383	/* Clone packets before they are consumed by next call */
384	if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
385		rc = -ENOMEM;
386		goto exit;
387	}
388	/* Send according to determined transmit method */
389	if (dests->remote) {
390		tipc_bcast_select_xmit_method(net, dests->remote, method);
391
392		skb = skb_peek(pkts);
393		hdr = buf_msg(skb);
394		if (msg_user(hdr) == MSG_FRAGMENTER)
395			hdr = msg_inner_hdr(hdr);
396		msg_set_is_rcast(hdr, method->rcast);
397
398		/* Switch method ? */
399		if (rcast != method->rcast)
400			tipc_mcast_send_sync(net, skb, method,
401					     dests, cong_link_cnt);
402
403		if (method->rcast)
404			rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);
405		else
406			rc = tipc_bcast_xmit(net, pkts, cong_link_cnt);
407	}
408
409	if (dests->local) {
410		tipc_loopback_trace(net, &localq);
411		tipc_sk_mcast_rcv(net, &localq, &inputq);
412	}
413exit:
414	/* This queue should normally be empty by now */
415	__skb_queue_purge(pkts);
416	return rc;
417}
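
For orientation, here is a kernel-context sketch of how a caller such as the socket layer could drive tipc_mcast_xmit(); the peer addresses are placeholders, the packet chain is assumed to have been built by the message-layer helpers already, and the function itself is not part of the TIPC tree.

static int example_mcast_send(struct net *net, struct sk_buff_head *pkts,
			      struct tipc_mc_method *method)
{
	struct tipc_nlist dests;
	u16 cong_links = 0;
	int rc;

	/* Own node ends up in dests.local, every other address in dests.remote */
	tipc_nlist_init(&dests, tipc_own_addr(net));
	tipc_nlist_add(&dests, 0x1001002);	/* placeholder peer address */
	tipc_nlist_add(&dests, 0x1001003);	/* placeholder peer address */

	/* Consumes the buffer chain; cong_links reports how many destination
	 * links pushed back with -ELINKCONG.
	 */
	rc = tipc_mcast_xmit(net, pkts, method, &dests, &cong_links);

	tipc_nlist_purge(&dests);
	return rc;
}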
418
419/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
420 *
421 * RCU is locked, no other locks set
422 */
423int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
424{
425	struct tipc_msg *hdr = buf_msg(skb);
426	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
427	struct sk_buff_head xmitq;
428	int rc;
429
430	__skb_queue_head_init(&xmitq);
431
432	if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
433		kfree_skb(skb);
434		return 0;
435	}
436
437	tipc_bcast_lock(net);
438	if (msg_user(hdr) == BCAST_PROTOCOL)
439		rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
440	else
441		rc = tipc_link_rcv(l, skb, NULL);
442	tipc_bcast_unlock(net);
443
444	tipc_bcbase_xmit(net, &xmitq);
445
446	/* Any socket wakeup messages ? */
447	if (!skb_queue_empty(inputq))
448		tipc_sk_rcv(net, inputq);
449
450	return rc;
451}
452
453/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
454 *
455 * RCU is locked, no other locks set
456 */
457void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
458			struct tipc_msg *hdr)
459{
460	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
461	u16 acked = msg_bcast_ack(hdr);
462	struct sk_buff_head xmitq;
463
464	/* Ignore bc acks sent by peer before bcast synch point was received */
465	if (msg_bc_ack_invalid(hdr))
466		return;
467
468	__skb_queue_head_init(&xmitq);
469
470	tipc_bcast_lock(net);
471	tipc_link_bc_ack_rcv(l, acked, &xmitq);
472	tipc_bcast_unlock(net);
473
474	tipc_bcbase_xmit(net, &xmitq);
475
476	/* Any socket wakeup messages ? */
477	if (!skb_queue_empty(inputq))
478		tipc_sk_rcv(net, inputq);
479}
480
 481/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
482 *
483 * RCU is locked, no other locks set
484 */
485int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
486			struct tipc_msg *hdr)
487{
488	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
489	struct sk_buff_head xmitq;
490	int rc = 0;
491
492	__skb_queue_head_init(&xmitq);
493
494	tipc_bcast_lock(net);
495	if (msg_type(hdr) != STATE_MSG) {
496		tipc_link_bc_init_rcv(l, hdr);
497	} else if (!msg_bc_ack_invalid(hdr)) {
498		tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
499		rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
500	}
501	tipc_bcast_unlock(net);
502
503	tipc_bcbase_xmit(net, &xmitq);
504
505	/* Any socket wakeup messages ? */
506	if (!skb_queue_empty(inputq))
507		tipc_sk_rcv(net, inputq);
508	return rc;
509}
510
511/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
512 *
513 * RCU is locked, node lock is set
514 */
515void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
516			 struct sk_buff_head *xmitq)
517{
518	struct tipc_link *snd_l = tipc_bc_sndlink(net);
519
520	tipc_bcast_lock(net);
521	tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
522	tipc_bcbase_select_primary(net);
523	tipc_bcbase_calc_bc_threshold(net);
524	tipc_bcast_unlock(net);
525}
526
527/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
528 *
529 * RCU is locked, node lock is set
530 */
531void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
532{
533	struct tipc_link *snd_l = tipc_bc_sndlink(net);
534	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
535	struct sk_buff_head xmitq;
536
537	__skb_queue_head_init(&xmitq);
538
539	tipc_bcast_lock(net);
540	tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
541	tipc_bcbase_select_primary(net);
542	tipc_bcbase_calc_bc_threshold(net);
543	tipc_bcast_unlock(net);
544
545	tipc_bcbase_xmit(net, &xmitq);
546
547	/* Any socket wakeup messages ? */
548	if (!skb_queue_empty(inputq))
549		tipc_sk_rcv(net, inputq);
550}
551
552int tipc_bclink_reset_stats(struct net *net)
553{
554	struct tipc_link *l = tipc_bc_sndlink(net);
555
556	if (!l)
557		return -ENOPROTOOPT;
558
559	tipc_bcast_lock(net);
560	tipc_link_reset_stats(l);
561	tipc_bcast_unlock(net);
562	return 0;
563}
564
565static int tipc_bc_link_set_queue_limits(struct net *net, u32 limit)
566{
567	struct tipc_link *l = tipc_bc_sndlink(net);
568
569	if (!l)
570		return -ENOPROTOOPT;
571	if (limit < BCLINK_WIN_MIN)
572		limit = BCLINK_WIN_MIN;
573	if (limit > TIPC_MAX_LINK_WIN)
574		return -EINVAL;
575	tipc_bcast_lock(net);
576	tipc_link_set_queue_limits(l, limit);
577	tipc_bcast_unlock(net);
578	return 0;
579}
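
The clamping above is asymmetric: an undersized request is silently raised to the minimum, while an oversized one is rejected. The standalone mirror below makes that explicit; the maximum is passed in as a parameter because the real value comes from TIPC_MAX_LINK_WIN in the link headers, and the 8191 used in main() is only an assumed figure.

#include <stdio.h>

/* Mirrors tipc_bc_link_set_queue_limits(): raise undersized requests to the
 * minimum window, reject oversized ones outright.
 */
static int clamp_bc_window(unsigned int req, unsigned int min, unsigned int max,
			   unsigned int *win)
{
	if (req < min)
		req = min;
	if (req > max)
		return -1;	/* the kernel returns -EINVAL here */
	*win = req;
	return 0;
}

int main(void)
{
	unsigned int win = 0;

	if (!clamp_bc_window(10, 32, 8191, &win))	/* 8191 is an assumed maximum */
		printf("window = %u\n", win);		/* prints 32 */
	return 0;
}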
580
581static int tipc_bc_link_set_broadcast_mode(struct net *net, u32 bc_mode)
582{
583	struct tipc_bc_base *bb = tipc_bc_base(net);
584
585	switch (bc_mode) {
586	case BCLINK_MODE_BCAST:
587		if (!bb->bcast_support)
588			return -ENOPROTOOPT;
589
590		bb->force_bcast = true;
591		bb->force_rcast = false;
592		break;
593	case BCLINK_MODE_RCAST:
594		if (!bb->rcast_support)
595			return -ENOPROTOOPT;
596
597		bb->force_bcast = false;
598		bb->force_rcast = true;
599		break;
600	case BCLINK_MODE_SEL:
601		if (!bb->bcast_support || !bb->rcast_support)
602			return -ENOPROTOOPT;
603
604		bb->force_bcast = false;
605		bb->force_rcast = false;
606		break;
607	default:
608		return -EINVAL;
609	}
610
611	return 0;
612}
613
614static int tipc_bc_link_set_broadcast_ratio(struct net *net, u32 bc_ratio)
615{
616	struct tipc_bc_base *bb = tipc_bc_base(net);
617
618	if (!bb->bcast_support || !bb->rcast_support)
619		return -ENOPROTOOPT;
620
621	if (bc_ratio > 100 || bc_ratio <= 0)
622		return -EINVAL;
623
624	bb->rc_ratio = bc_ratio;
625	tipc_bcast_lock(net);
626	tipc_bcbase_calc_bc_threshold(net);
627	tipc_bcast_unlock(net);
628
629	return 0;
630}
631
632int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
633{
634	int err;
635	u32 win;
636	u32 bc_mode;
637	u32 bc_ratio;
638	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
639
640	if (!attrs[TIPC_NLA_LINK_PROP])
641		return -EINVAL;
642
643	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
644	if (err)
645		return err;
646
647	if (!props[TIPC_NLA_PROP_WIN] &&
648	    !props[TIPC_NLA_PROP_BROADCAST] &&
649	    !props[TIPC_NLA_PROP_BROADCAST_RATIO]) {
650		return -EOPNOTSUPP;
651	}
652
653	if (props[TIPC_NLA_PROP_BROADCAST]) {
654		bc_mode = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST]);
655		err = tipc_bc_link_set_broadcast_mode(net, bc_mode);
656	}
657
658	if (!err && props[TIPC_NLA_PROP_BROADCAST_RATIO]) {
659		bc_ratio = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST_RATIO]);
660		err = tipc_bc_link_set_broadcast_ratio(net, bc_ratio);
661	}
662
663	if (!err && props[TIPC_NLA_PROP_WIN]) {
664		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
665		err = tipc_bc_link_set_queue_limits(net, win);
666	}
667
668	return err;
669}
670
671int tipc_bcast_init(struct net *net)
672{
673	struct tipc_net *tn = tipc_net(net);
674	struct tipc_bc_base *bb = NULL;
675	struct tipc_link *l = NULL;
676
677	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
678	if (!bb)
679		goto enomem;
680	tn->bcbase = bb;
681	spin_lock_init(&tipc_net(net)->bclock);
682
683	if (!tipc_link_bc_create(net, 0, 0,
684				 FB_MTU,
685				 BCLINK_WIN_DEFAULT,
686				 0,
687				 &bb->inputq,
688				 NULL,
689				 NULL,
690				 &l))
691		goto enomem;
692	bb->link = l;
693	tn->bcl = l;
694	bb->rc_ratio = 10;
695	bb->rcast_support = true;
696	return 0;
697enomem:
698	kfree(bb);
699	kfree(l);
700	return -ENOMEM;
701}
702
703void tipc_bcast_stop(struct net *net)
704{
705	struct tipc_net *tn = net_generic(net, tipc_net_id);
706
707	synchronize_net();
708	kfree(tn->bcbase);
709	kfree(tn->bcl);
710}
711
712void tipc_nlist_init(struct tipc_nlist *nl, u32 self)
713{
714	memset(nl, 0, sizeof(*nl));
715	INIT_LIST_HEAD(&nl->list);
716	nl->self = self;
717}
718
719void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
720{
721	if (node == nl->self)
722		nl->local = true;
723	else if (tipc_dest_push(&nl->list, node, 0))
724		nl->remote++;
725}
726
727void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
728{
729	if (node == nl->self)
730		nl->local = false;
731	else if (tipc_dest_del(&nl->list, node, 0))
732		nl->remote--;
733}
734
735void tipc_nlist_purge(struct tipc_nlist *nl)
736{
737	tipc_dest_list_purge(&nl->list);
738	nl->remote = 0;
739	nl->local = false;
740}
741
742u32 tipc_bcast_get_broadcast_mode(struct net *net)
743{
744	struct tipc_bc_base *bb = tipc_bc_base(net);
745
746	if (bb->force_bcast)
747		return BCLINK_MODE_BCAST;
748
749	if (bb->force_rcast)
750		return BCLINK_MODE_RCAST;
751
752	if (bb->bcast_support && bb->rcast_support)
753		return BCLINK_MODE_SEL;
754
755	return 0;
756}
757
758u32 tipc_bcast_get_broadcast_ratio(struct net *net)
759{
760	struct tipc_bc_base *bb = tipc_bc_base(net);
761
762	return bb->rc_ratio;
763}
764
765void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq,
766			   struct sk_buff_head *inputq)
767{
768	struct sk_buff *skb, *_skb, *tmp;
769	struct tipc_msg *hdr, *_hdr;
770	bool match = false;
771	u32 node, port;
772
773	skb = skb_peek(inputq);
774	if (!skb)
775		return;
776
777	hdr = buf_msg(skb);
778
779	if (likely(!msg_is_syn(hdr) && skb_queue_empty(defq)))
780		return;
781
782	node = msg_orignode(hdr);
783	if (node == tipc_own_addr(net))
784		return;
785
786	port = msg_origport(hdr);
787
788	/* Has the twin SYN message already arrived ? */
789	skb_queue_walk(defq, _skb) {
790		_hdr = buf_msg(_skb);
791		if (msg_orignode(_hdr) != node)
792			continue;
793		if (msg_origport(_hdr) != port)
794			continue;
795		match = true;
796		break;
797	}
798
799	if (!match) {
800		if (!msg_is_syn(hdr))
801			return;
802		__skb_dequeue(inputq);
803		__skb_queue_tail(defq, skb);
804		return;
805	}
806
807	/* Deliver non-SYN message from other link, otherwise queue it */
808	if (!msg_is_syn(hdr)) {
809		if (msg_is_rcast(hdr) != msg_is_rcast(_hdr))
810			return;
811		__skb_dequeue(inputq);
812		__skb_queue_tail(defq, skb);
813		return;
814	}
815
816	/* Queue non-SYN/SYN message from same link */
817	if (msg_is_rcast(hdr) == msg_is_rcast(_hdr)) {
818		__skb_dequeue(inputq);
819		__skb_queue_tail(defq, skb);
820		return;
821	}
822
823	/* Matching SYN messages => return the one with data, if any */
824	__skb_unlink(_skb, defq);
825	if (msg_data_sz(hdr)) {
826		kfree_skb(_skb);
827	} else {
828		__skb_dequeue(inputq);
829		kfree_skb(skb);
830		__skb_queue_tail(inputq, _skb);
831	}
832
833	/* Deliver subsequent non-SYN messages from same peer */
834	skb_queue_walk_safe(defq, _skb, tmp) {
835		_hdr = buf_msg(_skb);
836		if (msg_orignode(_hdr) != node)
837			continue;
838		if (msg_origport(_hdr) != port)
839			continue;
840		if (msg_is_syn(_hdr))
841			break;
842		__skb_unlink(_skb, defq);
843		__skb_queue_tail(inputq, _skb);
844	}
845}
v4.17
  1/*
  2 * net/tipc/bcast.c: TIPC broadcast code
  3 *
  4 * Copyright (c) 2004-2006, 2014-2017, Ericsson AB
  5 * Copyright (c) 2004, Intel Corporation.
  6 * Copyright (c) 2005, 2010-2011, Wind River Systems
  7 * All rights reserved.
  8 *
  9 * Redistribution and use in source and binary forms, with or without
 10 * modification, are permitted provided that the following conditions are met:
 11 *
 12 * 1. Redistributions of source code must retain the above copyright
 13 *    notice, this list of conditions and the following disclaimer.
 14 * 2. Redistributions in binary form must reproduce the above copyright
 15 *    notice, this list of conditions and the following disclaimer in the
 16 *    documentation and/or other materials provided with the distribution.
 17 * 3. Neither the names of the copyright holders nor the names of its
 18 *    contributors may be used to endorse or promote products derived from
 19 *    this software without specific prior written permission.
 20 *
 21 * Alternatively, this software may be distributed under the terms of the
 22 * GNU General Public License ("GPL") version 2 as published by the Free
 23 * Software Foundation.
 24 *
 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 35 * POSSIBILITY OF SUCH DAMAGE.
 36 */
 37
 38#include <linux/tipc_config.h>
 39#include "socket.h"
 40#include "msg.h"
 41#include "bcast.h"
 42#include "link.h"
 43#include "name_table.h"
 44
 45#define BCLINK_WIN_DEFAULT  50	/* bcast link window size (default) */
 46#define BCLINK_WIN_MIN      32	/* bcast minimum link window size */
 47
 48const char tipc_bclink_name[] = "broadcast-link";
 49
 50/**
 51 * struct tipc_bc_base - base structure for keeping broadcast send state
 52 * @link: broadcast send link structure
 53 * @inputq: data input queue; will only carry SOCK_WAKEUP messages
 54 * @dest: array keeping number of reachable destinations per bearer
 55 * @primary_bearer: a bearer having links to all broadcast destinations, if any
 56 * @bcast_support: indicates if primary bearer, if any, supports broadcast
 57 * @rcast_support: indicates if all peer nodes support replicast
 58 * @rc_ratio: dest count as percentage of cluster size where send method changes
 59 * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
 60 */
 61struct tipc_bc_base {
 62	struct tipc_link *link;
 63	struct sk_buff_head inputq;
 64	int dests[MAX_BEARERS];
 65	int primary_bearer;
 66	bool bcast_support;
 67	bool rcast_support;
 68	int rc_ratio;
 69	int bc_threshold;
 70};
 71
 72static struct tipc_bc_base *tipc_bc_base(struct net *net)
 73{
 74	return tipc_net(net)->bcbase;
 75}
 76
 77/* tipc_bcast_get_mtu(): get the MTU currently used by broadcast link
 78 * Note: the MTU is decremented to give room for a tunnel header, in
 79 * case the message needs to be sent as replicast
 80 */
 81int tipc_bcast_get_mtu(struct net *net)
 82{
 83	return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
 84}
 85
 86void tipc_bcast_disable_rcast(struct net *net)
 87{
 88	tipc_bc_base(net)->rcast_support = false;
 89}
 90
 91static void tipc_bcbase_calc_bc_threshold(struct net *net)
 92{
 93	struct tipc_bc_base *bb = tipc_bc_base(net);
 94	int cluster_size = tipc_link_bc_peers(tipc_bc_sndlink(net));
 95
 96	bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100);
 97}
 98
 99/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
100 *                               if any, and make it primary bearer
101 */
102static void tipc_bcbase_select_primary(struct net *net)
103{
104	struct tipc_bc_base *bb = tipc_bc_base(net);
105	int all_dests =  tipc_link_bc_peers(bb->link);
106	int i, mtu, prim;
107
108	bb->primary_bearer = INVALID_BEARER_ID;
109	bb->bcast_support = true;
110
111	if (!all_dests)
112		return;
113
114	for (i = 0; i < MAX_BEARERS; i++) {
115		if (!bb->dests[i])
116			continue;
117
118		mtu = tipc_bearer_mtu(net, i);
119		if (mtu < tipc_link_mtu(bb->link))
120			tipc_link_set_mtu(bb->link, mtu);
121		bb->bcast_support &= tipc_bearer_bcast_support(net, i);
122		if (bb->dests[i] < all_dests)
123			continue;
124
125		bb->primary_bearer = i;
126
127		/* Reduce risk that all nodes select same primary */
128		if ((i ^ tipc_own_addr(net)) & 1)
129			break;
130	}
131	prim = bb->primary_bearer;
132	if (prim != INVALID_BEARER_ID)
133		bb->bcast_support = tipc_bearer_bcast_support(net, prim);
134}
135
136void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
137{
138	struct tipc_bc_base *bb = tipc_bc_base(net);
139
140	tipc_bcast_lock(net);
141	bb->dests[bearer_id]++;
142	tipc_bcbase_select_primary(net);
143	tipc_bcast_unlock(net);
144}
145
146void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id)
147{
148	struct tipc_bc_base *bb = tipc_bc_base(net);
149
150	tipc_bcast_lock(net);
151	bb->dests[bearer_id]--;
152	tipc_bcbase_select_primary(net);
153	tipc_bcast_unlock(net);
154}
155
156/* tipc_bcbase_xmit - broadcast a packet queue across one or more bearers
157 *
158 * Note that number of reachable destinations, as indicated in the dests[]
159 * array, may transitionally differ from the number of destinations indicated
160 * in each sent buffer. We can sustain this. Excess destination nodes will
161 * drop and never acknowledge the unexpected packets, and missing destinations
162 * will either require retransmission (if they are just about to be added to
163 * the bearer), or be removed from the buffer's 'ackers' counter (if they
164 * just went down)
165 */
166static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
167{
168	int bearer_id;
169	struct tipc_bc_base *bb = tipc_bc_base(net);
170	struct sk_buff *skb, *_skb;
171	struct sk_buff_head _xmitq;
172
173	if (skb_queue_empty(xmitq))
174		return;
175
176	/* The typical case: at least one bearer has links to all nodes */
177	bearer_id = bb->primary_bearer;
178	if (bearer_id >= 0) {
179		tipc_bearer_bc_xmit(net, bearer_id, xmitq);
180		return;
181	}
182
183	/* We have to transmit across all bearers */
184	skb_queue_head_init(&_xmitq);
185	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
186		if (!bb->dests[bearer_id])
187			continue;
188
189		skb_queue_walk(xmitq, skb) {
190			_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
191			if (!_skb)
192				break;
193			__skb_queue_tail(&_xmitq, _skb);
194		}
195		tipc_bearer_bc_xmit(net, bearer_id, &_xmitq);
196	}
197	__skb_queue_purge(xmitq);
198	__skb_queue_purge(&_xmitq);
199}
200
201static void tipc_bcast_select_xmit_method(struct net *net, int dests,
202					  struct tipc_mc_method *method)
203{
204	struct tipc_bc_base *bb = tipc_bc_base(net);
205	unsigned long exp = method->expires;
206
207	/* Broadcast supported by used bearer/bearers? */
208	if (!bb->bcast_support) {
209		method->rcast = true;
210		return;
211	}
212	/* Any destinations which don't support replicast ? */
213	if (!bb->rcast_support) {
214		method->rcast = false;
215		return;
216	}
217	/* Can current method be changed ? */
218	method->expires = jiffies + TIPC_METHOD_EXPIRE;
219	if (method->mandatory || time_before(jiffies, exp))
220		return;
221
222	/* Determine method to use now */
223	method->rcast = dests <= bb->bc_threshold;
224}
225
226/* tipc_bcast_xmit - broadcast the buffer chain to all external nodes
227 * @net: the applicable net namespace
228 * @pkts: chain of buffers containing message
229 * @cong_link_cnt: set to 1 if broadcast link is congested, otherwise 0
230 * Consumes the buffer chain.
231 * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE
232 */
233static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
234			   u16 *cong_link_cnt)
235{
236	struct tipc_link *l = tipc_bc_sndlink(net);
237	struct sk_buff_head xmitq;
238	int rc = 0;
239
240	skb_queue_head_init(&xmitq);
241	tipc_bcast_lock(net);
242	if (tipc_link_bc_peers(l))
243		rc = tipc_link_xmit(l, pkts, &xmitq);
244	tipc_bcast_unlock(net);
245	tipc_bcbase_xmit(net, &xmitq);
246	__skb_queue_purge(pkts);
247	if (rc == -ELINKCONG) {
248		*cong_link_cnt = 1;
249		rc = 0;
250	}
251	return rc;
252}
253
254/* tipc_rcast_xmit - replicate and send a message to given destination nodes
255 * @net: the applicable net namespace
256 * @pkts: chain of buffers containing message
257 * @dests: list of destination nodes
258 * @cong_link_cnt: returns number of congested links
259 * @cong_links: returns identities of congested links
260 * Returns 0 if success, otherwise errno
261 */
262static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
263			   struct tipc_nlist *dests, u16 *cong_link_cnt)
264{
265	struct tipc_dest *dst, *tmp;
266	struct sk_buff_head _pkts;
267	u32 dnode, selector;
268
269	selector = msg_link_selector(buf_msg(skb_peek(pkts)));
270	skb_queue_head_init(&_pkts);
271
272	list_for_each_entry_safe(dst, tmp, &dests->list, list) {
273		dnode = dst->node;
274		if (!tipc_msg_pskb_copy(dnode, pkts, &_pkts))
275			return -ENOMEM;
276
277		/* Any other return value than -ELINKCONG is ignored */
278		if (tipc_node_xmit(net, &_pkts, dnode, selector) == -ELINKCONG)
279			(*cong_link_cnt)++;
280	}
281	return 0;
282}
283
284/* tipc_mcast_xmit - deliver message to indicated destination nodes
285 *                   and to identified node local sockets
286 * @net: the applicable net namespace
287 * @pkts: chain of buffers containing message
288 * @method: send method to be used
289 * @dests: destination nodes for message.
290 * @cong_link_cnt: returns number of encountered congested destination links
291 * Consumes buffer chain.
292 * Returns 0 if success, otherwise errno
293 */
294int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
295		    struct tipc_mc_method *method, struct tipc_nlist *dests,
296		    u16 *cong_link_cnt)
297{
298	struct sk_buff_head inputq, localq;
299	int rc = 0;
300
301	skb_queue_head_init(&inputq);
302	skb_queue_head_init(&localq);
303
304	/* Clone packets before they are consumed by next call */
305	if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
306		rc = -ENOMEM;
307		goto exit;
308	}
309	/* Send according to determined transmit method */
310	if (dests->remote) {
311		tipc_bcast_select_xmit_method(net, dests->remote, method);
312		if (method->rcast)
313			rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);
314		else
315			rc = tipc_bcast_xmit(net, pkts, cong_link_cnt);
316	}
317
318	if (dests->local)
319		tipc_sk_mcast_rcv(net, &localq, &inputq);
320exit:
321	/* This queue should normally be empty by now */
322	__skb_queue_purge(pkts);
323	return rc;
324}
325
326/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
327 *
328 * RCU is locked, no other locks set
329 */
330int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
331{
332	struct tipc_msg *hdr = buf_msg(skb);
333	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
334	struct sk_buff_head xmitq;
335	int rc;
336
337	__skb_queue_head_init(&xmitq);
338
339	if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
340		kfree_skb(skb);
341		return 0;
342	}
343
344	tipc_bcast_lock(net);
345	if (msg_user(hdr) == BCAST_PROTOCOL)
346		rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
347	else
348		rc = tipc_link_rcv(l, skb, NULL);
349	tipc_bcast_unlock(net);
350
351	tipc_bcbase_xmit(net, &xmitq);
352
353	/* Any socket wakeup messages ? */
354	if (!skb_queue_empty(inputq))
355		tipc_sk_rcv(net, inputq);
356
357	return rc;
358}
359
360/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
361 *
362 * RCU is locked, no other locks set
363 */
364void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
365			struct tipc_msg *hdr)
366{
367	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
368	u16 acked = msg_bcast_ack(hdr);
369	struct sk_buff_head xmitq;
370
371	/* Ignore bc acks sent by peer before bcast synch point was received */
372	if (msg_bc_ack_invalid(hdr))
373		return;
374
375	__skb_queue_head_init(&xmitq);
376
377	tipc_bcast_lock(net);
378	tipc_link_bc_ack_rcv(l, acked, &xmitq);
379	tipc_bcast_unlock(net);
380
381	tipc_bcbase_xmit(net, &xmitq);
382
383	/* Any socket wakeup messages ? */
384	if (!skb_queue_empty(inputq))
385		tipc_sk_rcv(net, inputq);
386}
387
 388/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
389 *
390 * RCU is locked, no other locks set
391 */
392int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
393			struct tipc_msg *hdr)
394{
395	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
396	struct sk_buff_head xmitq;
397	int rc = 0;
398
399	__skb_queue_head_init(&xmitq);
400
401	tipc_bcast_lock(net);
402	if (msg_type(hdr) != STATE_MSG) {
403		tipc_link_bc_init_rcv(l, hdr);
404	} else if (!msg_bc_ack_invalid(hdr)) {
405		tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
406		rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
407	}
408	tipc_bcast_unlock(net);
409
410	tipc_bcbase_xmit(net, &xmitq);
411
412	/* Any socket wakeup messages ? */
413	if (!skb_queue_empty(inputq))
414		tipc_sk_rcv(net, inputq);
415	return rc;
416}
417
418/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
419 *
420 * RCU is locked, node lock is set
421 */
422void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
423			 struct sk_buff_head *xmitq)
424{
425	struct tipc_link *snd_l = tipc_bc_sndlink(net);
426
427	tipc_bcast_lock(net);
428	tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
429	tipc_bcbase_select_primary(net);
430	tipc_bcbase_calc_bc_threshold(net);
431	tipc_bcast_unlock(net);
432}
433
434/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
435 *
436 * RCU is locked, node lock is set
437 */
438void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
439{
440	struct tipc_link *snd_l = tipc_bc_sndlink(net);
441	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
442	struct sk_buff_head xmitq;
443
444	__skb_queue_head_init(&xmitq);
445
446	tipc_bcast_lock(net);
447	tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
448	tipc_bcbase_select_primary(net);
449	tipc_bcbase_calc_bc_threshold(net);
450	tipc_bcast_unlock(net);
451
452	tipc_bcbase_xmit(net, &xmitq);
453
454	/* Any socket wakeup messages ? */
455	if (!skb_queue_empty(inputq))
456		tipc_sk_rcv(net, inputq);
457}
458
459int tipc_bclink_reset_stats(struct net *net)
460{
461	struct tipc_link *l = tipc_bc_sndlink(net);
462
463	if (!l)
464		return -ENOPROTOOPT;
465
466	tipc_bcast_lock(net);
467	tipc_link_reset_stats(l);
468	tipc_bcast_unlock(net);
469	return 0;
470}
471
472static int tipc_bc_link_set_queue_limits(struct net *net, u32 limit)
473{
474	struct tipc_link *l = tipc_bc_sndlink(net);
475
476	if (!l)
477		return -ENOPROTOOPT;
478	if (limit < BCLINK_WIN_MIN)
479		limit = BCLINK_WIN_MIN;
480	if (limit > TIPC_MAX_LINK_WIN)
481		return -EINVAL;
482	tipc_bcast_lock(net);
483	tipc_link_set_queue_limits(l, limit);
484	tipc_bcast_unlock(net);
485	return 0;
486}
487
488int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
489{
490	int err;
491	u32 win;
492	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
493
494	if (!attrs[TIPC_NLA_LINK_PROP])
495		return -EINVAL;
496
497	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
498	if (err)
499		return err;
500
501	if (!props[TIPC_NLA_PROP_WIN])
502		return -EOPNOTSUPP;
503
504	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
505
506	return tipc_bc_link_set_queue_limits(net, win);
507}
508
509int tipc_bcast_init(struct net *net)
510{
511	struct tipc_net *tn = tipc_net(net);
512	struct tipc_bc_base *bb = NULL;
513	struct tipc_link *l = NULL;
514
515	bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
516	if (!bb)
517		goto enomem;
518	tn->bcbase = bb;
519	spin_lock_init(&tipc_net(net)->bclock);
520
521	if (!tipc_link_bc_create(net, 0, 0,
522				 FB_MTU,
523				 BCLINK_WIN_DEFAULT,
524				 0,
525				 &bb->inputq,
526				 NULL,
527				 NULL,
528				 &l))
529		goto enomem;
530	bb->link = l;
531	tn->bcl = l;
532	bb->rc_ratio = 25;
533	bb->rcast_support = true;
534	return 0;
535enomem:
536	kfree(bb);
537	kfree(l);
538	return -ENOMEM;
539}
540
541void tipc_bcast_stop(struct net *net)
542{
543	struct tipc_net *tn = net_generic(net, tipc_net_id);
544
545	synchronize_net();
546	kfree(tn->bcbase);
547	kfree(tn->bcl);
548}
549
550void tipc_nlist_init(struct tipc_nlist *nl, u32 self)
551{
552	memset(nl, 0, sizeof(*nl));
553	INIT_LIST_HEAD(&nl->list);
554	nl->self = self;
555}
556
557void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
558{
559	if (node == nl->self)
560		nl->local = true;
561	else if (tipc_dest_push(&nl->list, node, 0))
562		nl->remote++;
563}
564
565void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
566{
567	if (node == nl->self)
568		nl->local = false;
569	else if (tipc_dest_del(&nl->list, node, 0))
570		nl->remote--;
571}
572
573void tipc_nlist_purge(struct tipc_nlist *nl)
574{
575	tipc_dest_list_purge(&nl->list);
576	nl->remote = 0;
577	nl->local = false;
578}