v3.1
  1/*
  2 * net/tipc/bcast.c: TIPC broadcast code
  3 *
  4 * Copyright (c) 2004-2006, Ericsson AB
  5 * Copyright (c) 2004, Intel Corporation.
  6 * Copyright (c) 2005, 2010-2011, Wind River Systems
  7 * All rights reserved.
  8 *
  9 * Redistribution and use in source and binary forms, with or without
 10 * modification, are permitted provided that the following conditions are met:
 11 *
 12 * 1. Redistributions of source code must retain the above copyright
 13 *    notice, this list of conditions and the following disclaimer.
 14 * 2. Redistributions in binary form must reproduce the above copyright
 15 *    notice, this list of conditions and the following disclaimer in the
 16 *    documentation and/or other materials provided with the distribution.
 17 * 3. Neither the names of the copyright holders nor the names of its
 18 *    contributors may be used to endorse or promote products derived from
 19 *    this software without specific prior written permission.
 20 *
 21 * Alternatively, this software may be distributed under the terms of the
 22 * GNU General Public License ("GPL") version 2 as published by the Free
 23 * Software Foundation.
 24 *
 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 35 * POSSIBILITY OF SUCH DAMAGE.
 36 */
 37
 38#include "core.h"
 39#include "link.h"
 40#include "port.h"
 41#include "bcast.h"
 42
 43#define MAX_PKT_DEFAULT_MCAST 1500	/* bcast link max packet size (fixed) */
 44
 45#define BCLINK_WIN_DEFAULT 20		/* bcast link window size (default) */
 46
 47/**
 48 * struct bcbearer_pair - a pair of bearers used by broadcast link
 49 * @primary: pointer to primary bearer
 50 * @secondary: pointer to secondary bearer
 51 *
 52 * Bearers must have same priority and same set of reachable destinations
 53 * to be paired.
 54 */
 55
 56struct bcbearer_pair {
 57	struct tipc_bearer *primary;
 58	struct tipc_bearer *secondary;
 59};
 60
 61/**
 62 * struct bcbearer - bearer used by broadcast link
 63 * @bearer: (non-standard) broadcast bearer structure
 64 * @media: (non-standard) broadcast media structure
 65 * @bpairs: array of bearer pairs
 66 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 67 * @remains: temporary node map used by tipc_bcbearer_send()
 68 * @remains_new: temporary node map used by tipc_bcbearer_send()
 69 *
 70 * Note: The fields labelled "temporary" are incorporated into the bearer
 71 * to avoid consuming potentially limited stack space through the use of
 72 * large local variables within multicast routines.  Concurrent access is
 73 * prevented through use of the spinlock "bc_lock".
 74 */
 75
 76struct bcbearer {
 77	struct tipc_bearer bearer;
 78	struct media media;
 79	struct bcbearer_pair bpairs[MAX_BEARERS];
 80	struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
 81	struct tipc_node_map remains;
 82	struct tipc_node_map remains_new;
 83};
 84
 85/**
 86 * struct bclink - link used for broadcast messages
 87 * @link: (non-standard) broadcast link structure
 88 * @node: (non-standard) node structure representing b'cast link's peer node
 89 * @retransmit_to: node that most recently requested a retransmit
 90 *
 91 * Handles sequence numbering, fragmentation, bundling, etc.
 92 */
 93
 94struct bclink {
 95	struct link link;
 96	struct tipc_node node;
 97	struct tipc_node *retransmit_to;
 98};
 99
100
101static struct bcbearer *bcbearer;
102static struct bclink *bclink;
103static struct link *bcl;
104static DEFINE_SPINLOCK(bc_lock);
105
106/* broadcast-capable node map */
107struct tipc_node_map tipc_bcast_nmap;
108
109const char tipc_bclink_name[] = "broadcast-link";
110
111static void tipc_nmap_diff(struct tipc_node_map *nm_a,
112			   struct tipc_node_map *nm_b,
113			   struct tipc_node_map *nm_diff);
114
115static u32 buf_seqno(struct sk_buff *buf)
116{
117	return msg_seqno(buf_msg(buf));
118}
119
120static u32 bcbuf_acks(struct sk_buff *buf)
121{
122	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
123}
124
125static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
126{
127	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
128}
129
130static void bcbuf_decr_acks(struct sk_buff *buf)
131{
132	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
133}
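/*
 * Note on the helpers above: each buffer queued on the broadcast link
 * reuses the TIPC_SKB_CB(buf)->handle field as a counter of peer nodes
 * that still have to acknowledge it.  tipc_bcbearer_send() initializes
 * the counter to tipc_bcast_nmap.count, and tipc_bclink_acknowledge()
 * decrements it, releasing the buffer once it reaches zero.
 */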
134
135
136static void bclink_set_last_sent(void)
137{
138	if (bcl->next_out)
139		bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
140	else
141		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
142}
143
144u32 tipc_bclink_get_last_sent(void)
145{
146	return bcl->fsm_msg_cnt;
147}
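/*
 * Note: the broadcast link repurposes the fsm_msg_cnt field of struct
 * link to cache the sequence number of the last packet actually sent:
 * one less than the first unsent packet (next_out) when the queue is
 * congested, otherwise one less than next_out_no.
 */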
148
149/**
150 * bclink_set_gap - set gap according to contents of current deferred pkt queue
151 *
152 * Called with 'node' locked, bc_lock unlocked
153 */
154
155static void bclink_set_gap(struct tipc_node *n_ptr)
156{
157	struct sk_buff *buf = n_ptr->bclink.deferred_head;
158
159	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
160		mod(n_ptr->bclink.last_in);
161	if (unlikely(buf != NULL))
162		n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
163}
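/*
 * Example: if last_in is 45 and the first deferred packet has sequence
 * number 50, the gap becomes gap_after = 45, gap_to = 49, i.e. packets
 * 46 through 49 are known to be missing.
 */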
164
165/**
166 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
167 *
168 * This mechanism endeavours to prevent all nodes in network from trying
169 * to ACK or NACK at the same time.
170 *
171 * Note: TIPC uses a different trigger to distribute ACKs than it does to
172 *       distribute NACKs, but tries to use the same spacing (divide by 16).
173 */
174
175static int bclink_ack_allowed(u32 n)
176{
177	return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
178}
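/*
 * Example (assuming TIPC_MIN_LINK_WIN is 16, matching the "divide by 16"
 * spacing mentioned above): a node whose tipc_own_tag is 3 may only send
 * an ACK or NACK when the trigger value n satisfies n % 16 == 3, so
 * different nodes respond at different points in the sequence instead of
 * all at once.
 */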
179
180
181/**
182 * tipc_bclink_retransmit_to - get most recent node to request retransmission
183 *
184 * Called with bc_lock locked
185 */
186
187struct tipc_node *tipc_bclink_retransmit_to(void)
188{
189	return bclink->retransmit_to;
190}
191
192/**
193 * bclink_retransmit_pkt - retransmit broadcast packets
194 * @after: sequence number of last packet to *not* retransmit
195 * @to: sequence number of last packet to retransmit
196 *
197 * Called with bc_lock locked
198 */
199
200static void bclink_retransmit_pkt(u32 after, u32 to)
201{
202	struct sk_buff *buf;
203
204	buf = bcl->first_out;
205	while (buf && less_eq(buf_seqno(buf), after))
206		buf = buf->next;
207	tipc_link_retransmit(bcl, buf, mod(to - after));
208}
209
210/**
211 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
212 * @n_ptr: node that sent acknowledgement info
213 * @acked: broadcast sequence # that has been acknowledged
214 *
215 * Node is locked, bc_lock unlocked.
216 */
217
218void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
219{
220	struct sk_buff *crs;
221	struct sk_buff *next;
222	unsigned int released = 0;
223
224	if (less_eq(acked, n_ptr->bclink.acked))
225		return;
226
227	spin_lock_bh(&bc_lock);
228
229	/* Skip over packets that node has previously acknowledged */
230
231	crs = bcl->first_out;
232	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
233		crs = crs->next;
234
235	/* Update packets that node is now acknowledging */
236
237	while (crs && less_eq(buf_seqno(crs), acked)) {
238		next = crs->next;
239		bcbuf_decr_acks(crs);
240		if (bcbuf_acks(crs) == 0) {
241			bcl->first_out = next;
242			bcl->out_queue_size--;
243			buf_discard(crs);
244			released = 1;
245		}
246		crs = next;
247	}
248	n_ptr->bclink.acked = acked;
249
250	/* Try resolving broadcast link congestion, if necessary */
251
252	if (unlikely(bcl->next_out)) {
253		tipc_link_push_queue(bcl);
254		bclink_set_last_sent();
255	}
256	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
257		tipc_link_wakeup_ports(bcl, 0);
258	spin_unlock_bh(&bc_lock);
259}
260
261/**
262 * bclink_send_ack - unicast an ACK msg
263 *
264 * tipc_net_lock and node lock set
265 */
266
267static void bclink_send_ack(struct tipc_node *n_ptr)
268{
269	struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
270
271	if (l_ptr != NULL)
272		tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
273}
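/*
 * Note: the broadcast ACK is not sent over the broadcast bearer; it is
 * piggybacked on a unicast STATE_MSG over one of the peer's two active
 * links, chosen by the low bit of the peer address, presumably so that
 * the extra protocol traffic is spread across both links.
 */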
274
275/**
 276 * bclink_send_nack - broadcast a NACK msg
277 *
278 * tipc_net_lock and node lock set
279 */
280
281static void bclink_send_nack(struct tipc_node *n_ptr)
282{
283	struct sk_buff *buf;
284	struct tipc_msg *msg;
285
286	if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
287		return;
288
289	buf = tipc_buf_acquire(INT_H_SIZE);
290	if (buf) {
291		msg = buf_msg(buf);
292		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
293			 INT_H_SIZE, n_ptr->addr);
294		msg_set_non_seq(msg, 1);
295		msg_set_mc_netid(msg, tipc_net_id);
296		msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
297		msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
298		msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
299		msg_set_bcast_tag(msg, tipc_own_tag);
300
301		if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
302			bcl->stats.sent_nacks++;
303			buf_discard(buf);
304		} else {
305			tipc_bearer_schedule(bcl->b_ptr, bcl);
306			bcl->proto_msg_queue = buf;
307			bcl->stats.bearer_congs++;
308		}
309
310		/*
 311		 * Ensure we don't send another NACK msg to the node
312		 * until 16 more deferred messages arrive from it
313		 * (i.e. helps prevent all nodes from NACK'ing at same time)
314		 */
315
316		n_ptr->bclink.nack_sync = tipc_own_tag;
317	}
318}
319
320/**
321 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
322 *
323 * tipc_net_lock and node lock set
324 */
325
326void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
327{
328	if (!n_ptr->bclink.supported ||
329	    less_eq(last_sent, mod(n_ptr->bclink.last_in)))
330		return;
331
332	bclink_set_gap(n_ptr);
333	if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
334		n_ptr->bclink.gap_to = last_sent;
335	bclink_send_nack(n_ptr);
336}
337
338/**
339 * tipc_bclink_peek_nack - process a NACK msg meant for another node
340 *
341 * Only tipc_net_lock set.
342 */
343
344static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
345{
346	struct tipc_node *n_ptr = tipc_node_find(dest);
347	u32 my_after, my_to;
348
349	if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
350		return;
351	tipc_node_lock(n_ptr);
352	/*
353	 * Modify gap to suppress unnecessary NACKs from this node
354	 */
355	my_after = n_ptr->bclink.gap_after;
356	my_to = n_ptr->bclink.gap_to;
357
358	if (less_eq(gap_after, my_after)) {
359		if (less(my_after, gap_to) && less(gap_to, my_to))
360			n_ptr->bclink.gap_after = gap_to;
361		else if (less_eq(my_to, gap_to))
362			n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
363	} else if (less_eq(gap_after, my_to)) {
364		if (less_eq(my_to, gap_to))
365			n_ptr->bclink.gap_to = gap_after;
366	} else {
367		/*
368		 * Expand gap if missing bufs not in deferred queue:
369		 */
370		struct sk_buff *buf = n_ptr->bclink.deferred_head;
371		u32 prev = n_ptr->bclink.gap_to;
372
373		for (; buf; buf = buf->next) {
374			u32 seqno = buf_seqno(buf);
375
376			if (mod(seqno - prev) != 1) {
377				buf = NULL;
378				break;
379			}
380			if (seqno == gap_after)
381				break;
382			prev = seqno;
383		}
384		if (buf == NULL)
385			n_ptr->bclink.gap_to = gap_after;
386	}
387	/*
388	 * Some nodes may send a complementary NACK now:
389	 */
390	if (bclink_ack_allowed(sender_tag + 1)) {
391		if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
392			bclink_send_nack(n_ptr);
393			bclink_set_gap(n_ptr);
394		}
395	}
396	tipc_node_unlock(n_ptr);
397}
398
399/**
400 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
401 */
402
403int tipc_bclink_send_msg(struct sk_buff *buf)
404{
405	int res;
406
407	spin_lock_bh(&bc_lock);
408
409	res = tipc_link_send_buf(bcl, buf);
410	if (likely(res > 0))
411		bclink_set_last_sent();
412
413	bcl->stats.queue_sz_counts++;
414	bcl->stats.accu_queue_sz += bcl->out_queue_size;
415
416	spin_unlock_bh(&bc_lock);
417	return res;
418}
419
420/**
421 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
422 *
423 * tipc_net_lock is read_locked, no other locks set
424 */
425
426void tipc_bclink_recv_pkt(struct sk_buff *buf)
427{
428	struct tipc_msg *msg = buf_msg(buf);
429	struct tipc_node *node = tipc_node_find(msg_prevnode(msg));
430	u32 next_in;
431	u32 seqno;
432	struct sk_buff *deferred;
433
434	if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
435		     (msg_mc_netid(msg) != tipc_net_id))) {
436		buf_discard(buf);
437		return;
438	}
439
440	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
441		if (msg_destnode(msg) == tipc_own_addr) {
442			tipc_node_lock(node);
443			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
444			tipc_node_unlock(node);
445			spin_lock_bh(&bc_lock);
446			bcl->stats.recv_nacks++;
447			bclink->retransmit_to = node;
448			bclink_retransmit_pkt(msg_bcgap_after(msg),
449					      msg_bcgap_to(msg));
450			spin_unlock_bh(&bc_lock);
451		} else {
452			tipc_bclink_peek_nack(msg_destnode(msg),
453					      msg_bcast_tag(msg),
454					      msg_bcgap_after(msg),
455					      msg_bcgap_to(msg));
456		}
457		buf_discard(buf);
458		return;
459	}
460
461	tipc_node_lock(node);
462receive:
463	deferred = node->bclink.deferred_head;
464	next_in = mod(node->bclink.last_in + 1);
465	seqno = msg_seqno(msg);
466
467	if (likely(seqno == next_in)) {
468		bcl->stats.recv_info++;
469		node->bclink.last_in++;
470		bclink_set_gap(node);
471		if (unlikely(bclink_ack_allowed(seqno))) {
472			bclink_send_ack(node);
473			bcl->stats.sent_acks++;
474		}
475		if (likely(msg_isdata(msg))) {
476			tipc_node_unlock(node);
477			tipc_port_recv_mcast(buf, NULL);
478		} else if (msg_user(msg) == MSG_BUNDLER) {
479			bcl->stats.recv_bundles++;
480			bcl->stats.recv_bundled += msg_msgcnt(msg);
481			tipc_node_unlock(node);
482			tipc_link_recv_bundle(buf);
483		} else if (msg_user(msg) == MSG_FRAGMENTER) {
484			bcl->stats.recv_fragments++;
485			if (tipc_link_recv_fragment(&node->bclink.defragm,
486						    &buf, &msg))
487				bcl->stats.recv_fragmented++;
488			tipc_node_unlock(node);
489			tipc_net_route_msg(buf);
490		} else {
491			tipc_node_unlock(node);
492			tipc_net_route_msg(buf);
493		}
494		if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
495			tipc_node_lock(node);
496			buf = deferred;
497			msg = buf_msg(buf);
498			node->bclink.deferred_head = deferred->next;
499			goto receive;
500		}
501		return;
502	} else if (less(next_in, seqno)) {
503		u32 gap_after = node->bclink.gap_after;
504		u32 gap_to = node->bclink.gap_to;
505
506		if (tipc_link_defer_pkt(&node->bclink.deferred_head,
507					&node->bclink.deferred_tail,
508					buf)) {
509			node->bclink.nack_sync++;
510			bcl->stats.deferred_recv++;
511			if (seqno == mod(gap_after + 1))
512				node->bclink.gap_after = seqno;
513			else if (less(gap_after, seqno) && less(seqno, gap_to))
514				node->bclink.gap_to = seqno;
515		}
516		if (bclink_ack_allowed(node->bclink.nack_sync)) {
517			if (gap_to != gap_after)
518				bclink_send_nack(node);
519			bclink_set_gap(node);
520		}
521	} else {
522		bcl->stats.duplicates++;
523		buf_discard(buf);
524	}
525	tipc_node_unlock(node);
526}
527
528u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
529{
530	return (n_ptr->bclink.supported &&
531		(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
532}
533
534
535/**
536 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
537 *
538 * Send through as many bearers as necessary to reach all nodes
539 * that support TIPC multicasting.
540 *
541 * Returns 0 if packet sent successfully, non-zero if not
542 */
543
544static int tipc_bcbearer_send(struct sk_buff *buf,
545			      struct tipc_bearer *unused1,
546			      struct tipc_media_addr *unused2)
547{
548	int bp_index;
549
550	/* Prepare buffer for broadcasting (if first time trying to send it) */
 
551
552	if (likely(!msg_non_seq(buf_msg(buf)))) {
553		struct tipc_msg *msg;
554
555		bcbuf_set_acks(buf, tipc_bcast_nmap.count);
556		msg = buf_msg(buf);
557		msg_set_non_seq(msg, 1);
558		msg_set_mc_netid(msg, tipc_net_id);
559		bcl->stats.sent_info++;
560
561		if (WARN_ON(!tipc_bcast_nmap.count)) {
562			dump_stack();
563			return 0;
564		}
565	}
566
567	/* Send buffer over bearers until all targets reached */
568
569	bcbearer->remains = tipc_bcast_nmap;
570
571	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
572		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
573		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
574
575		if (!p)
576			break;	/* no more bearers to try */
577
578		tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
579		if (bcbearer->remains_new.count == bcbearer->remains.count)
580			continue;	/* bearer pair doesn't add anything */
581
582		if (p->blocked ||
583		    p->media->send_msg(buf, p, &p->media->bcast_addr)) {
584			/* unable to send on primary bearer */
585			if (!s || s->blocked ||
586			    s->media->send_msg(buf, s,
587					       &s->media->bcast_addr)) {
588				/* unable to send on either bearer */
589				continue;
590			}
591		}
592
593		if (s) {
594			bcbearer->bpairs[bp_index].primary = s;
595			bcbearer->bpairs[bp_index].secondary = p;
596		}
597
598		if (bcbearer->remains_new.count == 0)
599			return 0;
600
601		bcbearer->remains = bcbearer->remains_new;
602	}
603
604	/*
605	 * Unable to reach all targets (indicate success, since currently
606	 * there isn't code in place to properly block & unblock the
607	 * pseudo-bearer used by the broadcast link)
608	 */
609
610	return TIPC_OK;
611}
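/*
 * Notes on the loop above: 'remains' starts out as the full broadcast
 * node map; for each bearer pair, tipc_nmap_diff() computes what would
 * still be unreached ('remains_new'), pairs that do not shrink the set
 * are skipped, and the function returns as soon as the set is empty.
 * Swapping primary and secondary after a successful send presumably
 * balances the traffic between the two bearers of a pair.
 */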
612
613/**
614 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 
615 */
616
617void tipc_bcbearer_sort(void)
618{
619	struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
620	struct bcbearer_pair *bp_curr;
621	int b_index;
622	int pri;
623
624	spin_lock_bh(&bc_lock);
625
626	/* Group bearers by priority (can assume max of two per priority) */
627
628	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
629
630	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
631		struct tipc_bearer *b = &tipc_bearers[b_index];
632
633		if (!b->active || !b->nodes.count)
634			continue;
635
636		if (!bp_temp[b->priority].primary)
637			bp_temp[b->priority].primary = b;
638		else
639			bp_temp[b->priority].secondary = b;
640	}
641
642	/* Create array of bearer pairs for broadcasting */
643
644	bp_curr = bcbearer->bpairs;
645	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
 
646
647	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
648
649		if (!bp_temp[pri].primary)
650			continue;
651
652		bp_curr->primary = bp_temp[pri].primary;
653
654		if (bp_temp[pri].secondary) {
655			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
656					    &bp_temp[pri].secondary->nodes)) {
657				bp_curr->secondary = bp_temp[pri].secondary;
658			} else {
659				bp_curr++;
660				bp_curr->primary = bp_temp[pri].secondary;
661			}
662		}
663
664		bp_curr++;
665	}
 
666
667	spin_unlock_bh(&bc_lock);
668}
669
670/**
671 * tipc_bcbearer_push - resolve bearer congestion
672 *
673 * Forces bclink to push out any unsent packets, until all packets are gone
674 * or congestion reoccurs.
675 * No locks set when function called
676 */
677
678void tipc_bcbearer_push(void)
679{
680	struct tipc_bearer *b_ptr;
681
682	spin_lock_bh(&bc_lock);
683	b_ptr = &bcbearer->bearer;
684	if (b_ptr->blocked) {
685		b_ptr->blocked = 0;
686		tipc_bearer_lock_push(b_ptr);
687	}
688	spin_unlock_bh(&bc_lock);
689}
690
691
692int tipc_bclink_stats(char *buf, const u32 buf_size)
693{
694	struct print_buf pb;
695
696	if (!bcl)
697		return 0;
698
699	tipc_printbuf_init(&pb, buf, buf_size);
700
701	spin_lock_bh(&bc_lock);
702
703	tipc_printf(&pb, "Link <%s>\n"
704			 "  Window:%u packets\n",
705		    bcl->name, bcl->queue_limit[0]);
706	tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
707		    bcl->stats.recv_info,
708		    bcl->stats.recv_fragments,
709		    bcl->stats.recv_fragmented,
710		    bcl->stats.recv_bundles,
711		    bcl->stats.recv_bundled);
712	tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
713		    bcl->stats.sent_info,
714		    bcl->stats.sent_fragments,
715		    bcl->stats.sent_fragmented,
716		    bcl->stats.sent_bundles,
717		    bcl->stats.sent_bundled);
718	tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n",
719		    bcl->stats.recv_nacks,
720		    bcl->stats.deferred_recv,
721		    bcl->stats.duplicates);
722	tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n",
723		    bcl->stats.sent_nacks,
724		    bcl->stats.sent_acks,
725		    bcl->stats.retransmitted);
726	tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
727		    bcl->stats.bearer_congs,
728		    bcl->stats.link_congs,
729		    bcl->stats.max_queue_sz,
730		    bcl->stats.queue_sz_counts
731		    ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
732		    : 0);
733
734	spin_unlock_bh(&bc_lock);
735	return tipc_printbuf_validate(&pb);
 
736}
737
738int tipc_bclink_reset_stats(void)
739{
740	if (!bcl)
741		return -ENOPROTOOPT;
742
743	spin_lock_bh(&bc_lock);
744	memset(&bcl->stats, 0, sizeof(bcl->stats));
745	spin_unlock_bh(&bc_lock);
746	return 0;
747}
748
749int tipc_bclink_set_queue_limits(u32 limit)
750{
751	if (!bcl)
752		return -ENOPROTOOPT;
753	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
754		return -EINVAL;
755
756	spin_lock_bh(&bc_lock);
757	tipc_link_set_queue_limits(bcl, limit);
758	spin_unlock_bh(&bc_lock);
759	return 0;
760}
761
762int tipc_bclink_init(void)
763{
764	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
765	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
766	if (!bcbearer || !bclink) {
767		warn("Multicast link creation failed, no memory\n");
768		kfree(bcbearer);
769		bcbearer = NULL;
770		kfree(bclink);
771		bclink = NULL;
772		return -ENOMEM;
773	}
774
775	INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
776	bcbearer->bearer.media = &bcbearer->media;
777	bcbearer->media.send_msg = tipc_bcbearer_send;
778	sprintf(bcbearer->media.name, "tipc-multicast");
779
780	bcl = &bclink->link;
781	INIT_LIST_HEAD(&bcl->waiting_ports);
782	bcl->next_out_no = 1;
783	spin_lock_init(&bclink->node.lock);
784	bcl->owner = &bclink->node;
785	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
786	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
787	bcl->b_ptr = &bcbearer->bearer;
788	bcl->state = WORKING_WORKING;
789	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
790
791	return 0;
792}
793
794void tipc_bclink_stop(void)
795{
796	spin_lock_bh(&bc_lock);
797	if (bcbearer) {
798		tipc_link_stop(bcl);
799		bcl = NULL;
800		kfree(bclink);
801		bclink = NULL;
802		kfree(bcbearer);
803		bcbearer = NULL;
804	}
805	spin_unlock_bh(&bc_lock);
806}
807
808
809/**
810 * tipc_nmap_add - add a node to a node map
811 */
812
813void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
814{
815	int n = tipc_node(node);
816	int w = n / WSIZE;
817	u32 mask = (1 << (n % WSIZE));
818
819	if ((nm_ptr->map[w] & mask) == 0) {
820		nm_ptr->count++;
821		nm_ptr->map[w] |= mask;
822	}
823}
824
825/**
826 * tipc_nmap_remove - remove a node from a node map
827 */
828
829void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
830{
831	int n = tipc_node(node);
832	int w = n / WSIZE;
833	u32 mask = (1 << (n % WSIZE));
834
835	if ((nm_ptr->map[w] & mask) != 0) {
836		nm_ptr->map[w] &= ~mask;
837		nm_ptr->count--;
838	}
839}
840
841/**
842 * tipc_nmap_diff - find differences between node maps
843 * @nm_a: input node map A
844 * @nm_b: input node map B
845 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
846 */
847
848static void tipc_nmap_diff(struct tipc_node_map *nm_a,
849			   struct tipc_node_map *nm_b,
850			   struct tipc_node_map *nm_diff)
851{
852	int stop = ARRAY_SIZE(nm_a->map);
853	int w;
854	int b;
855	u32 map;
856
857	memset(nm_diff, 0, sizeof(*nm_diff));
858	for (w = 0; w < stop; w++) {
859		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
860		nm_diff->map[w] = map;
861		if (map != 0) {
862			for (b = 0 ; b < WSIZE; b++) {
863				if (map & (1 << b))
864					nm_diff->count++;
865			}
866		}
867	}
868}
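/*
 * Example: if nm_a covers nodes {1, 2, 5} and nm_b covers {2}, nm_diff
 * ends up covering {1, 5} with a count of 2.
 */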
869
870/**
871 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
872 */
873
874void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
875{
876	struct port_list *item = pl_ptr;
877	int i;
878	int item_sz = PLSIZE;
879	int cnt = pl_ptr->count;
880
881	for (; ; cnt -= item_sz, item = item->next) {
882		if (cnt < PLSIZE)
883			item_sz = cnt;
884		for (i = 0; i < item_sz; i++)
885			if (item->ports[i] == port)
886				return;
887		if (i < PLSIZE) {
888			item->ports[i] = port;
889			pl_ptr->count++;
890			return;
891		}
892		if (!item->next) {
893			item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
894			if (!item->next) {
895				warn("Incomplete multicast delivery, no memory\n");
896				return;
897			}
898			item->next->next = NULL;
899		}
900	}
901}
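/*
 * Note: a port list is a chain of blocks holding PLSIZE ports each; the
 * head block (embedded in the caller's structure) keeps the total count,
 * and additional blocks are kmalloc'd on demand.  The scan above doubles
 * as a duplicate check and as a search for the first free slot.
 */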
902
903/**
904 * tipc_port_list_free - free dynamically created entries in port_list chain
905 *
906 */
907
908void tipc_port_list_free(struct port_list *pl_ptr)
909{
910	struct port_list *item;
911	struct port_list *next;
912
913	for (item = pl_ptr->next; item; item = next) {
914		next = item->next;
915		kfree(item);
916	}
917}
918
v4.17
  1/*
  2 * net/tipc/bcast.c: TIPC broadcast code
  3 *
  4 * Copyright (c) 2004-2006, 2014-2017, Ericsson AB
  5 * Copyright (c) 2004, Intel Corporation.
  6 * Copyright (c) 2005, 2010-2011, Wind River Systems
  7 * All rights reserved.
  8 *
  9 * Redistribution and use in source and binary forms, with or without
 10 * modification, are permitted provided that the following conditions are met:
 11 *
 12 * 1. Redistributions of source code must retain the above copyright
 13 *    notice, this list of conditions and the following disclaimer.
 14 * 2. Redistributions in binary form must reproduce the above copyright
 15 *    notice, this list of conditions and the following disclaimer in the
 16 *    documentation and/or other materials provided with the distribution.
 17 * 3. Neither the names of the copyright holders nor the names of its
 18 *    contributors may be used to endorse or promote products derived from
 19 *    this software without specific prior written permission.
 20 *
 21 * Alternatively, this software may be distributed under the terms of the
 22 * GNU General Public License ("GPL") version 2 as published by the Free
 23 * Software Foundation.
 24 *
 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 35 * POSSIBILITY OF SUCH DAMAGE.
 36 */
 37
 38#include <linux/tipc_config.h>
 39#include "socket.h"
 40#include "msg.h"
 41#include "bcast.h"
 42#include "link.h"
 43#include "name_table.h"
 44
 45#define BCLINK_WIN_DEFAULT  50	/* bcast link window size (default) */
 46#define BCLINK_WIN_MIN      32	/* bcast minimum link window size */
 47
 48const char tipc_bclink_name[] = "broadcast-link";
 49
 50/**
 51 * struct tipc_bc_base - base structure for keeping broadcast send state
 52 * @link: broadcast send link structure
 53 * @inputq: data input queue; will only carry SOCK_WAKEUP messages
 54 * @dest: array keeping number of reachable destinations per bearer
 55 * @primary_bearer: a bearer having links to all broadcast destinations, if any
 56 * @bcast_support: indicates if primary bearer, if any, supports broadcast
 57 * @rcast_support: indicates if all peer nodes support replicast
 58 * @rc_ratio: dest count as percentage of cluster size where send method changes
 59 * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
 60 */
 61struct tipc_bc_base {
 62	struct tipc_link *link;
 63	struct sk_buff_head inputq;
 64	int dests[MAX_BEARERS];
 65	int primary_bearer;
 66	bool bcast_support;
 67	bool rcast_support;
 68	int rc_ratio;
 69	int bc_threshold;
 70};
 71
 72static struct tipc_bc_base *tipc_bc_base(struct net *net)
 73{
 74	return tipc_net(net)->bcbase;
 75}
 76
 77/* tipc_bcast_get_mtu(): get the MTU currently used by broadcast link
 78 * Note: the MTU is decremented to give room for a tunnel header, in
 79 * case the message needs to be sent as replicast
 80 */
 81int tipc_bcast_get_mtu(struct net *net)
 
 82{
 83	return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
 84}
 85
 86void tipc_bcast_disable_rcast(struct net *net)
 87{
 88	tipc_bc_base(net)->rcast_support = false;
 89}
 90
 91static void tipc_bcbase_calc_bc_threshold(struct net *net)
 92{
 93	struct tipc_bc_base *bb = tipc_bc_base(net);
 94	int cluster_size = tipc_link_bc_peers(tipc_bc_sndlink(net));
 95
 96	bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100);
 97}
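/*
 * Example: with the default rc_ratio of 25 (set in tipc_bcast_init())
 * and a cluster of, say, 40 reachable peers, bc_threshold becomes
 * 1 + 40 * 25 / 100 = 11, so tipc_bcast_select_xmit_method() picks
 * replicast for up to 11 destinations and true broadcast above that.
 */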
 98
 99/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
100 *                               if any, and make it primary bearer
101 */
102static void tipc_bcbase_select_primary(struct net *net)
 
103{
104	struct tipc_bc_base *bb = tipc_bc_base(net);
105	int all_dests =  tipc_link_bc_peers(bb->link);
106	int i, mtu, prim;
107
108	bb->primary_bearer = INVALID_BEARER_ID;
109	bb->bcast_support = true;
110
111	if (!all_dests)
112		return;
113
114	for (i = 0; i < MAX_BEARERS; i++) {
115		if (!bb->dests[i])
116			continue;
117
118		mtu = tipc_bearer_mtu(net, i);
119		if (mtu < tipc_link_mtu(bb->link))
120			tipc_link_set_mtu(bb->link, mtu);
121		bb->bcast_support &= tipc_bearer_bcast_support(net, i);
122		if (bb->dests[i] < all_dests)
123			continue;
124
125		bb->primary_bearer = i;
126
127		/* Reduce risk that all nodes select same primary */
128		if ((i ^ tipc_own_addr(net)) & 1)
129			break;
130	}
131	prim = bb->primary_bearer;
132	if (prim != INVALID_BEARER_ID)
133		bb->bcast_support = tipc_bearer_bcast_support(net, prim);
134}
135
136void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
137{
138	struct tipc_bc_base *bb = tipc_bc_base(net);
139
140	tipc_bcast_lock(net);
141	bb->dests[bearer_id]++;
142	tipc_bcbase_select_primary(net);
143	tipc_bcast_unlock(net);
144}
145
146void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id)
147{
148	struct tipc_bc_base *bb = tipc_bc_base(net);
 
149
150	tipc_bcast_lock(net);
151	bb->dests[bearer_id]--;
152	tipc_bcbase_select_primary(net);
153	tipc_bcast_unlock(net);
154}
155
156/* tipc_bcbase_xmit - broadcast a packet queue across one or more bearers
 
157 *
158 * Note that number of reachable destinations, as indicated in the dests[]
159 * array, may transitionally differ from the number of destinations indicated
160 * in each sent buffer. We can sustain this. Excess destination nodes will
161 * drop and never acknowledge the unexpected packets, and missing destinations
162 * will either require retransmission (if they are just about to be added to
163 * the bearer), or be removed from the buffer's 'ackers' counter (if they
164 * just went down)
165 */
166static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
 
167{
168	int bearer_id;
169	struct tipc_bc_base *bb = tipc_bc_base(net);
170	struct sk_buff *skb, *_skb;
171	struct sk_buff_head _xmitq;
172
173	if (skb_queue_empty(xmitq))
174		return;
175
176	/* The typical case: at least one bearer has links to all nodes */
177	bearer_id = bb->primary_bearer;
178	if (bearer_id >= 0) {
179		tipc_bearer_bc_xmit(net, bearer_id, xmitq);
180		return;
181	}
182
183	/* We have to transmit across all bearers */
184	skb_queue_head_init(&_xmitq);
185	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
186		if (!bb->dests[bearer_id])
187			continue;
188
189		skb_queue_walk(xmitq, skb) {
190			_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
191			if (!_skb)
192				break;
193			__skb_queue_tail(&_xmitq, _skb);
194		}
195		tipc_bearer_bc_xmit(net, bearer_id, &_xmitq);
 
196	}
197	__skb_queue_purge(xmitq);
198	__skb_queue_purge(&_xmitq);
199}
200
201static void tipc_bcast_select_xmit_method(struct net *net, int dests,
202					  struct tipc_mc_method *method)
203{
204	struct tipc_bc_base *bb = tipc_bc_base(net);
205	unsigned long exp = method->expires;
206
207	/* Broadcast supported by used bearer/bearers? */
208	if (!bb->bcast_support) {
209		method->rcast = true;
210		return;
211	}
212	/* Any destinations which don't support replicast ? */
213	if (!bb->rcast_support) {
214		method->rcast = false;
215		return;
216	}
217	/* Can current method be changed ? */
218	method->expires = jiffies + TIPC_METHOD_EXPIRE;
219	if (method->mandatory || time_before(jiffies, exp))
220		return;
221
222	/* Determine method to use now */
223	method->rcast = dests <= bb->bc_threshold;
224}
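/*
 * Note: once a method has been picked it is kept for TIPC_METHOD_EXPIRE
 * jiffies (or for as long as method->mandatory is set) before the
 * dests/threshold comparison is re-evaluated, which keeps the send
 * method from flip-flopping between broadcast and replicast.
 */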
225
226/* tipc_bcast_xmit - broadcast the buffer chain to all external nodes
227 * @net: the applicable net namespace
228 * @pkts: chain of buffers containing message
229 * @cong_link_cnt: set to 1 if broadcast link is congested, otherwise 0
230 * Consumes the buffer chain.
231 * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE
232 */
233static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
234			   u16 *cong_link_cnt)
235{
236	struct tipc_link *l = tipc_bc_sndlink(net);
237	struct sk_buff_head xmitq;
238	int rc = 0;
239
240	skb_queue_head_init(&xmitq);
241	tipc_bcast_lock(net);
242	if (tipc_link_bc_peers(l))
243		rc = tipc_link_xmit(l, pkts, &xmitq);
244	tipc_bcast_unlock(net);
245	tipc_bcbase_xmit(net, &xmitq);
246	__skb_queue_purge(pkts);
247	if (rc == -ELINKCONG) {
248		*cong_link_cnt = 1;
249		rc = 0;
250	}
251	return rc;
252}
253
254/* tipc_rcast_xmit - replicate and send a message to given destination nodes
255 * @net: the applicable net namespace
256 * @pkts: chain of buffers containing message
257 * @dests: list of destination nodes
258 * @cong_link_cnt: returns number of congested links
259 * @cong_links: returns identities of congested links
260 * Returns 0 if success, otherwise errno
261 */
262static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
263			   struct tipc_nlist *dests, u16 *cong_link_cnt)
264{
265	struct tipc_dest *dst, *tmp;
266	struct sk_buff_head _pkts;
267	u32 dnode, selector;
268
269	selector = msg_link_selector(buf_msg(skb_peek(pkts)));
270	skb_queue_head_init(&_pkts);
271
272	list_for_each_entry_safe(dst, tmp, &dests->list, list) {
273		dnode = dst->node;
274		if (!tipc_msg_pskb_copy(dnode, pkts, &_pkts))
275			return -ENOMEM;
276
277		/* Any other return value than -ELINKCONG is ignored */
278		if (tipc_node_xmit(net, &_pkts, dnode, selector) == -ELINKCONG)
279			(*cong_link_cnt)++;
280	}
281	return 0;
282}
283
284/* tipc_mcast_xmit - deliver message to indicated destination nodes
285 *                   and to identified node local sockets
286 * @net: the applicable net namespace
287 * @pkts: chain of buffers containing message
288 * @method: send method to be used
289 * @dests: destination nodes for message.
290 * @cong_link_cnt: returns number of encountered congested destination links
291 * Consumes buffer chain.
292 * Returns 0 if success, otherwise errno
293 */
294int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
295		    struct tipc_mc_method *method, struct tipc_nlist *dests,
296		    u16 *cong_link_cnt)
297{
298	struct sk_buff_head inputq, localq;
299	int rc = 0;
300
301	skb_queue_head_init(&inputq);
302	skb_queue_head_init(&localq);
303
304	/* Clone packets before they are consumed by next call */
305	if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
306		rc = -ENOMEM;
307		goto exit;
308	}
309	/* Send according to determined transmit method */
310	if (dests->remote) {
311		tipc_bcast_select_xmit_method(net, dests->remote, method);
312		if (method->rcast)
313			rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);
314		else
315			rc = tipc_bcast_xmit(net, pkts, cong_link_cnt);
316	}
317
318	if (dests->local)
319		tipc_sk_mcast_rcv(net, &localq, &inputq);
320exit:
321	/* This queue should normally be empty by now */
322	__skb_queue_purge(pkts);
323	return rc;
324}
325
326/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
327 *
328 * RCU is locked, no other locks set
329 */
330int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
331{
332	struct tipc_msg *hdr = buf_msg(skb);
333	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
334	struct sk_buff_head xmitq;
335	int rc;
336
337	__skb_queue_head_init(&xmitq);
 
338
339	if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
340		kfree_skb(skb);
341		return 0;
342	}
343
344	tipc_bcast_lock(net);
345	if (msg_user(hdr) == BCAST_PROTOCOL)
346		rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
347	else
348		rc = tipc_link_rcv(l, skb, NULL);
349	tipc_bcast_unlock(net);
350
351	tipc_bcbase_xmit(net, &xmitq);
 
352
353	/* Any socket wakeup messages ? */
354	if (!skb_queue_empty(inputq))
355		tipc_sk_rcv(net, inputq);
356
357	return rc;
358}
359
360/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
361 *
362 * RCU is locked, no other locks set
363 */
364void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
365			struct tipc_msg *hdr)
366{
367	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
368	u16 acked = msg_bcast_ack(hdr);
369	struct sk_buff_head xmitq;
370
371	/* Ignore bc acks sent by peer before bcast synch point was received */
372	if (msg_bc_ack_invalid(hdr))
373		return;
374
375	__skb_queue_head_init(&xmitq);
376
377	tipc_bcast_lock(net);
378	tipc_link_bc_ack_rcv(l, acked, &xmitq);
379	tipc_bcast_unlock(net);
380
381	tipc_bcbase_xmit(net, &xmitq);
382
383	/* Any socket wakeup messages ? */
384	if (!skb_queue_empty(inputq))
385		tipc_sk_rcv(net, inputq);
386}
387
 388/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
389 *
390 * RCU is locked, no other locks set
391 */
392int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
393			struct tipc_msg *hdr)
394{
395	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
396	struct sk_buff_head xmitq;
397	int rc = 0;
398
399	__skb_queue_head_init(&xmitq);
400
401	tipc_bcast_lock(net);
402	if (msg_type(hdr) != STATE_MSG) {
403		tipc_link_bc_init_rcv(l, hdr);
404	} else if (!msg_bc_ack_invalid(hdr)) {
405		tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
406		rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
407	}
408	tipc_bcast_unlock(net);
409
410	tipc_bcbase_xmit(net, &xmitq);
411
412	/* Any socket wakeup messages ? */
413	if (!skb_queue_empty(inputq))
414		tipc_sk_rcv(net, inputq);
415	return rc;
416}
417
418/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
 
419 *
420 * RCU is locked, node lock is set
421 */
422void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
423			 struct sk_buff_head *xmitq)
424{
425	struct tipc_link *snd_l = tipc_bc_sndlink(net);
426
427	tipc_bcast_lock(net);
428	tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
429	tipc_bcbase_select_primary(net);
430	tipc_bcbase_calc_bc_threshold(net);
431	tipc_bcast_unlock(net);
432}
433
434/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
435 *
436 * RCU is locked, node lock is set
437 */
438void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
439{
440	struct tipc_link *snd_l = tipc_bc_sndlink(net);
441	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
442	struct sk_buff_head xmitq;
 
443
444	__skb_queue_head_init(&xmitq);
445
446	tipc_bcast_lock(net);
447	tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
448	tipc_bcbase_select_primary(net);
449	tipc_bcbase_calc_bc_threshold(net);
450	tipc_bcast_unlock(net);
451
452	tipc_bcbase_xmit(net, &xmitq);
453
454	/* Any socket wakeup messages ? */
455	if (!skb_queue_empty(inputq))
456		tipc_sk_rcv(net, inputq);
457}
458
459int tipc_bclink_reset_stats(struct net *net)
460{
461	struct tipc_link *l = tipc_bc_sndlink(net);
462
463	if (!l)
464		return -ENOPROTOOPT;
465
466	tipc_bcast_lock(net);
467	tipc_link_reset_stats(l);
468	tipc_bcast_unlock(net);
469	return 0;
470}
471
472static int tipc_bc_link_set_queue_limits(struct net *net, u32 limit)
473{
474	struct tipc_link *l = tipc_bc_sndlink(net);
475
476	if (!l)
477		return -ENOPROTOOPT;
478	if (limit < BCLINK_WIN_MIN)
479		limit = BCLINK_WIN_MIN;
480	if (limit > TIPC_MAX_LINK_WIN)
481		return -EINVAL;
482	tipc_bcast_lock(net);
483	tipc_link_set_queue_limits(l, limit);
484	tipc_bcast_unlock(net);
 
485	return 0;
486}
487
488int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
489{
490	int err;
491	u32 win;
492	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
493
494	if (!attrs[TIPC_NLA_LINK_PROP])
495		return -EINVAL;
496
497	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
498	if (err)
499		return err;
500
501	if (!props[TIPC_NLA_PROP_WIN])
502		return -EOPNOTSUPP;
503
504	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
505
506	return tipc_bc_link_set_queue_limits(net, win);
507}
508
509int tipc_bcast_init(struct net *net)
510{
511	struct tipc_net *tn = tipc_net(net);
512	struct tipc_bc_base *bb = NULL;
513	struct tipc_link *l = NULL;
514
515	bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
516	if (!bb)
517		goto enomem;
518	tn->bcbase = bb;
519	spin_lock_init(&tipc_net(net)->bclock);
520
521	if (!tipc_link_bc_create(net, 0, 0,
522				 FB_MTU,
523				 BCLINK_WIN_DEFAULT,
524				 0,
525				 &bb->inputq,
526				 NULL,
527				 NULL,
528				 &l))
529		goto enomem;
530	bb->link = l;
531	tn->bcl = l;
532	bb->rc_ratio = 25;
533	bb->rcast_support = true;
534	return 0;
535enomem:
536	kfree(bb);
537	kfree(l);
538	return -ENOMEM;
539}
540
541void tipc_bcast_stop(struct net *net)
542{
543	struct tipc_net *tn = net_generic(net, tipc_net_id);
544
545	synchronize_net();
546	kfree(tn->bcbase);
547	kfree(tn->bcl);
 
548}
549
550void tipc_nlist_init(struct tipc_nlist *nl, u32 self)
551{
552	memset(nl, 0, sizeof(*nl));
553	INIT_LIST_HEAD(&nl->list);
554	nl->self = self;
555}
556
557void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
558{
559	if (node == nl->self)
560		nl->local = true;
561	else if (tipc_dest_push(&nl->list, node, 0))
562		nl->remote++;
563}
564
565void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
566{
567	if (node == nl->self)
568		nl->local = false;
569	else if (tipc_dest_del(&nl->list, node, 0))
570		nl->remote--;
571}
572
573void tipc_nlist_purge(struct tipc_nlist *nl)
574{
575	tipc_dest_list_purge(&nl->list);
576	nl->remote = 0;
577	nl->local = false;
578}