v6.13.7
/*
 * net/tipc/name_distr.c: TIPC name distribution code
 *
 * Copyright (c) 2000-2006, 2014-2019, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * Copyright (c) 2020-2021, Red Hat Inc
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "name_distr.h"

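/* Tunable at runtime via /proc/sys/net/tipc/named_timeout (milliseconds). */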
int sysctl_tipc_named_timeout __read_mostly = 2000;

/**
 * publ_to_item - add publication info to a publication message
 * @p: publication info
 * @i: location of item in the message
 */
static void publ_to_item(struct distr_item *i, struct publication *p)
{
	i->type = htonl(p->sr.type);
	i->lower = htonl(p->sr.lower);
	i->upper = htonl(p->sr.upper);
	i->port = htonl(p->sk.ref);
	i->key = htonl(p->key);
}

/**
 * named_prepare_buf - allocate & initialize a publication message
 * @net: the associated network namespace
 * @type: message type
 * @size: payload size
 * @dest: destination node
 *
 * The buffer returned is of size INT_H_SIZE + payload size
 */
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
					 u32 dest)
{
	struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
	u32 self = tipc_own_addr(net);
	struct tipc_msg *msg;

	if (buf != NULL) {
		msg = buf_msg(buf);
		tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
			      type, INT_H_SIZE, dest);
		msg_set_size(msg, INT_H_SIZE + size);
	}
	return buf;
}

/**
 * tipc_named_publish - tell other nodes about a new publication by this node
 * @net: the associated network namespace
 * @p: the new publication
 */
struct sk_buff *tipc_named_publish(struct net *net, struct publication *p)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

	if (p->scope == TIPC_NODE_SCOPE) {
		list_add_tail_rcu(&p->binding_node, &nt->node_scope);
		return NULL;
	}
	write_lock_bh(&nt->cluster_scope_lock);
	list_add_tail(&p->binding_node, &nt->cluster_scope);
	write_unlock_bh(&nt->cluster_scope_lock);
	skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Publication distribution failure\n");
		return NULL;
	}
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, p);
	return skb;
}

/**
 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
 * @net: the associated network namespace
 * @p: the withdrawn publication
 */
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *p)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

	write_lock_bh(&nt->cluster_scope_lock);
	list_del(&p->binding_node);
	write_unlock_bh(&nt->cluster_scope_lock);
	if (p->scope == TIPC_NODE_SCOPE)
		return NULL;

	skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Withdrawal distribution failure\n");
		return NULL;
	}
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, p);
	return skb;
}

/**
 * named_distribute - prepare name info for bulk distribution to another node
 * @net: the associated network namespace
 * @list: list of messages (buffers) to be returned from this function
 * @dnode: node to be updated
 * @pls: linked list of publication items to be packed into buffer chain
 * @seqno: sequence number for this message
 */
static void named_distribute(struct net *net, struct sk_buff_head *list,
			     u32 dnode, struct list_head *pls, u16 seqno)
{
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct distr_item *item = NULL;
	u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
			ITEM_SIZE) * ITEM_SIZE;
	u32 msg_rem = msg_dsz;
	struct tipc_msg *hdr;

	list_for_each_entry(publ, pls, binding_node) {
		/* Prepare next buffer: */
		if (!skb) {
			skb = named_prepare_buf(net, PUBLICATION, msg_rem,
						dnode);
			if (!skb) {
				pr_warn("Bulk publication failure\n");
				return;
			}
			hdr = buf_msg(skb);
			msg_set_bc_ack_invalid(hdr, true);
			msg_set_bulk(hdr);
			msg_set_non_legacy(hdr);
			item = (struct distr_item *)msg_data(hdr);
		}

		/* Pack publication into message: */
		publ_to_item(item, publ);
		item++;
		msg_rem -= ITEM_SIZE;

		/* Append full buffer to list: */
		if (!msg_rem) {
			__skb_queue_tail(list, skb);
			skb = NULL;
			msg_rem = msg_dsz;
		}
	}
	if (skb) {
		hdr = buf_msg(skb);
		msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
		skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
		__skb_queue_tail(list, skb);
	}
	hdr = buf_msg(skb_peek_tail(list));
	msg_set_last_bulk(hdr);
	msg_set_named_seqno(hdr, seqno);
}

/**
 * tipc_named_node_up - tell specified node about all publications by this node
 * @net: the associated network namespace
 * @dnode: destination node
 * @capabilities: peer node's capabilities
 */
void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct sk_buff_head head;
	u16 seqno;

	__skb_queue_head_init(&head);
	spin_lock_bh(&tn->nametbl_lock);
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests++;
	seqno = nt->snd_nxt;
	spin_unlock_bh(&tn->nametbl_lock);

	read_lock_bh(&nt->cluster_scope_lock);
	named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
	tipc_node_xmit(net, &head, dnode, 0);
	read_unlock_bh(&nt->cluster_scope_lock);
}

/**
 * tipc_publ_purge - remove publication associated with a failed node
 * @net: the associated network namespace
 * @p: the publication to remove
 * @addr: failed node's address
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes publication structure from name table & deletes it.
 */
static void tipc_publ_purge(struct net *net, struct publication *p, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct publication *_p;
	struct tipc_uaddr ua;

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, p->scope, p->sr.type,
		   p->sr.lower, p->sr.upper);
	spin_lock_bh(&tn->nametbl_lock);
	_p = tipc_nametbl_remove_publ(net, &ua, &p->sk, p->key);
	if (_p)
		tipc_node_unsubscribe(net, &_p->binding_node, addr);
	spin_unlock_bh(&tn->nametbl_lock);
	if (_p)
		kfree_rcu(_p, rcu);
}

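/**
 * tipc_publ_notify - purge all publications issued by a failed node
 * @net: the associated network namespace
 * @nsub_list: list of publications to purge
 * @addr: failed node's address
 * @capabilities: failed node's capabilities
 *
 * Removes each publication on @nsub_list from the name table and, if the
 * failed peer did not support TIPC_NAMED_BCAST, drops it from the replicast
 * destination count (rc_dests).
 */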
void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
		      u32 addr, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);

	struct publication *publ, *tmp;

	list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
		tipc_publ_purge(net, publ, addr);
	spin_lock_bh(&tn->nametbl_lock);
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests--;
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_update_nametbl - try to process a nametable update and notify
 *			 subscribers
 * @net: the associated network namespace
 * @i: location of item in the message
 * @node: node address
 * @dtype: name distributor message type
 *
 * tipc_nametbl_lock must be held.
 * Return: true if the update was successfully applied, otherwise false.
 */
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
				u32 node, u32 dtype)
{
	struct publication *p = NULL;
	struct tipc_socket_addr sk;
	struct tipc_uaddr ua;
	u32 key = ntohl(i->key);

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_CLUSTER_SCOPE,
		   ntohl(i->type), ntohl(i->lower), ntohl(i->upper));
	sk.ref = ntohl(i->port);
	sk.node = node;

	if (dtype == PUBLICATION) {
		p = tipc_nametbl_insert_publ(net, &ua, &sk, key);
		if (p) {
			tipc_node_subscribe(net, &p->binding_node, node);
			return true;
		}
	} else if (dtype == WITHDRAWAL) {
		p = tipc_nametbl_remove_publ(net, &ua, &sk, key);
		if (p) {
			tipc_node_unsubscribe(net, &p->binding_node, node);
			kfree_rcu(p, rcu);
			return true;
		}
		pr_warn_ratelimited("Failed to remove binding %u,%u from %u\n",
				    ua.sr.type, ua.sr.lower, node);
	} else {
		pr_warn_ratelimited("Unknown name table message received\n");
	}
	return false;
}

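/**
 * tipc_named_dequeue - get next name table update ready for processing
 * @namedq: queue of NAME_DISTRIBUTOR messages received from a peer node
 * @rcv_nxt: sequence number expected next from this peer
 * @open: true once the peer's last bulk message has been received
 *
 * Bulk and legacy messages are handed back as soon as they are found; other
 * updates are only handed back in sequence order once the bulk exchange is
 * complete. Obsolete messages (seqno already passed) are dropped, as are
 * messages that cannot be linearized.
 */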
static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
					  u16 *rcv_nxt, bool *open)
{
	struct sk_buff *skb, *tmp;
	struct tipc_msg *hdr;
	u16 seqno;

	spin_lock_bh(&namedq->lock);
	skb_queue_walk_safe(namedq, skb, tmp) {
		if (unlikely(skb_linearize(skb))) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
		hdr = buf_msg(skb);
		seqno = msg_named_seqno(hdr);
		if (msg_is_last_bulk(hdr)) {
			*rcv_nxt = seqno;
			*open = true;
		}

		if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		if (*open && (*rcv_nxt == seqno)) {
			(*rcv_nxt)++;
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		if (less(seqno, *rcv_nxt)) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
	}
	spin_unlock_bh(&namedq->lock);
	return NULL;
}

/**
 * tipc_named_rcv - process name table update messages sent by another node
 * @net: the associated network namespace
 * @namedq: queue to receive from
 * @rcv_nxt: sequence number expected next from this peer
 * @open: true once the peer's last bulk message has been received
 */
void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
		    u16 *rcv_nxt, bool *open)
{
	struct tipc_net *tn = tipc_net(net);
	struct distr_item *item;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	u32 count, node;

	spin_lock_bh(&tn->nametbl_lock);
	while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
		hdr = buf_msg(skb);
		node = msg_orignode(hdr);
		item = (struct distr_item *)msg_data(hdr);
		count = msg_data_sz(hdr) / ITEM_SIZE;
		while (count--) {
			tipc_update_nametbl(net, item, node, msg_type(hdr));
			item++;
		}
		kfree_skb(skb);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_named_reinit - re-initialize local publications
 * @net: the associated network namespace
 *
 * This routine is called whenever TIPC networking is enabled.
 * All name table entries published by this node are updated to reflect
 * the node's new network address.
 */
void tipc_named_reinit(struct net *net)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct publication *p;
	u32 self = tipc_own_addr(net);

	spin_lock_bh(&tn->nametbl_lock);

	list_for_each_entry_rcu(p, &nt->node_scope, binding_node)
		p->sk.node = self;
	list_for_each_entry_rcu(p, &nt->cluster_scope, binding_node)
		p->sk.node = self;
	nt->rc_dests = 0;
	spin_unlock_bh(&tn->nametbl_lock);
}
v5.9
/*
 * net/tipc/name_distr.c: TIPC name distribution code
 *
 * Copyright (c) 2000-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "name_distr.h"

int sysctl_tipc_named_timeout __read_mostly = 2000;

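/* Entry in the deferred update queue (tn->dist_queue), removed by
 * tipc_dist_queue_purge() when the originating node goes down.
 */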
struct distr_queue_item {
	struct distr_item i;
	u32 dtype;
	u32 node;
	unsigned long expires;
	struct list_head next;
};

/**
 * publ_to_item - add publication info to a publication message
 */
static void publ_to_item(struct distr_item *i, struct publication *p)
{
	i->type = htonl(p->type);
	i->lower = htonl(p->lower);
	i->upper = htonl(p->upper);
	i->port = htonl(p->port);
	i->key = htonl(p->key);
}

/**
 * named_prepare_buf - allocate & initialize a publication message
 *
 * The buffer returned is of size INT_H_SIZE + payload size
 */
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
					 u32 dest)
{
	struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
	u32 self = tipc_own_addr(net);
	struct tipc_msg *msg;

	if (buf != NULL) {
		msg = buf_msg(buf);
		tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
			      type, INT_H_SIZE, dest);
		msg_set_size(msg, INT_H_SIZE + size);
	}
	return buf;
}

/**
 * tipc_named_publish - tell other nodes about a new publication by this node
 */
struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

	if (publ->scope == TIPC_NODE_SCOPE) {
		list_add_tail_rcu(&publ->binding_node, &nt->node_scope);
		return NULL;
	}
	write_lock_bh(&nt->cluster_scope_lock);
	list_add_tail(&publ->binding_node, &nt->cluster_scope);
	write_unlock_bh(&nt->cluster_scope_lock);
	skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Publication distribution failure\n");
		return NULL;
	}
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, publ);
	return skb;
}

/**
 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
 */
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

	write_lock_bh(&nt->cluster_scope_lock);
	list_del(&publ->binding_node);
	write_unlock_bh(&nt->cluster_scope_lock);
	if (publ->scope == TIPC_NODE_SCOPE)
		return NULL;

	skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Withdrawal distribution failure\n");
		return NULL;
	}
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, publ);
	return skb;
}

/**
 * named_distribute - prepare name info for bulk distribution to another node
 * @list: list of messages (buffers) to be returned from this function
 * @dnode: node to be updated
 * @pls: linked list of publication items to be packed into buffer chain
 */
static void named_distribute(struct net *net, struct sk_buff_head *list,
			     u32 dnode, struct list_head *pls, u16 seqno)
{
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct distr_item *item = NULL;
	u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
			ITEM_SIZE) * ITEM_SIZE;
	u32 msg_rem = msg_dsz;
	struct tipc_msg *hdr;

	list_for_each_entry(publ, pls, binding_node) {
		/* Prepare next buffer: */
		if (!skb) {
			skb = named_prepare_buf(net, PUBLICATION, msg_rem,
						dnode);
			if (!skb) {
				pr_warn("Bulk publication failure\n");
				return;
			}
			hdr = buf_msg(skb);
			msg_set_bc_ack_invalid(hdr, true);
			msg_set_bulk(hdr);
			msg_set_non_legacy(hdr);
			item = (struct distr_item *)msg_data(hdr);
		}

		/* Pack publication into message: */
		publ_to_item(item, publ);
		item++;
		msg_rem -= ITEM_SIZE;

		/* Append full buffer to list: */
		if (!msg_rem) {
			__skb_queue_tail(list, skb);
			skb = NULL;
			msg_rem = msg_dsz;
		}
	}
	if (skb) {
		hdr = buf_msg(skb);
		msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
		skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
		__skb_queue_tail(list, skb);
	}
	hdr = buf_msg(skb_peek_tail(list));
	msg_set_last_bulk(hdr);
	msg_set_named_seqno(hdr, seqno);
}

/**
 * tipc_named_node_up - tell specified node about all publications by this node
 */
void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct sk_buff_head head;
	u16 seqno;

	__skb_queue_head_init(&head);
	spin_lock_bh(&tn->nametbl_lock);
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests++;
	seqno = nt->snd_nxt;
	spin_unlock_bh(&tn->nametbl_lock);

	read_lock_bh(&nt->cluster_scope_lock);
	named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
	tipc_node_xmit(net, &head, dnode, 0);
	read_unlock_bh(&nt->cluster_scope_lock);
}

/**
 * tipc_publ_purge - remove publication associated with a failed node
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes publication structure from name table & deletes it.
 */
static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct publication *p;

	spin_lock_bh(&tn->nametbl_lock);
	p = tipc_nametbl_remove_publ(net, publ->type, publ->lower, publ->upper,
				     publ->node, publ->key);
	if (p)
		tipc_node_unsubscribe(net, &p->binding_node, addr);
	spin_unlock_bh(&tn->nametbl_lock);

	if (p != publ) {
		pr_err("Unable to remove publication from failed node\n"
		       " (type=%u, lower=%u, node=0x%x, port=%u, key=%u)\n",
		       publ->type, publ->lower, publ->node, publ->port,
		       publ->key);
	}

	if (p)
		kfree_rcu(p, rcu);
}

/**
 * tipc_dist_queue_purge - remove deferred updates from a node that went down
 */
static void tipc_dist_queue_purge(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct distr_queue_item *e, *tmp;

	spin_lock_bh(&tn->nametbl_lock);
	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
		if (e->node != addr)
			continue;
		list_del(&e->next);
		kfree(e);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

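/**
 * tipc_publ_notify - purge all publications issued by a failed node
 *
 * Removes each publication on nsub_list from the name table, discards any
 * deferred updates queued from the failed node, and adjusts the replicast
 * destination count if the peer did not support TIPC_NAMED_BCAST.
 */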
void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
		      u32 addr, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);

	struct publication *publ, *tmp;

	list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
		tipc_publ_purge(net, publ, addr);
	tipc_dist_queue_purge(net, addr);
	spin_lock_bh(&tn->nametbl_lock);
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests--;
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_update_nametbl - try to process a nametable update and notify
 *			 subscribers
 *
 * tipc_nametbl_lock must be held.
 * Returns true if the update was successfully applied, otherwise false.
 */
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
				u32 node, u32 dtype)
{
	struct publication *p = NULL;
	u32 lower = ntohl(i->lower);
	u32 upper = ntohl(i->upper);
	u32 type = ntohl(i->type);
	u32 port = ntohl(i->port);
	u32 key = ntohl(i->key);

	if (dtype == PUBLICATION) {
		p = tipc_nametbl_insert_publ(net, type, lower, upper,
					     TIPC_CLUSTER_SCOPE, node,
					     port, key);
		if (p) {
			tipc_node_subscribe(net, &p->binding_node, node);
			return true;
		}
	} else if (dtype == WITHDRAWAL) {
		p = tipc_nametbl_remove_publ(net, type, lower,
					     upper, node, key);
		if (p) {
			tipc_node_unsubscribe(net, &p->binding_node, node);
			kfree_rcu(p, rcu);
			return true;
		}
		pr_warn_ratelimited("Failed to remove binding %u,%u from %x\n",
				    type, lower, node);
	} else {
		pr_warn("Unrecognized name table message received\n");
	}
	return false;
}

static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
					  u16 *rcv_nxt, bool *open)
{
	struct sk_buff *skb, *tmp;
	struct tipc_msg *hdr;
	u16 seqno;

	skb_queue_walk_safe(namedq, skb, tmp) {
		skb_linearize(skb);
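		/* Note: the return value of skb_linearize() is not checked
		 * here; the v6.13.7 version above unlinks and frees the skb
		 * if linearization fails.
		 */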
		hdr = buf_msg(skb);
		seqno = msg_named_seqno(hdr);
		if (msg_is_last_bulk(hdr)) {
			*rcv_nxt = seqno;
			*open = true;
		}

		if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
			__skb_unlink(skb, namedq);
			return skb;
		}

		if (*open && (*rcv_nxt == seqno)) {
			(*rcv_nxt)++;
			__skb_unlink(skb, namedq);
			return skb;
		}

		if (less(seqno, *rcv_nxt)) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
	}
	return NULL;
}

/**
 * tipc_named_rcv - process name table update messages sent by another node
 */
void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
		    u16 *rcv_nxt, bool *open)
{
	struct tipc_net *tn = tipc_net(net);
	struct distr_item *item;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	u32 count, node;

	spin_lock_bh(&tn->nametbl_lock);
	while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
		hdr = buf_msg(skb);
		node = msg_orignode(hdr);
		item = (struct distr_item *)msg_data(hdr);
		count = msg_data_sz(hdr) / ITEM_SIZE;
		while (count--) {
			tipc_update_nametbl(net, item, node, msg_type(hdr));
			item++;
		}
		kfree_skb(skb);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_named_reinit - re-initialize local publications
 *
 * This routine is called whenever TIPC networking is enabled.
 * All name table entries published by this node are updated to reflect
 * the node's new network address.
 */
void tipc_named_reinit(struct net *net)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct publication *publ;
	u32 self = tipc_own_addr(net);

	spin_lock_bh(&tn->nametbl_lock);

	list_for_each_entry_rcu(publ, &nt->node_scope, binding_node)
		publ->node = self;
	list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node)
		publ->node = self;
	nt->rc_dests = 0;
	spin_unlock_bh(&tn->nametbl_lock);
}