Loading...
1/*
2 * net/tipc/name_distr.c: TIPC name distribution code
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, 2010-2011, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "link.h"
39#include "name_distr.h"
40
#define ITEM_SIZE sizeof(struct distr_item)	/* size of one on-wire item */

/**
 * struct distr_item - publication info distributed to other nodes
 * @type: name sequence type
 * @lower: name sequence lower bound
 * @upper: name sequence upper bound
 * @ref: publishing port reference
 * @key: publication key
 *
 * ===> All fields are stored in network byte order. <===
 *
 * First 3 fields identify (name or) name sequence being published.
 * Reference field uniquely identifies port that published name sequence.
 * Key field uniquely identifies publication, in the event a port has
 * multiple publications of the same name sequence.
 *
 * Note: There is no field that identifies the publishing node because it is
 * the same for all items contained within a publication message.
 */

/* Wire format: five consecutive 32-bit big-endian words (ITEM_SIZE bytes);
 * field order must not be changed
 */
struct distr_item {
	__be32 type;
	__be32 lower;
	__be32 upper;
	__be32 ref;
	__be32 key;
};
69
/**
 * List of externally visible publications by this node --
 * that is, all publications having scope > TIPC_NODE_SCOPE.
 */

static LIST_HEAD(publ_root);
static u32 publ_cnt;		/* number of publications on publ_root */
77
78/**
79 * publ_to_item - add publication info to a publication message
80 */
81
82static void publ_to_item(struct distr_item *i, struct publication *p)
83{
84 i->type = htonl(p->type);
85 i->lower = htonl(p->lower);
86 i->upper = htonl(p->upper);
87 i->ref = htonl(p->ref);
88 i->key = htonl(p->key);
89}
90
91/**
92 * named_prepare_buf - allocate & initialize a publication message
93 */
94
95static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
96{
97 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
98 struct tipc_msg *msg;
99
100 if (buf != NULL) {
101 msg = buf_msg(buf);
102 tipc_msg_init(msg, NAME_DISTRIBUTOR, type, INT_H_SIZE, dest);
103 msg_set_size(msg, INT_H_SIZE + size);
104 }
105 return buf;
106}
107
108static void named_cluster_distribute(struct sk_buff *buf)
109{
110 struct sk_buff *buf_copy;
111 struct tipc_node *n_ptr;
112
113 list_for_each_entry(n_ptr, &tipc_node_list, list) {
114 if (tipc_node_active_links(n_ptr)) {
115 buf_copy = skb_copy(buf, GFP_ATOMIC);
116 if (!buf_copy)
117 break;
118 msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
119 tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
120 }
121 }
122
123 buf_discard(buf);
124}
125
126/**
127 * tipc_named_publish - tell other nodes about a new publication by this node
128 */
129
130void tipc_named_publish(struct publication *publ)
131{
132 struct sk_buff *buf;
133 struct distr_item *item;
134
135 list_add_tail(&publ->local_list, &publ_root);
136 publ_cnt++;
137
138 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
139 if (!buf) {
140 warn("Publication distribution failure\n");
141 return;
142 }
143
144 item = (struct distr_item *)msg_data(buf_msg(buf));
145 publ_to_item(item, publ);
146 named_cluster_distribute(buf);
147}
148
149/**
150 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
151 */
152
153void tipc_named_withdraw(struct publication *publ)
154{
155 struct sk_buff *buf;
156 struct distr_item *item;
157
158 list_del(&publ->local_list);
159 publ_cnt--;
160
161 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
162 if (!buf) {
163 warn("Withdrawal distribution failure\n");
164 return;
165 }
166
167 item = (struct distr_item *)msg_data(buf_msg(buf));
168 publ_to_item(item, publ);
169 named_cluster_distribute(buf);
170}
171
/**
 * tipc_named_node_up - tell specified node about all publications by this node
 *
 * Packs all externally visible publications into one or more PUBLICATION
 * messages and unicasts them to @node.
 */

void tipc_named_node_up(unsigned long node)
{
	struct publication *publ;
	struct distr_item *item = NULL;
	struct sk_buff *buf = NULL;
	u32 left = 0;
	u32 rest;
	u32 max_item_buf;

	read_lock_bh(&tipc_nametbl_lock);
	/* Largest payload that still holds a whole number of items */
	max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
	max_item_buf *= ITEM_SIZE;
	/* Total payload still to be sent, possibly across several messages */
	rest = publ_cnt * ITEM_SIZE;

	list_for_each_entry(publ, &publ_root, local_list) {
		if (!buf) {
			/* Start a new message sized for the remaining items */
			left = (rest <= max_item_buf) ? rest : max_item_buf;
			rest -= left;
			buf = named_prepare_buf(PUBLICATION, left, node);
			if (!buf) {
				warn("Bulk publication distribution failure\n");
				goto exit;
			}
			item = (struct distr_item *)msg_data(buf_msg(buf));
		}
		publ_to_item(item, publ);
		item++;
		left -= ITEM_SIZE;
		/* Message full (or list exhausted): send it to the node */
		if (!left) {
			msg_set_link_selector(buf_msg(buf), node);
			tipc_link_send(buf, node, node);
			buf = NULL;
		}
	}
exit:
	read_unlock_bh(&tipc_nametbl_lock);
}
213
/**
 * named_purge_publ - remove publication associated with a failed node
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes publication structure from name table & deletes it.
 * In rare cases the link may have come back up again when this
 * function is called, and we have two items representing the same
 * publication. Nudge this item's key to distinguish it from the other.
 */

static void named_purge_publ(struct publication *publ)
{
	struct publication *p;

	write_lock_bh(&tipc_nametbl_lock);
	/* Nudge the key so a freshly re-learned duplicate of this
	 * publication (see note above) is not removed by mistake
	 */
	publ->key += 1222345;
	p = tipc_nametbl_remove_publ(publ->type, publ->lower,
				     publ->node, publ->ref, publ->key);
	if (p)
		tipc_nodesub_unsubscribe(&p->subscr);
	write_unlock_bh(&tipc_nametbl_lock);

	/* The removed entry should be publ itself; anything else means the
	 * name table and the node subscription list have diverged
	 */
	if (p != publ) {
		err("Unable to remove publication from failed node\n"
		    "(type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
		    publ->type, publ->lower, publ->node, publ->ref, publ->key);
	}

	kfree(p);
}
244
/**
 * tipc_named_recv - process name table update message sent by another node
 *
 * The message payload is a sequence of distr_item entries; each is applied
 * to the local name table as an insertion (PUBLICATION) or a removal
 * (WITHDRAWAL).  All item fields arrive in network byte order.
 */

void tipc_named_recv(struct sk_buff *buf)
{
	struct publication *publ;
	struct tipc_msg *msg = buf_msg(buf);
	struct distr_item *item = (struct distr_item *)msg_data(msg);
	u32 count = msg_data_sz(msg) / ITEM_SIZE;

	write_lock_bh(&tipc_nametbl_lock);
	while (count--) {
		if (msg_type(msg) == PUBLICATION) {
			publ = tipc_nametbl_insert_publ(ntohl(item->type),
							ntohl(item->lower),
							ntohl(item->upper),
							TIPC_CLUSTER_SCOPE,
							msg_orignode(msg),
							ntohl(item->ref),
							ntohl(item->key));
			if (publ) {
				/* Arrange for the entry to be purged again
				 * if the publishing node fails
				 */
				tipc_nodesub_subscribe(&publ->subscr,
						       msg_orignode(msg),
						       publ,
						       (net_ev_handler)
						       named_purge_publ);
			}
		} else if (msg_type(msg) == WITHDRAWAL) {
			publ = tipc_nametbl_remove_publ(ntohl(item->type),
							ntohl(item->lower),
							msg_orignode(msg),
							ntohl(item->ref),
							ntohl(item->key));

			if (publ) {
				tipc_nodesub_unsubscribe(&publ->subscr);
				kfree(publ);
			} else {
				err("Unable to remove publication by node 0x%x\n"
				    "(type=%u, lower=%u, ref=%u, key=%u)\n",
				    msg_orignode(msg),
				    ntohl(item->type), ntohl(item->lower),
				    ntohl(item->ref), ntohl(item->key));
			}
		} else {
			warn("Unrecognized name table message received\n");
		}
		item++;
	}
	write_unlock_bh(&tipc_nametbl_lock);
	buf_discard(buf);
}
298
/**
 * tipc_named_reinit - re-initialize local publication list
 *
 * This routine is called whenever TIPC networking is (re)enabled.
 * All existing publications by this node that have "cluster" or "zone" scope
 * are updated to reflect the node's current network address.
 * (If the node's address is unchanged, the update loop terminates immediately.)
 */

void tipc_named_reinit(void)
{
	struct publication *publ;

	write_lock_bh(&tipc_nametbl_lock);
	list_for_each_entry(publ, &publ_root, local_list) {
		/* All list entries carry the same node address (see header
		 * note), so the first already-current entry means the
		 * remainder are current as well
		 */
		if (publ->node == tipc_own_addr)
			break;
		publ->node = tipc_own_addr;
	}
	write_unlock_bh(&tipc_nametbl_lock);
}
1/*
2 * net/tipc/name_distr.c: TIPC name distribution code
3 *
4 * Copyright (c) 2000-2006, 2014-2019, Ericsson AB
5 * Copyright (c) 2005, 2010-2011, Wind River Systems
6 * Copyright (c) 2020-2021, Red Hat Inc
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the names of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * Alternatively, this software may be distributed under the terms of the
22 * GNU General Public License ("GPL") version 2 as published by the Free
23 * Software Foundation.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include "core.h"
39#include "link.h"
40#include "name_distr.h"
41
/* Tunable via sysctl; presumably a timeout in ms used by the name
 * distributor -- TODO confirm against the TIPC sysctl documentation
 */
int sysctl_tipc_named_timeout __read_mostly = 2000;
43
44/**
45 * publ_to_item - add publication info to a publication message
46 * @p: publication info
47 * @i: location of item in the message
48 */
49static void publ_to_item(struct distr_item *i, struct publication *p)
50{
51 i->type = htonl(p->sr.type);
52 i->lower = htonl(p->sr.lower);
53 i->upper = htonl(p->sr.upper);
54 i->port = htonl(p->sk.ref);
55 i->key = htonl(p->key);
56}
57
58/**
59 * named_prepare_buf - allocate & initialize a publication message
60 * @net: the associated network namespace
61 * @type: message type
62 * @size: payload size
63 * @dest: destination node
64 *
65 * The buffer returned is of size INT_H_SIZE + payload size
66 */
67static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
68 u32 dest)
69{
70 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
71 u32 self = tipc_own_addr(net);
72 struct tipc_msg *msg;
73
74 if (buf != NULL) {
75 msg = buf_msg(buf);
76 tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
77 type, INT_H_SIZE, dest);
78 msg_set_size(msg, INT_H_SIZE + size);
79 }
80 return buf;
81}
82
83/**
84 * tipc_named_publish - tell other nodes about a new publication by this node
85 * @net: the associated network namespace
86 * @p: the new publication
87 */
88struct sk_buff *tipc_named_publish(struct net *net, struct publication *p)
89{
90 struct name_table *nt = tipc_name_table(net);
91 struct distr_item *item;
92 struct sk_buff *skb;
93
94 if (p->scope == TIPC_NODE_SCOPE) {
95 list_add_tail_rcu(&p->binding_node, &nt->node_scope);
96 return NULL;
97 }
98 write_lock_bh(&nt->cluster_scope_lock);
99 list_add_tail(&p->binding_node, &nt->cluster_scope);
100 write_unlock_bh(&nt->cluster_scope_lock);
101 skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
102 if (!skb) {
103 pr_warn("Publication distribution failure\n");
104 return NULL;
105 }
106 msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
107 msg_set_non_legacy(buf_msg(skb));
108 item = (struct distr_item *)msg_data(buf_msg(skb));
109 publ_to_item(item, p);
110 return skb;
111}
112
113/**
114 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
115 * @net: the associated network namespace
116 * @p: the withdrawn publication
117 */
118struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *p)
119{
120 struct name_table *nt = tipc_name_table(net);
121 struct distr_item *item;
122 struct sk_buff *skb;
123
124 write_lock_bh(&nt->cluster_scope_lock);
125 list_del(&p->binding_node);
126 write_unlock_bh(&nt->cluster_scope_lock);
127 if (p->scope == TIPC_NODE_SCOPE)
128 return NULL;
129
130 skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
131 if (!skb) {
132 pr_warn("Withdrawal distribution failure\n");
133 return NULL;
134 }
135 msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
136 msg_set_non_legacy(buf_msg(skb));
137 item = (struct distr_item *)msg_data(buf_msg(skb));
138 publ_to_item(item, p);
139 return skb;
140}
141
142/**
143 * named_distribute - prepare name info for bulk distribution to another node
144 * @net: the associated network namespace
145 * @list: list of messages (buffers) to be returned from this function
146 * @dnode: node to be updated
147 * @pls: linked list of publication items to be packed into buffer chain
148 * @seqno: sequence number for this message
149 */
150static void named_distribute(struct net *net, struct sk_buff_head *list,
151 u32 dnode, struct list_head *pls, u16 seqno)
152{
153 struct publication *publ;
154 struct sk_buff *skb = NULL;
155 struct distr_item *item = NULL;
156 u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
157 ITEM_SIZE) * ITEM_SIZE;
158 u32 msg_rem = msg_dsz;
159 struct tipc_msg *hdr;
160
161 list_for_each_entry(publ, pls, binding_node) {
162 /* Prepare next buffer: */
163 if (!skb) {
164 skb = named_prepare_buf(net, PUBLICATION, msg_rem,
165 dnode);
166 if (!skb) {
167 pr_warn("Bulk publication failure\n");
168 return;
169 }
170 hdr = buf_msg(skb);
171 msg_set_bc_ack_invalid(hdr, true);
172 msg_set_bulk(hdr);
173 msg_set_non_legacy(hdr);
174 item = (struct distr_item *)msg_data(hdr);
175 }
176
177 /* Pack publication into message: */
178 publ_to_item(item, publ);
179 item++;
180 msg_rem -= ITEM_SIZE;
181
182 /* Append full buffer to list: */
183 if (!msg_rem) {
184 __skb_queue_tail(list, skb);
185 skb = NULL;
186 msg_rem = msg_dsz;
187 }
188 }
189 if (skb) {
190 hdr = buf_msg(skb);
191 msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
192 skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
193 __skb_queue_tail(list, skb);
194 }
195 hdr = buf_msg(skb_peek_tail(list));
196 msg_set_last_bulk(hdr);
197 msg_set_named_seqno(hdr, seqno);
198}
199
/**
 * tipc_named_node_up - tell specified node about all publications by this node
 * @net: the associated network namespace
 * @dnode: destination node
 * @capabilities: peer node's capabilities
 */
void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct sk_buff_head head;
	u16 seqno;

	__skb_queue_head_init(&head);
	spin_lock_bh(&tn->nametbl_lock);
	/* A peer without broadcast capability is counted as an extra
	 * replicast destination for subsequent name table updates
	 */
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests++;
	/* Snapshot the sequence number the bulk messages will carry */
	seqno = nt->snd_nxt;
	spin_unlock_bh(&tn->nametbl_lock);

	/* Hold the read lock across build + xmit so the cluster-scope list
	 * cannot change while it is being packed for this peer
	 */
	read_lock_bh(&nt->cluster_scope_lock);
	named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
	tipc_node_xmit(net, &head, dnode, 0);
	read_unlock_bh(&nt->cluster_scope_lock);
}
225
/**
 * tipc_publ_purge - remove publication associated with a failed node
 * @net: the associated network namespace
 * @p: the publication to remove
 * @addr: failed node's address
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes publication structure from name table & deletes it.
 */
static void tipc_publ_purge(struct net *net, struct publication *p, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct publication *_p;
	struct tipc_uaddr ua;

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, p->scope, p->sr.type,
		   p->sr.lower, p->sr.upper);
	spin_lock_bh(&tn->nametbl_lock);
	/* Remove the name table entry and cancel its node subscription */
	_p = tipc_nametbl_remove_publ(net, &ua, &p->sk, p->key);
	if (_p)
		tipc_node_unsubscribe(net, &_p->binding_node, addr);
	spin_unlock_bh(&tn->nametbl_lock);
	/* Defer the free past a grace period -- RCU readers may still
	 * hold a reference to the entry
	 */
	if (_p)
		kfree_rcu(_p, rcu);
}
251
/**
 * tipc_publ_notify - purge all publications issued by a failed node
 * @net: the associated network namespace
 * @nsub_list: list of publications associated with the failed node
 * @addr: failed node's address
 * @capabilities: failed node's capabilities
 */
void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
		      u32 addr, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);

	struct publication *publ, *tmp;

	/* _safe variant: tipc_publ_purge() removes entries as we walk */
	list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
		tipc_publ_purge(net, publ, addr);
	spin_lock_bh(&tn->nametbl_lock);
	/* Undo the replicast-destination count added in tipc_named_node_up() */
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests--;
	spin_unlock_bh(&tn->nametbl_lock);
}
267
/**
 * tipc_update_nametbl - try to process a nametable update and notify
 * subscribers
 * @net: the associated network namespace
 * @i: location of item in the message
 * @node: node address
 * @dtype: name distributor message type
 *
 * tipc_nametbl_lock must be held.
 * Return: true if the update was applied, otherwise false.
 */
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
				u32 node, u32 dtype)
{
	struct publication *p = NULL;
	struct tipc_socket_addr sk;
	struct tipc_uaddr ua;
	u32 key = ntohl(i->key);

	/* All item fields arrive in network byte order */
	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_CLUSTER_SCOPE,
		   ntohl(i->type), ntohl(i->lower), ntohl(i->upper));
	sk.ref = ntohl(i->port);
	sk.node = node;

	if (dtype == PUBLICATION) {
		p = tipc_nametbl_insert_publ(net, &ua, &sk, key);
		if (p) {
			/* Subscribe so the binding is purged again if
			 * @node fails (see tipc_publ_notify())
			 */
			tipc_node_subscribe(net, &p->binding_node, node);
			return true;
		}
	} else if (dtype == WITHDRAWAL) {
		p = tipc_nametbl_remove_publ(net, &ua, &sk, key);
		if (p) {
			tipc_node_unsubscribe(net, &p->binding_node, node);
			/* RCU readers may still hold a reference */
			kfree_rcu(p, rcu);
			return true;
		}
		pr_warn_ratelimited("Failed to remove binding %u,%u from %u\n",
				    ua.sr.type, ua.sr.lower, node);
	} else {
		pr_warn_ratelimited("Unknown name table message received\n");
	}
	return false;
}
312
/**
 * tipc_named_dequeue - dequeue the next deliverable name table message
 * @namedq: queue of received name table update messages
 * @rcv_nxt: next expected sequence number, updated here
 * @open: set to true once the initial bulk exchange has completed
 *
 * Return: the next message to be processed, or NULL if none is
 * deliverable yet.
 */
static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
					  u16 *rcv_nxt, bool *open)
{
	struct sk_buff *skb, *tmp;
	struct tipc_msg *hdr;
	u16 seqno;

	spin_lock_bh(&namedq->lock);
	skb_queue_walk_safe(namedq, skb, tmp) {
		/* Header/data must be contiguous to be parsed; drop if not */
		if (unlikely(skb_linearize(skb))) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
		hdr = buf_msg(skb);
		seqno = msg_named_seqno(hdr);
		/* End of bulk phase: open the window for sequenced updates,
		 * starting from this message's sequence number
		 */
		if (msg_is_last_bulk(hdr)) {
			*rcv_nxt = seqno;
			*open = true;
		}

		/* Bulk and legacy messages are delivered unconditionally */
		if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		/* In-sequence incremental update: deliver and advance */
		if (*open && (*rcv_nxt == seqno)) {
			(*rcv_nxt)++;
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		/* Stale update (sequence number already passed): drop */
		if (less(seqno, *rcv_nxt)) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
		/* Otherwise: a future update -- leave it queued for later */
	}
	spin_unlock_bh(&namedq->lock);
	return NULL;
}
356
/**
 * tipc_named_rcv - process name table update messages sent by another node
 * @net: the associated network namespace
 * @namedq: queue to receive from
 * @rcv_nxt: store last received seqno here
 * @open: true once the initial bulk exchange has completed and sequenced
 * updates may be accepted (see tipc_named_dequeue())
 */
void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
		    u16 *rcv_nxt, bool *open)
{
	struct tipc_net *tn = tipc_net(net);
	struct distr_item *item;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	u32 count, node;

	spin_lock_bh(&tn->nametbl_lock);
	/* tipc_named_dequeue() yields messages in deliverable order only */
	while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
		hdr = buf_msg(skb);
		node = msg_orignode(hdr);
		item = (struct distr_item *)msg_data(hdr);
		/* Payload is a whole number of distr_item entries */
		count = msg_data_sz(hdr) / ITEM_SIZE;
		while (count--) {
			tipc_update_nametbl(net, item, node, msg_type(hdr));
			item++;
		}
		kfree_skb(skb);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}
387
/**
 * tipc_named_reinit - re-initialize local publications
 * @net: the associated network namespace
 *
 * This routine is called whenever TIPC networking is enabled.
 * All name table entries published by this node are updated to reflect
 * the node's new network address.
 */
void tipc_named_reinit(struct net *net)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct publication *p;
	u32 self = tipc_own_addr(net);

	spin_lock_bh(&tn->nametbl_lock);

	/* Stamp the node's (possibly new) own address into every binding */
	list_for_each_entry_rcu(p, &nt->node_scope, binding_node)
		p->sk.node = self;
	list_for_each_entry_rcu(p, &nt->cluster_scope, binding_node)
		p->sk.node = self;
	/* NOTE(review): resets the replicast destination count -- assumes no
	 * peers are known at (re)enable time; confirm against callers
	 */
	nt->rc_dests = 0;
	spin_unlock_bh(&tn->nametbl_lock);
}