/*
 * net/tipc/name_distr.c: TIPC name distribution code
 *
 * Copyright (c) 2000-2006, 2014-2019, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * Copyright (c) 2020-2021, Red Hat Inc
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "name_distr.h"

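/* Tunable exposed as the net.tipc.named_timeout sysctl (milliseconds,
 * default 2000). It is consumed outside this file; presumably it bounds
 * how long deferred binding-table updates are retained before being
 * dropped.
 */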
int sysctl_tipc_named_timeout __read_mostly = 2000;

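/* On the wire, each publication or withdrawal is carried as a struct
 * distr_item (see name_distr.h): five __be32 fields - type, lower, upper,
 * port and key - i.e. ITEM_SIZE is 20 bytes, all in network byte order.
 */
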
/**
 * publ_to_item - add publication info to a publication message
 * @p: publication info
 * @i: location of item in the message
 */
static void publ_to_item(struct distr_item *i, struct publication *p)
{
	i->type = htonl(p->sr.type);
	i->lower = htonl(p->sr.lower);
	i->upper = htonl(p->sr.upper);
	i->port = htonl(p->sk.ref);
	i->key = htonl(p->key);
}

/**
 * named_prepare_buf - allocate & initialize a publication message
 * @net: the associated network namespace
 * @type: message type
 * @size: payload size
 * @dest: destination node
 *
 * The buffer returned is of size INT_H_SIZE + payload size
 */
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
					 u32 dest)
{
	struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
	u32 self = tipc_own_addr(net);
	struct tipc_msg *msg;

	if (buf != NULL) {
		msg = buf_msg(buf);
		tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
			      type, INT_H_SIZE, dest);
		msg_set_size(msg, INT_H_SIZE + size);
	}
	return buf;
}

/**
 * tipc_named_publish - tell other nodes about a new publication by this node
 * @net: the associated network namespace
 * @p: the new publication
 */
struct sk_buff *tipc_named_publish(struct net *net, struct publication *p)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

	if (p->scope == TIPC_NODE_SCOPE) {
		list_add_tail_rcu(&p->binding_node, &nt->node_scope);
		return NULL;
	}
	write_lock_bh(&nt->cluster_scope_lock);
	list_add_tail(&p->binding_node, &nt->cluster_scope);
	write_unlock_bh(&nt->cluster_scope_lock);
	skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Publication distribution failure\n");
		return NULL;
	}
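	/* Cluster-scope updates are stamped with a per-node sequence number
	 * (nt->snd_nxt) so receivers can apply them in publication order;
	 * the non-legacy flag marks the message as carrying such a sequence
	 * number, as opposed to updates from older peers without it.
	 */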
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, p);
	return skb;
}

/**
 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
 * @net: the associated network namespace
 * @p: the withdrawn publication
 */
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *p)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

	write_lock_bh(&nt->cluster_scope_lock);
	list_del(&p->binding_node);
	write_unlock_bh(&nt->cluster_scope_lock);
	if (p->scope == TIPC_NODE_SCOPE)
		return NULL;

	skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Withdrawal distribution failure\n");
		return NULL;
	}
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, p);
	return skb;
}

/**
 * named_distribute - prepare name info for bulk distribution to another node
 * @net: the associated network namespace
 * @list: list of messages (buffers) to be returned from this function
 * @dnode: node to be updated
 * @pls: linked list of publication items to be packed into buffer chain
 * @seqno: sequence number for this message
 */
static void named_distribute(struct net *net, struct sk_buff_head *list,
			     u32 dnode, struct list_head *pls, u16 seqno)
{
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct distr_item *item = NULL;
	u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
		       ITEM_SIZE) * ITEM_SIZE;
	u32 msg_rem = msg_dsz;
	struct tipc_msg *hdr;

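	/* msg_dsz is the per-message payload budget, rounded down to a whole
	 * number of items. For example, with a 1500-byte MTU, a 40-byte
	 * internal header (INT_H_SIZE) and 20-byte items, this gives
	 * ((1500 - 40) / 20) * 20 = 1460 bytes, i.e. 73 items per message.
	 */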
	list_for_each_entry(publ, pls, binding_node) {
		/* Prepare next buffer: */
		if (!skb) {
			skb = named_prepare_buf(net, PUBLICATION, msg_rem,
						dnode);
			if (!skb) {
				pr_warn("Bulk publication failure\n");
				return;
			}
			hdr = buf_msg(skb);
			msg_set_bc_ack_invalid(hdr, true);
			msg_set_bulk(hdr);
			msg_set_non_legacy(hdr);
			item = (struct distr_item *)msg_data(hdr);
		}

		/* Pack publication into message: */
		publ_to_item(item, publ);
		item++;
		msg_rem -= ITEM_SIZE;

		/* Append full buffer to list: */
		if (!msg_rem) {
			__skb_queue_tail(list, skb);
			skb = NULL;
			msg_rem = msg_dsz;
		}
	}
	if (skb) {
		hdr = buf_msg(skb);
		msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
		skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
		__skb_queue_tail(list, skb);
	}
	hdr = buf_msg(skb_peek_tail(list));
	msg_set_last_bulk(hdr);
	msg_set_named_seqno(hdr, seqno);
}

/**
 * tipc_named_node_up - tell specified node about all publications by this node
 * @net: the associated network namespace
 * @dnode: destination node
 * @capabilities: peer node's capabilities
 */
void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct sk_buff_head head;
	u16 seqno;

	__skb_queue_head_init(&head);
	spin_lock_bh(&tn->nametbl_lock);
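	/* Peers lacking TIPC_NAMED_BCAST cannot receive broadcast name table
	 * updates; rc_dests counts them so that, presumably, later updates
	 * can fall back to replicated unicast for such destinations.
	 */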
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests++;
	seqno = nt->snd_nxt;
	spin_unlock_bh(&tn->nametbl_lock);

	read_lock_bh(&nt->cluster_scope_lock);
	named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
	tipc_node_xmit(net, &head, dnode, 0);
	read_unlock_bh(&nt->cluster_scope_lock);
}

/**
 * tipc_publ_purge - remove publication associated with a failed node
 * @net: the associated network namespace
 * @p: the publication to remove
 * @addr: failed node's address
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes publication structure from name table & deletes it.
 */
static void tipc_publ_purge(struct net *net, struct publication *p, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct publication *_p;
	struct tipc_uaddr ua;

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, p->scope, p->sr.type,
		   p->sr.lower, p->sr.upper);
	spin_lock_bh(&tn->nametbl_lock);
	_p = tipc_nametbl_remove_publ(net, &ua, &p->sk, p->key);
	if (_p)
		tipc_node_unsubscribe(net, &_p->binding_node, addr);
	spin_unlock_bh(&tn->nametbl_lock);
	if (_p)
		kfree_rcu(_p, rcu);
}

void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
		      u32 addr, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);

	struct publication *publ, *tmp;

	list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
		tipc_publ_purge(net, publ, addr);
	spin_lock_bh(&tn->nametbl_lock);
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests--;
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_update_nametbl - try to process a nametable update and notify
 *                       subscribers
 * @net: the associated network namespace
 * @i: location of item in the message
 * @node: node address
 * @dtype: name distributor message type
 *
 * tipc_nametbl_lock must be held.
 * Return: true if the update was applied successfully, otherwise false.
 */
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
				u32 node, u32 dtype)
{
	struct publication *p = NULL;
	struct tipc_socket_addr sk;
	struct tipc_uaddr ua;
	u32 key = ntohl(i->key);

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_CLUSTER_SCOPE,
		   ntohl(i->type), ntohl(i->lower), ntohl(i->upper));
	sk.ref = ntohl(i->port);
	sk.node = node;

	if (dtype == PUBLICATION) {
		p = tipc_nametbl_insert_publ(net, &ua, &sk, key);
		if (p) {
			tipc_node_subscribe(net, &p->binding_node, node);
			return true;
		}
	} else if (dtype == WITHDRAWAL) {
		p = tipc_nametbl_remove_publ(net, &ua, &sk, key);
		if (p) {
			tipc_node_unsubscribe(net, &p->binding_node, node);
			kfree_rcu(p, rcu);
			return true;
		}
		pr_warn_ratelimited("Failed to remove binding %u,%u from %u\n",
				    ua.sr.type, ua.sr.lower, node);
	} else {
		pr_warn_ratelimited("Unknown name table message received\n");
	}
	return false;
}

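/* tipc_named_dequeue - pick the next name table update that can be processed
 *
 * Walks @namedq under its lock and returns the first buffer that is ready:
 * - buffers that cannot be linearized are dropped;
 * - the last message of a bulk transfer records its seqno in @rcv_nxt and
 *   sets @open, enabling in-order processing of subsequent updates;
 * - bulk and legacy messages are returned immediately, bypassing the
 *   sequence check;
 * - once @open is set, the message matching @rcv_nxt is returned and
 *   @rcv_nxt is advanced;
 * - messages with a seqno below @rcv_nxt are treated as stale and freed.
 * Anything else is left queued for a later call. Returns NULL if nothing
 * is ready.
 */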
static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
					  u16 *rcv_nxt, bool *open)
{
	struct sk_buff *skb, *tmp;
	struct tipc_msg *hdr;
	u16 seqno;

	spin_lock_bh(&namedq->lock);
	skb_queue_walk_safe(namedq, skb, tmp) {
		if (unlikely(skb_linearize(skb))) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
		hdr = buf_msg(skb);
		seqno = msg_named_seqno(hdr);
		if (msg_is_last_bulk(hdr)) {
			*rcv_nxt = seqno;
			*open = true;
		}

		if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		if (*open && (*rcv_nxt == seqno)) {
			(*rcv_nxt)++;
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		if (less(seqno, *rcv_nxt)) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
	}
	spin_unlock_bh(&namedq->lock);
	return NULL;
}

/**
 * tipc_named_rcv - process name table update messages sent by another node
 * @net: the associated network namespace
 * @namedq: queue to receive from
 * @rcv_nxt: next expected sequence number from this peer
 * @open: set once the peer's bulk transfer has completed; incremental
 *        updates are only dequeued, in order, while this is true
 */
void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
		    u16 *rcv_nxt, bool *open)
{
	struct tipc_net *tn = tipc_net(net);
	struct distr_item *item;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	u32 count, node;

	spin_lock_bh(&tn->nametbl_lock);
	while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
		hdr = buf_msg(skb);
		node = msg_orignode(hdr);
		item = (struct distr_item *)msg_data(hdr);
		count = msg_data_sz(hdr) / ITEM_SIZE;
		while (count--) {
			tipc_update_nametbl(net, item, node, msg_type(hdr));
			item++;
		}
		kfree_skb(skb);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_named_reinit - re-initialize local publications
 * @net: the associated network namespace
 *
 * This routine is called whenever TIPC networking is enabled.
 * All name table entries published by this node are updated to reflect
 * the node's new network address.
 */
void tipc_named_reinit(struct net *net)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct publication *p;
	u32 self = tipc_own_addr(net);

	spin_lock_bh(&tn->nametbl_lock);

	list_for_each_entry_rcu(p, &nt->node_scope, binding_node)
		p->sk.node = self;
	list_for_each_entry_rcu(p, &nt->cluster_scope, binding_node)
		p->sk.node = self;
	nt->rc_dests = 0;
	spin_unlock_bh(&tn->nametbl_lock);
}