/*
 * net/tipc/name_distr.c: TIPC name distribution code
 *
 * Copyright (c) 2000-2006, 2014-2019, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * Copyright (c) 2020-2021, Red Hat Inc
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "name_distr.h"

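/* Name table update timeout in milliseconds, exposed via the
 * net.tipc.named_timeout sysctl.
 */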
int sysctl_tipc_named_timeout __read_mostly = 2000;

/**
 * publ_to_item - add publication info to a publication message
 * @p: publication info
 * @i: location of item in the message
 */
static void publ_to_item(struct distr_item *i, struct publication *p)
{
	i->type = htonl(p->sr.type);
	i->lower = htonl(p->sr.lower);
	i->upper = htonl(p->sr.upper);
	i->port = htonl(p->sk.ref);
	i->key = htonl(p->key);
}

/**
 * named_prepare_buf - allocate & initialize a publication message
 * @net: the associated network namespace
 * @type: message type
 * @size: payload size
 * @dest: destination node
 *
 * The buffer returned is of size INT_H_SIZE + payload size
 */
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
					 u32 dest)
{
	struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
	u32 self = tipc_own_addr(net);
	struct tipc_msg *msg;

	if (buf != NULL) {
		msg = buf_msg(buf);
		tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
			      type, INT_H_SIZE, dest);
		msg_set_size(msg, INT_H_SIZE + size);
	}
	return buf;
}

/**
 * tipc_named_publish - tell other nodes about a new publication by this node
 * @net: the associated network namespace
 * @p: the new publication
 */
struct sk_buff *tipc_named_publish(struct net *net, struct publication *p)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

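	/* Node-scope bindings are visible only on this node and are not
	 * distributed to peers.
	 */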
	if (p->scope == TIPC_NODE_SCOPE) {
		list_add_tail_rcu(&p->binding_node, &nt->node_scope);
		return NULL;
	}
	write_lock_bh(&nt->cluster_scope_lock);
	list_add_tail(&p->binding_node, &nt->cluster_scope);
	write_unlock_bh(&nt->cluster_scope_lock);
	skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Publication distribution failure\n");
		return NULL;
	}
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, p);
	return skb;
}

/**
 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
 * @net: the associated network namespace
 * @p: the withdrawn publication
 */
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *p)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

	write_lock_bh(&nt->cluster_scope_lock);
	list_del(&p->binding_node);
	write_unlock_bh(&nt->cluster_scope_lock);
	if (p->scope == TIPC_NODE_SCOPE)
		return NULL;

	skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Withdrawal distribution failure\n");
		return NULL;
	}
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, p);
	return skb;
}

/**
 * named_distribute - prepare name info for bulk distribution to another node
 * @net: the associated network namespace
 * @list: list of messages (buffers) to be returned from this function
 * @dnode: node to be updated
 * @pls: linked list of publication items to be packed into buffer chain
 * @seqno: sequence number for this message
 */
static void named_distribute(struct net *net, struct sk_buff_head *list,
			     u32 dnode, struct list_head *pls, u16 seqno)
{
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct distr_item *item = NULL;
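	/* msg_dsz: largest multiple of ITEM_SIZE that fits in one message
	 * payload after the INT_H_SIZE header.
	 */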
	u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
		       ITEM_SIZE) * ITEM_SIZE;
	u32 msg_rem = msg_dsz;
	struct tipc_msg *hdr;

	list_for_each_entry(publ, pls, binding_node) {
		/* Prepare next buffer: */
		if (!skb) {
			skb = named_prepare_buf(net, PUBLICATION, msg_rem,
						dnode);
			if (!skb) {
				pr_warn("Bulk publication failure\n");
				return;
			}
			hdr = buf_msg(skb);
			msg_set_bc_ack_invalid(hdr, true);
			msg_set_bulk(hdr);
			msg_set_non_legacy(hdr);
			item = (struct distr_item *)msg_data(hdr);
		}

		/* Pack publication into message: */
		publ_to_item(item, publ);
		item++;
		msg_rem -= ITEM_SIZE;

		/* Append full buffer to list: */
		if (!msg_rem) {
			__skb_queue_tail(list, skb);
			skb = NULL;
			msg_rem = msg_dsz;
		}
	}
	if (skb) {
		hdr = buf_msg(skb);
		msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
		skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
		__skb_queue_tail(list, skb);
	}
	hdr = buf_msg(skb_peek_tail(list));
	msg_set_last_bulk(hdr);
	msg_set_named_seqno(hdr, seqno);
}

/**
 * tipc_named_node_up - tell specified node about all publications by this node
 * @net: the associated network namespace
 * @dnode: destination node
 * @capabilities: peer node's capabilities
 */
void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct sk_buff_head head;
	u16 seqno;

	__skb_queue_head_init(&head);
	spin_lock_bh(&tn->nametbl_lock);
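	/* Peers that cannot receive broadcast name table updates must be
	 * reached by replicast; rc_dests counts how many such peers exist.
	 */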
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests++;
	seqno = nt->snd_nxt;
	spin_unlock_bh(&tn->nametbl_lock);

	read_lock_bh(&nt->cluster_scope_lock);
	named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
	tipc_node_xmit(net, &head, dnode, 0);
	read_unlock_bh(&nt->cluster_scope_lock);
}

/**
 * tipc_publ_purge - remove publication associated with a failed node
 * @net: the associated network namespace
 * @p: the publication to remove
 * @addr: failed node's address
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes publication structure from name table & deletes it.
 */
static void tipc_publ_purge(struct net *net, struct publication *p, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct publication *_p;
	struct tipc_uaddr ua;

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, p->scope, p->sr.type,
		   p->sr.lower, p->sr.upper);
	spin_lock_bh(&tn->nametbl_lock);
	_p = tipc_nametbl_remove_publ(net, &ua, &p->sk, p->key);
	if (_p)
		tipc_node_unsubscribe(net, &_p->binding_node, addr);
	spin_unlock_bh(&tn->nametbl_lock);
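	/* Defer the actual free until after an RCU grace period; readers
	 * may still be traversing the entry.
	 */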
	if (_p)
		kfree_rcu(_p, rcu);
}

void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
		      u32 addr, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);

	struct publication *publ, *tmp;

	list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
		tipc_publ_purge(net, publ, addr);
	spin_lock_bh(&tn->nametbl_lock);
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests--;
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_update_nametbl - try to process a nametable update and notify
 * subscribers
 * @net: the associated network namespace
 * @i: location of item in the message
 * @node: node address
 * @dtype: name distributor message type
 *
 * tipc_nametbl_lock must be held.
 * Return: true if the name table was updated, otherwise false.
 */
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
				u32 node, u32 dtype)
{
	struct publication *p = NULL;
	struct tipc_socket_addr sk;
	struct tipc_uaddr ua;
	u32 key = ntohl(i->key);

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_CLUSTER_SCOPE,
		   ntohl(i->type), ntohl(i->lower), ntohl(i->upper));
	sk.ref = ntohl(i->port);
	sk.node = node;

	if (dtype == PUBLICATION) {
		p = tipc_nametbl_insert_publ(net, &ua, &sk, key);
		if (p) {
			tipc_node_subscribe(net, &p->binding_node, node);
			return true;
		}
	} else if (dtype == WITHDRAWAL) {
		p = tipc_nametbl_remove_publ(net, &ua, &sk, key);
		if (p) {
			tipc_node_unsubscribe(net, &p->binding_node, node);
			kfree_rcu(p, rcu);
			return true;
		}
		pr_warn_ratelimited("Failed to remove binding %u,%u from %u\n",
				    ua.sr.type, ua.sr.lower, node);
	} else {
		pr_warn_ratelimited("Unknown name table message received\n");
	}
	return false;
}

static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
					  u16 *rcv_nxt, bool *open)
{
	struct sk_buff *skb, *tmp;
	struct tipc_msg *hdr;
	u16 seqno;

	spin_lock_bh(&namedq->lock);
	skb_queue_walk_safe(namedq, skb, tmp) {
		if (unlikely(skb_linearize(skb))) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
		hdr = buf_msg(skb);
		seqno = msg_named_seqno(hdr);
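		/* The last bulk message carries the seqno from which
		 * subsequent incremental updates continue; receiving it
		 * opens the gate for in-order delivery.
		 */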
		if (msg_is_last_bulk(hdr)) {
			*rcv_nxt = seqno;
			*open = true;
		}

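		/* Bulk and legacy messages are not subject to
		 * sequence-number ordering and are handed over directly.
		 */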
		if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

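		/* Once the gate is open, deliver incremental updates
		 * strictly in sequence; later ones stay queued, already
		 * delivered ones are dropped below.
		 */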
		if (*open && (*rcv_nxt == seqno)) {
			(*rcv_nxt)++;
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		if (less(seqno, *rcv_nxt)) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
	}
	spin_unlock_bh(&namedq->lock);
	return NULL;
}

/**
 * tipc_named_rcv - process name table update messages sent by another node
 * @net: the associated network namespace
 * @namedq: queue to receive from
 * @rcv_nxt: next expected name table update sequence number
 * @open: set once the last bulk message has been received, enabling in-order
 *        processing of subsequent incremental updates
 */
void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
		    u16 *rcv_nxt, bool *open)
{
	struct tipc_net *tn = tipc_net(net);
	struct distr_item *item;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	u32 count, node;

	spin_lock_bh(&tn->nametbl_lock);
	while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
		hdr = buf_msg(skb);
		node = msg_orignode(hdr);
		item = (struct distr_item *)msg_data(hdr);
		count = msg_data_sz(hdr) / ITEM_SIZE;
		while (count--) {
			tipc_update_nametbl(net, item, node, msg_type(hdr));
			item++;
		}
		kfree_skb(skb);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_named_reinit - re-initialize local publications
 * @net: the associated network namespace
 *
 * This routine is called whenever TIPC networking is enabled.
 * All name table entries published by this node are updated to reflect
 * the node's new network address.
 */
void tipc_named_reinit(struct net *net)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct publication *p;
	u32 self = tipc_own_addr(net);

	spin_lock_bh(&tn->nametbl_lock);

	list_for_each_entry_rcu(p, &nt->node_scope, binding_node)
		p->sk.node = self;
	list_for_each_entry_rcu(p, &nt->cluster_scope, binding_node)
		p->sk.node = self;
	nt->rc_dests = 0;
	spin_unlock_bh(&tn->nametbl_lock);
}