v6.13.7
/*
 * net/tipc/net.c: TIPC network routing code
 *
 * Copyright (c) 1995-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "net.h"
#include "name_distr.h"
#include "subscr.h"
#include "socket.h"
#include "node.h"
#include "bcast.h"
#include "link.h"
#include "netlink.h"
#include "monitor.h"

/*
 * The TIPC locking policy is designed to ensure very fine locking
 * granularity, permitting complete parallel access to individual
 * port and node/link instances. The code consists of four major
 * locking domains, each protected by its own disjoint set of locks.
 *
 * 1: The bearer level.
 *    The RTNL lock serializes bearer configuration on the update side,
 *    while the RCU lock is taken on the read side to keep bearer
 *    instances valid on both the transmission and reception paths.
 *
 * 2: The node and link level.
 *    All node instances are kept in two lists, tipc_node_list and
 *    node_htable. Both lists are protected by node_list_lock on the
 *    write side and guarded by the RCU lock on the read side. Note that
 *    a node instance is destroyed only when the TIPC module is removed,
 *    at which point no user can still be accessing it. Therefore, except
 *    when iterating the two lists under RCU protection, there is no need
 *    to hold the RCU lock while accessing a node instance elsewhere.
 *
 *    In addition, all members of the node structure, including the link
 *    instances, are protected by the node spin lock.
 *
 * 3: The transport level of the protocol.
 *    This consists of the structures port (and its user-level
 *    representations, such as user_port and tipc_sock), reference and
 *    tipc_user (port.c, reg.c, socket.c).
 *
 *    This layer has four different locks:
 *     - The tipc_port spin_lock. This protects each port instance from
 *       parallel data access and removal. Since we cannot place this
 *       lock in the port itself, it has been placed in the corresponding
 *       reference table entry, which has the same life cycle as the
 *       module. This entry is difficult to access from outside the TIPC
 *       core, however, so a pointer to the lock has been added to the
 *       port instance, to be used for unlocking only.
 *     - A read/write lock to protect the reference table itself (reg.c).
 *       (Nobody uses read-only access to this, so it could just as well
 *       be changed to a spin_lock.)
 *     - A spin lock to protect the registry of kernel/driver users (reg.c).
 *     - A global spin_lock (tipc_port_lock), whose only task is to ensure
 *       consistency where more than one port is involved in an operation,
 *       i.e., when a port is part of a linked list of ports.
 *       There are two such lists: 'port_list', which is used for
 *       management, and 'wait_list', which is used to queue ports during
 *       congestion.
 *
 *  4: The name table (name_table.c, name_distr.c, subscription.c)
 *     - There is one big read/write lock (tipc_nametbl_lock) protecting
 *       the overall name table structure. Nothing may be added to or
 *       removed from this structure without holding write access to it.
 *     - There is one local spin_lock per sub_sequence, which can be seen
 *       as a sub-domain of the tipc_nametbl_lock domain. It is used only
 *       for translation operations, and is needed because a translation
 *       moves the root of the 'publication' linked list between lookups.
 *       It is always used within the scope of a tipc_nametbl_lock(read).
 *     - A local spin_lock protecting the queue of subscriber events.
 */
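
/*
 * Editorial sketch (not part of the original file): a minimal
 * illustration of the read-side pattern described in locking domain 1
 * above. The function name is hypothetical; it assumes the __rcu
 * bearer_list array in struct tipc_net (core.h), as in mainline.
 */
#if 0	/* illustrative sketch only */
static void example_bearer_read_side(struct net *net, u32 bearer_id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_bearer *b;

	rcu_read_lock();
	b = rcu_dereference(tn->bearer_list[bearer_id]);
	if (b)
		pr_info("bearer %s is up\n", b->name);
	rcu_read_unlock();
}
#endif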

static void tipc_net_finalize(struct net *net, u32 addr);

int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
{
	if (tipc_own_id(net)) {
		pr_info("Cannot configure node identity twice\n");
		return -1;
	}
	pr_info("Started in network mode\n");

	if (node_id)
		tipc_set_node_id(net, node_id);
	if (addr)
		tipc_net_finalize(net, addr);
	return 0;
}

static void tipc_net_finalize(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_socket_addr sk = {0, addr};
	struct tipc_uaddr ua;

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_CLUSTER_SCOPE,
		   TIPC_NODE_STATE, addr, addr);

	if (cmpxchg(&tn->node_addr, 0, addr))
		return;
	tipc_set_node_addr(net, addr);
	tipc_named_reinit(net);
	tipc_sk_reinit(net);
	tipc_mon_reinit_self(net);
	tipc_nametbl_publish(net, &ua, &sk, addr);
}
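
/*
 * Editorial note: the cmpxchg() above makes finalization one-shot — it
 * atomically moves tn->node_addr from 0 to addr and returns the old
 * value, so a caller seeing a nonzero result lost the race and backs
 * off before republishing. The same claim-once idiom, reduced to a
 * hypothetical helper:
 */
#if 0	/* illustrative sketch only */
static u32 example_addr;

static bool example_claim_once(u32 addr)
{
	/* true only for the single caller that swaps 0 -> addr */
	return cmpxchg(&example_addr, 0, addr) == 0;
}
#endif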

void tipc_net_finalize_work(struct work_struct *work)
{
	struct tipc_net *tn = container_of(work, struct tipc_net, work);

	tipc_net_finalize(tipc_link_net(tn->bcl), tn->trial_addr);
}

void tipc_net_stop(struct net *net)
{
	if (!tipc_own_id(net))
		return;

	rtnl_lock();
	tipc_bearer_stop(net);
	tipc_node_stop(net);
	rtnl_unlock();

	pr_info("Left network mode\n");
}

static int __tipc_nl_add_net(struct net *net, struct tipc_nl_msg *msg)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u64 *w0 = (u64 *)&tn->node_id[0];
	u64 *w1 = (u64 *)&tn->node_id[8];
	struct nlattr *attrs;
	void *hdr;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NET_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NET);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id))
		goto attr_msg_full;
	if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID, *w0, 0))
		goto attr_msg_full;
	if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID_W1, *w1, 0))
		goto attr_msg_full;
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int err;
	int done = cb->args[0];
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	err = __tipc_nl_add_net(net, &msg);
	if (err)
		goto out;

	done = 1;
out:
	cb->args[0] = done;

	return skb->len;
}
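
/*
 * Editorial sketch (not from this file): how a userspace client might
 * request the dump served by tipc_nl_net_dump() above, using libnl-genl.
 * The family name "TIPCv2" is assumed to match TIPC_GENL_V2_NAME in the
 * uapi headers; error handling is elided and the default message
 * handlers are used.
 */
#if 0	/* illustrative sketch only (userspace, libnl) */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/tipc_netlink.h>

static int example_dump_net(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "TIPCv2");
	/* NLM_F_DUMP makes the kernel iterate via the dump callback */
	genl_send_simple(sk, family, TIPC_NL_NET_GET, 0, NLM_F_DUMP);
	nl_recvmsgs_default(sk);
	nl_socket_free(sk);
	return 0;
}
#endif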

int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	int err;

	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
					  info->attrs[TIPC_NLA_NET],
					  tipc_nl_net_policy, info->extack);
	if (err)
		return err;

	/* Can't change net id once TIPC has joined a network */
	if (tipc_own_addr(net))
		return -EPERM;

	if (attrs[TIPC_NLA_NET_ID]) {
		u32 val;

		val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
		if (val < 1 || val > 9999)
			return -EINVAL;

		tn->net_id = val;
	}

	if (attrs[TIPC_NLA_NET_ADDR]) {
		u32 addr;

		addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
		if (!addr)
			return -EINVAL;
		tn->legacy_addr_format = true;
		tipc_net_init(net, NULL, addr);
	}

	if (attrs[TIPC_NLA_NET_NODEID]) {
		u8 node_id[NODE_ID_LEN];
		u64 *w0 = (u64 *)&node_id[0];
		u64 *w1 = (u64 *)&node_id[8];

		if (!attrs[TIPC_NLA_NET_NODEID_W1])
			return -EINVAL;
		*w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
		*w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
		tipc_net_init(net, node_id, 0);
	}
	return 0;
}
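
/*
 * Editorial sketch (not from this file): the 128-bit node identity is
 * carried as two host-order u64 attributes, mirroring the w0/w1 split
 * above. A userspace sender (libnl) would pack a 16-byte identity the
 * same way; the helper name below is hypothetical.
 */
#if 0	/* illustrative sketch only (userspace, libnl) */
#include <string.h>
#include <netlink/genl/genl.h>
#include <linux/tipc_netlink.h>

static int example_put_node_id(struct nl_msg *msg, const __u8 id[16])
{
	struct nlattr *nest = nla_nest_start(msg, TIPC_NLA_NET);
	__u64 w0, w1;

	memcpy(&w0, &id[0], 8);	/* first word of the identity */
	memcpy(&w1, &id[8], 8);	/* second word of the identity */
	if (!nest ||
	    nla_put_u64(msg, TIPC_NLA_NET_NODEID, w0) ||
	    nla_put_u64(msg, TIPC_NLA_NET_NODEID_W1, w1))
		return -1;
	nla_nest_end(msg, nest);
	return 0;
}
#endif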

int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_net_set(skb, info);
	rtnl_unlock();

	return err;
}

static int __tipc_nl_addr_legacy_get(struct net *net, struct tipc_nl_msg *msg)
{
	struct tipc_net *tn = tipc_net(net);
	struct nlattr *attrs;
	void *hdr;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  0, TIPC_NL_ADDR_LEGACY_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_NET);
	if (!attrs)
		goto msg_full;

	if (tn->legacy_addr_format)
		if (nla_put_flag(msg->skb, TIPC_NLA_NET_ADDR_LEGACY))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
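
/*
 * Editorial note: TIPC_NLA_NET_ADDR_LEGACY is a flag attribute — it
 * carries no payload, and its mere presence signals that the node was
 * configured with a legacy 32-bit address. A receiver only needs a
 * presence test; the hypothetical helper below sketches that, assuming
 * attrs[] was filled by a nested parse of TIPC_NLA_NET.
 */
#if 0	/* illustrative sketch only */
static bool example_uses_legacy_addr(struct nlattr **attrs)
{
	return attrs[TIPC_NLA_NET_ADDR_LEGACY] != NULL;
}
#endif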

int tipc_nl_net_addr_legacy_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;
	struct sk_buff *rep;
	int err;

	rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	msg.skb = rep;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	err = __tipc_nl_addr_legacy_get(net, &msg);
	if (err) {
		nlmsg_free(msg.skb);
		return err;
	}

	return genlmsg_reply(msg.skb, info);
}
v4.6
/* License header: identical to the v6.13.7 listing above. */

#include "core.h"
#include "net.h"
#include "name_distr.h"
#include "subscr.h"
#include "socket.h"
#include "node.h"
#include "bcast.h"
#include "netlink.h"

/* Locking-policy comment: identical to the v6.13.7 listing above. */

int tipc_net_start(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	char addr_string[16];

	tn->own_addr = addr;
	tipc_named_reinit(net);
	tipc_sk_reinit(net);

	tipc_nametbl_publish(net, TIPC_CFG_SRV, tn->own_addr, tn->own_addr,
			     TIPC_ZONE_SCOPE, 0, tn->own_addr);

	pr_info("Started in network mode\n");
	pr_info("Own node address %s, network identity %u\n",
		tipc_addr_string_fill(addr_string, tn->own_addr),
		tn->net_id);
	return 0;
}
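
/*
 * Editorial note: at this point in TIPC's history the node address was
 * still the legacy 32-bit <zone.cluster.node> value exclusively, which
 * is what tipc_addr_string_fill() renders above. The uapi helper in
 * linux/tipc.h packs the three fields as sketched here:
 */
#if 0	/* illustrative sketch only */
static inline __u32 example_tipc_addr(unsigned int zone,
				      unsigned int cluster,
				      unsigned int node)
{
	/* zone: bits 31-24, cluster: bits 23-12, node: bits 11-0 */
	return (zone << 24) | (cluster << 12) | node;
}
#endif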

void tipc_net_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (!tn->own_addr)
		return;

	tipc_nametbl_withdraw(net, TIPC_CFG_SRV, tn->own_addr, 0,
			      tn->own_addr);
	rtnl_lock();
	tipc_bearer_stop(net);
	tipc_node_stop(net);
	rtnl_unlock();

	pr_info("Left network mode\n");
}

static int __tipc_nl_add_net(struct net *net, struct tipc_nl_msg *msg)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NET_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_NET);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id))
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int err;
	int done = cb->args[0];
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	err = __tipc_nl_add_net(net, &msg);
	if (err)
		goto out;

	done = 1;
out:
	cb->args[0] = done;

	return skb->len;
}

int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	int err;

	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
			       info->attrs[TIPC_NLA_NET],
			       tipc_nl_net_policy);
	if (err)
		return err;

	if (attrs[TIPC_NLA_NET_ID]) {
		u32 val;

		/* Can't change net id once TIPC has joined a network */
		if (tn->own_addr)
			return -EPERM;

		val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
		if (val < 1 || val > 9999)
			return -EINVAL;

		tn->net_id = val;
	}

	if (attrs[TIPC_NLA_NET_ADDR]) {
		u32 addr;

		/* Can't change net addr once TIPC has joined a network */
		if (tn->own_addr)
			return -EPERM;

		addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
		if (!tipc_addr_node_valid(addr))
			return -EINVAL;

		rtnl_lock();
		tipc_net_start(net, addr);
		rtnl_unlock();
	}

	return 0;
}