v4.17: drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
 
  1/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  2 *
  3 * This program is free software; you can redistribute it and/or modify
  4 * it under the terms of the GNU General Public License version 2 and
  5 * only version 2 as published by the Free Software Foundation.
  6 *
  7 * This program is distributed in the hope that it will be useful,
  8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 10 * GNU General Public License for more details.
 11 *
 12 * RMNET configuration engine
 13 *
 14 */
 15
 16#include <net/sock.h>
 17#include <linux/module.h>
 18#include <linux/netlink.h>
 19#include <linux/netdevice.h>
 20#include "rmnet_config.h"
 21#include "rmnet_handlers.h"
 22#include "rmnet_vnd.h"
 23#include "rmnet_private.h"
 24
 25/* Locking scheme -
 26 * The shared resource which needs to be protected is realdev->rx_handler_data.
 27 * For the writer path, this is using rtnl_lock(). The writer paths are
 28 * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
 29 * paths are already called with rtnl_lock() acquired. There is also an
 30 * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
 31 * dereference here, we will need to use rtnl_dereference(). Dev list writing
 32 * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
 33 * For the reader path, real_dev->rx_handler_data is dereferenced in the TX / RX
 34 * path. We only need rcu_read_lock() for these scenarios. In these cases,
 35 * the rcu_read_lock() is held in __dev_queue_xmit() and
 36 * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
 37 * to get the relevant information. For dev list reading, we again acquire
 38 * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
 39 * We also use unregister_netdevice_many() to free all rmnet devices in
 40 * rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and free in
 41 * same context.
 42 */
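
The scheme above reduces to two access patterns, implemented later in this file by rmnet_get_port_rtnl() and rmnet_get_port(). A minimal sketch of the two lookups, for illustration only (the example_* helpers are not part of the kernel source):

/* Control path: callers such as rmnet_newlink()/rmnet_dellink() already hold rtnl_lock(). */
static struct rmnet_port *example_writer_lookup(struct net_device *real_dev)
{
	ASSERT_RTNL();					/* catches a missing rtnl_lock() */
	return rtnl_dereference(real_dev->rx_handler_data);
}

/* Data path: rcu_read_lock() is already held by __dev_queue_xmit() /
 * netif_receive_skb_internal(), so an RCU dereference is sufficient.
 */
static struct rmnet_port *example_reader_lookup(struct net_device *real_dev)
{
	return rcu_dereference_rtnl(real_dev->rx_handler_data);
}
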
 43
 44/* Local Definitions and Declarations */
 45
 46static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
 47	[IFLA_RMNET_MUX_ID]	= { .type = NLA_U16 },
 48	[IFLA_RMNET_FLAGS]	= { .len = sizeof(struct ifla_rmnet_flags) },
 49};
 50
 51static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
 52{
 53	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
 54}
 55
 56/* Needs rtnl lock */
 57static struct rmnet_port*
 58rmnet_get_port_rtnl(const struct net_device *real_dev)
 59{
 60	return rtnl_dereference(real_dev->rx_handler_data);
 61}
 62
 63static int rmnet_unregister_real_device(struct net_device *real_dev,
 64					struct rmnet_port *port)
 65{
 66	if (port->nr_rmnet_devs)
 67		return -EINVAL;
 68
 69	kfree(port);
 70
 71	netdev_rx_handler_unregister(real_dev);
 72
 73	/* release reference on real_dev */
 74	dev_put(real_dev);
 75
 76	netdev_dbg(real_dev, "Removed from rmnet\n");
 77	return 0;
 78}
 79
 80static int rmnet_register_real_device(struct net_device *real_dev)
 81{
 82	struct rmnet_port *port;
 83	int rc, entry;
 84
 85	ASSERT_RTNL();
 86
 87	if (rmnet_is_real_dev_registered(real_dev))
 88		return 0;
 89
 90	port = kzalloc(sizeof(*port), GFP_ATOMIC);
 91	if (!port)
 92		return -ENOMEM;
 93
 94	port->dev = real_dev;
 95	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
 96	if (rc) {
 97		kfree(port);
 98		return -EBUSY;
 99	}
100
101	/* hold on to real dev for MAP data */
102	dev_hold(real_dev);
103
104	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
105		INIT_HLIST_HEAD(&port->muxed_ep[entry]);
106
107	netdev_dbg(real_dev, "registered with rmnet\n");
108	return 0;
109}
110
111static void rmnet_unregister_bridge(struct net_device *dev,
112				    struct rmnet_port *port)
113{
114	struct rmnet_port *bridge_port;
115	struct net_device *bridge_dev;
116
117	if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
118		return;
119
120	/* bridge slave handling */
121	if (!port->nr_rmnet_devs) {
122		bridge_dev = port->bridge_ep;
123
124		bridge_port = rmnet_get_port_rtnl(bridge_dev);
125		bridge_port->bridge_ep = NULL;
126		bridge_port->rmnet_mode = RMNET_EPMODE_VND;
127	} else {
128		bridge_dev = port->bridge_ep;
129
130		bridge_port = rmnet_get_port_rtnl(bridge_dev);
131		rmnet_unregister_real_device(bridge_dev, bridge_port);
132	}
133}
134
135static int rmnet_newlink(struct net *src_net, struct net_device *dev,
136			 struct nlattr *tb[], struct nlattr *data[],
137			 struct netlink_ext_ack *extack)
138{
139	u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
140	struct net_device *real_dev;
141	int mode = RMNET_EPMODE_VND;
142	struct rmnet_endpoint *ep;
143	struct rmnet_port *port;
144	int err = 0;
145	u16 mux_id;
146
147	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
148	if (!real_dev || !dev)
149		return -ENODEV;
150
151	if (!data[IFLA_RMNET_MUX_ID])
152		return -EINVAL;
153
154	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
155	if (!ep)
156		return -ENOMEM;
157
158	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
159
160	err = rmnet_register_real_device(real_dev);
161	if (err)
162		goto err0;
163
164	port = rmnet_get_port_rtnl(real_dev);
165	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
166	if (err)
167		goto err1;
168
169	port->rmnet_mode = mode;
170
171	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
172
173	if (data[IFLA_RMNET_FLAGS]) {
174		struct ifla_rmnet_flags *flags;
175
176		flags = nla_data(data[IFLA_RMNET_FLAGS]);
177		data_format = flags->flags & flags->mask;
178	}
179
180	netdev_dbg(dev, "data format [0x%08X]\n", data_format);
181	port->data_format = data_format;
182
183	return 0;
184
185err1:
186	rmnet_unregister_real_device(real_dev, port);
187err0:
188	kfree(ep);
189	return err;
190}
191
192static void rmnet_dellink(struct net_device *dev, struct list_head *head)
193{
194	struct rmnet_priv *priv = netdev_priv(dev);
195	struct net_device *real_dev;
196	struct rmnet_endpoint *ep;
197	struct rmnet_port *port;
198	u8 mux_id;
199
200	real_dev = priv->real_dev;
201
202	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
203		return;
204
205	port = rmnet_get_port_rtnl(real_dev);
206
207	mux_id = rmnet_vnd_get_mux(dev);
208
209	ep = rmnet_get_endpoint(port, mux_id);
210	if (ep) {
211		hlist_del_init_rcu(&ep->hlnode);
212		rmnet_unregister_bridge(dev, port);
213		rmnet_vnd_dellink(mux_id, port, ep);
214		kfree(ep);
215	}
216	rmnet_unregister_real_device(real_dev, port);
217
218	unregister_netdevice_queue(dev, head);
219}
220
221static void rmnet_force_unassociate_device(struct net_device *dev)
222{
223	struct net_device *real_dev = dev;
224	struct hlist_node *tmp_ep;
225	struct rmnet_endpoint *ep;
226	struct rmnet_port *port;
227	unsigned long bkt_ep;
228	LIST_HEAD(list);
229
230	if (!rmnet_is_real_dev_registered(real_dev))
231		return;
232
233	ASSERT_RTNL();
234
235	port = rmnet_get_port_rtnl(dev);
236
237	rcu_read_lock();
238	rmnet_unregister_bridge(dev, port);
239
240	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
241		unregister_netdevice_queue(ep->egress_dev, &list);
242		rmnet_vnd_dellink(ep->mux_id, port, ep);
243
244		hlist_del_init_rcu(&ep->hlnode);
245		kfree(ep);
246	}
247
248	rcu_read_unlock();
249	unregister_netdevice_many(&list);
250
251	rmnet_unregister_real_device(real_dev, port);
252}
253
254static int rmnet_config_notify_cb(struct notifier_block *nb,
255				  unsigned long event, void *data)
256{
257	struct net_device *dev = netdev_notifier_info_to_dev(data);
258
259	if (!dev)
260		return NOTIFY_DONE;
261
262	switch (event) {
263	case NETDEV_UNREGISTER:
264		netdev_dbg(dev, "Kernel unregister\n");
265		rmnet_force_unassociate_device(dev);
266		break;
267
268	default:
269		break;
270	}
271
272	return NOTIFY_DONE;
273}
274
275static struct notifier_block rmnet_dev_notifier __read_mostly = {
276	.notifier_call = rmnet_config_notify_cb,
277};
278
279static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
280			       struct netlink_ext_ack *extack)
281{
282	u16 mux_id;
283
284	if (!data || !data[IFLA_RMNET_MUX_ID])
285		return -EINVAL;
286
287	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
288	if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
289		return -ERANGE;
290
291	return 0;
292}
293
294static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
295			    struct nlattr *data[],
296			    struct netlink_ext_ack *extack)
297{
298	struct rmnet_priv *priv = netdev_priv(dev);
299	struct net_device *real_dev;
300	struct rmnet_endpoint *ep;
301	struct rmnet_port *port;
302	u16 mux_id;
303
304	real_dev = __dev_get_by_index(dev_net(dev),
305				      nla_get_u32(tb[IFLA_LINK]));
306
307	if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
308		return -ENODEV;
309
310	port = rmnet_get_port_rtnl(real_dev);
311
312	if (data[IFLA_RMNET_MUX_ID]) {
313		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
314		ep = rmnet_get_endpoint(port, priv->mux_id);
315		if (!ep)
316			return -ENODEV;
317
318		hlist_del_init_rcu(&ep->hlnode);
319		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
320
321		ep->mux_id = mux_id;
322		priv->mux_id = mux_id;
323	}
324
325	if (data[IFLA_RMNET_FLAGS]) {
326		struct ifla_rmnet_flags *flags;
327
328		flags = nla_data(data[IFLA_RMNET_FLAGS]);
329		port->data_format = flags->flags & flags->mask;
330	}
331
332	return 0;
333}
334
335static size_t rmnet_get_size(const struct net_device *dev)
336{
337	return
338		/* IFLA_RMNET_MUX_ID */
339		nla_total_size(2) +
340		/* IFLA_RMNET_FLAGS */
341		nla_total_size(sizeof(struct ifla_rmnet_flags));
342}
343
344static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
345{
346	struct rmnet_priv *priv = netdev_priv(dev);
347	struct net_device *real_dev;
348	struct ifla_rmnet_flags f;
349	struct rmnet_port *port;
350
351	real_dev = priv->real_dev;
352
353	if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
354		goto nla_put_failure;
355
356	if (rmnet_is_real_dev_registered(real_dev)) {
357		port = rmnet_get_port_rtnl(real_dev);
358		f.flags = port->data_format;
359	} else {
360		f.flags = 0;
361	}
362
363	f.mask  = ~0;
364
365	if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
366		goto nla_put_failure;
367
368	return 0;
369
370nla_put_failure:
371	return -EMSGSIZE;
372}
373
374struct rtnl_link_ops rmnet_link_ops __read_mostly = {
375	.kind		= "rmnet",
376	.maxtype	= __IFLA_RMNET_MAX,
377	.priv_size	= sizeof(struct rmnet_priv),
378	.setup		= rmnet_vnd_setup,
379	.validate	= rmnet_rtnl_validate,
380	.newlink	= rmnet_newlink,
381	.dellink	= rmnet_dellink,
382	.get_size	= rmnet_get_size,
383	.changelink     = rmnet_changelink,
384	.policy		= rmnet_policy,
385	.fill_info	= rmnet_fill_info,
386};
387
388/* Needs either rcu_read_lock() or rtnl lock */
389struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
390{
391	if (rmnet_is_real_dev_registered(real_dev))
392		return rcu_dereference_rtnl(real_dev->rx_handler_data);
393	else
394		return NULL;
395}
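
A minimal sketch of how a data-path caller (for instance the rx handler registered in rmnet_register_real_device()) might use rmnet_get_port(); it assumes, as the locking comment at the top of the file states, that rcu_read_lock() is already held by the networking core, and the example_* name is illustrative only:

static rx_handler_result_t example_rx_peek(struct sk_buff *skb)
{
	struct rmnet_port *port;

	port = rmnet_get_port(skb->dev);	/* NULL if skb->dev is not an rmnet real dev */
	if (!port)
		return RX_HANDLER_PASS;

	/* port->data_format and port->muxed_ep[] may now be read under RCU */
	return RX_HANDLER_CONSUMED;
}
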
396
397struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
398{
399	struct rmnet_endpoint *ep;
400
401	hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
402		if (ep->mux_id == mux_id)
403			return ep;
404	}
405
406	return NULL;
407}
408
409int rmnet_add_bridge(struct net_device *rmnet_dev,
410		     struct net_device *slave_dev,
411		     struct netlink_ext_ack *extack)
412{
413	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
414	struct net_device *real_dev = priv->real_dev;
415	struct rmnet_port *port, *slave_port;
416	int err;
417
418	port = rmnet_get_port(real_dev);
419
 420	/* If there is more than one rmnet dev attached, it's probably being
 421	 * used for muxing. Skip the bridging in that case.
 422	 */
423	if (port->nr_rmnet_devs > 1)
424		return -EINVAL;
425
426	if (rmnet_is_real_dev_registered(slave_dev))
427		return -EBUSY;
428
429	err = rmnet_register_real_device(slave_dev);
430	if (err)
431		return -EBUSY;
432
433	slave_port = rmnet_get_port(slave_dev);
434	slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
435	slave_port->bridge_ep = real_dev;
436
437	port->rmnet_mode = RMNET_EPMODE_BRIDGE;
438	port->bridge_ep = slave_dev;
439
440	netdev_dbg(slave_dev, "registered with rmnet as slave\n");
441	return 0;
442}
443
444int rmnet_del_bridge(struct net_device *rmnet_dev,
445		     struct net_device *slave_dev)
446{
447	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
448	struct net_device *real_dev = priv->real_dev;
449	struct rmnet_port *port, *slave_port;
450
451	port = rmnet_get_port(real_dev);
452	port->rmnet_mode = RMNET_EPMODE_VND;
453	port->bridge_ep = NULL;
454
455	slave_port = rmnet_get_port(slave_dev);
456	rmnet_unregister_real_device(slave_dev, slave_port);
457
458	netdev_dbg(slave_dev, "removed from rmnet as slave\n");
459	return 0;
460}
461
462/* Startup/Shutdown */
463
464static int __init rmnet_init(void)
465{
466	int rc;
467
468	rc = register_netdevice_notifier(&rmnet_dev_notifier);
469	if (rc != 0)
470		return rc;
471
472	rc = rtnl_link_register(&rmnet_link_ops);
473	if (rc != 0) {
474		unregister_netdevice_notifier(&rmnet_dev_notifier);
475		return rc;
476	}
477	return rc;
478}
479
480static void __exit rmnet_exit(void)
481{
482	unregister_netdevice_notifier(&rmnet_dev_notifier);
483	rtnl_link_unregister(&rmnet_link_ops);
484}
485
486module_init(rmnet_init)
487module_exit(rmnet_exit)
488MODULE_LICENSE("GPL v2");

v6.13.7: drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  3 *
  4 * RMNET configuration engine
  5 */
  6
  7#include <net/sock.h>
  8#include <linux/module.h>
  9#include <linux/netlink.h>
 10#include <linux/netdevice.h>
 11#include "rmnet_config.h"
 12#include "rmnet_handlers.h"
 13#include "rmnet_vnd.h"
 14#include "rmnet_private.h"
 15#include "rmnet_map.h"
 16
 17/* Local Definitions and Declarations */
 18
 19static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
 20	[IFLA_RMNET_MUX_ID]	= { .type = NLA_U16 },
 21	[IFLA_RMNET_FLAGS]	= { .len = sizeof(struct ifla_rmnet_flags) },
 22};
 23
 24static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
 25{
 26	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
 27}
 28
 29/* Needs rtnl lock */
 30struct rmnet_port*
 31rmnet_get_port_rtnl(const struct net_device *real_dev)
 32{
 33	return rtnl_dereference(real_dev->rx_handler_data);
 34}
 35
 36static int rmnet_unregister_real_device(struct net_device *real_dev)
 37{
 38	struct rmnet_port *port = rmnet_get_port_rtnl(real_dev);
 39
 40	if (port->nr_rmnet_devs)
 41		return -EINVAL;
 42
 43	rmnet_map_tx_aggregate_exit(port);
 44
 45	netdev_rx_handler_unregister(real_dev);
 46
 47	kfree(port);
 48
 49	netdev_dbg(real_dev, "Removed from rmnet\n");
 50	return 0;
 51}
 52
 53static int rmnet_register_real_device(struct net_device *real_dev,
 54				      struct netlink_ext_ack *extack)
 55{
 56	struct rmnet_port *port;
 57	int rc, entry;
 58
 59	ASSERT_RTNL();
 60
 61	if (rmnet_is_real_dev_registered(real_dev)) {
 62		port = rmnet_get_port_rtnl(real_dev);
 63		if (port->rmnet_mode != RMNET_EPMODE_VND) {
 64			NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
 65			return -EINVAL;
 66		}
 67
 68		return 0;
 69	}
 70
 71	port = kzalloc(sizeof(*port), GFP_KERNEL);
 72	if (!port)
 73		return -ENOMEM;
 74
 75	port->dev = real_dev;
 76	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
 77	if (rc) {
 78		kfree(port);
 79		return -EBUSY;
 80	}
 81
 82	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
 83		INIT_HLIST_HEAD(&port->muxed_ep[entry]);
 84
 85	rmnet_map_tx_aggregate_init(port);
 86
 87	netdev_dbg(real_dev, "registered with rmnet\n");
 88	return 0;
 89}
 90
 91static void rmnet_unregister_bridge(struct rmnet_port *port)
 92{
 93	struct net_device *bridge_dev, *real_dev, *rmnet_dev;
 94	struct rmnet_port *real_port;
 95
 96	if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
 97		return;
 98
 99	rmnet_dev = port->rmnet_dev;
100	if (!port->nr_rmnet_devs) {
101		/* bridge device */
102		real_dev = port->bridge_ep;
103		bridge_dev = port->dev;
104
105		real_port = rmnet_get_port_rtnl(real_dev);
106		real_port->bridge_ep = NULL;
107		real_port->rmnet_mode = RMNET_EPMODE_VND;
108	} else {
109		/* real device */
110		bridge_dev = port->bridge_ep;
111
112		port->bridge_ep = NULL;
113		port->rmnet_mode = RMNET_EPMODE_VND;
114	}
115
116	netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
117	rmnet_unregister_real_device(bridge_dev);
118}
119
120static int rmnet_newlink(struct net *src_net, struct net_device *dev,
121			 struct nlattr *tb[], struct nlattr *data[],
122			 struct netlink_ext_ack *extack)
123{
124	u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
125	struct net_device *real_dev;
126	int mode = RMNET_EPMODE_VND;
127	struct rmnet_endpoint *ep;
128	struct rmnet_port *port;
129	int err = 0;
130	u16 mux_id;
131
132	if (!tb[IFLA_LINK]) {
133		NL_SET_ERR_MSG_MOD(extack, "link not specified");
134		return -EINVAL;
135	}
136
137	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
138	if (!real_dev) {
139		NL_SET_ERR_MSG_MOD(extack, "link does not exist");
140		return -ENODEV;
141	}
142
143	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
144	if (!ep)
145		return -ENOMEM;
146
147	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
148
149	err = rmnet_register_real_device(real_dev, extack);
150	if (err)
151		goto err0;
152
153	port = rmnet_get_port_rtnl(real_dev);
154	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep, extack);
155	if (err)
156		goto err1;
157
158	err = netdev_upper_dev_link(real_dev, dev, extack);
159	if (err < 0)
160		goto err2;
161
162	port->rmnet_mode = mode;
163	port->rmnet_dev = dev;
164
165	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
166
167	if (data[IFLA_RMNET_FLAGS]) {
168		struct ifla_rmnet_flags *flags;
169
170		flags = nla_data(data[IFLA_RMNET_FLAGS]);
171		data_format &= ~flags->mask;
172		data_format |= flags->flags & flags->mask;
173	}
174
175	netdev_dbg(dev, "data format [0x%08X]\n", data_format);
176	port->data_format = data_format;
177
178	return 0;
179
180err2:
181	unregister_netdevice(dev);
182	rmnet_vnd_dellink(mux_id, port, ep);
183err1:
184	rmnet_unregister_real_device(real_dev);
185err0:
186	kfree(ep);
187	return err;
188}
189
190static void rmnet_dellink(struct net_device *dev, struct list_head *head)
191{
192	struct rmnet_priv *priv = netdev_priv(dev);
193	struct net_device *real_dev, *bridge_dev;
194	struct rmnet_port *real_port, *bridge_port;
195	struct rmnet_endpoint *ep;
196	u8 mux_id = priv->mux_id;
197
198	real_dev = priv->real_dev;
199
200	if (!rmnet_is_real_dev_registered(real_dev))
201		return;
202
203	real_port = rmnet_get_port_rtnl(real_dev);
204	bridge_dev = real_port->bridge_ep;
205	if (bridge_dev) {
206		bridge_port = rmnet_get_port_rtnl(bridge_dev);
207		rmnet_unregister_bridge(bridge_port);
208	}
209
210	ep = rmnet_get_endpoint(real_port, mux_id);
211	if (ep) {
212		hlist_del_init_rcu(&ep->hlnode);
213		rmnet_vnd_dellink(mux_id, real_port, ep);
214		kfree(ep);
215	}
216
217	netdev_upper_dev_unlink(real_dev, dev);
218	rmnet_unregister_real_device(real_dev);
219	unregister_netdevice_queue(dev, head);
220}
221
222static void rmnet_force_unassociate_device(struct net_device *real_dev)
223{
224	struct hlist_node *tmp_ep;
225	struct rmnet_endpoint *ep;
226	struct rmnet_port *port;
227	unsigned long bkt_ep;
228	LIST_HEAD(list);
229
230	port = rmnet_get_port_rtnl(real_dev);
231
232	if (port->nr_rmnet_devs) {
233		/* real device */
234		rmnet_unregister_bridge(port);
235		hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
236			unregister_netdevice_queue(ep->egress_dev, &list);
237			netdev_upper_dev_unlink(real_dev, ep->egress_dev);
238			rmnet_vnd_dellink(ep->mux_id, port, ep);
239			hlist_del_init_rcu(&ep->hlnode);
240			kfree(ep);
241		}
242		rmnet_unregister_real_device(real_dev);
243		unregister_netdevice_many(&list);
244	} else {
245		rmnet_unregister_bridge(port);
246	}
247}
248
249static int rmnet_config_notify_cb(struct notifier_block *nb,
250				  unsigned long event, void *data)
251{
252	struct net_device *real_dev = netdev_notifier_info_to_dev(data);
253
254	if (!rmnet_is_real_dev_registered(real_dev))
255		return NOTIFY_DONE;
256
257	switch (event) {
258	case NETDEV_UNREGISTER:
259		netdev_dbg(real_dev, "Kernel unregister\n");
260		rmnet_force_unassociate_device(real_dev);
261		break;
262	case NETDEV_CHANGEMTU:
263		if (rmnet_vnd_validate_real_dev_mtu(real_dev))
264			return NOTIFY_BAD;
265		break;
266	default:
267		break;
268	}
269
270	return NOTIFY_DONE;
271}
272
273static struct notifier_block rmnet_dev_notifier __read_mostly = {
274	.notifier_call = rmnet_config_notify_cb,
275};
276
277static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
278			       struct netlink_ext_ack *extack)
279{
280	u16 mux_id;
281
282	if (!data || !data[IFLA_RMNET_MUX_ID]) {
283		NL_SET_ERR_MSG_MOD(extack, "MUX ID not specified");
284		return -EINVAL;
285	}
286
287	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
288	if (mux_id > (RMNET_MAX_LOGICAL_EP - 1)) {
289		NL_SET_ERR_MSG_MOD(extack, "invalid MUX ID");
290		return -ERANGE;
291	}
292
293	return 0;
294}
295
296static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
297			    struct nlattr *data[],
298			    struct netlink_ext_ack *extack)
299{
300	struct rmnet_priv *priv = netdev_priv(dev);
301	struct net_device *real_dev;
302	struct rmnet_port *port;
303	u16 mux_id;
304
305	if (!dev)
306		return -ENODEV;
307
308	real_dev = priv->real_dev;
309	if (!rmnet_is_real_dev_registered(real_dev))
310		return -ENODEV;
311
312	port = rmnet_get_port_rtnl(real_dev);
313
314	if (data[IFLA_RMNET_MUX_ID]) {
315		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
316
317		if (mux_id != priv->mux_id) {
318			struct rmnet_endpoint *ep;
319
320			ep = rmnet_get_endpoint(port, priv->mux_id);
321			if (!ep)
322				return -ENODEV;
323
324			if (rmnet_get_endpoint(port, mux_id)) {
325				NL_SET_ERR_MSG_MOD(extack,
326						   "MUX ID already exists");
327				return -EINVAL;
328			}
329
330			hlist_del_init_rcu(&ep->hlnode);
331			hlist_add_head_rcu(&ep->hlnode,
332					   &port->muxed_ep[mux_id]);
333
334			ep->mux_id = mux_id;
335			priv->mux_id = mux_id;
336		}
337	}
338
339	if (data[IFLA_RMNET_FLAGS]) {
340		struct ifla_rmnet_flags *flags;
341		u32 old_data_format;
342
343		old_data_format = port->data_format;
344		flags = nla_data(data[IFLA_RMNET_FLAGS]);
345		port->data_format &= ~flags->mask;
346		port->data_format |= flags->flags & flags->mask;
347
348		if (rmnet_vnd_update_dev_mtu(port, real_dev)) {
349			port->data_format = old_data_format;
350			NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
351			return -EINVAL;
352		}
353	}
354
355	return 0;
356}
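
The IFLA_RMNET_FLAGS handling above is a masked read-modify-write: only the bits selected by flags->mask are replaced, while any other bits already set in port->data_format survive the changelink. A small stand-alone illustration with hypothetical values (not taken from the source):

	u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION |
			  RMNET_FLAGS_INGRESS_MAP_COMMANDS;	/* current state */
	u32 mask  = RMNET_FLAGS_INGRESS_MAP_CKSUMV4;		/* bits the request touches */
	u32 flags = RMNET_FLAGS_INGRESS_MAP_CKSUMV4;		/* requested values of those bits */

	data_format &= ~mask;			/* clear only the bits being changed */
	data_format |= flags & mask;		/* set them to the requested values */
	/* the deaggregation and MAP-command bits remain set */
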
357
358static size_t rmnet_get_size(const struct net_device *dev)
359{
360	return
361		/* IFLA_RMNET_MUX_ID */
362		nla_total_size(2) +
363		/* IFLA_RMNET_FLAGS */
364		nla_total_size(sizeof(struct ifla_rmnet_flags));
365}
366
367static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
368{
369	struct rmnet_priv *priv = netdev_priv(dev);
370	struct net_device *real_dev;
371	struct ifla_rmnet_flags f;
372	struct rmnet_port *port;
373
374	real_dev = priv->real_dev;
375
376	if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
377		goto nla_put_failure;
378
379	if (rmnet_is_real_dev_registered(real_dev)) {
380		port = rmnet_get_port_rtnl(real_dev);
381		f.flags = port->data_format;
382	} else {
383		f.flags = 0;
384	}
385
386	f.mask  = ~0;
387
388	if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
389		goto nla_put_failure;
390
391	return 0;
392
393nla_put_failure:
394	return -EMSGSIZE;
395}
396
397struct rtnl_link_ops rmnet_link_ops __read_mostly = {
398	.kind		= "rmnet",
399	.maxtype	= IFLA_RMNET_MAX,
400	.priv_size	= sizeof(struct rmnet_priv),
401	.setup		= rmnet_vnd_setup,
402	.validate	= rmnet_rtnl_validate,
403	.newlink	= rmnet_newlink,
404	.dellink	= rmnet_dellink,
405	.get_size	= rmnet_get_size,
406	.changelink     = rmnet_changelink,
407	.policy		= rmnet_policy,
408	.fill_info	= rmnet_fill_info,
409};
410
411struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev)
412{
413	if (rmnet_is_real_dev_registered(real_dev))
414		return rcu_dereference_bh(real_dev->rx_handler_data);
415	else
416		return NULL;
417}
418
419struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
420{
421	struct rmnet_endpoint *ep;
422
423	hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
424		if (ep->mux_id == mux_id)
425			return ep;
426	}
427
428	return NULL;
429}
430
431int rmnet_add_bridge(struct net_device *rmnet_dev,
432		     struct net_device *slave_dev,
433		     struct netlink_ext_ack *extack)
434{
435	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
436	struct net_device *real_dev = priv->real_dev;
437	struct rmnet_port *port, *slave_port;
438	int err;
439
440	port = rmnet_get_port_rtnl(real_dev);
441
 442	/* If there is more than one rmnet dev attached, it's probably being
 443	 * used for muxing. Skip the bridging in that case.
 444	 */
445	if (port->nr_rmnet_devs > 1) {
446		NL_SET_ERR_MSG_MOD(extack, "more than one rmnet dev attached");
447		return -EINVAL;
448	}
449
450	if (port->rmnet_mode != RMNET_EPMODE_VND) {
451		NL_SET_ERR_MSG_MOD(extack, "more than one bridge dev attached");
452		return -EINVAL;
453	}
454
455	if (rmnet_is_real_dev_registered(slave_dev)) {
456		NL_SET_ERR_MSG_MOD(extack,
457				   "slave cannot be another rmnet dev");
458
459		return -EBUSY;
460	}
461
462	err = rmnet_register_real_device(slave_dev, extack);
463	if (err)
464		return -EBUSY;
465
466	err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
467					   extack);
468	if (err) {
469		rmnet_unregister_real_device(slave_dev);
470		return err;
471	}
472
473	slave_port = rmnet_get_port_rtnl(slave_dev);
474	slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
475	slave_port->bridge_ep = real_dev;
476	slave_port->rmnet_dev = rmnet_dev;
477
478	port->rmnet_mode = RMNET_EPMODE_BRIDGE;
479	port->bridge_ep = slave_dev;
480
481	netdev_dbg(slave_dev, "registered with rmnet as slave\n");
482	return 0;
483}
484
485int rmnet_del_bridge(struct net_device *rmnet_dev,
486		     struct net_device *slave_dev)
487{
488	struct rmnet_port *port = rmnet_get_port_rtnl(slave_dev);
489
490	rmnet_unregister_bridge(port);
491
492	netdev_dbg(slave_dev, "removed from rmnet as slave\n");
493	return 0;
494}
495
496/* Startup/Shutdown */
497
498static int __init rmnet_init(void)
499{
500	int rc;
501
502	rc = register_netdevice_notifier(&rmnet_dev_notifier);
503	if (rc != 0)
504		return rc;
505
506	rc = rtnl_link_register(&rmnet_link_ops);
507	if (rc != 0) {
508		unregister_netdevice_notifier(&rmnet_dev_notifier);
509		return rc;
510	}
511	return rc;
512}
513
514static void __exit rmnet_exit(void)
515{
516	rtnl_link_unregister(&rmnet_link_ops);
517	unregister_netdevice_notifier(&rmnet_dev_notifier);
518}
519
520module_init(rmnet_init)
521module_exit(rmnet_exit)
522MODULE_ALIAS_RTNL_LINK("rmnet");
523MODULE_DESCRIPTION("Qualcomm RmNet MAP driver");
524MODULE_LICENSE("GPL v2");