v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
   3
   4#include <linux/bitmap.h>
   5#include <linux/err.h>
   6#include <linux/errno.h>
   7#include <linux/debugfs.h>
   8#include <linux/fs.h>
   9#include <linux/init.h>
  10#include <linux/idr.h>
  11#include <linux/kernel.h>
  12#include <linux/module.h>
  13#include <linux/poll.h>
  14#include <linux/skbuff.h>
  15#include <linux/slab.h>
  16#include <linux/types.h>
  17#include <linux/uaccess.h>
  18#include <linux/termios.h>
  19#include <linux/wwan.h>
  20#include <net/rtnetlink.h>
  21#include <uapi/linux/wwan.h>
  22
  23/* Maximum number of minors in use */
  24#define WWAN_MAX_MINORS		(1 << MINORBITS)
  25
  26static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
  27static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
  28static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
  29static struct class *wwan_class;
  30static int wwan_major;
  31static struct dentry *wwan_debugfs_dir;
  32
  33#define to_wwan_dev(d) container_of(d, struct wwan_device, dev)
  34#define to_wwan_port(d) container_of(d, struct wwan_port, dev)
  35
  36/* WWAN port flags */
  37#define WWAN_PORT_TX_OFF	0
  38
  39/**
  40 * struct wwan_device - The structure that defines a WWAN device
  41 *
  42 * @id: WWAN device unique ID.
  43 * @dev: Underlying device.
  44 * @port_id: Current available port ID to pick.
  45 * @ops: wwan device ops
  46 * @ops_ctxt: context to pass to ops
  47 * @debugfs_dir:  WWAN device debugfs dir
  48 */
  49struct wwan_device {
  50	unsigned int id;
  51	struct device dev;
  52	atomic_t port_id;
  53	const struct wwan_ops *ops;
  54	void *ops_ctxt;
  55#ifdef CONFIG_WWAN_DEBUGFS
  56	struct dentry *debugfs_dir;
  57#endif
  58};
  59
  60/**
  61 * struct wwan_port - The structure that defines a WWAN port
  62 * @type: Port type
  63 * @start_count: Port start counter
  64 * @flags: Store port state and capabilities
  65 * @ops: Pointer to WWAN port operations
  66 * @ops_lock: Protect port ops
  67 * @dev: Underlying device
  68 * @rxq: Buffer inbound queue
  69 * @waitqueue: The waitqueue for port fops (read/write/poll)
  70 * @data_lock: Port specific data access serialization
  71 * @headroom_len: SKB reserved headroom size
  72 * @frag_len: Length to fragment packet
  73 * @at_data: AT port specific data
  74 */
  75struct wwan_port {
  76	enum wwan_port_type type;
  77	unsigned int start_count;
  78	unsigned long flags;
  79	const struct wwan_port_ops *ops;
  80	struct mutex ops_lock; /* Serialize ops + protect against removal */
  81	struct device dev;
  82	struct sk_buff_head rxq;
  83	wait_queue_head_t waitqueue;
  84	struct mutex data_lock;	/* Port specific data access serialization */
  85	size_t headroom_len;
  86	size_t frag_len;
  87	union {
  88		struct {
  89			struct ktermios termios;
  90			int mdmbits;
  91		} at_data;
  92	};
  93};
  94
  95static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf)
  96{
  97	struct wwan_device *wwan = to_wwan_dev(dev);
  98
  99	return sprintf(buf, "%d\n", wwan->id);
 100}
 101static DEVICE_ATTR_RO(index);
 102
 103static struct attribute *wwan_dev_attrs[] = {
 104	&dev_attr_index.attr,
 105	NULL,
 106};
 107ATTRIBUTE_GROUPS(wwan_dev);
 108
 109static void wwan_dev_destroy(struct device *dev)
 110{
 111	struct wwan_device *wwandev = to_wwan_dev(dev);
 112
 113	ida_free(&wwan_dev_ids, wwandev->id);
 114	kfree(wwandev);
 115}
 116
 117static const struct device_type wwan_dev_type = {
 118	.name    = "wwan_dev",
 119	.release = wwan_dev_destroy,
 120	.groups = wwan_dev_groups,
 121};
 122
 123static int wwan_dev_parent_match(struct device *dev, const void *parent)
 124{
 125	return (dev->type == &wwan_dev_type &&
 126		(dev->parent == parent || dev == parent));
 127}
 128
 129static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
 130{
 131	struct device *dev;
 132
 133	dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
 134	if (!dev)
 135		return ERR_PTR(-ENODEV);
 136
 137	return to_wwan_dev(dev);
 138}
 139
 140static int wwan_dev_name_match(struct device *dev, const void *name)
 141{
 142	return dev->type == &wwan_dev_type &&
 143	       strcmp(dev_name(dev), name) == 0;
 144}
 145
 146static struct wwan_device *wwan_dev_get_by_name(const char *name)
 147{
 148	struct device *dev;
 149
 150	dev = class_find_device(wwan_class, NULL, name, wwan_dev_name_match);
 151	if (!dev)
 152		return ERR_PTR(-ENODEV);
 153
 154	return to_wwan_dev(dev);
 155}
 156
 157#ifdef CONFIG_WWAN_DEBUGFS
 158struct dentry *wwan_get_debugfs_dir(struct device *parent)
 159{
 160	struct wwan_device *wwandev;
 161
 162	wwandev = wwan_dev_get_by_parent(parent);
 163	if (IS_ERR(wwandev))
 164		return ERR_CAST(wwandev);
 165
 166	return wwandev->debugfs_dir;
 167}
 168EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir);
 169
 170static int wwan_dev_debugfs_match(struct device *dev, const void *dir)
 171{
 172	struct wwan_device *wwandev;
 173
 174	if (dev->type != &wwan_dev_type)
 175		return 0;
 176
 177	wwandev = to_wwan_dev(dev);
 178
 179	return wwandev->debugfs_dir == dir;
 180}
 181
 182static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
 183{
 184	struct device *dev;
 185
 186	dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
 187	if (!dev)
 188		return ERR_PTR(-ENODEV);
 189
 190	return to_wwan_dev(dev);
 191}
 192
 193void wwan_put_debugfs_dir(struct dentry *dir)
 194{
 195	struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir);
 196
 197	if (WARN_ON(IS_ERR(wwandev)))
 198		return;
 199
 200	/* wwan_dev_get_by_debugfs() also got a reference */
 201	put_device(&wwandev->dev);
 202	put_device(&wwandev->dev);
 203}
 204EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir);
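/*
 * Illustrative sketch (not part of this file): a port driver with its own
 * debugfs entries would typically nest them under the wwanX directory using
 * the two helpers above. my_modem and its members are hypothetical.
 */
static void my_add_debugfs(struct my_modem *mdm)
{
	mdm->wwan_dbg = wwan_get_debugfs_dir(mdm->dev);	/* holds a wwandev reference */
	if (IS_ERR(mdm->wwan_dbg))
		return;

	mdm->my_dbg = debugfs_create_dir("my_driver", mdm->wwan_dbg);
	debugfs_create_u32("rx_count", 0444, mdm->my_dbg, &mdm->rx_count);
}

static void my_del_debugfs(struct my_modem *mdm)
{
	debugfs_remove_recursive(mdm->my_dbg);
	wwan_put_debugfs_dir(mdm->wwan_dbg);	/* releases the reference taken above */
}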
 205#endif
 206
 207/* This function allocates and registers a new WWAN device OR, if a WWAN device
 208 * already exists for the given parent, gets a reference to it and returns it.
 209 * This function is not exported (for now), it is called indirectly via
 210 * wwan_create_port().
 211 */
 212static struct wwan_device *wwan_create_dev(struct device *parent)
 213{
 214	struct wwan_device *wwandev;
 215	int err, id;
 216
 217	/* The 'find-alloc-register' operation must be protected against
 218	 * concurrent execution, as a WWAN device may be shared between
 219	 * multiple callers or concurrently unregistered from wwan_remove_dev().
 220	 */
 221	mutex_lock(&wwan_register_lock);
 222
 223	/* If wwandev already exists, return it */
 224	wwandev = wwan_dev_get_by_parent(parent);
 225	if (!IS_ERR(wwandev))
 226		goto done_unlock;
 227
 228	id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
 229	if (id < 0) {
 230		wwandev = ERR_PTR(id);
 231		goto done_unlock;
 232	}
 233
 234	wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
 235	if (!wwandev) {
 236		wwandev = ERR_PTR(-ENOMEM);
 237		ida_free(&wwan_dev_ids, id);
 238		goto done_unlock;
 239	}
 240
 241	wwandev->dev.parent = parent;
 242	wwandev->dev.class = wwan_class;
 243	wwandev->dev.type = &wwan_dev_type;
 244	wwandev->id = id;
 245	dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);
 246
 247	err = device_register(&wwandev->dev);
 248	if (err) {
 249		put_device(&wwandev->dev);
 250		wwandev = ERR_PTR(err);
 251		goto done_unlock;
 252	}
 253
 254#ifdef CONFIG_WWAN_DEBUGFS
 255	wwandev->debugfs_dir =
 256			debugfs_create_dir(kobject_name(&wwandev->dev.kobj),
 257					   wwan_debugfs_dir);
 258#endif
 259
 260done_unlock:
 261	mutex_unlock(&wwan_register_lock);
 262
 263	return wwandev;
 264}
 265
 266static int is_wwan_child(struct device *dev, void *data)
 267{
 268	return dev->class == wwan_class;
 269}
 270
 271static void wwan_remove_dev(struct wwan_device *wwandev)
 272{
 273	int ret;
 274
 275	/* Prevent concurrent picking from wwan_create_dev */
 276	mutex_lock(&wwan_register_lock);
 277
 278	/* WWAN device is created and registered (get+add) along with its first
 279	 * child port, and subsequent port registrations only grab a reference
 280	 * (get). The WWAN device must then be unregistered (del+put) along with
 281	 * its last port, and the reference simply dropped (put) otherwise. In the
 282	 * same fashion, we must not unregister it when the ops are still there.
 283	 */
 284	if (wwandev->ops)
 285		ret = 1;
 286	else
 287		ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
 288
 289	if (!ret) {
 290#ifdef CONFIG_WWAN_DEBUGFS
 291		debugfs_remove_recursive(wwandev->debugfs_dir);
 292#endif
 293		device_unregister(&wwandev->dev);
 294	} else {
 295		put_device(&wwandev->dev);
 296	}
 297
 298	mutex_unlock(&wwan_register_lock);
 299}
 300
 301/* ------- WWAN port management ------- */
 302
 303static const struct {
 304	const char * const name;	/* Port type name */
 305	const char * const devsuf;	/* Port device name suffix */
 306} wwan_port_types[WWAN_PORT_MAX + 1] = {
 307	[WWAN_PORT_AT] = {
 308		.name = "AT",
 309		.devsuf = "at",
 310	},
 311	[WWAN_PORT_MBIM] = {
 312		.name = "MBIM",
 313		.devsuf = "mbim",
 314	},
 315	[WWAN_PORT_QMI] = {
 316		.name = "QMI",
 317		.devsuf = "qmi",
 318	},
 319	[WWAN_PORT_QCDM] = {
 320		.name = "QCDM",
 321		.devsuf = "qcdm",
 322	},
 323	[WWAN_PORT_FIREHOSE] = {
 324		.name = "FIREHOSE",
 325		.devsuf = "firehose",
 326	},
 327	[WWAN_PORT_XMMRPC] = {
 328		.name = "XMMRPC",
 329		.devsuf = "xmmrpc",
 330	},
 331};
 332
 333static ssize_t type_show(struct device *dev, struct device_attribute *attr,
 334			 char *buf)
 335{
 336	struct wwan_port *port = to_wwan_port(dev);
 337
 338	return sprintf(buf, "%s\n", wwan_port_types[port->type].name);
 339}
 340static DEVICE_ATTR_RO(type);
 341
 342static struct attribute *wwan_port_attrs[] = {
 343	&dev_attr_type.attr,
 344	NULL,
 345};
 346ATTRIBUTE_GROUPS(wwan_port);
 347
 348static void wwan_port_destroy(struct device *dev)
 349{
 350	struct wwan_port *port = to_wwan_port(dev);
 351
 352	ida_free(&minors, MINOR(port->dev.devt));
 353	mutex_destroy(&port->data_lock);
 354	mutex_destroy(&port->ops_lock);
 355	kfree(port);
 356}
 357
 358static const struct device_type wwan_port_dev_type = {
 359	.name = "wwan_port",
 360	.release = wwan_port_destroy,
 361	.groups = wwan_port_groups,
 362};
 363
 364static int wwan_port_minor_match(struct device *dev, const void *minor)
 365{
 366	return (dev->type == &wwan_port_dev_type &&
 367		MINOR(dev->devt) == *(unsigned int *)minor);
 368}
 369
 370static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
 371{
 372	struct device *dev;
 373
 374	dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
 375	if (!dev)
 376		return ERR_PTR(-ENODEV);
 377
 378	return to_wwan_port(dev);
 379}
 380
 381/* Allocate and set unique name based on passed format
 382 *
 383 * Name allocation approach is highly inspired by the __dev_alloc_name()
 384 * function.
 385 *
 386 * To avoid name collisions, the caller must prevent any new port device
 387 * registration as well as concurrent invocation of this function.
 388 */
 389static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
 390{
 391	struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
 392	const unsigned int max_ports = PAGE_SIZE * 8;
 393	struct class_dev_iter iter;
 394	unsigned long *idmap;
 395	struct device *dev;
 396	char buf[0x20];
 397	int id;
 398
 399	idmap = bitmap_zalloc(max_ports, GFP_KERNEL);
 400	if (!idmap)
 401		return -ENOMEM;
 402
 403	/* Collect ids of same name format ports */
 404	class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type);
 405	while ((dev = class_dev_iter_next(&iter))) {
 406		if (dev->parent != &wwandev->dev)
 407			continue;
 408		if (sscanf(dev_name(dev), fmt, &id) != 1)
 409			continue;
 410		if (id < 0 || id >= max_ports)
 411			continue;
 412		set_bit(id, idmap);
 413	}
 414	class_dev_iter_exit(&iter);
 415
 416	/* Allocate unique id */
 417	id = find_first_zero_bit(idmap, max_ports);
 418	bitmap_free(idmap);
 419
 420	snprintf(buf, sizeof(buf), fmt, id);	/* Name generation */
 421
 422	dev = device_find_child_by_name(&wwandev->dev, buf);
 423	if (dev) {
 424		put_device(dev);
 425		return -ENFILE;
 426	}
 427
 428	return dev_set_name(&port->dev, buf);
 429}
 430
 431struct wwan_port *wwan_create_port(struct device *parent,
 432				   enum wwan_port_type type,
 433				   const struct wwan_port_ops *ops,
 434				   struct wwan_port_caps *caps,
 435				   void *drvdata)
 436{
 437	struct wwan_device *wwandev;
 438	struct wwan_port *port;
 439	char namefmt[0x20];
 440	int minor, err;
 441
 442	if (type > WWAN_PORT_MAX || !ops)
 443		return ERR_PTR(-EINVAL);
 444
 445	/* A port is always a child of a WWAN device, retrieve (allocate or
 446	 * pick) the WWAN device based on the provided parent device.
 447	 */
 448	wwandev = wwan_create_dev(parent);
 449	if (IS_ERR(wwandev))
 450		return ERR_CAST(wwandev);
 451
 452	/* A port is exposed as a character device, get a minor */
 453	minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
 454	if (minor < 0) {
 455		err = minor;
 456		goto error_wwandev_remove;
 457	}
 458
 459	port = kzalloc(sizeof(*port), GFP_KERNEL);
 460	if (!port) {
 461		err = -ENOMEM;
 462		ida_free(&minors, minor);
 463		goto error_wwandev_remove;
 464	}
 465
 466	port->type = type;
 467	port->ops = ops;
 468	port->frag_len = caps ? caps->frag_len : SIZE_MAX;
 469	port->headroom_len = caps ? caps->headroom_len : 0;
 470	mutex_init(&port->ops_lock);
 471	skb_queue_head_init(&port->rxq);
 472	init_waitqueue_head(&port->waitqueue);
 473	mutex_init(&port->data_lock);
 474
 475	port->dev.parent = &wwandev->dev;
 476	port->dev.class = wwan_class;
 477	port->dev.type = &wwan_port_dev_type;
 478	port->dev.devt = MKDEV(wwan_major, minor);
 479	dev_set_drvdata(&port->dev, drvdata);
 480
 481	/* allocate unique name based on wwan device id, port type and number */
 482	snprintf(namefmt, sizeof(namefmt), "wwan%u%s%%d", wwandev->id,
 483		 wwan_port_types[port->type].devsuf);
 484
 485	/* Serialize ports registration */
 486	mutex_lock(&wwan_register_lock);
 487
 488	__wwan_port_dev_assign_name(port, namefmt);
 489	err = device_register(&port->dev);
 490
 491	mutex_unlock(&wwan_register_lock);
 492
 493	if (err)
 494		goto error_put_device;
 495
 496	dev_info(&wwandev->dev, "port %s attached\n", dev_name(&port->dev));
 497	return port;
 498
 499error_put_device:
 500	put_device(&port->dev);
 501error_wwandev_remove:
 502	wwan_remove_dev(wwandev);
 503
 504	return ERR_PTR(err);
 505}
 506EXPORT_SYMBOL_GPL(wwan_create_port);
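/*
 * Illustrative sketch (not part of this file): typical driver-side use of
 * wwan_create_port()/wwan_remove_port(). The my_* names, the my_modem
 * context and the capability values below are hypothetical.
 */
static const struct wwan_port_ops my_at_port_ops = {
	.start = my_at_start,	/* called on first open() of the port chardev */
	.stop  = my_at_stop,	/* called on last release() of the port chardev */
	.tx    = my_at_tx,	/* non-blocking TX of one (possibly fragmented) skb */
};

static int my_create_at_port(struct my_modem *mdm)
{
	struct wwan_port_caps caps = {
		.frag_len = 4096,	/* assumed device-side fragment limit */
		.headroom_len = 16,	/* assumed per-fragment TX headroom */
	};

	/* Creates the wwanX parent device for the first port, then the port
	 * chardev (e.g. /dev/wwan0at0, per the name format built above).
	 */
	mdm->at_port = wwan_create_port(mdm->dev, WWAN_PORT_AT,
					&my_at_port_ops, &caps, mdm);
	return PTR_ERR_OR_ZERO(mdm->at_port);
}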
 507
 508void wwan_remove_port(struct wwan_port *port)
 509{
 510	struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
 511
 512	mutex_lock(&port->ops_lock);
 513	if (port->start_count)
 514		port->ops->stop(port);
 515	port->ops = NULL; /* Prevent any new port operations (e.g. from fops) */
 516	mutex_unlock(&port->ops_lock);
 517
 518	wake_up_interruptible(&port->waitqueue);
 519
 520	skb_queue_purge(&port->rxq);
 521	dev_set_drvdata(&port->dev, NULL);
 522
 523	dev_info(&wwandev->dev, "port %s disconnected\n", dev_name(&port->dev));
 524	device_unregister(&port->dev);
 525
 526	/* Release related wwan device */
 527	wwan_remove_dev(wwandev);
 528}
 529EXPORT_SYMBOL_GPL(wwan_remove_port);
 530
 531void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb)
 532{
 533	skb_queue_tail(&port->rxq, skb);
 534	wake_up_interruptible(&port->waitqueue);
 535}
 536EXPORT_SYMBOL_GPL(wwan_port_rx);
 537
 538void wwan_port_txon(struct wwan_port *port)
 539{
 540	clear_bit(WWAN_PORT_TX_OFF, &port->flags);
 541	wake_up_interruptible(&port->waitqueue);
 542}
 543EXPORT_SYMBOL_GPL(wwan_port_txon);
 544
 545void wwan_port_txoff(struct wwan_port *port)
 546{
 547	set_bit(WWAN_PORT_TX_OFF, &port->flags);
 548}
 549EXPORT_SYMBOL_GPL(wwan_port_txoff);
 550
 551void *wwan_port_get_drvdata(struct wwan_port *port)
 552{
 553	return dev_get_drvdata(&port->dev);
 554}
 555EXPORT_SYMBOL_GPL(wwan_port_get_drvdata);
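/*
 * Illustrative sketch (not part of this file): how a driver might use the
 * helpers above from its RX completion path. my_modem and my_tx_queue_full()
 * are hypothetical.
 */
static void my_rx_complete(struct wwan_port *port, struct sk_buff *skb)
{
	struct my_modem *mdm = wwan_port_get_drvdata(port);

	/* Queue the buffer for readers and wake any blocked read()/poll() */
	wwan_port_rx(port, skb);

	/* Flow-control writers according to the device TX queue state */
	if (my_tx_queue_full(mdm))
		wwan_port_txoff(port);
	else
		wwan_port_txon(port);
}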
 556
 557static int wwan_port_op_start(struct wwan_port *port)
 558{
 559	int ret = 0;
 560
 561	mutex_lock(&port->ops_lock);
 562	if (!port->ops) { /* Port got unplugged */
 563		ret = -ENODEV;
 564		goto out_unlock;
 565	}
 566
 567	/* If port is already started, don't start again */
 568	if (!port->start_count)
 569		ret = port->ops->start(port);
 570
 571	if (!ret)
 572		port->start_count++;
 573
 574out_unlock:
 575	mutex_unlock(&port->ops_lock);
 576
 577	return ret;
 578}
 579
 580static void wwan_port_op_stop(struct wwan_port *port)
 581{
 582	mutex_lock(&port->ops_lock);
 583	port->start_count--;
 584	if (!port->start_count) {
 585		if (port->ops)
 586			port->ops->stop(port);
 587		skb_queue_purge(&port->rxq);
 588	}
 589	mutex_unlock(&port->ops_lock);
 590}
 591
 592static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb,
 593			   bool nonblock)
 594{
 595	int ret;
 596
 597	mutex_lock(&port->ops_lock);
 598	if (!port->ops) { /* Port got unplugged */
 599		ret = -ENODEV;
 600		goto out_unlock;
 601	}
 602
 603	if (nonblock || !port->ops->tx_blocking)
 604		ret = port->ops->tx(port, skb);
 605	else
 606		ret = port->ops->tx_blocking(port, skb);
 607
 608out_unlock:
 609	mutex_unlock(&port->ops_lock);
 610
 611	return ret;
 612}
 613
 614static bool is_read_blocked(struct wwan_port *port)
 615{
 616	return skb_queue_empty(&port->rxq) && port->ops;
 617}
 618
 619static bool is_write_blocked(struct wwan_port *port)
 620{
 621	return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops;
 622}
 623
 624static int wwan_wait_rx(struct wwan_port *port, bool nonblock)
 625{
 626	if (!is_read_blocked(port))
 627		return 0;
 628
 629	if (nonblock)
 630		return -EAGAIN;
 631
 632	if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port)))
 633		return -ERESTARTSYS;
 634
 635	return 0;
 636}
 637
 638static int wwan_wait_tx(struct wwan_port *port, bool nonblock)
 639{
 640	if (!is_write_blocked(port))
 641		return 0;
 642
 643	if (nonblock)
 644		return -EAGAIN;
 645
 646	if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port)))
 647		return -ERESTARTSYS;
 648
 649	return 0;
 650}
 651
 652static int wwan_port_fops_open(struct inode *inode, struct file *file)
 653{
 654	struct wwan_port *port;
 655	int err = 0;
 656
 657	port = wwan_port_get_by_minor(iminor(inode));
 658	if (IS_ERR(port))
 659		return PTR_ERR(port);
 660
 661	file->private_data = port;
 662	stream_open(inode, file);
 663
 664	err = wwan_port_op_start(port);
 665	if (err)
 666		put_device(&port->dev);
 667
 668	return err;
 669}
 670
 671static int wwan_port_fops_release(struct inode *inode, struct file *filp)
 672{
 673	struct wwan_port *port = filp->private_data;
 674
 675	wwan_port_op_stop(port);
 676	put_device(&port->dev);
 677
 678	return 0;
 679}
 680
 681static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf,
 682				   size_t count, loff_t *ppos)
 683{
 684	struct wwan_port *port = filp->private_data;
 685	struct sk_buff *skb;
 686	size_t copied;
 687	int ret;
 688
 689	ret = wwan_wait_rx(port, !!(filp->f_flags & O_NONBLOCK));
 690	if (ret)
 691		return ret;
 692
 693	skb = skb_dequeue(&port->rxq);
 694	if (!skb)
 695		return -EIO;
 696
 697	copied = min_t(size_t, count, skb->len);
 698	if (copy_to_user(buf, skb->data, copied)) {
 699		kfree_skb(skb);
 700		return -EFAULT;
 701	}
 702	skb_pull(skb, copied);
 703
 704	/* skb is not fully consumed, keep it in the queue */
 705	if (skb->len)
 706		skb_queue_head(&port->rxq, skb);
 707	else
 708		consume_skb(skb);
 709
 710	return copied;
 711}
 712
 713static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
 714				    size_t count, loff_t *offp)
 715{
 716	struct sk_buff *skb, *head = NULL, *tail = NULL;
 717	struct wwan_port *port = filp->private_data;
 718	size_t frag_len, remain = count;
 719	int ret;
 720
 721	ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK));
 722	if (ret)
 723		return ret;
 724
 725	do {
 726		frag_len = min(remain, port->frag_len);
 727		skb = alloc_skb(frag_len + port->headroom_len, GFP_KERNEL);
 728		if (!skb) {
 729			ret = -ENOMEM;
 730			goto freeskb;
 731		}
 732		skb_reserve(skb, port->headroom_len);
 733
 734		if (!head) {
 735			head = skb;
 736		} else if (!tail) {
 737			skb_shinfo(head)->frag_list = skb;
 738			tail = skb;
 739		} else {
 740			tail->next = skb;
 741			tail = skb;
 742		}
 743
 744		if (copy_from_user(skb_put(skb, frag_len), buf + count - remain, frag_len)) {
 745			ret = -EFAULT;
 746			goto freeskb;
 747		}
 748
 749		if (skb != head) {
 750			head->data_len += skb->len;
 751			head->len += skb->len;
 752			head->truesize += skb->truesize;
 753		}
 754	} while (remain -= frag_len);
 755
 756	ret = wwan_port_op_tx(port, head, !!(filp->f_flags & O_NONBLOCK));
 757	if (!ret)
 758		return count;
 759
 760freeskb:
 761	kfree_skb(head);
 762	return ret;
 763}
 764
 765static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
 766{
 767	struct wwan_port *port = filp->private_data;
 768	__poll_t mask = 0;
 769
 770	poll_wait(filp, &port->waitqueue, wait);
 771
 772	mutex_lock(&port->ops_lock);
 773	if (port->ops && port->ops->tx_poll)
 774		mask |= port->ops->tx_poll(port, filp, wait);
 775	else if (!is_write_blocked(port))
 776		mask |= EPOLLOUT | EPOLLWRNORM;
 777	if (!is_read_blocked(port))
 778		mask |= EPOLLIN | EPOLLRDNORM;
 779	if (!port->ops)
 780		mask |= EPOLLHUP | EPOLLERR;
 781	mutex_unlock(&port->ops_lock);
 782
 783	return mask;
 784}
 785
 786/* Implements minimal stub terminal IOCTL support */
 787static long wwan_port_fops_at_ioctl(struct wwan_port *port, unsigned int cmd,
 788				    unsigned long arg)
 789{
 790	int ret = 0;
 791
 792	mutex_lock(&port->data_lock);
 793
 794	switch (cmd) {
 795	case TCFLSH:
 796		break;
 797
 798	case TCGETS:
 799		if (copy_to_user((void __user *)arg, &port->at_data.termios,
 800				 sizeof(struct termios)))
 801			ret = -EFAULT;
 802		break;
 803
 804	case TCSETS:
 805	case TCSETSW:
 806	case TCSETSF:
 807		if (copy_from_user(&port->at_data.termios, (void __user *)arg,
 808				   sizeof(struct termios)))
 809			ret = -EFAULT;
 810		break;
 811
 812#ifdef TCGETS2
 813	case TCGETS2:
 814		if (copy_to_user((void __user *)arg, &port->at_data.termios,
 815				 sizeof(struct termios2)))
 816			ret = -EFAULT;
 817		break;
 818
 819	case TCSETS2:
 820	case TCSETSW2:
 821	case TCSETSF2:
 822		if (copy_from_user(&port->at_data.termios, (void __user *)arg,
 823				   sizeof(struct termios2)))
 824			ret = -EFAULT;
 825		break;
 826#endif
 827
 828	case TIOCMGET:
 829		ret = put_user(port->at_data.mdmbits, (int __user *)arg);
 830		break;
 831
 832	case TIOCMSET:
 833	case TIOCMBIC:
 834	case TIOCMBIS: {
 835		int mdmbits;
 836
 837		if (copy_from_user(&mdmbits, (int __user *)arg, sizeof(int))) {
 838			ret = -EFAULT;
 839			break;
 840		}
 841		if (cmd == TIOCMBIC)
 842			port->at_data.mdmbits &= ~mdmbits;
 843		else if (cmd == TIOCMBIS)
 844			port->at_data.mdmbits |= mdmbits;
 845		else
 846			port->at_data.mdmbits = mdmbits;
 847		break;
 848	}
 849
 850	default:
 851		ret = -ENOIOCTLCMD;
 852	}
 853
 854	mutex_unlock(&port->data_lock);
 855
 856	return ret;
 857}
 858
 859static long wwan_port_fops_ioctl(struct file *filp, unsigned int cmd,
 860				 unsigned long arg)
 861{
 862	struct wwan_port *port = filp->private_data;
 863	int res;
 864
 865	if (port->type == WWAN_PORT_AT) {	/* AT port specific IOCTLs */
 866		res = wwan_port_fops_at_ioctl(port, cmd, arg);
 867		if (res != -ENOIOCTLCMD)
 868			return res;
 869	}
 870
 871	switch (cmd) {
 872	case TIOCINQ: {	/* aka SIOCINQ aka FIONREAD */
 873		unsigned long flags;
 874		struct sk_buff *skb;
 875		int amount = 0;
 876
 877		spin_lock_irqsave(&port->rxq.lock, flags);
 878		skb_queue_walk(&port->rxq, skb)
 879			amount += skb->len;
 880		spin_unlock_irqrestore(&port->rxq.lock, flags);
 881
 882		return put_user(amount, (int __user *)arg);
 883	}
 884
 885	default:
 886		return -ENOIOCTLCMD;
 887	}
 888}
 889
 890static const struct file_operations wwan_port_fops = {
 891	.owner = THIS_MODULE,
 892	.open = wwan_port_fops_open,
 893	.release = wwan_port_fops_release,
 894	.read = wwan_port_fops_read,
 895	.write = wwan_port_fops_write,
 896	.poll = wwan_port_fops_poll,
 897	.unlocked_ioctl = wwan_port_fops_ioctl,
 898#ifdef CONFIG_COMPAT
 899	.compat_ioctl = compat_ptr_ioctl,
 900#endif
 901	.llseek = noop_llseek,
 902};
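/*
 * Illustrative user-space sketch (not part of this file): exercising an AT
 * port through the fops above. The /dev node name is an assumption based on
 * the wwan%u<type>%d naming generated in wwan_create_port() plus devtmpfs.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/wwan0at0", O_RDWR);   // wwan_port_fops_open()
 *	ssize_t n = write(fd, "AT\r", 3);          // wwan_port_fops_write()
 *	int pending;
 *	ioctl(fd, TIOCINQ, &pending);              // bytes queued in port->rxq
 *	char resp[256];
 *	n = read(fd, resp, sizeof(resp));          // wwan_port_fops_read()
 *	close(fd);                                 // wwan_port_fops_release()
 */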
 903
 904static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
 905			      struct netlink_ext_ack *extack)
 906{
 907	if (!data)
 908		return -EINVAL;
 909
 910	if (!tb[IFLA_PARENT_DEV_NAME])
 911		return -EINVAL;
 912
 913	if (!data[IFLA_WWAN_LINK_ID])
 914		return -EINVAL;
 915
 916	return 0;
 917}
 918
 919static struct device_type wwan_type = { .name = "wwan" };
 920
 921static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
 922					  const char *ifname,
 923					  unsigned char name_assign_type,
 924					  unsigned int num_tx_queues,
 925					  unsigned int num_rx_queues)
 926{
 927	const char *devname = nla_data(tb[IFLA_PARENT_DEV_NAME]);
 928	struct wwan_device *wwandev = wwan_dev_get_by_name(devname);
 929	struct net_device *dev;
 930	unsigned int priv_size;
 931
 932	if (IS_ERR(wwandev))
 933		return ERR_CAST(wwandev);
 934
 935	/* only supported if ops were registered (not just ports) */
 936	if (!wwandev->ops) {
 937		dev = ERR_PTR(-EOPNOTSUPP);
 938		goto out;
 939	}
 940
 941	priv_size = sizeof(struct wwan_netdev_priv) + wwandev->ops->priv_size;
 942	dev = alloc_netdev_mqs(priv_size, ifname, name_assign_type,
 943			       wwandev->ops->setup, num_tx_queues, num_rx_queues);
 944
 945	if (dev) {
 946		SET_NETDEV_DEV(dev, &wwandev->dev);
 947		SET_NETDEV_DEVTYPE(dev, &wwan_type);
 948	}
 949
 950out:
 951	/* release the reference */
 952	put_device(&wwandev->dev);
 953	return dev;
 954}
 955
 956static int wwan_rtnl_newlink(struct net *src_net, struct net_device *dev,
 957			     struct nlattr *tb[], struct nlattr *data[],
 958			     struct netlink_ext_ack *extack)
 959{
 960	struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
 961	u32 link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]);
 962	struct wwan_netdev_priv *priv = netdev_priv(dev);
 963	int ret;
 964
 965	if (IS_ERR(wwandev))
 966		return PTR_ERR(wwandev);
 967
 968	/* shouldn't have a netdev (left) with us as parent so WARN */
 969	if (WARN_ON(!wwandev->ops)) {
 970		ret = -EOPNOTSUPP;
 971		goto out;
 972	}
 973
 974	priv->link_id = link_id;
 975	if (wwandev->ops->newlink)
 976		ret = wwandev->ops->newlink(wwandev->ops_ctxt, dev,
 977					    link_id, extack);
 978	else
 979		ret = register_netdevice(dev);
 980
 981out:
 982	/* release the reference */
 983	put_device(&wwandev->dev);
 984	return ret;
 985}
 986
 987static void wwan_rtnl_dellink(struct net_device *dev, struct list_head *head)
 988{
 989	struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
 990
 991	if (IS_ERR(wwandev))
 992		return;
 993
 994	/* shouldn't have a netdev (left) with us as parent so WARN */
 995	if (WARN_ON(!wwandev->ops))
 996		goto out;
 997
 998	if (wwandev->ops->dellink)
 999		wwandev->ops->dellink(wwandev->ops_ctxt, dev, head);
1000	else
1001		unregister_netdevice_queue(dev, head);
1002
1003out:
1004	/* release the reference */
1005	put_device(&wwandev->dev);
1006}
1007
1008static size_t wwan_rtnl_get_size(const struct net_device *dev)
1009{
1010	return
1011		nla_total_size(4) +	/* IFLA_WWAN_LINK_ID */
1012		0;
1013}
1014
1015static int wwan_rtnl_fill_info(struct sk_buff *skb,
1016			       const struct net_device *dev)
1017{
1018	struct wwan_netdev_priv *priv = netdev_priv(dev);
1019
1020	if (nla_put_u32(skb, IFLA_WWAN_LINK_ID, priv->link_id))
1021		goto nla_put_failure;
1022
1023	return 0;
1024
1025nla_put_failure:
1026	return -EMSGSIZE;
1027}
1028
1029static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = {
1030	[IFLA_WWAN_LINK_ID] = { .type = NLA_U32 },
1031};
1032
1033static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = {
1034	.kind = "wwan",
1035	.maxtype = __IFLA_WWAN_MAX,
1036	.alloc = wwan_rtnl_alloc,
1037	.validate = wwan_rtnl_validate,
1038	.newlink = wwan_rtnl_newlink,
1039	.dellink = wwan_rtnl_dellink,
1040	.get_size = wwan_rtnl_get_size,
1041	.fill_info = wwan_rtnl_fill_info,
1042	.policy = wwan_rtnl_policy,
1043};
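/*
 * Note (illustrative): the "wwan" rtnl link kind above lets user space add
 * data links to an existing wwanX device, typically with iproute2, e.g.
 * "ip link add dev wwan0-1 parentdev wwan0 type wwan linkid 1" (exact syntax
 * may vary by iproute2 version). IFLA_PARENT_DEV_NAME selects the wwanX
 * device and IFLA_WWAN_LINK_ID ends up in wwan_netdev_priv::link_id.
 */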
1044
1045static void wwan_create_default_link(struct wwan_device *wwandev,
1046				     u32 def_link_id)
1047{
1048	struct nlattr *tb[IFLA_MAX + 1], *linkinfo[IFLA_INFO_MAX + 1];
1049	struct nlattr *data[IFLA_WWAN_MAX + 1];
1050	struct net_device *dev;
1051	struct nlmsghdr *nlh;
1052	struct sk_buff *msg;
1053
1054	/* Forge attributes required to create a WWAN netdev. We first
1055	 * build a netlink message and then parse it. This looks
1056	 * odd, but such an approach is less error-prone.
1057	 */
1058	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1059	if (WARN_ON(!msg))
1060		return;
1061	nlh = nlmsg_put(msg, 0, 0, RTM_NEWLINK, 0, 0);
1062	if (WARN_ON(!nlh))
1063		goto free_attrs;
1064
1065	if (nla_put_string(msg, IFLA_PARENT_DEV_NAME, dev_name(&wwandev->dev)))
1066		goto free_attrs;
1067	tb[IFLA_LINKINFO] = nla_nest_start(msg, IFLA_LINKINFO);
1068	if (!tb[IFLA_LINKINFO])
1069		goto free_attrs;
1070	linkinfo[IFLA_INFO_DATA] = nla_nest_start(msg, IFLA_INFO_DATA);
1071	if (!linkinfo[IFLA_INFO_DATA])
1072		goto free_attrs;
1073	if (nla_put_u32(msg, IFLA_WWAN_LINK_ID, def_link_id))
1074		goto free_attrs;
1075	nla_nest_end(msg, linkinfo[IFLA_INFO_DATA]);
1076	nla_nest_end(msg, tb[IFLA_LINKINFO]);
1077
1078	nlmsg_end(msg, nlh);
1079
1080	/* The next three parsing calls cannot fail */
1081	nlmsg_parse_deprecated(nlh, 0, tb, IFLA_MAX, NULL, NULL);
1082	nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, tb[IFLA_LINKINFO],
1083				    NULL, NULL);
1084	nla_parse_nested_deprecated(data, IFLA_WWAN_MAX,
1085				    linkinfo[IFLA_INFO_DATA], NULL, NULL);
1086
1087	rtnl_lock();
1088
1089	dev = rtnl_create_link(&init_net, "wwan%d", NET_NAME_ENUM,
1090			       &wwan_rtnl_link_ops, tb, NULL);
1091	if (WARN_ON(IS_ERR(dev)))
1092		goto unlock;
1093
1094	if (WARN_ON(wwan_rtnl_newlink(&init_net, dev, tb, data, NULL))) {
1095		free_netdev(dev);
1096		goto unlock;
1097	}
1098
1099	rtnl_configure_link(dev, NULL, 0, NULL); /* Link initialized, notify new link */
1100
1101unlock:
1102	rtnl_unlock();
1103
1104free_attrs:
1105	nlmsg_free(msg);
1106}
1107
1108/**
1109 * wwan_register_ops - register WWAN device ops
1110 * @parent: Device to use as parent and shared by all WWAN ports and
1111 *	created netdevs
1112 * @ops: operations to register
1113 * @ctxt: context to pass to operations
1114 * @def_link_id: id of the default link that will be automatically created by
1115 *	the WWAN core for the WWAN device. The default link will not be created
1116 *	if the passed value is WWAN_NO_DEFAULT_LINK.
1117 *
1118 * Returns: 0 on success, a negative error code on failure
1119 */
1120int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
1121		      void *ctxt, u32 def_link_id)
1122{
1123	struct wwan_device *wwandev;
1124
1125	if (WARN_ON(!parent || !ops || !ops->setup))
1126		return -EINVAL;
1127
1128	wwandev = wwan_create_dev(parent);
1129	if (IS_ERR(wwandev))
1130		return PTR_ERR(wwandev);
1131
1132	if (WARN_ON(wwandev->ops)) {
1133		wwan_remove_dev(wwandev);
1134		return -EBUSY;
1135	}
1136
1137	wwandev->ops = ops;
1138	wwandev->ops_ctxt = ctxt;
1139
1140	/* NB: we do not abort ops registration in case of default link
1141	 * creation failure. Link ops is the management interface, while the
1142	 * default link creation is a service option. And we should not prevent
1143	 * a user from manually creating a link later if the service option failed
1144	 * now.
1145	 */
1146	if (def_link_id != WWAN_NO_DEFAULT_LINK)
1147		wwan_create_default_link(wwandev, def_link_id);
1148
1149	return 0;
1150}
1151EXPORT_SYMBOL_GPL(wwan_register_ops);
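/*
 * Illustrative sketch (not part of this file): how a modem driver might
 * register/unregister link management ops. All my_* names are hypothetical;
 * the wwan_ops callback roles follow their use in this file.
 */
static const struct wwan_ops my_wwan_ops = {
	.priv_size = sizeof(struct my_link_ctx),	/* extra per-netdev private data */
	.setup	   = my_netdev_setup,			/* net_device setup (type, mtu, netdev_ops) */
	.newlink   = my_newlink,			/* bind the netdev to a modem data channel */
	.dellink   = my_dellink,			/* unbind and unregister the netdev */
};

static int my_probe(struct my_modem *mdm)
{
	/* Also creates a default data link with link id 0 (see above) */
	return wwan_register_ops(mdm->dev, &my_wwan_ops, mdm, 0);
}

static void my_remove(struct my_modem *mdm)
{
	/* Removes any remaining child netdevs and drops the ops */
	wwan_unregister_ops(mdm->dev);
}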
1152
1153/* Enqueue child netdev deletion */
1154static int wwan_child_dellink(struct device *dev, void *data)
1155{
1156	struct list_head *kill_list = data;
1157
1158	if (dev->type == &wwan_type)
1159		wwan_rtnl_dellink(to_net_dev(dev), kill_list);
1160
1161	return 0;
1162}
1163
1164/**
1165 * wwan_unregister_ops - remove WWAN device ops
1166 * @parent: Device to use as parent and shared by all WWAN ports and
1167 *	created netdevs
1168 */
1169void wwan_unregister_ops(struct device *parent)
1170{
1171	struct wwan_device *wwandev = wwan_dev_get_by_parent(parent);
1172	LIST_HEAD(kill_list);
1173
1174	if (WARN_ON(IS_ERR(wwandev)))
1175		return;
1176	if (WARN_ON(!wwandev->ops)) {
1177		put_device(&wwandev->dev);
1178		return;
1179	}
1180
1181	/* put the reference obtained by wwan_dev_get_by_parent(),
1182	 * we should still have one (that the owner is giving back
1183	 * now) due to the ops being assigned.
1184	 */
1185	put_device(&wwandev->dev);
1186
1187	rtnl_lock();	/* Prevent concurrent netdev(s) creation/destroying */
1188
1189	/* Remove all child netdev(s), using batch removing */
1190	device_for_each_child(&wwandev->dev, &kill_list,
1191			      wwan_child_dellink);
1192	unregister_netdevice_many(&kill_list);
1193
1194	wwandev->ops = NULL;	/* Finally remove ops */
1195
1196	rtnl_unlock();
1197
1198	wwandev->ops_ctxt = NULL;
1199	wwan_remove_dev(wwandev);
1200}
1201EXPORT_SYMBOL_GPL(wwan_unregister_ops);
1202
1203static int __init wwan_init(void)
1204{
1205	int err;
1206
1207	err = rtnl_link_register(&wwan_rtnl_link_ops);
1208	if (err)
1209		return err;
1210
1211	wwan_class = class_create("wwan");
1212	if (IS_ERR(wwan_class)) {
1213		err = PTR_ERR(wwan_class);
1214		goto unregister;
1215	}
1216
1217	/* chrdev used for wwan ports */
1218	wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port",
1219				       &wwan_port_fops);
1220	if (wwan_major < 0) {
1221		err = wwan_major;
1222		goto destroy;
1223	}
1224
1225#ifdef CONFIG_WWAN_DEBUGFS
1226	wwan_debugfs_dir = debugfs_create_dir("wwan", NULL);
1227#endif
1228
1229	return 0;
1230
1231destroy:
1232	class_destroy(wwan_class);
1233unregister:
1234	rtnl_link_unregister(&wwan_rtnl_link_ops);
1235	return err;
1236}
1237
1238static void __exit wwan_exit(void)
1239{
1240	debugfs_remove_recursive(wwan_debugfs_dir);
1241	__unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
1242	rtnl_link_unregister(&wwan_rtnl_link_ops);
1243	class_destroy(wwan_class);
1244}
1245
1246module_init(wwan_init);
1247module_exit(wwan_exit);
1248
1249MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
1250MODULE_DESCRIPTION("WWAN core");
1251MODULE_LICENSE("GPL v2");
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
   3
 
   4#include <linux/err.h>
   5#include <linux/errno.h>
   6#include <linux/debugfs.h>
   7#include <linux/fs.h>
   8#include <linux/init.h>
   9#include <linux/idr.h>
  10#include <linux/kernel.h>
  11#include <linux/module.h>
  12#include <linux/poll.h>
  13#include <linux/skbuff.h>
  14#include <linux/slab.h>
  15#include <linux/types.h>
  16#include <linux/uaccess.h>
  17#include <linux/termios.h>
  18#include <linux/wwan.h>
  19#include <net/rtnetlink.h>
  20#include <uapi/linux/wwan.h>
  21
  22/* Maximum number of minors in use */
  23#define WWAN_MAX_MINORS		(1 << MINORBITS)
  24
  25static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
  26static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
  27static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
  28static struct class *wwan_class;
  29static int wwan_major;
  30static struct dentry *wwan_debugfs_dir;
  31
  32#define to_wwan_dev(d) container_of(d, struct wwan_device, dev)
  33#define to_wwan_port(d) container_of(d, struct wwan_port, dev)
  34
  35/* WWAN port flags */
  36#define WWAN_PORT_TX_OFF	0
  37
  38/**
  39 * struct wwan_device - The structure that defines a WWAN device
  40 *
  41 * @id: WWAN device unique ID.
  42 * @dev: Underlying device.
  43 * @port_id: Current available port ID to pick.
  44 * @ops: wwan device ops
  45 * @ops_ctxt: context to pass to ops
  46 * @debugfs_dir:  WWAN device debugfs dir
  47 */
  48struct wwan_device {
  49	unsigned int id;
  50	struct device dev;
  51	atomic_t port_id;
  52	const struct wwan_ops *ops;
  53	void *ops_ctxt;
  54#ifdef CONFIG_WWAN_DEBUGFS
  55	struct dentry *debugfs_dir;
  56#endif
  57};
  58
  59/**
  60 * struct wwan_port - The structure that defines a WWAN port
  61 * @type: Port type
  62 * @start_count: Port start counter
  63 * @flags: Store port state and capabilities
  64 * @ops: Pointer to WWAN port operations
  65 * @ops_lock: Protect port ops
  66 * @dev: Underlying device
  67 * @rxq: Buffer inbound queue
  68 * @waitqueue: The waitqueue for port fops (read/write/poll)
  69 * @data_lock: Port specific data access serialization
 
 
  70 * @at_data: AT port specific data
  71 */
  72struct wwan_port {
  73	enum wwan_port_type type;
  74	unsigned int start_count;
  75	unsigned long flags;
  76	const struct wwan_port_ops *ops;
  77	struct mutex ops_lock; /* Serialize ops + protect against removal */
  78	struct device dev;
  79	struct sk_buff_head rxq;
  80	wait_queue_head_t waitqueue;
  81	struct mutex data_lock;	/* Port specific data access serialization */
 
 
  82	union {
  83		struct {
  84			struct ktermios termios;
  85			int mdmbits;
  86		} at_data;
  87	};
  88};
  89
  90static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf)
  91{
  92	struct wwan_device *wwan = to_wwan_dev(dev);
  93
  94	return sprintf(buf, "%d\n", wwan->id);
  95}
  96static DEVICE_ATTR_RO(index);
  97
  98static struct attribute *wwan_dev_attrs[] = {
  99	&dev_attr_index.attr,
 100	NULL,
 101};
 102ATTRIBUTE_GROUPS(wwan_dev);
 103
 104static void wwan_dev_destroy(struct device *dev)
 105{
 106	struct wwan_device *wwandev = to_wwan_dev(dev);
 107
 108	ida_free(&wwan_dev_ids, wwandev->id);
 109	kfree(wwandev);
 110}
 111
 112static const struct device_type wwan_dev_type = {
 113	.name    = "wwan_dev",
 114	.release = wwan_dev_destroy,
 115	.groups = wwan_dev_groups,
 116};
 117
 118static int wwan_dev_parent_match(struct device *dev, const void *parent)
 119{
 120	return (dev->type == &wwan_dev_type &&
 121		(dev->parent == parent || dev == parent));
 122}
 123
 124static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
 125{
 126	struct device *dev;
 127
 128	dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
 129	if (!dev)
 130		return ERR_PTR(-ENODEV);
 131
 132	return to_wwan_dev(dev);
 133}
 134
 135static int wwan_dev_name_match(struct device *dev, const void *name)
 136{
 137	return dev->type == &wwan_dev_type &&
 138	       strcmp(dev_name(dev), name) == 0;
 139}
 140
 141static struct wwan_device *wwan_dev_get_by_name(const char *name)
 142{
 143	struct device *dev;
 144
 145	dev = class_find_device(wwan_class, NULL, name, wwan_dev_name_match);
 146	if (!dev)
 147		return ERR_PTR(-ENODEV);
 148
 149	return to_wwan_dev(dev);
 150}
 151
 152#ifdef CONFIG_WWAN_DEBUGFS
 153struct dentry *wwan_get_debugfs_dir(struct device *parent)
 154{
 155	struct wwan_device *wwandev;
 156
 157	wwandev = wwan_dev_get_by_parent(parent);
 158	if (IS_ERR(wwandev))
 159		return ERR_CAST(wwandev);
 160
 161	return wwandev->debugfs_dir;
 162}
 163EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir);
 164
 165static int wwan_dev_debugfs_match(struct device *dev, const void *dir)
 166{
 167	struct wwan_device *wwandev;
 168
 169	if (dev->type != &wwan_dev_type)
 170		return 0;
 171
 172	wwandev = to_wwan_dev(dev);
 173
 174	return wwandev->debugfs_dir == dir;
 175}
 176
 177static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
 178{
 179	struct device *dev;
 180
 181	dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
 182	if (!dev)
 183		return ERR_PTR(-ENODEV);
 184
 185	return to_wwan_dev(dev);
 186}
 187
 188void wwan_put_debugfs_dir(struct dentry *dir)
 189{
 190	struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir);
 191
 192	if (WARN_ON(IS_ERR(wwandev)))
 193		return;
 194
 195	/* wwan_dev_get_by_debugfs() also got a reference */
 196	put_device(&wwandev->dev);
 197	put_device(&wwandev->dev);
 198}
 199EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir);
 200#endif
 201
 202/* This function allocates and registers a new WWAN device OR if a WWAN device
 203 * already exist for the given parent, it gets a reference and return it.
 204 * This function is not exported (for now), it is called indirectly via
 205 * wwan_create_port().
 206 */
 207static struct wwan_device *wwan_create_dev(struct device *parent)
 208{
 209	struct wwan_device *wwandev;
 210	int err, id;
 211
 212	/* The 'find-alloc-register' operation must be protected against
 213	 * concurrent execution, a WWAN device is possibly shared between
 214	 * multiple callers or concurrently unregistered from wwan_remove_dev().
 215	 */
 216	mutex_lock(&wwan_register_lock);
 217
 218	/* If wwandev already exists, return it */
 219	wwandev = wwan_dev_get_by_parent(parent);
 220	if (!IS_ERR(wwandev))
 221		goto done_unlock;
 222
 223	id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
 224	if (id < 0) {
 225		wwandev = ERR_PTR(id);
 226		goto done_unlock;
 227	}
 228
 229	wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
 230	if (!wwandev) {
 231		wwandev = ERR_PTR(-ENOMEM);
 232		ida_free(&wwan_dev_ids, id);
 233		goto done_unlock;
 234	}
 235
 236	wwandev->dev.parent = parent;
 237	wwandev->dev.class = wwan_class;
 238	wwandev->dev.type = &wwan_dev_type;
 239	wwandev->id = id;
 240	dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);
 241
 242	err = device_register(&wwandev->dev);
 243	if (err) {
 244		put_device(&wwandev->dev);
 245		wwandev = ERR_PTR(err);
 246		goto done_unlock;
 247	}
 248
 249#ifdef CONFIG_WWAN_DEBUGFS
 250	wwandev->debugfs_dir =
 251			debugfs_create_dir(kobject_name(&wwandev->dev.kobj),
 252					   wwan_debugfs_dir);
 253#endif
 254
 255done_unlock:
 256	mutex_unlock(&wwan_register_lock);
 257
 258	return wwandev;
 259}
 260
 261static int is_wwan_child(struct device *dev, void *data)
 262{
 263	return dev->class == wwan_class;
 264}
 265
 266static void wwan_remove_dev(struct wwan_device *wwandev)
 267{
 268	int ret;
 269
 270	/* Prevent concurrent picking from wwan_create_dev */
 271	mutex_lock(&wwan_register_lock);
 272
 273	/* WWAN device is created and registered (get+add) along with its first
 274	 * child port, and subsequent port registrations only grab a reference
 275	 * (get). The WWAN device must then be unregistered (del+put) along with
 276	 * its last port, and reference simply dropped (put) otherwise. In the
 277	 * same fashion, we must not unregister it when the ops are still there.
 278	 */
 279	if (wwandev->ops)
 280		ret = 1;
 281	else
 282		ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
 283
 284	if (!ret) {
 285#ifdef CONFIG_WWAN_DEBUGFS
 286		debugfs_remove_recursive(wwandev->debugfs_dir);
 287#endif
 288		device_unregister(&wwandev->dev);
 289	} else {
 290		put_device(&wwandev->dev);
 291	}
 292
 293	mutex_unlock(&wwan_register_lock);
 294}
 295
 296/* ------- WWAN port management ------- */
 297
 298static const struct {
 299	const char * const name;	/* Port type name */
 300	const char * const devsuf;	/* Port devce name suffix */
 301} wwan_port_types[WWAN_PORT_MAX + 1] = {
 302	[WWAN_PORT_AT] = {
 303		.name = "AT",
 304		.devsuf = "at",
 305	},
 306	[WWAN_PORT_MBIM] = {
 307		.name = "MBIM",
 308		.devsuf = "mbim",
 309	},
 310	[WWAN_PORT_QMI] = {
 311		.name = "QMI",
 312		.devsuf = "qmi",
 313	},
 314	[WWAN_PORT_QCDM] = {
 315		.name = "QCDM",
 316		.devsuf = "qcdm",
 317	},
 318	[WWAN_PORT_FIREHOSE] = {
 319		.name = "FIREHOSE",
 320		.devsuf = "firehose",
 321	},
 322	[WWAN_PORT_XMMRPC] = {
 323		.name = "XMMRPC",
 324		.devsuf = "xmmrpc",
 325	},
 326};
 327
 328static ssize_t type_show(struct device *dev, struct device_attribute *attr,
 329			 char *buf)
 330{
 331	struct wwan_port *port = to_wwan_port(dev);
 332
 333	return sprintf(buf, "%s\n", wwan_port_types[port->type].name);
 334}
 335static DEVICE_ATTR_RO(type);
 336
 337static struct attribute *wwan_port_attrs[] = {
 338	&dev_attr_type.attr,
 339	NULL,
 340};
 341ATTRIBUTE_GROUPS(wwan_port);
 342
 343static void wwan_port_destroy(struct device *dev)
 344{
 345	struct wwan_port *port = to_wwan_port(dev);
 346
 347	ida_free(&minors, MINOR(port->dev.devt));
 348	mutex_destroy(&port->data_lock);
 349	mutex_destroy(&port->ops_lock);
 350	kfree(port);
 351}
 352
 353static const struct device_type wwan_port_dev_type = {
 354	.name = "wwan_port",
 355	.release = wwan_port_destroy,
 356	.groups = wwan_port_groups,
 357};
 358
 359static int wwan_port_minor_match(struct device *dev, const void *minor)
 360{
 361	return (dev->type == &wwan_port_dev_type &&
 362		MINOR(dev->devt) == *(unsigned int *)minor);
 363}
 364
 365static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
 366{
 367	struct device *dev;
 368
 369	dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
 370	if (!dev)
 371		return ERR_PTR(-ENODEV);
 372
 373	return to_wwan_port(dev);
 374}
 375
 376/* Allocate and set unique name based on passed format
 377 *
 378 * Name allocation approach is highly inspired by the __dev_alloc_name()
 379 * function.
 380 *
 381 * To avoid names collision, the caller must prevent the new port device
 382 * registration as well as concurrent invocation of this function.
 383 */
 384static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
 385{
 386	struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
 387	const unsigned int max_ports = PAGE_SIZE * 8;
 388	struct class_dev_iter iter;
 389	unsigned long *idmap;
 390	struct device *dev;
 391	char buf[0x20];
 392	int id;
 393
 394	idmap = (unsigned long *)get_zeroed_page(GFP_KERNEL);
 395	if (!idmap)
 396		return -ENOMEM;
 397
 398	/* Collect ids of same name format ports */
 399	class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type);
 400	while ((dev = class_dev_iter_next(&iter))) {
 401		if (dev->parent != &wwandev->dev)
 402			continue;
 403		if (sscanf(dev_name(dev), fmt, &id) != 1)
 404			continue;
 405		if (id < 0 || id >= max_ports)
 406			continue;
 407		set_bit(id, idmap);
 408	}
 409	class_dev_iter_exit(&iter);
 410
 411	/* Allocate unique id */
 412	id = find_first_zero_bit(idmap, max_ports);
 413	free_page((unsigned long)idmap);
 414
 415	snprintf(buf, sizeof(buf), fmt, id);	/* Name generation */
 416
 417	dev = device_find_child_by_name(&wwandev->dev, buf);
 418	if (dev) {
 419		put_device(dev);
 420		return -ENFILE;
 421	}
 422
 423	return dev_set_name(&port->dev, buf);
 424}
 425
 426struct wwan_port *wwan_create_port(struct device *parent,
 427				   enum wwan_port_type type,
 428				   const struct wwan_port_ops *ops,
 
 429				   void *drvdata)
 430{
 431	struct wwan_device *wwandev;
 432	struct wwan_port *port;
 433	char namefmt[0x20];
 434	int minor, err;
 435
 436	if (type > WWAN_PORT_MAX || !ops)
 437		return ERR_PTR(-EINVAL);
 438
 439	/* A port is always a child of a WWAN device, retrieve (allocate or
 440	 * pick) the WWAN device based on the provided parent device.
 441	 */
 442	wwandev = wwan_create_dev(parent);
 443	if (IS_ERR(wwandev))
 444		return ERR_CAST(wwandev);
 445
 446	/* A port is exposed as character device, get a minor */
 447	minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
 448	if (minor < 0) {
 449		err = minor;
 450		goto error_wwandev_remove;
 451	}
 452
 453	port = kzalloc(sizeof(*port), GFP_KERNEL);
 454	if (!port) {
 455		err = -ENOMEM;
 456		ida_free(&minors, minor);
 457		goto error_wwandev_remove;
 458	}
 459
 460	port->type = type;
 461	port->ops = ops;
 
 
 462	mutex_init(&port->ops_lock);
 463	skb_queue_head_init(&port->rxq);
 464	init_waitqueue_head(&port->waitqueue);
 465	mutex_init(&port->data_lock);
 466
 467	port->dev.parent = &wwandev->dev;
 468	port->dev.class = wwan_class;
 469	port->dev.type = &wwan_port_dev_type;
 470	port->dev.devt = MKDEV(wwan_major, minor);
 471	dev_set_drvdata(&port->dev, drvdata);
 472
 473	/* allocate unique name based on wwan device id, port type and number */
 474	snprintf(namefmt, sizeof(namefmt), "wwan%u%s%%d", wwandev->id,
 475		 wwan_port_types[port->type].devsuf);
 476
 477	/* Serialize ports registration */
 478	mutex_lock(&wwan_register_lock);
 479
 480	__wwan_port_dev_assign_name(port, namefmt);
 481	err = device_register(&port->dev);
 482
 483	mutex_unlock(&wwan_register_lock);
 484
 485	if (err)
 486		goto error_put_device;
 487
 
 488	return port;
 489
 490error_put_device:
 491	put_device(&port->dev);
 492error_wwandev_remove:
 493	wwan_remove_dev(wwandev);
 494
 495	return ERR_PTR(err);
 496}
 497EXPORT_SYMBOL_GPL(wwan_create_port);
 498
 499void wwan_remove_port(struct wwan_port *port)
 500{
 501	struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
 502
 503	mutex_lock(&port->ops_lock);
 504	if (port->start_count)
 505		port->ops->stop(port);
 506	port->ops = NULL; /* Prevent any new port operations (e.g. from fops) */
 507	mutex_unlock(&port->ops_lock);
 508
 509	wake_up_interruptible(&port->waitqueue);
 510
 511	skb_queue_purge(&port->rxq);
 512	dev_set_drvdata(&port->dev, NULL);
 
 
 513	device_unregister(&port->dev);
 514
 515	/* Release related wwan device */
 516	wwan_remove_dev(wwandev);
 517}
 518EXPORT_SYMBOL_GPL(wwan_remove_port);
 519
 520void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb)
 521{
 522	skb_queue_tail(&port->rxq, skb);
 523	wake_up_interruptible(&port->waitqueue);
 524}
 525EXPORT_SYMBOL_GPL(wwan_port_rx);
 526
 527void wwan_port_txon(struct wwan_port *port)
 528{
 529	clear_bit(WWAN_PORT_TX_OFF, &port->flags);
 530	wake_up_interruptible(&port->waitqueue);
 531}
 532EXPORT_SYMBOL_GPL(wwan_port_txon);
 533
 534void wwan_port_txoff(struct wwan_port *port)
 535{
 536	set_bit(WWAN_PORT_TX_OFF, &port->flags);
 537}
 538EXPORT_SYMBOL_GPL(wwan_port_txoff);
 539
 540void *wwan_port_get_drvdata(struct wwan_port *port)
 541{
 542	return dev_get_drvdata(&port->dev);
 543}
 544EXPORT_SYMBOL_GPL(wwan_port_get_drvdata);
 545
 546static int wwan_port_op_start(struct wwan_port *port)
 547{
 548	int ret = 0;
 549
 550	mutex_lock(&port->ops_lock);
 551	if (!port->ops) { /* Port got unplugged */
 552		ret = -ENODEV;
 553		goto out_unlock;
 554	}
 555
 556	/* If port is already started, don't start again */
 557	if (!port->start_count)
 558		ret = port->ops->start(port);
 559
 560	if (!ret)
 561		port->start_count++;
 562
 563out_unlock:
 564	mutex_unlock(&port->ops_lock);
 565
 566	return ret;
 567}
 568
 569static void wwan_port_op_stop(struct wwan_port *port)
 570{
 571	mutex_lock(&port->ops_lock);
 572	port->start_count--;
 573	if (!port->start_count) {
 574		if (port->ops)
 575			port->ops->stop(port);
 576		skb_queue_purge(&port->rxq);
 577	}
 578	mutex_unlock(&port->ops_lock);
 579}
 580
 581static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb,
 582			   bool nonblock)
 583{
 584	int ret;
 585
 586	mutex_lock(&port->ops_lock);
 587	if (!port->ops) { /* Port got unplugged */
 588		ret = -ENODEV;
 589		goto out_unlock;
 590	}
 591
 592	if (nonblock || !port->ops->tx_blocking)
 593		ret = port->ops->tx(port, skb);
 594	else
 595		ret = port->ops->tx_blocking(port, skb);
 596
 597out_unlock:
 598	mutex_unlock(&port->ops_lock);
 599
 600	return ret;
 601}
 602
 603static bool is_read_blocked(struct wwan_port *port)
 604{
 605	return skb_queue_empty(&port->rxq) && port->ops;
 606}
 607
 608static bool is_write_blocked(struct wwan_port *port)
 609{
 610	return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops;
 611}
 612
 613static int wwan_wait_rx(struct wwan_port *port, bool nonblock)
 614{
 615	if (!is_read_blocked(port))
 616		return 0;
 617
 618	if (nonblock)
 619		return -EAGAIN;
 620
 621	if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port)))
 622		return -ERESTARTSYS;
 623
 624	return 0;
 625}
 626
 627static int wwan_wait_tx(struct wwan_port *port, bool nonblock)
 628{
 629	if (!is_write_blocked(port))
 630		return 0;
 631
 632	if (nonblock)
 633		return -EAGAIN;
 634
 635	if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port)))
 636		return -ERESTARTSYS;
 637
 638	return 0;
 639}
 640
 641static int wwan_port_fops_open(struct inode *inode, struct file *file)
 642{
 643	struct wwan_port *port;
 644	int err = 0;
 645
 646	port = wwan_port_get_by_minor(iminor(inode));
 647	if (IS_ERR(port))
 648		return PTR_ERR(port);
 649
 650	file->private_data = port;
 651	stream_open(inode, file);
 652
 653	err = wwan_port_op_start(port);
 654	if (err)
 655		put_device(&port->dev);
 656
 657	return err;
 658}
 659
 660static int wwan_port_fops_release(struct inode *inode, struct file *filp)
 661{
 662	struct wwan_port *port = filp->private_data;
 663
 664	wwan_port_op_stop(port);
 665	put_device(&port->dev);
 666
 667	return 0;
 668}
 669
 670static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf,
 671				   size_t count, loff_t *ppos)
 672{
 673	struct wwan_port *port = filp->private_data;
 674	struct sk_buff *skb;
 675	size_t copied;
 676	int ret;
 677
 678	ret = wwan_wait_rx(port, !!(filp->f_flags & O_NONBLOCK));
 679	if (ret)
 680		return ret;
 681
 682	skb = skb_dequeue(&port->rxq);
 683	if (!skb)
 684		return -EIO;
 685
 686	copied = min_t(size_t, count, skb->len);
 687	if (copy_to_user(buf, skb->data, copied)) {
 688		kfree_skb(skb);
 689		return -EFAULT;
 690	}
 691	skb_pull(skb, copied);
 692
 693	/* skb is not fully consumed, keep it in the queue */
 694	if (skb->len)
 695		skb_queue_head(&port->rxq, skb);
 696	else
 697		consume_skb(skb);
 698
 699	return copied;
 700}
 701
 702static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
 703				    size_t count, loff_t *offp)
 704{
 
 705	struct wwan_port *port = filp->private_data;
 706	struct sk_buff *skb;
 707	int ret;
 708
 709	ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK));
 710	if (ret)
 711		return ret;
 712
 713	skb = alloc_skb(count, GFP_KERNEL);
 714	if (!skb)
 715		return -ENOMEM;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 716
 717	if (copy_from_user(skb_put(skb, count), buf, count)) {
 718		kfree_skb(skb);
 719		return -EFAULT;
 720	}
 
 
 721
 722	ret = wwan_port_op_tx(port, skb, !!(filp->f_flags & O_NONBLOCK));
 723	if (ret) {
 724		kfree_skb(skb);
 725		return ret;
 726	}
 727
 728	return count;
 
 
 729}
 730
 731static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
 732{
 733	struct wwan_port *port = filp->private_data;
 734	__poll_t mask = 0;
 735
 736	poll_wait(filp, &port->waitqueue, wait);
 737
 738	mutex_lock(&port->ops_lock);
 739	if (port->ops && port->ops->tx_poll)
 740		mask |= port->ops->tx_poll(port, filp, wait);
 741	else if (!is_write_blocked(port))
 742		mask |= EPOLLOUT | EPOLLWRNORM;
 743	if (!is_read_blocked(port))
 744		mask |= EPOLLIN | EPOLLRDNORM;
 745	if (!port->ops)
 746		mask |= EPOLLHUP | EPOLLERR;
 747	mutex_unlock(&port->ops_lock);
 748
 749	return mask;
 750}
 751
 752/* Implements minimalistic stub terminal IOCTLs support */
 753static long wwan_port_fops_at_ioctl(struct wwan_port *port, unsigned int cmd,
 754				    unsigned long arg)
 755{
 756	int ret = 0;
 757
 758	mutex_lock(&port->data_lock);
 759
 760	switch (cmd) {
 761	case TCFLSH:
 762		break;
 763
 764	case TCGETS:
 765		if (copy_to_user((void __user *)arg, &port->at_data.termios,
 766				 sizeof(struct termios)))
 767			ret = -EFAULT;
 768		break;
 769
 770	case TCSETS:
 771	case TCSETSW:
 772	case TCSETSF:
 773		if (copy_from_user(&port->at_data.termios, (void __user *)arg,
 774				   sizeof(struct termios)))
 775			ret = -EFAULT;
 776		break;
 777
 778#ifdef TCGETS2
 779	case TCGETS2:
 780		if (copy_to_user((void __user *)arg, &port->at_data.termios,
 781				 sizeof(struct termios2)))
 782			ret = -EFAULT;
 783		break;
 784
 785	case TCSETS2:
 786	case TCSETSW2:
 787	case TCSETSF2:
 788		if (copy_from_user(&port->at_data.termios, (void __user *)arg,
 789				   sizeof(struct termios2)))
 790			ret = -EFAULT;
 791		break;
 792#endif
 793
 794	case TIOCMGET:
 795		ret = put_user(port->at_data.mdmbits, (int __user *)arg);
 796		break;
 797
 798	case TIOCMSET:
 799	case TIOCMBIC:
 800	case TIOCMBIS: {
 801		int mdmbits;
 802
 803		if (copy_from_user(&mdmbits, (int __user *)arg, sizeof(int))) {
 804			ret = -EFAULT;
 805			break;
 806		}
 807		if (cmd == TIOCMBIC)
 808			port->at_data.mdmbits &= ~mdmbits;
 809		else if (cmd == TIOCMBIS)
 810			port->at_data.mdmbits |= mdmbits;
 811		else
 812			port->at_data.mdmbits = mdmbits;
 813		break;
 814	}
 815
 816	default:
 817		ret = -ENOIOCTLCMD;
 818	}
 819
 820	mutex_unlock(&port->data_lock);
 821
 822	return ret;
 823}
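
/* Illustrative sketch (not part of this driver): the stubs above are just
 * enough for termios-based userspace tools to run unmodified against an AT
 * port character device. Assuming an example node name of /dev/wwan0at0:
 *
 *	struct termios tio;
 *	int fd = open("/dev/wwan0at0", O_RDWR);
 *
 *	tcgetattr(fd, &tio);		// libc issues TCGETS, handled above
 *	cfmakeraw(&tio);
 *	tcsetattr(fd, TCSANOW, &tio);	// libc issues TCSETS, handled above
 *
 * The settings are only stored in at_data.termios and have no effect on the
 * underlying transport.
 */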
 824
 825static long wwan_port_fops_ioctl(struct file *filp, unsigned int cmd,
 826				 unsigned long arg)
 827{
 828	struct wwan_port *port = filp->private_data;
 829	int res;
 830
 831	if (port->type == WWAN_PORT_AT) {	/* AT port specific IOCTLs */
 832		res = wwan_port_fops_at_ioctl(port, cmd, arg);
 833		if (res != -ENOIOCTLCMD)
 834			return res;
 835	}
 836
 837	switch (cmd) {
 838	case TIOCINQ: {	/* aka SIOCINQ aka FIONREAD */
 839		unsigned long flags;
 840		struct sk_buff *skb;
 841		int amount = 0;
 842
 843		spin_lock_irqsave(&port->rxq.lock, flags);
 844		skb_queue_walk(&port->rxq, skb)
 845			amount += skb->len;
 846		spin_unlock_irqrestore(&port->rxq.lock, flags);
 847
 848		return put_user(amount, (int __user *)arg);
 849	}
 850
 851	default:
 852		return -ENOIOCTLCMD;
 853	}
 854}
 855
 856static const struct file_operations wwan_port_fops = {
 857	.owner = THIS_MODULE,
 858	.open = wwan_port_fops_open,
 859	.release = wwan_port_fops_release,
 860	.read = wwan_port_fops_read,
 861	.write = wwan_port_fops_write,
 862	.poll = wwan_port_fops_poll,
 863	.unlocked_ioctl = wwan_port_fops_ioctl,
 864#ifdef CONFIG_COMPAT
 865	.compat_ioctl = compat_ptr_ioctl,
 866#endif
 867	.llseek = noop_llseek,
 868};
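
/* Usage sketch (illustrative only): each WWAN port is exposed as a character
 * device driven by the fops above. Assuming an example node name of
 * /dev/wwan0at0, a minimal AT exchange from userspace could look like:
 *
 *	int fd = open("/dev/wwan0at0", O_RDWR);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char rsp[64];
 *
 *	write(fd, "AT\r", 3);			// -> wwan_port_fops_write()
 *	poll(&pfd, 1, 1000);			// -> wwan_port_fops_poll()
 *	ssize_t n = read(fd, rsp, sizeof(rsp));	// -> wwan_port_fops_read()
 */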
 869
 870static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
 871			      struct netlink_ext_ack *extack)
 872{
 873	if (!data)
 874		return -EINVAL;
 875
 876	if (!tb[IFLA_PARENT_DEV_NAME])
 877		return -EINVAL;
 878
 879	if (!data[IFLA_WWAN_LINK_ID])
 880		return -EINVAL;
 881
 882	return 0;
 883}
 884
 885static struct device_type wwan_type = { .name = "wwan" };
 886
 887static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
 888					  const char *ifname,
 889					  unsigned char name_assign_type,
 890					  unsigned int num_tx_queues,
 891					  unsigned int num_rx_queues)
 892{
 893	const char *devname = nla_data(tb[IFLA_PARENT_DEV_NAME]);
 894	struct wwan_device *wwandev = wwan_dev_get_by_name(devname);
 895	struct net_device *dev;
 896	unsigned int priv_size;
 897
 898	if (IS_ERR(wwandev))
 899		return ERR_CAST(wwandev);
 900
 901	/* only supported if ops were registered (not just ports) */
 902	if (!wwandev->ops) {
 903		dev = ERR_PTR(-EOPNOTSUPP);
 904		goto out;
 905	}
 906
 907	priv_size = sizeof(struct wwan_netdev_priv) + wwandev->ops->priv_size;
 908	dev = alloc_netdev_mqs(priv_size, ifname, name_assign_type,
 909			       wwandev->ops->setup, num_tx_queues, num_rx_queues);
 910
 911	if (dev) {
 912		SET_NETDEV_DEV(dev, &wwandev->dev);
 913		SET_NETDEV_DEVTYPE(dev, &wwan_type);
 914	}
 915
 916out:
 917	/* release the reference */
 918	put_device(&wwandev->dev);
 919	return dev;
 920}
 921
 922static int wwan_rtnl_newlink(struct net *src_net, struct net_device *dev,
 923			     struct nlattr *tb[], struct nlattr *data[],
 924			     struct netlink_ext_ack *extack)
 925{
 926	struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
 927	u32 link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]);
 928	struct wwan_netdev_priv *priv = netdev_priv(dev);
 929	int ret;
 930
 931	if (IS_ERR(wwandev))
 932		return PTR_ERR(wwandev);
 933
 934	/* should not be asked to create a netdev unless ops are registered, so WARN */
 935	if (WARN_ON(!wwandev->ops)) {
 936		ret = -EOPNOTSUPP;
 937		goto out;
 938	}
 939
 940	priv->link_id = link_id;
 941	if (wwandev->ops->newlink)
 942		ret = wwandev->ops->newlink(wwandev->ops_ctxt, dev,
 943					    link_id, extack);
 944	else
 945		ret = register_netdevice(dev);
 946
 947out:
 948	/* release the reference */
 949	put_device(&wwandev->dev);
 950	return ret;
 951}
 952
 953static void wwan_rtnl_dellink(struct net_device *dev, struct list_head *head)
 954{
 955	struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
 956
 957	if (IS_ERR(wwandev))
 958		return;
 959
 960	/* shouldn't have a netdev (left) with us as parent so WARN */
 961	if (WARN_ON(!wwandev->ops))
 962		goto out;
 963
 964	if (wwandev->ops->dellink)
 965		wwandev->ops->dellink(wwandev->ops_ctxt, dev, head);
 966	else
 967		unregister_netdevice_queue(dev, head);
 968
 969out:
 970	/* release the reference */
 971	put_device(&wwandev->dev);
 972}
 973
 974static size_t wwan_rtnl_get_size(const struct net_device *dev)
 975{
 976	return
 977		nla_total_size(4) +	/* IFLA_WWAN_LINK_ID */
 978		0;
 979}
 980
 981static int wwan_rtnl_fill_info(struct sk_buff *skb,
 982			       const struct net_device *dev)
 983{
 984	struct wwan_netdev_priv *priv = netdev_priv(dev);
 985
 986	if (nla_put_u32(skb, IFLA_WWAN_LINK_ID, priv->link_id))
 987		goto nla_put_failure;
 988
 989	return 0;
 990
 991nla_put_failure:
 992	return -EMSGSIZE;
 993}
 994
 995static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = {
 996	[IFLA_WWAN_LINK_ID] = { .type = NLA_U32 },
 997};
 998
 999static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = {
1000	.kind = "wwan",
1001	.maxtype = __IFLA_WWAN_MAX,
1002	.alloc = wwan_rtnl_alloc,
1003	.validate = wwan_rtnl_validate,
1004	.newlink = wwan_rtnl_newlink,
1005	.dellink = wwan_rtnl_dellink,
1006	.get_size = wwan_rtnl_get_size,
1007	.fill_info = wwan_rtnl_fill_info,
1008	.policy = wwan_rtnl_policy,
1009};
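
/* Example (illustrative; exact iproute2 syntax may vary by version): with
 * these link ops registered, userspace can create additional data links on
 * top of a WWAN device with something like:
 *
 *	ip link add dev wwan0-1 parentdev wwan0 type wwan linkid 1
 *
 * which reaches wwan_rtnl_validate()/wwan_rtnl_alloc()/wwan_rtnl_newlink()
 * with IFLA_PARENT_DEV_NAME = "wwan0" and IFLA_WWAN_LINK_ID = 1. The same
 * attributes are forged internally by wwan_create_default_link() below.
 */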
1010
1011static void wwan_create_default_link(struct wwan_device *wwandev,
1012				     u32 def_link_id)
1013{
1014	struct nlattr *tb[IFLA_MAX + 1], *linkinfo[IFLA_INFO_MAX + 1];
1015	struct nlattr *data[IFLA_WWAN_MAX + 1];
1016	struct net_device *dev;
1017	struct nlmsghdr *nlh;
1018	struct sk_buff *msg;
1019
1020	/* Forge attributes required to create a WWAN netdev. We first
1021	 * build a netlink message and then parse it. This looks
1022	 * odd, but such an approach is less error-prone.
1023	 */
1024	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1025	if (WARN_ON(!msg))
1026		return;
1027	nlh = nlmsg_put(msg, 0, 0, RTM_NEWLINK, 0, 0);
1028	if (WARN_ON(!nlh))
1029		goto free_attrs;
1030
1031	if (nla_put_string(msg, IFLA_PARENT_DEV_NAME, dev_name(&wwandev->dev)))
1032		goto free_attrs;
1033	tb[IFLA_LINKINFO] = nla_nest_start(msg, IFLA_LINKINFO);
1034	if (!tb[IFLA_LINKINFO])
1035		goto free_attrs;
1036	linkinfo[IFLA_INFO_DATA] = nla_nest_start(msg, IFLA_INFO_DATA);
1037	if (!linkinfo[IFLA_INFO_DATA])
1038		goto free_attrs;
1039	if (nla_put_u32(msg, IFLA_WWAN_LINK_ID, def_link_id))
1040		goto free_attrs;
1041	nla_nest_end(msg, linkinfo[IFLA_INFO_DATA]);
1042	nla_nest_end(msg, tb[IFLA_LINKINFO]);
1043
1044	nlmsg_end(msg, nlh);
1045
1046	/* The next three parsing calls cannot fail */
1047	nlmsg_parse_deprecated(nlh, 0, tb, IFLA_MAX, NULL, NULL);
1048	nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, tb[IFLA_LINKINFO],
1049				    NULL, NULL);
1050	nla_parse_nested_deprecated(data, IFLA_WWAN_MAX,
1051				    linkinfo[IFLA_INFO_DATA], NULL, NULL);
1052
1053	rtnl_lock();
1054
1055	dev = rtnl_create_link(&init_net, "wwan%d", NET_NAME_ENUM,
1056			       &wwan_rtnl_link_ops, tb, NULL);
1057	if (WARN_ON(IS_ERR(dev)))
1058		goto unlock;
1059
1060	if (WARN_ON(wwan_rtnl_newlink(&init_net, dev, tb, data, NULL))) {
1061		free_netdev(dev);
1062		goto unlock;
1063	}
1064
1065	rtnl_configure_link(dev, NULL, 0, NULL); /* Link initialized, notify new link */
1066
1067unlock:
1068	rtnl_unlock();
1069
1070free_attrs:
1071	nlmsg_free(msg);
1072}
1073
1074/**
1075 * wwan_register_ops - register WWAN device ops
1076 * @parent: Device to use as parent and shared by all WWAN ports and
1077 *	created netdevs
1078 * @ops: operations to register
1079 * @ctxt: context to pass to operations
1080 * @def_link_id: id of the default link that will be automatically created by
1081 *	the WWAN core for the WWAN device. The default link will not be created
1082 *	if the passed value is WWAN_NO_DEFAULT_LINK.
1083 *
1084 * Returns: 0 on success, a negative error code on failure
1085 */
1086int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
1087		      void *ctxt, u32 def_link_id)
1088{
1089	struct wwan_device *wwandev;
1090
1091	if (WARN_ON(!parent || !ops || !ops->setup))
1092		return -EINVAL;
1093
1094	wwandev = wwan_create_dev(parent);
1095	if (IS_ERR(wwandev))
1096		return PTR_ERR(wwandev);
1097
1098	if (WARN_ON(wwandev->ops)) {
1099		wwan_remove_dev(wwandev);
1100		return -EBUSY;
1101	}
1102
1103	wwandev->ops = ops;
1104	wwandev->ops_ctxt = ctxt;
1105
1106	/* NB: we do not abort ops registration in case of default link
1107	 * creation failure. Link ops are the management interface, while the
1108	 * default link creation is a service option, and we should not
1109	 * prevent a user from manually creating a link later if that service
1110	 * option failed now.
1111	 */
1112	if (def_link_id != WWAN_NO_DEFAULT_LINK)
1113		wwan_create_default_link(wwandev, def_link_id);
1114
1115	return 0;
1116}
1117EXPORT_SYMBOL_GPL(wwan_register_ops);
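
/* Registration sketch (illustrative; all my_* names below are hypothetical):
 * a modem driver that supports netdev links would typically call this from
 * its probe path:
 *
 *	static const struct wwan_ops my_wwan_ops = {
 *		.priv_size = sizeof(struct my_link_priv),
 *		.setup = my_netdev_setup,
 *		.newlink = my_newlink,
 *		.dellink = my_dellink,
 *	};
 *
 *	err = wwan_register_ops(parent_dev, &my_wwan_ops, drv_ctxt, 0);
 *
 * newlink/dellink are optional; when absent, the core falls back to plain
 * register_netdevice()/unregister_netdevice_queue() as seen above. Passing
 * WWAN_NO_DEFAULT_LINK instead of 0 skips default link creation.
 */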
1118
1119/* Enqueue child netdev deletion */
1120static int wwan_child_dellink(struct device *dev, void *data)
1121{
1122	struct list_head *kill_list = data;
1123
1124	if (dev->type == &wwan_type)
1125		wwan_rtnl_dellink(to_net_dev(dev), kill_list);
1126
1127	return 0;
1128}
1129
1130/**
1131 * wwan_unregister_ops - remove WWAN device ops
1132 * @parent: Device to use as parent and shared by all WWAN ports and
1133 *	created netdevs
1134 */
1135void wwan_unregister_ops(struct device *parent)
1136{
1137	struct wwan_device *wwandev = wwan_dev_get_by_parent(parent);
1138	LIST_HEAD(kill_list);
1139
1140	if (WARN_ON(IS_ERR(wwandev)))
1141		return;
1142	if (WARN_ON(!wwandev->ops)) {
1143		put_device(&wwandev->dev);
1144		return;
1145	}
1146
1147	/* Put the reference obtained by wwan_dev_get_by_parent();
1148	 * we should still hold one (the one the owner is giving back
1149	 * now) because ops were assigned.
1150	 */
1151	put_device(&wwandev->dev);
1152
1153	rtnl_lock();	/* Prevent concurrent netdev(s) creation/destruction */
1154
1155	/* Remove all child netdev(s), using batch removal */
1156	device_for_each_child(&wwandev->dev, &kill_list,
1157			      wwan_child_dellink);
1158	unregister_netdevice_many(&kill_list);
1159
1160	wwandev->ops = NULL;	/* Finally remove ops */
1161
1162	rtnl_unlock();
1163
1164	wwandev->ops_ctxt = NULL;
1165	wwan_remove_dev(wwandev);
1166}
1167EXPORT_SYMBOL_GPL(wwan_unregister_ops);
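
/* Teardown sketch (illustrative, matching the registration sketch above):
 * the same driver undoes the registration from its remove path, which also
 * removes any child netdevs still parented to the device:
 *
 *	wwan_unregister_ops(parent_dev);
 */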
1168
1169static int __init wwan_init(void)
1170{
1171	int err;
1172
1173	err = rtnl_link_register(&wwan_rtnl_link_ops);
1174	if (err)
1175		return err;
1176
1177	wwan_class = class_create(THIS_MODULE, "wwan");
1178	if (IS_ERR(wwan_class)) {
1179		err = PTR_ERR(wwan_class);
1180		goto unregister;
1181	}
1182
1183	/* chrdev used for wwan ports */
1184	wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port",
1185				       &wwan_port_fops);
1186	if (wwan_major < 0) {
1187		err = wwan_major;
1188		goto destroy;
1189	}
1190
1191#ifdef CONFIG_WWAN_DEBUGFS
1192	wwan_debugfs_dir = debugfs_create_dir("wwan", NULL);
1193#endif
1194
1195	return 0;
1196
1197destroy:
1198	class_destroy(wwan_class);
1199unregister:
1200	rtnl_link_unregister(&wwan_rtnl_link_ops);
1201	return err;
1202}
1203
1204static void __exit wwan_exit(void)
1205{
1206	debugfs_remove_recursive(wwan_debugfs_dir);
1207	__unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
1208	rtnl_link_unregister(&wwan_rtnl_link_ops);
1209	class_destroy(wwan_class);
1210}
1211
1212module_init(wwan_init);
1213module_exit(wwan_exit);
1214
1215MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
1216MODULE_DESCRIPTION("WWAN core");
1217MODULE_LICENSE("GPL v2");