net/9p/trans_fd.c (v3.1)
   1/*
   2 * linux/fs/9p/trans_fd.c
   3 *
   4 * Fd transport layer.  Includes deprecated socket layer.
   5 *
   6 *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
   7 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
   8 *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
   9 *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
  10 *
  11 *  This program is free software; you can redistribute it and/or modify
  12 *  it under the terms of the GNU General Public License version 2
  13 *  as published by the Free Software Foundation.
  14 *
  15 *  This program is distributed in the hope that it will be useful,
  16 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 *  GNU General Public License for more details.
  19 *
  20 *  You should have received a copy of the GNU General Public License
  21 *  along with this program; if not, write to:
  22 *  Free Software Foundation
  23 *  51 Franklin Street, Fifth Floor
  24 *  Boston, MA  02111-1301  USA
  25 *
  26 */
  27
  28#include <linux/in.h>
  29#include <linux/module.h>
  30#include <linux/net.h>
  31#include <linux/ipv6.h>
  32#include <linux/kthread.h>
  33#include <linux/errno.h>
  34#include <linux/kernel.h>
  35#include <linux/un.h>
  36#include <linux/uaccess.h>
  37#include <linux/inet.h>
  38#include <linux/idr.h>
  39#include <linux/file.h>
  40#include <linux/parser.h>
  41#include <linux/slab.h>
  42#include <net/9p/9p.h>
  43#include <net/9p/client.h>
  44#include <net/9p/transport.h>
  45
  46#include <linux/syscalls.h> /* killme */
  47
  48#define P9_PORT 564
  49#define MAX_SOCK_BUF (64*1024)
  50#define MAXPOLLWADDR	2
  51
  52/**
  53 * struct p9_fd_opts - per-transport options
  54 * @rfd: file descriptor for reading (trans=fd)
  55 * @wfd: file descriptor for writing (trans=fd)
  56 * @port: port to connect to (trans=tcp)
  57 *
  58 */
  59
  60struct p9_fd_opts {
  61	int rfd;
  62	int wfd;
  63	u16 port;
  64};
  65
  66/**
  67 * struct p9_trans_fd - transport state
  68 * @rd: reference to file to read from
  69 * @wr: reference of file to write to
  70 * @conn: connection state reference
  71 *
  72 */
  73
  74struct p9_trans_fd {
  75	struct file *rd;
  76	struct file *wr;
  77	struct p9_conn *conn;
  78};
  79
  80/*
  81  * Option Parsing (code inspired by NFS code)
  82  *  - a little lazy - parse all fd-transport options
  83  */
  84
  85enum {
  86	/* Options that take integer arguments */
  87	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
  88};
  89
  90static const match_table_t tokens = {
  91	{Opt_port, "port=%u"},
  92	{Opt_rfdno, "rfdno=%u"},
  93	{Opt_wfdno, "wfdno=%u"},
  94	{Opt_err, NULL},
  95};
  96
  97enum {
  98	Rworksched = 1,		/* read work scheduled or running */
  99	Rpending = 2,		/* can read */
 100	Wworksched = 4,		/* write work scheduled or running */
 101	Wpending = 8,		/* can write */
 102};
 103
 104struct p9_poll_wait {
 105	struct p9_conn *conn;
 106	wait_queue_t wait;
 107	wait_queue_head_t *wait_addr;
 108};
 109
 110/**
 111 * struct p9_conn - fd mux connection state information
 112 * @mux_list: list link for mux to manage multiple connections (?)
 113 * @client: reference to client instance for this connection
 114 * @err: error state
 115 * @req_list: accounting for requests which have been sent
 116 * @unsent_req_list: accounting for requests that haven't been sent
 117 * @req: current request being processed (if any)
 118 * @tmp_buf: temporary buffer to read in header
 119 * @rsize: amount to read for current frame
 120 * @rpos: read position in current frame
 121 * @rbuf: current read buffer
 122 * @wpos: write position for current frame
 123 * @wsize: amount of data to write for current frame
 124 * @wbuf: current write buffer
 125 * @poll_pending_link: pending links to be polled per conn
 126 * @poll_wait: array of wait_q's for various worker threads
 127 * @pt: poll state
 128 * @rq: current read work
 129 * @wq: current write work
 130 * @wsched: ????
 131 *
 132 */
 133
 134struct p9_conn {
 135	struct list_head mux_list;
 136	struct p9_client *client;
 137	int err;
 138	struct list_head req_list;
 139	struct list_head unsent_req_list;
 140	struct p9_req_t *req;
 141	char tmp_buf[7];
 142	int rsize;
 143	int rpos;
 144	char *rbuf;
 145	int wpos;
 146	int wsize;
 147	char *wbuf;
 148	struct list_head poll_pending_link;
 149	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
 150	poll_table pt;
 151	struct work_struct rq;
 152	struct work_struct wq;
 153	unsigned long wsched;
 154};
 155
 156static void p9_poll_workfn(struct work_struct *work);
 157
 158static DEFINE_SPINLOCK(p9_poll_lock);
 159static LIST_HEAD(p9_poll_pending_list);
 160static DECLARE_WORK(p9_poll_work, p9_poll_workfn);
 161
 162static void p9_mux_poll_stop(struct p9_conn *m)
 163{
 164	unsigned long flags;
 165	int i;
 166
 167	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
 168		struct p9_poll_wait *pwait = &m->poll_wait[i];
 169
 170		if (pwait->wait_addr) {
 171			remove_wait_queue(pwait->wait_addr, &pwait->wait);
 172			pwait->wait_addr = NULL;
 173		}
 174	}
 175
 176	spin_lock_irqsave(&p9_poll_lock, flags);
 177	list_del_init(&m->poll_pending_link);
 178	spin_unlock_irqrestore(&p9_poll_lock, flags);
 179}
 180
 181/**
 182 * p9_conn_cancel - cancel all pending requests with error
 183 * @m: mux data
 184 * @err: error code
 185 *
 186 */
 187
 188static void p9_conn_cancel(struct p9_conn *m, int err)
 189{
 190	struct p9_req_t *req, *rtmp;
 191	unsigned long flags;
 192	LIST_HEAD(cancel_list);
 193
 194	P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
 195
 196	spin_lock_irqsave(&m->client->lock, flags);
 197
 198	if (m->err) {
 199		spin_unlock_irqrestore(&m->client->lock, flags);
 200		return;
 201	}
 202
 203	m->err = err;
 204
 205	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
 206		req->status = REQ_STATUS_ERROR;
 207		if (!req->t_err)
 208			req->t_err = err;
 209		list_move(&req->req_list, &cancel_list);
 210	}
 211	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
 212		req->status = REQ_STATUS_ERROR;
 213		if (!req->t_err)
 214			req->t_err = err;
 215		list_move(&req->req_list, &cancel_list);
 216	}
 217	spin_unlock_irqrestore(&m->client->lock, flags);
 218
 219	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
 220		P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req);
 221		list_del(&req->req_list);
 222		p9_client_cb(m->client, req);
 223	}
 224}
 225
 226static int
 227p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
 228{
 229	int ret, n;
 230	struct p9_trans_fd *ts = NULL;
 231
 232	if (client && client->status == Connected)
 233		ts = client->trans;
 234
 235	if (!ts)
 236		return -EREMOTEIO;
 237
 238	if (!ts->rd->f_op || !ts->rd->f_op->poll)
 239		return -EIO;
 240
 241	if (!ts->wr->f_op || !ts->wr->f_op->poll)
 242		return -EIO;
 243
 244	ret = ts->rd->f_op->poll(ts->rd, pt);
 245	if (ret < 0)
 246		return ret;
 247
 248	if (ts->rd != ts->wr) {
 249		n = ts->wr->f_op->poll(ts->wr, pt);
 250		if (n < 0)
 251			return n;
 252		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
 253	}
 254
 255	return ret;
 256}
 257
 258/**
 259 * p9_fd_read- read from a fd
 260 * @client: client instance
 261 * @v: buffer to receive data into
 262 * @len: size of receive buffer
 263 *
 264 */
 265
 266static int p9_fd_read(struct p9_client *client, void *v, int len)
 267{
 268	int ret;
 269	struct p9_trans_fd *ts = NULL;
 270
 271	if (client && client->status != Disconnected)
 272		ts = client->trans;
 273
 274	if (!ts)
 275		return -EREMOTEIO;
 276
 277	if (!(ts->rd->f_flags & O_NONBLOCK))
 278		P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");
 279
 280	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
 281	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
 282		client->status = Disconnected;
 283	return ret;
 284}
 285
 286/**
 287 * p9_read_work - called when there is some data to be read from a transport
 288 * @work: container of work to be done
 289 *
 290 */
 291
 292static void p9_read_work(struct work_struct *work)
 293{
 294	int n, err;
 295	struct p9_conn *m;
 296
 297	m = container_of(work, struct p9_conn, rq);
 298
 299	if (m->err < 0)
 300		return;
 301
 302	P9_DPRINTK(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);
 303
 304	if (!m->rbuf) {
 305		m->rbuf = m->tmp_buf;
 306		m->rpos = 0;
 307		m->rsize = 7; /* start by reading header */
 308	}
 309
 310	clear_bit(Rpending, &m->wsched);
 311	P9_DPRINTK(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n", m,
 312					m->rpos, m->rsize, m->rsize-m->rpos);
 313	err = p9_fd_read(m->client, m->rbuf + m->rpos,
 314						m->rsize - m->rpos);
 315	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
 316	if (err == -EAGAIN) {
 317		clear_bit(Rworksched, &m->wsched);
 318		return;
 319	}
 320
 321	if (err <= 0)
 322		goto error;
 323
 324	m->rpos += err;
 325
 326	if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
 327		u16 tag;
 328		P9_DPRINTK(P9_DEBUG_TRANS, "got new header\n");
 329
 330		n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
 331		if (n >= m->client->msize) {
 332			P9_DPRINTK(P9_DEBUG_ERROR,
 333				"requested packet size too big: %d\n", n);
 334			err = -EIO;
 335			goto error;
 336		}
 337
 338		tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
 339		P9_DPRINTK(P9_DEBUG_TRANS,
 340			"mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
 341
 342		m->req = p9_tag_lookup(m->client, tag);
 343		if (!m->req || (m->req->status != REQ_STATUS_SENT &&
 344					m->req->status != REQ_STATUS_FLSH)) {
 345			P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
 346								 tag);
 347			err = -EIO;
 348			goto error;
 349		}
 350
 351		if (m->req->rc == NULL) {
 352			m->req->rc = kmalloc(sizeof(struct p9_fcall) +
 353						m->client->msize, GFP_NOFS);
 354			if (!m->req->rc) {
 355				m->req = NULL;
 356				err = -ENOMEM;
 357				goto error;
 358			}
 359		}
 360		m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
 361		memcpy(m->rbuf, m->tmp_buf, m->rsize);
 362		m->rsize = n;
 363	}
 364
 365	/* not an else because some packets (like clunk) have no payload */
 366	if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
 367		P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n");
 368		spin_lock(&m->client->lock);
 369		if (m->req->status != REQ_STATUS_ERROR)
 370			m->req->status = REQ_STATUS_RCVD;
 371		list_del(&m->req->req_list);
 372		spin_unlock(&m->client->lock);
 373		p9_client_cb(m->client, m->req);
 374		m->rbuf = NULL;
 375		m->rpos = 0;
 376		m->rsize = 0;
 377		m->req = NULL;
 378	}
 379
 380	if (!list_empty(&m->req_list)) {
 381		if (test_and_clear_bit(Rpending, &m->wsched))
 382			n = POLLIN;
 383		else
 384			n = p9_fd_poll(m->client, NULL);
 385
 386		if (n & POLLIN) {
 387			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
 388			schedule_work(&m->rq);
 389		} else
 390			clear_bit(Rworksched, &m->wsched);
 391	} else
 392		clear_bit(Rworksched, &m->wsched);
 393
 394	return;
 395error:
 396	p9_conn_cancel(m, err);
 397	clear_bit(Rworksched, &m->wsched);
 398}
 399
 400/**
 401 * p9_fd_write - write to a socket
 402 * @client: client instance
 403 * @v: buffer to send data from
 404 * @len: size of send buffer
 405 *
 406 */
 407
 408static int p9_fd_write(struct p9_client *client, void *v, int len)
 409{
 410	int ret;
 411	mm_segment_t oldfs;
 412	struct p9_trans_fd *ts = NULL;
 413
 414	if (client && client->status != Disconnected)
 415		ts = client->trans;
 416
 417	if (!ts)
 418		return -EREMOTEIO;
 419
 420	if (!(ts->wr->f_flags & O_NONBLOCK))
 421		P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");
 422
 423	oldfs = get_fs();
 424	set_fs(get_ds());
 425	/* The cast to a user pointer is valid due to the set_fs() */
 426	ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
 427	set_fs(oldfs);
 428
 429	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
 430		client->status = Disconnected;
 431	return ret;
 432}
 433
 434/**
 435 * p9_write_work - called when a transport can send some data
 436 * @work: container for work to be done
 437 *
 438 */
 439
 440static void p9_write_work(struct work_struct *work)
 441{
 442	int n, err;
 443	struct p9_conn *m;
 444	struct p9_req_t *req;
 445
 446	m = container_of(work, struct p9_conn, wq);
 447
 448	if (m->err < 0) {
 449		clear_bit(Wworksched, &m->wsched);
 450		return;
 451	}
 452
 453	if (!m->wsize) {
 454		if (list_empty(&m->unsent_req_list)) {
 455			clear_bit(Wworksched, &m->wsched);
 456			return;
 457		}
 458
 459		spin_lock(&m->client->lock);
 460		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
 461			       req_list);
 462		req->status = REQ_STATUS_SENT;
 463		P9_DPRINTK(P9_DEBUG_TRANS, "move req %p\n", req);
 464		list_move_tail(&req->req_list, &m->req_list);
 465
 466		m->wbuf = req->tc->sdata;
 467		m->wsize = req->tc->size;
 468		m->wpos = 0;
 469		spin_unlock(&m->client->lock);
 470	}
 471
 472	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", m, m->wpos,
 473								m->wsize);
 474	clear_bit(Wpending, &m->wsched);
 475	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
 476	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
 477	if (err == -EAGAIN) {
 478		clear_bit(Wworksched, &m->wsched);
 479		return;
 480	}
 481
 482	if (err < 0)
 483		goto error;
 484	else if (err == 0) {
 485		err = -EREMOTEIO;
 486		goto error;
 487	}
 488
 489	m->wpos += err;
 490	if (m->wpos == m->wsize)
 491		m->wpos = m->wsize = 0;
 492
 493	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
 494		if (test_and_clear_bit(Wpending, &m->wsched))
 495			n = POLLOUT;
 496		else
 497			n = p9_fd_poll(m->client, NULL);
 498
 499		if (n & POLLOUT) {
 500			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
 501			schedule_work(&m->wq);
 502		} else
 503			clear_bit(Wworksched, &m->wsched);
 504	} else
 505		clear_bit(Wworksched, &m->wsched);
 506
 507	return;
 508
 509error:
 510	p9_conn_cancel(m, err);
 511	clear_bit(Wworksched, &m->wsched);
 512}
 513
 514static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
 515{
 516	struct p9_poll_wait *pwait =
 517		container_of(wait, struct p9_poll_wait, wait);
 518	struct p9_conn *m = pwait->conn;
 519	unsigned long flags;
 520
 521	spin_lock_irqsave(&p9_poll_lock, flags);
 522	if (list_empty(&m->poll_pending_link))
 523		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
 524	spin_unlock_irqrestore(&p9_poll_lock, flags);
 525
 526	schedule_work(&p9_poll_work);
 527	return 1;
 528}
 529
 530/**
 531 * p9_pollwait - add poll task to the wait queue
 532 * @filp: file pointer being polled
 533 * @wait_address: wait_q to block on
 534 * @p: poll state
 535 *
 536 * called by files poll operation to add v9fs-poll task to files wait queue
 537 */
 538
 539static void
 540p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
 541{
 542	struct p9_conn *m = container_of(p, struct p9_conn, pt);
 543	struct p9_poll_wait *pwait = NULL;
 544	int i;
 545
 546	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
 547		if (m->poll_wait[i].wait_addr == NULL) {
 548			pwait = &m->poll_wait[i];
 549			break;
 550		}
 551	}
 552
 553	if (!pwait) {
 554		P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
 555		return;
 556	}
 557
 558	pwait->conn = m;
 559	pwait->wait_addr = wait_address;
 560	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
 561	add_wait_queue(wait_address, &pwait->wait);
 562}
 563
 564/**
 565 * p9_conn_create - allocate and initialize the per-session mux data
 566 * @client: client instance
 567 *
 568 * Note: Creates the polling task if this is the first session.
 569 */
 570
 571static struct p9_conn *p9_conn_create(struct p9_client *client)
 572{
 573	int n;
 574	struct p9_conn *m;
 575
 576	P9_DPRINTK(P9_DEBUG_TRANS, "client %p msize %d\n", client,
 577								client->msize);
 578	m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
 579	if (!m)
 580		return ERR_PTR(-ENOMEM);
 581
 582	INIT_LIST_HEAD(&m->mux_list);
 583	m->client = client;
 584
 585	INIT_LIST_HEAD(&m->req_list);
 586	INIT_LIST_HEAD(&m->unsent_req_list);
 587	INIT_WORK(&m->rq, p9_read_work);
 588	INIT_WORK(&m->wq, p9_write_work);
 589	INIT_LIST_HEAD(&m->poll_pending_link);
 590	init_poll_funcptr(&m->pt, p9_pollwait);
 591
 592	n = p9_fd_poll(client, &m->pt);
 593	if (n & POLLIN) {
 594		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
 595		set_bit(Rpending, &m->wsched);
 596	}
 597
 598	if (n & POLLOUT) {
 599		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
 600		set_bit(Wpending, &m->wsched);
 601	}
 602
 603	return m;
 604}
 605
 606/**
 607 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 608 * @m: connection to poll
 609 *
 610 */
 611
 612static void p9_poll_mux(struct p9_conn *m)
 613{
 614	int n;
 615
 616	if (m->err < 0)
 617		return;
 618
 619	n = p9_fd_poll(m->client, NULL);
 620	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
 621		P9_DPRINTK(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
 622		if (n >= 0)
 623			n = -ECONNRESET;
 624		p9_conn_cancel(m, n);
 625	}
 626
 627	if (n & POLLIN) {
 628		set_bit(Rpending, &m->wsched);
 629		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
 630		if (!test_and_set_bit(Rworksched, &m->wsched)) {
 631			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
 632			schedule_work(&m->rq);
 633		}
 634	}
 635
 636	if (n & POLLOUT) {
 637		set_bit(Wpending, &m->wsched);
 638		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
 639		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
 640		    !test_and_set_bit(Wworksched, &m->wsched)) {
 641			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
 642			schedule_work(&m->wq);
 643		}
 644	}
 645}
 646
 647/**
 648 * p9_fd_request - send 9P request
 649 * The function can sleep until the request is scheduled for sending.
 650 * The function can be interrupted. Return from the function is not
 651 * a guarantee that the request is sent successfully.
 652 *
 653 * @client: client instance
 654 * @req: request to be sent
 655 *
 656 */
 657
 658static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 659{
 660	int n;
 661	struct p9_trans_fd *ts = client->trans;
 662	struct p9_conn *m = ts->conn;
 663
 664	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", m,
 665						current, req->tc, req->tc->id);
 666	if (m->err < 0)
 667		return m->err;
 668
 669	spin_lock(&client->lock);
 670	req->status = REQ_STATUS_UNSENT;
 671	list_add_tail(&req->req_list, &m->unsent_req_list);
 672	spin_unlock(&client->lock);
 673
 674	if (test_and_clear_bit(Wpending, &m->wsched))
 675		n = POLLOUT;
 676	else
 677		n = p9_fd_poll(m->client, NULL);
 678
 679	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
 680		schedule_work(&m->wq);
 681
 682	return 0;
 683}
 684
 685static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
 686{
 687	int ret = 1;
 688
 689	P9_DPRINTK(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
 690
 691	spin_lock(&client->lock);
 692
 693	if (req->status == REQ_STATUS_UNSENT) {
 694		list_del(&req->req_list);
 695		req->status = REQ_STATUS_FLSHD;
 696		ret = 0;
 697	} else if (req->status == REQ_STATUS_SENT)
 698		req->status = REQ_STATUS_FLSH;
 699
 700	spin_unlock(&client->lock);
 701
 702	return ret;
 703}
 704
 705/**
 706 * parse_opts - parse mount options into p9_fd_opts structure
 707 * @params: options string passed from mount
 708 * @opts: fd transport-specific structure to parse options into
 709 *
 710 * Returns 0 upon success, -ERRNO upon failure
 711 */
 712
 713static int parse_opts(char *params, struct p9_fd_opts *opts)
 714{
 715	char *p;
 716	substring_t args[MAX_OPT_ARGS];
 717	int option;
 718	char *options, *tmp_options;
 719
 720	opts->port = P9_PORT;
 721	opts->rfd = ~0;
 722	opts->wfd = ~0;
 723
 724	if (!params)
 725		return 0;
 726
 727	tmp_options = kstrdup(params, GFP_KERNEL);
 728	if (!tmp_options) {
 729		P9_DPRINTK(P9_DEBUG_ERROR,
 730				"failed to allocate copy of option string\n");
 731		return -ENOMEM;
 732	}
 733	options = tmp_options;
 734
 735	while ((p = strsep(&options, ",")) != NULL) {
 736		int token;
 737		int r;
 738		if (!*p)
 739			continue;
 740		token = match_token(p, tokens, args);
 741		if (token != Opt_err) {
 742			r = match_int(&args[0], &option);
 743			if (r < 0) {
 744				P9_DPRINTK(P9_DEBUG_ERROR,
 745				"integer field, but no integer?\n");
 746				continue;
 747			}
 748		}
 749		switch (token) {
 750		case Opt_port:
 751			opts->port = option;
 752			break;
 753		case Opt_rfdno:
 754			opts->rfd = option;
 755			break;
 756		case Opt_wfdno:
 757			opts->wfd = option;
 758			break;
 759		default:
 760			continue;
 761		}
 762	}
 763
 764	kfree(tmp_options);
 765	return 0;
 766}
 767
 768static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
 769{
 770	struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
 771					   GFP_KERNEL);
 772	if (!ts)
 773		return -ENOMEM;
 774
 775	ts->rd = fget(rfd);
 776	ts->wr = fget(wfd);
 777	if (!ts->rd || !ts->wr) {
 778		if (ts->rd)
 779			fput(ts->rd);
 780		if (ts->wr)
 781			fput(ts->wr);
 782		kfree(ts);
 783		return -EIO;
 784	}
 785
 786	client->trans = ts;
 787	client->status = Connected;
 788
 789	return 0;
 790}
 791
 792static int p9_socket_open(struct p9_client *client, struct socket *csocket)
 793{
 794	struct p9_trans_fd *p;
 795	int ret, fd;
 796
 797	p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
 798	if (!p)
 799		return -ENOMEM;
 800
 801	csocket->sk->sk_allocation = GFP_NOIO;
 802	fd = sock_map_fd(csocket, 0);
 803	if (fd < 0) {
 804		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
 805		sock_release(csocket);
 806		kfree(p);
 807		return fd;
 808	}
 809
 810	get_file(csocket->file);
 811	get_file(csocket->file);
 812	p->wr = p->rd = csocket->file;
 813	client->trans = p;
 814	client->status = Connected;
 815
 816	sys_close(fd);	/* still racy */
 817
 818	p->rd->f_flags |= O_NONBLOCK;
 819
 820	p->conn = p9_conn_create(client);
 821	if (IS_ERR(p->conn)) {
 822		ret = PTR_ERR(p->conn);
 823		p->conn = NULL;
 824		kfree(p);
 825		sockfd_put(csocket);
 826		sockfd_put(csocket);
 827		return ret;
 828	}
 829	return 0;
 830}
 831
 832/**
 833 * p9_mux_destroy - cancels all pending requests and frees mux resources
 834 * @m: mux to destroy
 835 *
 836 */
 837
 838static void p9_conn_destroy(struct p9_conn *m)
 839{
 840	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p prev %p next %p\n", m,
 841		m->mux_list.prev, m->mux_list.next);
 842
 843	p9_mux_poll_stop(m);
 844	cancel_work_sync(&m->rq);
 845	cancel_work_sync(&m->wq);
 846
 847	p9_conn_cancel(m, -ECONNRESET);
 848
 849	m->client = NULL;
 850	kfree(m);
 851}
 852
 853/**
 854 * p9_fd_close - shutdown file descriptor transport
 855 * @client: client instance
 856 *
 857 */
 858
 859static void p9_fd_close(struct p9_client *client)
 860{
 861	struct p9_trans_fd *ts;
 862
 863	if (!client)
 864		return;
 865
 866	ts = client->trans;
 867	if (!ts)
 868		return;
 869
 870	client->status = Disconnected;
 871
 872	p9_conn_destroy(ts->conn);
 873
 874	if (ts->rd)
 875		fput(ts->rd);
 876	if (ts->wr)
 877		fput(ts->wr);
 878
 879	kfree(ts);
 880}
 881
 882/*
 883 * stolen from NFS - maybe should be made a generic function?
 884 */
 885static inline int valid_ipaddr4(const char *buf)
 886{
 887	int rc, count, in[4];
 888
 889	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
 890	if (rc != 4)
 891		return -EINVAL;
 892	for (count = 0; count < 4; count++) {
 893		if (in[count] > 255)
 894			return -EINVAL;
 895	}
 896	return 0;
 897}
 898
 899static int
 900p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 901{
 902	int err;
 903	struct socket *csocket;
 904	struct sockaddr_in sin_server;
 905	struct p9_fd_opts opts;
 906
 907	err = parse_opts(args, &opts);
 908	if (err < 0)
 909		return err;
 910
 911	if (valid_ipaddr4(addr) < 0)
 912		return -EINVAL;
 913
 914	csocket = NULL;
 915
 916	sin_server.sin_family = AF_INET;
 917	sin_server.sin_addr.s_addr = in_aton(addr);
 918	sin_server.sin_port = htons(opts.port);
 919	err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
 920			    SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
 921	if (err) {
 922		P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
 923		return err;
 924	}
 925
 926	err = csocket->ops->connect(csocket,
 927				    (struct sockaddr *)&sin_server,
 928				    sizeof(struct sockaddr_in), 0);
 929	if (err < 0) {
 930		P9_EPRINTK(KERN_ERR,
 931			"p9_trans_tcp: problem connecting socket to %s\n",
 932			addr);
 933		sock_release(csocket);
 934		return err;
 935	}
 936
 937	return p9_socket_open(client, csocket);
 938}
 939
 940static int
 941p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
 942{
 943	int err;
 944	struct socket *csocket;
 945	struct sockaddr_un sun_server;
 946
 947	csocket = NULL;
 948
 949	if (strlen(addr) >= UNIX_PATH_MAX) {
 950		P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
 951			addr);
 952		return -ENAMETOOLONG;
 953	}
 954
 955	sun_server.sun_family = PF_UNIX;
 956	strcpy(sun_server.sun_path, addr);
 957	err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
 958			    SOCK_STREAM, 0, &csocket, 1);
 959	if (err < 0) {
 960		P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
 961		return err;
 962	}
 963	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
 964			sizeof(struct sockaddr_un) - 1, 0);
 965	if (err < 0) {
 966		P9_EPRINTK(KERN_ERR,
 967			"p9_trans_unix: problem connecting socket: %s: %d\n",
 968			addr, err);
 969		sock_release(csocket);
 970		return err;
 971	}
 972
 973	return p9_socket_open(client, csocket);
 974}
 975
 976static int
 977p9_fd_create(struct p9_client *client, const char *addr, char *args)
 978{
 979	int err;
 980	struct p9_fd_opts opts;
 981	struct p9_trans_fd *p;
 982
 983	parse_opts(args, &opts);
 984
 985	if (opts.rfd == ~0 || opts.wfd == ~0) {
 986		printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n");
 987		return -ENOPROTOOPT;
 988	}
 989
 990	err = p9_fd_open(client, opts.rfd, opts.wfd);
 991	if (err < 0)
 992		return err;
 993
 994	p = (struct p9_trans_fd *) client->trans;
 995	p->conn = p9_conn_create(client);
 996	if (IS_ERR(p->conn)) {
 997		err = PTR_ERR(p->conn);
 998		p->conn = NULL;
 999		fput(p->rd);
1000		fput(p->wr);
1001		return err;
1002	}
1003
1004	return 0;
1005}
1006
1007static struct p9_trans_module p9_tcp_trans = {
1008	.name = "tcp",
1009	.maxsize = MAX_SOCK_BUF,
1010	.def = 1,
1011	.create = p9_fd_create_tcp,
1012	.close = p9_fd_close,
1013	.request = p9_fd_request,
1014	.cancel = p9_fd_cancel,
1015	.owner = THIS_MODULE,
1016};
1017
1018static struct p9_trans_module p9_unix_trans = {
1019	.name = "unix",
1020	.maxsize = MAX_SOCK_BUF,
1021	.def = 0,
1022	.create = p9_fd_create_unix,
1023	.close = p9_fd_close,
1024	.request = p9_fd_request,
1025	.cancel = p9_fd_cancel,
1026	.owner = THIS_MODULE,
1027};
1028
1029static struct p9_trans_module p9_fd_trans = {
1030	.name = "fd",
1031	.maxsize = MAX_SOCK_BUF,
1032	.def = 0,
1033	.create = p9_fd_create,
1034	.close = p9_fd_close,
1035	.request = p9_fd_request,
1036	.cancel = p9_fd_cancel,
1037	.owner = THIS_MODULE,
1038};
1039
1040/**
1041 * p9_poll_proc - poll worker thread
1042 * @a: thread state and arguments
1043 *
1044 * polls all v9fs transports for new events and queues the appropriate
1045 * work to the work queue
1046 *
1047 */
1048
1049static void p9_poll_workfn(struct work_struct *work)
1050{
1051	unsigned long flags;
1052
1053	P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current);
1054
1055	spin_lock_irqsave(&p9_poll_lock, flags);
1056	while (!list_empty(&p9_poll_pending_list)) {
1057		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
1058							struct p9_conn,
1059							poll_pending_link);
1060		list_del_init(&conn->poll_pending_link);
1061		spin_unlock_irqrestore(&p9_poll_lock, flags);
1062
1063		p9_poll_mux(conn);
1064
1065		spin_lock_irqsave(&p9_poll_lock, flags);
1066	}
1067	spin_unlock_irqrestore(&p9_poll_lock, flags);
1068
1069	P9_DPRINTK(P9_DEBUG_TRANS, "finish\n");
1070}
1071
1072int p9_trans_fd_init(void)
1073{
1074	v9fs_register_trans(&p9_tcp_trans);
1075	v9fs_register_trans(&p9_unix_trans);
1076	v9fs_register_trans(&p9_fd_trans);
1077
1078	return 0;
1079}
1080
1081void p9_trans_fd_exit(void)
1082{
1083	flush_work_sync(&p9_poll_work);
1084	v9fs_unregister_trans(&p9_tcp_trans);
1085	v9fs_unregister_trans(&p9_unix_trans);
1086	v9fs_unregister_trans(&p9_fd_trans);
1087}
net/9p/trans_fd.c (v6.13.7)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Fd transport layer.  Includes deprecated socket layer.
   4 *
   5 *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
   6 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
   7 *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
   8 *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
   9 */
  10
  11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  12
  13#include <linux/in.h>
  14#include <linux/module.h>
  15#include <linux/net.h>
  16#include <linux/ipv6.h>
  17#include <linux/kthread.h>
  18#include <linux/errno.h>
  19#include <linux/kernel.h>
  20#include <linux/un.h>
  21#include <linux/uaccess.h>
  22#include <linux/inet.h>
  23#include <linux/file.h>
  24#include <linux/parser.h>
  25#include <linux/slab.h>
  26#include <linux/seq_file.h>
  27#include <net/9p/9p.h>
  28#include <net/9p/client.h>
  29#include <net/9p/transport.h>
  30
  31#include <linux/syscalls.h> /* killme */
  32
  33#define P9_PORT 564
  34#define MAX_SOCK_BUF (1024*1024)
  35#define MAXPOLLWADDR	2
  36
  37static struct p9_trans_module p9_tcp_trans;
  38static struct p9_trans_module p9_fd_trans;
  39
  40/**
  41 * struct p9_fd_opts - per-transport options
  42 * @rfd: file descriptor for reading (trans=fd)
  43 * @wfd: file descriptor for writing (trans=fd)
  44 * @port: port to connect to (trans=tcp)
  45 * @privport: port is privileged
  46 */
  47
  48struct p9_fd_opts {
  49	int rfd;
  50	int wfd;
  51	u16 port;
  52	bool privport;
  53};
  54
  55/*
  56  * Option Parsing (code inspired by NFS code)
  57  *  - a little lazy - parse all fd-transport options
  58  */
  59
  60enum {
  61	/* Options that take integer arguments */
  62	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
  63	/* Options that take no arguments */
  64	Opt_privport,
  65};
  66
  67static const match_table_t tokens = {
  68	{Opt_port, "port=%u"},
  69	{Opt_rfdno, "rfdno=%u"},
  70	{Opt_wfdno, "wfdno=%u"},
  71	{Opt_privport, "privport"},
  72	{Opt_err, NULL},
  73};
  74
  75enum {
  76	Rworksched = 1,		/* read work scheduled or running */
  77	Rpending = 2,		/* can read */
  78	Wworksched = 4,		/* write work scheduled or running */
  79	Wpending = 8,		/* can write */
  80};
  81
  82struct p9_poll_wait {
  83	struct p9_conn *conn;
  84	wait_queue_entry_t wait;
  85	wait_queue_head_t *wait_addr;
  86};
  87
  88/**
  89 * struct p9_conn - fd mux connection state information
  90 * @mux_list: list link for mux to manage multiple connections (?)
  91 * @client: reference to client instance for this connection
  92 * @err: error state
  93 * @req_lock: lock protecting req_list and requests statuses
  94 * @req_list: accounting for requests which have been sent
  95 * @unsent_req_list: accounting for requests that haven't been sent
  96 * @rreq: read request
  97 * @wreq: write request
  98 * @tmp_buf: temporary buffer to read in header
  99 * @rc: temporary fcall for reading current frame
 100 * @wpos: write position for current frame
 101 * @wsize: amount of data to write for current frame
 102 * @wbuf: current write buffer
 103 * @poll_pending_link: pending links to be polled per conn
 104 * @poll_wait: array of wait_q's for various worker threads
 105 * @pt: poll state
 106 * @rq: current read work
 107 * @wq: current write work
 108 * @wsched: ????
 109 *
 110 */
 111
 112struct p9_conn {
 113	struct list_head mux_list;
 114	struct p9_client *client;
 115	int err;
 116	spinlock_t req_lock;
 117	struct list_head req_list;
 118	struct list_head unsent_req_list;
 119	struct p9_req_t *rreq;
 120	struct p9_req_t *wreq;
 121	char tmp_buf[P9_HDRSZ];
 122	struct p9_fcall rc;
 123	int wpos;
 124	int wsize;
 125	char *wbuf;
 126	struct list_head poll_pending_link;
 127	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
 128	poll_table pt;
 129	struct work_struct rq;
 130	struct work_struct wq;
 131	unsigned long wsched;
 132};
 133
 134/**
 135 * struct p9_trans_fd - transport state
 136 * @rd: reference to file to read from
 137 * @wr: reference of file to write to
 138 * @conn: connection state reference
 139 *
 140 */
 141
 142struct p9_trans_fd {
 143	struct file *rd;
 144	struct file *wr;
 145	struct p9_conn conn;
 146};
 147
 148static void p9_poll_workfn(struct work_struct *work);
 149
 150static DEFINE_SPINLOCK(p9_poll_lock);
 151static LIST_HEAD(p9_poll_pending_list);
 152static DECLARE_WORK(p9_poll_work, p9_poll_workfn);
 153
 154static unsigned int p9_ipport_resv_min = P9_DEF_MIN_RESVPORT;
 155static unsigned int p9_ipport_resv_max = P9_DEF_MAX_RESVPORT;
 156
 157static void p9_mux_poll_stop(struct p9_conn *m)
 158{
 159	unsigned long flags;
 160	int i;
 161
 162	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
 163		struct p9_poll_wait *pwait = &m->poll_wait[i];
 164
 165		if (pwait->wait_addr) {
 166			remove_wait_queue(pwait->wait_addr, &pwait->wait);
 167			pwait->wait_addr = NULL;
 168		}
 169	}
 170
 171	spin_lock_irqsave(&p9_poll_lock, flags);
 172	list_del_init(&m->poll_pending_link);
 173	spin_unlock_irqrestore(&p9_poll_lock, flags);
 174
 175	flush_work(&p9_poll_work);
 176}
 177
 178/**
 179 * p9_conn_cancel - cancel all pending requests with error
 180 * @m: mux data
 181 * @err: error code
 182 *
 183 */
 184
 185static void p9_conn_cancel(struct p9_conn *m, int err)
 186{
 187	struct p9_req_t *req, *rtmp;
 188	LIST_HEAD(cancel_list);
 189
 190	p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
 191
 192	spin_lock(&m->req_lock);
 193
 194	if (m->err) {
 195		spin_unlock(&m->req_lock);
 196		return;
 197	}
 198
 199	m->err = err;
 200
 201	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
 202		list_move(&req->req_list, &cancel_list);
 203		WRITE_ONCE(req->status, REQ_STATUS_ERROR);
 204	}
 205	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
 206		list_move(&req->req_list, &cancel_list);
 207		WRITE_ONCE(req->status, REQ_STATUS_ERROR);
 208	}
 209
 210	spin_unlock(&m->req_lock);
 211
 212	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
 213		p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
 214		list_del(&req->req_list);
 215		if (!req->t_err)
 216			req->t_err = err;
 217		p9_client_cb(m->client, req, REQ_STATUS_ERROR);
 218	}
 219}
 220
 221static __poll_t
 222p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err)
 223{
 224	__poll_t ret;
 225	struct p9_trans_fd *ts = NULL;
 226
 227	if (client && client->status == Connected)
 228		ts = client->trans;
 229
 230	if (!ts) {
 231		if (err)
 232			*err = -EREMOTEIO;
 233		return EPOLLERR;
 234	}
 235
 236	ret = vfs_poll(ts->rd, pt);
 237	if (ts->rd != ts->wr)
 238		ret = (ret & ~EPOLLOUT) | (vfs_poll(ts->wr, pt) & ~EPOLLIN);
 239	return ret;
 240}
 241
 242/**
 243 * p9_fd_read- read from a fd
 244 * @client: client instance
 245 * @v: buffer to receive data into
 246 * @len: size of receive buffer
 247 *
 248 */
 249
 250static int p9_fd_read(struct p9_client *client, void *v, int len)
 251{
 252	int ret;
 253	struct p9_trans_fd *ts = NULL;
 254	loff_t pos;
 255
 256	if (client && client->status != Disconnected)
 257		ts = client->trans;
 258
 259	if (!ts)
 260		return -EREMOTEIO;
 261
 262	if (!(ts->rd->f_flags & O_NONBLOCK))
 263		p9_debug(P9_DEBUG_ERROR, "blocking read ...\n");
 264
 265	pos = ts->rd->f_pos;
 266	ret = kernel_read(ts->rd, v, len, &pos);
 267	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
 268		client->status = Disconnected;
 269	return ret;
 270}
 271
 272/**
 273 * p9_read_work - called when there is some data to be read from a transport
 274 * @work: container of work to be done
 275 *
 276 */
 277
 278static void p9_read_work(struct work_struct *work)
 279{
 280	__poll_t n;
 281	int err;
 282	struct p9_conn *m;
 283
 284	m = container_of(work, struct p9_conn, rq);
 285
 286	if (m->err < 0)
 287		return;
 288
 289	p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset);
 290
 291	if (!m->rc.sdata) {
 292		m->rc.sdata = m->tmp_buf;
 293		m->rc.offset = 0;
 294		m->rc.capacity = P9_HDRSZ; /* start by reading header */
 295	}
 296
 297	clear_bit(Rpending, &m->wsched);
 298	p9_debug(P9_DEBUG_TRANS, "read mux %p pos %zd size: %zd = %zd\n",
 299		 m, m->rc.offset, m->rc.capacity,
 300		 m->rc.capacity - m->rc.offset);
 301	err = p9_fd_read(m->client, m->rc.sdata + m->rc.offset,
 302			 m->rc.capacity - m->rc.offset);
 303	p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
 304	if (err == -EAGAIN)
 305		goto end_clear;
 306
 307	if (err <= 0)
 308		goto error;
 309
 310	m->rc.offset += err;
 311
 312	/* header read in */
 313	if ((!m->rreq) && (m->rc.offset == m->rc.capacity)) {
 314		p9_debug(P9_DEBUG_TRANS, "got new header\n");
 315
 316		/* Header size */
 317		m->rc.size = P9_HDRSZ;
 318		err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0);
 319		if (err) {
 320			p9_debug(P9_DEBUG_ERROR,
 321				 "error parsing header: %d\n", err);
 322			goto error;
 323		}
 324
 325		p9_debug(P9_DEBUG_TRANS,
 326			 "mux %p pkt: size: %d bytes tag: %d\n",
 327			 m, m->rc.size, m->rc.tag);
 328
 329		m->rreq = p9_tag_lookup(m->client, m->rc.tag);
 330		if (!m->rreq || (m->rreq->status != REQ_STATUS_SENT)) {
 331			p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
 332				 m->rc.tag);
 333			err = -EIO;
 334			goto error;
 335		}
 336
 337		if (m->rc.size > m->rreq->rc.capacity) {
 338			p9_debug(P9_DEBUG_ERROR,
 339				 "requested packet size too big: %d for tag %d with capacity %zd\n",
 340				 m->rc.size, m->rc.tag, m->rreq->rc.capacity);
 341			err = -EIO;
 342			goto error;
 343		}
 344
 345		if (!m->rreq->rc.sdata) {
 346			p9_debug(P9_DEBUG_ERROR,
 347				 "No recv fcall for tag %d (req %p), disconnecting!\n",
 348				 m->rc.tag, m->rreq);
 349			p9_req_put(m->client, m->rreq);
 350			m->rreq = NULL;
 351			err = -EIO;
 352			goto error;
 353		}
 354		m->rc.sdata = m->rreq->rc.sdata;
 355		memcpy(m->rc.sdata, m->tmp_buf, m->rc.capacity);
 356		m->rc.capacity = m->rc.size;
 357	}
 358
 359	/* packet is read in
 360	 * not an else because some packets (like clunk) have no payload
 361	 */
 362	if ((m->rreq) && (m->rc.offset == m->rc.capacity)) {
 363		p9_debug(P9_DEBUG_TRANS, "got new packet\n");
 364		m->rreq->rc.size = m->rc.offset;
 365		spin_lock(&m->req_lock);
 366		if (m->rreq->status == REQ_STATUS_SENT) {
 367			list_del(&m->rreq->req_list);
 368			p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD);
 369		} else if (m->rreq->status == REQ_STATUS_FLSHD) {
 370			/* Ignore replies associated with a cancelled request. */
 371			p9_debug(P9_DEBUG_TRANS,
 372				 "Ignore replies associated with a cancelled request\n");
 373		} else {
 374			spin_unlock(&m->req_lock);
 375			p9_debug(P9_DEBUG_ERROR,
 376				 "Request tag %d errored out while we were reading the reply\n",
 377				 m->rc.tag);
 378			err = -EIO;
 379			goto error;
 380		}
 381		spin_unlock(&m->req_lock);
 382		m->rc.sdata = NULL;
 383		m->rc.offset = 0;
 384		m->rc.capacity = 0;
 385		p9_req_put(m->client, m->rreq);
 386		m->rreq = NULL;
 387	}
 388
 389end_clear:
 390	clear_bit(Rworksched, &m->wsched);
 391
 392	if (!list_empty(&m->req_list)) {
 393		if (test_and_clear_bit(Rpending, &m->wsched))
 394			n = EPOLLIN;
 395		else
 396			n = p9_fd_poll(m->client, NULL, NULL);
 397
 398		if ((n & EPOLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
 399			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
 400			schedule_work(&m->rq);
 401		}
 402	}
 403
 404	return;
 405error:
 406	p9_conn_cancel(m, err);
 407	clear_bit(Rworksched, &m->wsched);
 408}
 409
 410/**
 411 * p9_fd_write - write to a socket
 412 * @client: client instance
 413 * @v: buffer to send data from
 414 * @len: size of send buffer
 415 *
 416 */
 417
 418static int p9_fd_write(struct p9_client *client, void *v, int len)
 419{
 420	ssize_t ret;
 421	struct p9_trans_fd *ts = NULL;
 422
 423	if (client && client->status != Disconnected)
 424		ts = client->trans;
 425
 426	if (!ts)
 427		return -EREMOTEIO;
 428
 429	if (!(ts->wr->f_flags & O_NONBLOCK))
 430		p9_debug(P9_DEBUG_ERROR, "blocking write ...\n");
 431
 432	ret = kernel_write(ts->wr, v, len, &ts->wr->f_pos);
 433	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
 434		client->status = Disconnected;
 435	return ret;
 436}
 437
 438/**
 439 * p9_write_work - called when a transport can send some data
 440 * @work: container for work to be done
 441 *
 442 */
 443
 444static void p9_write_work(struct work_struct *work)
 445{
 446	__poll_t n;
 447	int err;
 448	struct p9_conn *m;
 449	struct p9_req_t *req;
 450
 451	m = container_of(work, struct p9_conn, wq);
 452
 453	if (m->err < 0) {
 454		clear_bit(Wworksched, &m->wsched);
 455		return;
 456	}
 457
 458	if (!m->wsize) {
 459		spin_lock(&m->req_lock);
 460		if (list_empty(&m->unsent_req_list)) {
 461			clear_bit(Wworksched, &m->wsched);
 462			spin_unlock(&m->req_lock);
 463			return;
 464		}
 465
 466		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
 467			       req_list);
 468		WRITE_ONCE(req->status, REQ_STATUS_SENT);
 469		p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
 470		list_move_tail(&req->req_list, &m->req_list);
 471
 472		m->wbuf = req->tc.sdata;
 473		m->wsize = req->tc.size;
 474		m->wpos = 0;
 475		p9_req_get(req);
 476		m->wreq = req;
 477		spin_unlock(&m->req_lock);
 478	}
 479
 480	p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n",
 481		 m, m->wpos, m->wsize);
 482	clear_bit(Wpending, &m->wsched);
 483	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
 484	p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
 485	if (err == -EAGAIN)
 486		goto end_clear;
 487
 488
 489	if (err < 0)
 490		goto error;
 491	else if (err == 0) {
 492		err = -EREMOTEIO;
 493		goto error;
 494	}
 495
 496	m->wpos += err;
 497	if (m->wpos == m->wsize) {
 498		m->wpos = m->wsize = 0;
 499		p9_req_put(m->client, m->wreq);
 500		m->wreq = NULL;
 501	}
 502
 503end_clear:
 504	clear_bit(Wworksched, &m->wsched);
 505
 506	if (m->wsize || !list_empty(&m->unsent_req_list)) {
 507		if (test_and_clear_bit(Wpending, &m->wsched))
 508			n = EPOLLOUT;
 509		else
 510			n = p9_fd_poll(m->client, NULL, NULL);
 511
 512		if ((n & EPOLLOUT) &&
 513		   !test_and_set_bit(Wworksched, &m->wsched)) {
 514			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
 515			schedule_work(&m->wq);
 516		}
 517	}
 518
 519	return;
 520
 521error:
 522	p9_conn_cancel(m, err);
 523	clear_bit(Wworksched, &m->wsched);
 524}
 525
 526static int p9_pollwake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
 527{
 528	struct p9_poll_wait *pwait =
 529		container_of(wait, struct p9_poll_wait, wait);
 530	struct p9_conn *m = pwait->conn;
 531	unsigned long flags;
 532
 533	spin_lock_irqsave(&p9_poll_lock, flags);
 534	if (list_empty(&m->poll_pending_link))
 535		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
 536	spin_unlock_irqrestore(&p9_poll_lock, flags);
 537
 538	schedule_work(&p9_poll_work);
 539	return 1;
 540}
 541
 542/**
 543 * p9_pollwait - add poll task to the wait queue
 544 * @filp: file pointer being polled
 545 * @wait_address: wait_q to block on
 546 * @p: poll state
 547 *
 548 * called by files poll operation to add v9fs-poll task to files wait queue
 549 */
 550
 551static void
 552p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
 553{
 554	struct p9_conn *m = container_of(p, struct p9_conn, pt);
 555	struct p9_poll_wait *pwait = NULL;
 556	int i;
 557
 558	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
 559		if (m->poll_wait[i].wait_addr == NULL) {
 560			pwait = &m->poll_wait[i];
 561			break;
 562		}
 563	}
 564
 565	if (!pwait) {
 566		p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n");
 567		return;
 568	}
 569
 570	pwait->conn = m;
 571	pwait->wait_addr = wait_address;
 572	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
 573	add_wait_queue(wait_address, &pwait->wait);
 574}
 575
 576/**
 577 * p9_conn_create - initialize the per-session mux data
 578 * @client: client instance
 579 *
 580 * Note: Creates the polling task if this is the first session.
 581 */
 582
 583static void p9_conn_create(struct p9_client *client)
 584{
 585	__poll_t n;
 586	struct p9_trans_fd *ts = client->trans;
 587	struct p9_conn *m = &ts->conn;
 588
 589	p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);
 590
 591	INIT_LIST_HEAD(&m->mux_list);
 592	m->client = client;
 593
 594	spin_lock_init(&m->req_lock);
 595	INIT_LIST_HEAD(&m->req_list);
 596	INIT_LIST_HEAD(&m->unsent_req_list);
 597	INIT_WORK(&m->rq, p9_read_work);
 598	INIT_WORK(&m->wq, p9_write_work);
 599	INIT_LIST_HEAD(&m->poll_pending_link);
 600	init_poll_funcptr(&m->pt, p9_pollwait);
 601
 602	n = p9_fd_poll(client, &m->pt, NULL);
 603	if (n & EPOLLIN) {
 604		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
 605		set_bit(Rpending, &m->wsched);
 606	}
 607
 608	if (n & EPOLLOUT) {
 609		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
 610		set_bit(Wpending, &m->wsched);
 611	}
 612}
 613
 614/**
 615 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 616 * @m: connection to poll
 617 *
 618 */
 619
 620static void p9_poll_mux(struct p9_conn *m)
 621{
 622	__poll_t n;
 623	int err = -ECONNRESET;
 624
 625	if (m->err < 0)
 626		return;
 627
 628	n = p9_fd_poll(m->client, NULL, &err);
 629	if (n & (EPOLLERR | EPOLLHUP | EPOLLNVAL)) {
 630		p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
 631		p9_conn_cancel(m, err);
 632	}
 633
 634	if (n & EPOLLIN) {
 635		set_bit(Rpending, &m->wsched);
 636		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
 637		if (!test_and_set_bit(Rworksched, &m->wsched)) {
 638			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
 639			schedule_work(&m->rq);
 640		}
 641	}
 642
 643	if (n & EPOLLOUT) {
 644		set_bit(Wpending, &m->wsched);
 645		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
 646		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
 647		    !test_and_set_bit(Wworksched, &m->wsched)) {
 648			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
 649			schedule_work(&m->wq);
 650		}
 651	}
 652}
 653
 654/**
 655 * p9_fd_request - send 9P request
 656 * The function can sleep until the request is scheduled for sending.
 657 * The function can be interrupted. Return from the function is not
 658 * a guarantee that the request is sent successfully.
 659 *
 660 * @client: client instance
 661 * @req: request to be sent
 662 *
 663 */
 664
 665static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 666{
 667	__poll_t n;
 668	struct p9_trans_fd *ts = client->trans;
 669	struct p9_conn *m = &ts->conn;
 670
 671	p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
 672		 m, current, &req->tc, req->tc.id);
 673
 674	spin_lock(&m->req_lock);
 675
 676	if (m->err < 0) {
 677		spin_unlock(&m->req_lock);
 678		return m->err;
 679	}
 680
 681	WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
 682	list_add_tail(&req->req_list, &m->unsent_req_list);
 683	spin_unlock(&m->req_lock);
 684
 685	if (test_and_clear_bit(Wpending, &m->wsched))
 686		n = EPOLLOUT;
 687	else
 688		n = p9_fd_poll(m->client, NULL, NULL);
 689
 690	if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
 691		schedule_work(&m->wq);
 692
 693	return 0;
 694}
 695
 696static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
 697{
 698	struct p9_trans_fd *ts = client->trans;
 699	struct p9_conn *m = &ts->conn;
 700	int ret = 1;
 701
 702	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
 703
 704	spin_lock(&m->req_lock);
 705
 706	if (req->status == REQ_STATUS_UNSENT) {
 707		list_del(&req->req_list);
 708		WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
 709		p9_req_put(client, req);
 710		ret = 0;
 711	}
 712	spin_unlock(&m->req_lock);
 713
 714	return ret;
 715}
 716
 717static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
 718{
 719	struct p9_trans_fd *ts = client->trans;
 720	struct p9_conn *m = &ts->conn;
 721
 722	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
 723
 724	spin_lock(&m->req_lock);
 725	/* Ignore cancelled request if message has been received
 726	 * before lock.
 727	 */
 728	if (req->status == REQ_STATUS_RCVD) {
 729		spin_unlock(&m->req_lock);
 730		return 0;
 731	}
 732
 733	/* we haven't received a response for oldreq,
 734	 * remove it from the list.
 735	 */
 736	list_del(&req->req_list);
 737	WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
 738	spin_unlock(&m->req_lock);
 739
 740	p9_req_put(client, req);
 741
 742	return 0;
 743}
 744
 745static int p9_fd_show_options(struct seq_file *m, struct p9_client *clnt)
 746{
 747	if (clnt->trans_mod == &p9_tcp_trans) {
 748		if (clnt->trans_opts.tcp.port != P9_PORT)
 749			seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port);
 750	} else if (clnt->trans_mod == &p9_fd_trans) {
 751		if (clnt->trans_opts.fd.rfd != ~0)
 752			seq_printf(m, ",rfd=%u", clnt->trans_opts.fd.rfd);
 753		if (clnt->trans_opts.fd.wfd != ~0)
 754			seq_printf(m, ",wfd=%u", clnt->trans_opts.fd.wfd);
 755	}
 756	return 0;
 757}
 758
 759/**
 760 * parse_opts - parse mount options into p9_fd_opts structure
 761 * @params: options string passed from mount
 762 * @opts: fd transport-specific structure to parse options into
 763 *
 764 * Returns 0 upon success, -ERRNO upon failure
 765 */
 766
 767static int parse_opts(char *params, struct p9_fd_opts *opts)
 768{
 769	char *p;
 770	substring_t args[MAX_OPT_ARGS];
 771	int option;
 772	char *options, *tmp_options;
 773
 774	opts->port = P9_PORT;
 775	opts->rfd = ~0;
 776	opts->wfd = ~0;
 777	opts->privport = false;
 778
 779	if (!params)
 780		return 0;
 781
 782	tmp_options = kstrdup(params, GFP_KERNEL);
 783	if (!tmp_options) {
 784		p9_debug(P9_DEBUG_ERROR,
 785			 "failed to allocate copy of option string\n");
 786		return -ENOMEM;
 787	}
 788	options = tmp_options;
 789
 790	while ((p = strsep(&options, ",")) != NULL) {
 791		int token;
 792		int r;
 793		if (!*p)
 794			continue;
 795		token = match_token(p, tokens, args);
 796		if ((token != Opt_err) && (token != Opt_privport)) {
 797			r = match_int(&args[0], &option);
 798			if (r < 0) {
 799				p9_debug(P9_DEBUG_ERROR,
 800					 "integer field, but no integer?\n");
 801				continue;
 802			}
 803		}
 804		switch (token) {
 805		case Opt_port:
 806			opts->port = option;
 807			break;
 808		case Opt_rfdno:
 809			opts->rfd = option;
 810			break;
 811		case Opt_wfdno:
 812			opts->wfd = option;
 813			break;
 814		case Opt_privport:
 815			opts->privport = true;
 816			break;
 817		default:
 818			continue;
 819		}
 820	}
 821
 822	kfree(tmp_options);
 823	return 0;
 824}
 825
 826static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
 827{
 828	struct p9_trans_fd *ts = kzalloc(sizeof(struct p9_trans_fd),
 829					   GFP_KERNEL);
 830	if (!ts)
 831		return -ENOMEM;
 832
 833	ts->rd = fget(rfd);
 834	if (!ts->rd)
 835		goto out_free_ts;
 836	if (!(ts->rd->f_mode & FMODE_READ))
 837		goto out_put_rd;
 838	/* Prevent workers from hanging on IO when fd is a pipe.
 839	 * It's technically possible for userspace or concurrent mounts to
 840	 * modify this flag concurrently, which will likely result in a
 841	 * broken filesystem. However, just having bad flags here should
 842	 * not crash the kernel or cause any other sort of bug, so mark this
 843	 * particular data race as intentional so that tooling (like KCSAN)
 844	 * can allow it and detect further problems.
 845	 */
 846	data_race(ts->rd->f_flags |= O_NONBLOCK);
 847	ts->wr = fget(wfd);
 848	if (!ts->wr)
 849		goto out_put_rd;
 850	if (!(ts->wr->f_mode & FMODE_WRITE))
 851		goto out_put_wr;
 852	data_race(ts->wr->f_flags |= O_NONBLOCK);
 853
 854	client->trans = ts;
 855	client->status = Connected;
 856
 857	return 0;
 858
 859out_put_wr:
 860	fput(ts->wr);
 861out_put_rd:
 862	fput(ts->rd);
 863out_free_ts:
 864	kfree(ts);
 865	return -EIO;
 866}
 867
 868static int p9_socket_open(struct p9_client *client, struct socket *csocket)
 869{
 870	struct p9_trans_fd *p;
 871	struct file *file;
 872
 873	p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
 874	if (!p) {
 875		sock_release(csocket);
 876		return -ENOMEM;
 877	}
 878
 879	csocket->sk->sk_allocation = GFP_NOIO;
 880	csocket->sk->sk_use_task_frag = false;
 881	file = sock_alloc_file(csocket, 0, NULL);
 882	if (IS_ERR(file)) {
 883		pr_err("%s (%d): failed to map fd\n",
 884		       __func__, task_pid_nr(current));
 885		kfree(p);
 886		return PTR_ERR(file);
 887	}
 888
 889	get_file(file);
 890	p->wr = p->rd = file;
 891	client->trans = p;
 892	client->status = Connected;
 893
 894	p->rd->f_flags |= O_NONBLOCK;
 895
 896	p9_conn_create(client);
 897	return 0;
 898}
 899
 900/**
 901 * p9_conn_destroy - cancels all pending requests of mux
 902 * @m: mux to destroy
 903 *
 904 */
 905
 906static void p9_conn_destroy(struct p9_conn *m)
 907{
 908	p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n",
 909		 m, m->mux_list.prev, m->mux_list.next);
 910
 911	p9_mux_poll_stop(m);
 912	cancel_work_sync(&m->rq);
 913	if (m->rreq) {
 914		p9_req_put(m->client, m->rreq);
 915		m->rreq = NULL;
 916	}
 917	cancel_work_sync(&m->wq);
 918	if (m->wreq) {
 919		p9_req_put(m->client, m->wreq);
 920		m->wreq = NULL;
 921	}
 922
 923	p9_conn_cancel(m, -ECONNRESET);
 924
 925	m->client = NULL;
 926}
 927
 928/**
 929 * p9_fd_close - shutdown file descriptor transport
 930 * @client: client instance
 931 *
 932 */
 933
 934static void p9_fd_close(struct p9_client *client)
 935{
 936	struct p9_trans_fd *ts;
 937
 938	if (!client)
 939		return;
 940
 941	ts = client->trans;
 942	if (!ts)
 943		return;
 944
 945	client->status = Disconnected;
 946
 947	p9_conn_destroy(&ts->conn);
 948
 949	if (ts->rd)
 950		fput(ts->rd);
 951	if (ts->wr)
 952		fput(ts->wr);
 953
 954	kfree(ts);
 955}
 956
 957/*
 958 * stolen from NFS - maybe should be made a generic function?
 959 */
 960static inline int valid_ipaddr4(const char *buf)
 961{
 962	int rc, count, in[4];
 963
 964	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
 965	if (rc != 4)
 966		return -EINVAL;
 967	for (count = 0; count < 4; count++) {
 968		if (in[count] > 255)
 969			return -EINVAL;
 970	}
 971	return 0;
 972}
 973
 974static int p9_bind_privport(struct socket *sock)
 975{
 976	struct sockaddr_in cl;
 977	int port, err = -EINVAL;
 978
 979	memset(&cl, 0, sizeof(cl));
 980	cl.sin_family = AF_INET;
 981	cl.sin_addr.s_addr = htonl(INADDR_ANY);
 982	for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) {
 983		cl.sin_port = htons((ushort)port);
 984		err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));
 985		if (err != -EADDRINUSE)
 986			break;
 987	}
 988	return err;
 989}
 990
 991
 992static int
 993p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 994{
 995	int err;
 996	struct socket *csocket;
 997	struct sockaddr_in sin_server;
 998	struct p9_fd_opts opts;
 999
1000	err = parse_opts(args, &opts);
1001	if (err < 0)
1002		return err;
1003
1004	if (addr == NULL || valid_ipaddr4(addr) < 0)
1005		return -EINVAL;
1006
1007	csocket = NULL;
1008
1009	client->trans_opts.tcp.port = opts.port;
1010	client->trans_opts.tcp.privport = opts.privport;
1011	sin_server.sin_family = AF_INET;
1012	sin_server.sin_addr.s_addr = in_aton(addr);
1013	sin_server.sin_port = htons(opts.port);
1014	err = __sock_create(current->nsproxy->net_ns, PF_INET,
1015			    SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
1016	if (err) {
1017		pr_err("%s (%d): problem creating socket\n",
1018		       __func__, task_pid_nr(current));
1019		return err;
1020	}
1021
1022	if (opts.privport) {
1023		err = p9_bind_privport(csocket);
1024		if (err < 0) {
1025			pr_err("%s (%d): problem binding to privport\n",
1026			       __func__, task_pid_nr(current));
1027			sock_release(csocket);
1028			return err;
1029		}
1030	}
1031
1032	err = READ_ONCE(csocket->ops)->connect(csocket,
1033				    (struct sockaddr *)&sin_server,
1034				    sizeof(struct sockaddr_in), 0);
1035	if (err < 0) {
1036		pr_err("%s (%d): problem connecting socket to %s\n",
1037		       __func__, task_pid_nr(current), addr);
1038		sock_release(csocket);
1039		return err;
1040	}
1041
1042	return p9_socket_open(client, csocket);
1043}
1044
1045static int
1046p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
1047{
1048	int err;
1049	struct socket *csocket;
1050	struct sockaddr_un sun_server;
1051
1052	csocket = NULL;
1053
1054	if (!addr || !strlen(addr))
1055		return -EINVAL;
1056
1057	if (strlen(addr) >= UNIX_PATH_MAX) {
1058		pr_err("%s (%d): address too long: %s\n",
1059		       __func__, task_pid_nr(current), addr);
1060		return -ENAMETOOLONG;
1061	}
1062
1063	sun_server.sun_family = PF_UNIX;
1064	strcpy(sun_server.sun_path, addr);
1065	err = __sock_create(current->nsproxy->net_ns, PF_UNIX,
1066			    SOCK_STREAM, 0, &csocket, 1);
1067	if (err < 0) {
1068		pr_err("%s (%d): problem creating socket\n",
1069		       __func__, task_pid_nr(current));
1070
1071		return err;
1072	}
1073	err = READ_ONCE(csocket->ops)->connect(csocket, (struct sockaddr *)&sun_server,
1074			sizeof(struct sockaddr_un) - 1, 0);
1075	if (err < 0) {
1076		pr_err("%s (%d): problem connecting socket: %s: %d\n",
1077		       __func__, task_pid_nr(current), addr, err);
1078		sock_release(csocket);
1079		return err;
1080	}
1081
1082	return p9_socket_open(client, csocket);
1083}
1084
1085static int
1086p9_fd_create(struct p9_client *client, const char *addr, char *args)
1087{
1088	int err;
1089	struct p9_fd_opts opts;
1090
1091	err = parse_opts(args, &opts);
1092	if (err < 0)
1093		return err;
1094	client->trans_opts.fd.rfd = opts.rfd;
1095	client->trans_opts.fd.wfd = opts.wfd;
1096
1097	if (opts.rfd == ~0 || opts.wfd == ~0) {
1098		pr_err("Insufficient options for proto=fd\n");
1099		return -ENOPROTOOPT;
1100	}
1101
1102	err = p9_fd_open(client, opts.rfd, opts.wfd);
1103	if (err < 0)
1104		return err;
1105
1106	p9_conn_create(client);
1107
1108	return 0;
1109}
1110
1111static struct p9_trans_module p9_tcp_trans = {
1112	.name = "tcp",
1113	.maxsize = MAX_SOCK_BUF,
1114	.pooled_rbuffers = false,
1115	.def = 0,
1116	.create = p9_fd_create_tcp,
1117	.close = p9_fd_close,
1118	.request = p9_fd_request,
1119	.cancel = p9_fd_cancel,
1120	.cancelled = p9_fd_cancelled,
1121	.show_options = p9_fd_show_options,
1122	.owner = THIS_MODULE,
1123};
1124MODULE_ALIAS_9P("tcp");
1125
1126static struct p9_trans_module p9_unix_trans = {
1127	.name = "unix",
1128	.maxsize = MAX_SOCK_BUF,
1129	.def = 0,
1130	.create = p9_fd_create_unix,
1131	.close = p9_fd_close,
1132	.request = p9_fd_request,
1133	.cancel = p9_fd_cancel,
1134	.cancelled = p9_fd_cancelled,
1135	.show_options = p9_fd_show_options,
1136	.owner = THIS_MODULE,
1137};
1138MODULE_ALIAS_9P("unix");
1139
1140static struct p9_trans_module p9_fd_trans = {
1141	.name = "fd",
1142	.maxsize = MAX_SOCK_BUF,
1143	.def = 0,
1144	.create = p9_fd_create,
1145	.close = p9_fd_close,
1146	.request = p9_fd_request,
1147	.cancel = p9_fd_cancel,
1148	.cancelled = p9_fd_cancelled,
1149	.show_options = p9_fd_show_options,
1150	.owner = THIS_MODULE,
1151};
1152MODULE_ALIAS_9P("fd");
1153
1154/**
1155 * p9_poll_workfn - poll worker thread
1156 * @work: work queue
1157 *
1158 * polls all v9fs transports for new events and queues the appropriate
1159 * work to the work queue
1160 *
1161 */
1162
1163static void p9_poll_workfn(struct work_struct *work)
1164{
1165	unsigned long flags;
1166
1167	p9_debug(P9_DEBUG_TRANS, "start %p\n", current);
1168
1169	spin_lock_irqsave(&p9_poll_lock, flags);
1170	while (!list_empty(&p9_poll_pending_list)) {
1171		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
1172							struct p9_conn,
1173							poll_pending_link);
1174		list_del_init(&conn->poll_pending_link);
1175		spin_unlock_irqrestore(&p9_poll_lock, flags);
1176
1177		p9_poll_mux(conn);
1178
1179		spin_lock_irqsave(&p9_poll_lock, flags);
1180	}
1181	spin_unlock_irqrestore(&p9_poll_lock, flags);
1182
1183	p9_debug(P9_DEBUG_TRANS, "finish\n");
1184}
1185
1186static int __init p9_trans_fd_init(void)
1187{
1188	v9fs_register_trans(&p9_tcp_trans);
1189	v9fs_register_trans(&p9_unix_trans);
1190	v9fs_register_trans(&p9_fd_trans);
1191
1192	return 0;
1193}
1194
1195static void __exit p9_trans_fd_exit(void)
1196{
1197	flush_work(&p9_poll_work);
1198	v9fs_unregister_trans(&p9_tcp_trans);
1199	v9fs_unregister_trans(&p9_unix_trans);
1200	v9fs_unregister_trans(&p9_fd_trans);
1201}
1202
1203module_init(p9_trans_fd_init);
1204module_exit(p9_trans_fd_exit);
1205
1206MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
1207MODULE_DESCRIPTION("Filedescriptor Transport for 9P");
1208MODULE_LICENSE("GPL");
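
For context, both versions above register the same three transports (tcp, unix, fd), and the options they interpret in parse_opts() arrive from userspace at mount time. The sketch below is a minimal, illustrative userspace example of selecting the tcp and fd transports through mount(2); the server address, mount points, and descriptor numbers are assumptions, and the calls only succeed with a reachable 9P server (and, for trans=fd, an already-connected descriptor pair).

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* tcp transport: the address and port= option end up in
	 * p9_fd_create_tcp(), which connects the socket and hands it
	 * to p9_socket_open().
	 */
	if (mount("127.0.0.1", "/mnt/9p", "9p", 0,
		  "trans=tcp,port=564,version=9p2000.L") != 0)
		perror("mount trans=tcp");

	/* fd transport: rfdno/wfdno are parsed by parse_opts() and
	 * wrapped by p9_fd_open(); fds 3 and 4 are assumed to be an
	 * already-connected pipe or socket pair to a 9P server.
	 */
	if (mount("nodev", "/mnt/9p-fd", "9p", 0,
		  "trans=fd,rfdno=3,wfdno=4,version=9p2000.L") != 0)
		perror("mount trans=fd");

	return 0;
}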