v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/fs/9p/trans_fd.c
   4 *
   5 * Fd transport layer.  Includes deprecated socket layer.
   6 *
   7 *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
   8 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
   9 *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
  10 *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/in.h>
  16#include <linux/module.h>
  17#include <linux/net.h>
  18#include <linux/ipv6.h>
  19#include <linux/kthread.h>
  20#include <linux/errno.h>
  21#include <linux/kernel.h>
  22#include <linux/un.h>
  23#include <linux/uaccess.h>
  24#include <linux/inet.h>
  25#include <linux/idr.h>
  26#include <linux/file.h>
  27#include <linux/parser.h>
  28#include <linux/slab.h>
  29#include <linux/seq_file.h>
  30#include <net/9p/9p.h>
  31#include <net/9p/client.h>
  32#include <net/9p/transport.h>
  33
  34#include <linux/syscalls.h> /* killme */
  35
  36#define P9_PORT 564
  37#define MAX_SOCK_BUF (64*1024)
  38#define MAXPOLLWADDR	2
  39
  40static struct p9_trans_module p9_tcp_trans;
  41static struct p9_trans_module p9_fd_trans;
  42
  43/**
  44 * struct p9_fd_opts - per-transport options
  45 * @rfd: file descriptor for reading (trans=fd)
  46 * @wfd: file descriptor for writing (trans=fd)
  47 * @port: port to connect to (trans=tcp)
  48 * @privport: port is privileged
  49 */
  50
  51struct p9_fd_opts {
  52	int rfd;
  53	int wfd;
  54	u16 port;
  55	bool privport;
  56};
  57
  58/*
  59  * Option Parsing (code inspired by NFS code)
  60  *  - a little lazy - parse all fd-transport options
  61  */
  62
  63enum {
  64	/* Options that take integer arguments */
  65	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
  66	/* Options that take no arguments */
  67	Opt_privport,
  68};
  69
  70static const match_table_t tokens = {
  71	{Opt_port, "port=%u"},
  72	{Opt_rfdno, "rfdno=%u"},
  73	{Opt_wfdno, "wfdno=%u"},
  74	{Opt_privport, "privport"},
  75	{Opt_err, NULL},
  76};
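/*
 * Illustrative only (not part of the original source): with the tokens
 * above, mount option strings such as
 *
 *     trans=tcp,port=5640,privport
 *     trans=fd,rfdno=3,wfdno=4
 *
 * are walked by parse_opts() below; only port=, rfdno=, wfdno= and
 * privport are consumed here, everything else falls through to the
 * default case and is ignored by this transport.
 */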
  77
  78enum {
  79	Rworksched = 1,		/* read work scheduled or running */
  80	Rpending = 2,		/* can read */
  81	Wworksched = 4,		/* write work scheduled or running */
  82	Wpending = 8,		/* can write */
  83};
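/*
 * These flag values are kept as bits in p9_conn.wsched and are updated
 * with set_bit()/clear_bit()/test_and_set_bit() by the read/write work
 * functions and the poll machinery below.
 */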
  84
  85struct p9_poll_wait {
  86	struct p9_conn *conn;
  87	wait_queue_entry_t wait;
  88	wait_queue_head_t *wait_addr;
  89};
  90
  91/**
  92 * struct p9_conn - fd mux connection state information
  93 * @mux_list: list link for mux to manage multiple connections (?)
  94 * @client: reference to client instance for this connection
  95 * @err: error state
  96 * @req_list: accounting for requests which have been sent
  97 * @unsent_req_list: accounting for requests that haven't been sent
  98 * @rreq: read request
  99 * @wreq: write request
 100 * @req: current request being processed (if any)
 101 * @tmp_buf: temporary buffer to read in header
 102 * @rc: temporary fcall for reading current frame
 103 * @wpos: write position for current frame
 104 * @wsize: amount of data to write for current frame
 105 * @wbuf: current write buffer
 106 * @poll_pending_link: pending links to be polled per conn
 107 * @poll_wait: array of wait_q's for various worker threads
 108 * @pt: poll state
 109 * @rq: current read work
 110 * @wq: current write work
  111 * @wsched: scheduling state bits: Rworksched, Rpending, Wworksched, Wpending
 112 *
 113 */
 114
 115struct p9_conn {
 116	struct list_head mux_list;
 117	struct p9_client *client;
 118	int err;
 119	struct list_head req_list;
 120	struct list_head unsent_req_list;
 121	struct p9_req_t *rreq;
 122	struct p9_req_t *wreq;
 123	char tmp_buf[7];
 124	struct p9_fcall rc;
 125	int wpos;
 126	int wsize;
 127	char *wbuf;
 128	struct list_head poll_pending_link;
 129	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
 130	poll_table pt;
 131	struct work_struct rq;
 132	struct work_struct wq;
 133	unsigned long wsched;
 134};
 135
 136/**
 137 * struct p9_trans_fd - transport state
 138 * @rd: reference to file to read from
 139 * @wr: reference of file to write to
 140 * @conn: connection state reference
 141 *
 142 */
 143
 144struct p9_trans_fd {
 145	struct file *rd;
 146	struct file *wr;
 147	struct p9_conn conn;
 148};
 149
 150static void p9_poll_workfn(struct work_struct *work);
 151
 152static DEFINE_SPINLOCK(p9_poll_lock);
 153static LIST_HEAD(p9_poll_pending_list);
 154static DECLARE_WORK(p9_poll_work, p9_poll_workfn);
 155
 156static unsigned int p9_ipport_resv_min = P9_DEF_MIN_RESVPORT;
 157static unsigned int p9_ipport_resv_max = P9_DEF_MAX_RESVPORT;
 158
 159static void p9_mux_poll_stop(struct p9_conn *m)
 160{
 161	unsigned long flags;
 162	int i;
 163
 164	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
 165		struct p9_poll_wait *pwait = &m->poll_wait[i];
 166
 167		if (pwait->wait_addr) {
 168			remove_wait_queue(pwait->wait_addr, &pwait->wait);
 169			pwait->wait_addr = NULL;
 170		}
 171	}
 172
 173	spin_lock_irqsave(&p9_poll_lock, flags);
 174	list_del_init(&m->poll_pending_link);
 175	spin_unlock_irqrestore(&p9_poll_lock, flags);
 176
 177	flush_work(&p9_poll_work);
 178}
 179
 180/**
 181 * p9_conn_cancel - cancel all pending requests with error
 182 * @m: mux data
 183 * @err: error code
 184 *
 185 */
 186
 187static void p9_conn_cancel(struct p9_conn *m, int err)
 188{
 189	struct p9_req_t *req, *rtmp;
 190	LIST_HEAD(cancel_list);
 191
 192	p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
 193
 194	spin_lock(&m->client->lock);
 195
 196	if (m->err) {
 197		spin_unlock(&m->client->lock);
 198		return;
 199	}
 200
 201	m->err = err;
 202
 203	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
 204		list_move(&req->req_list, &cancel_list);
 205	}
 206	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
 207		list_move(&req->req_list, &cancel_list);
 208	}
 209
 210	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
 211		p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
 212		list_del(&req->req_list);
 213		if (!req->t_err)
 214			req->t_err = err;
 215		p9_client_cb(m->client, req, REQ_STATUS_ERROR);
 216	}
 217	spin_unlock(&m->client->lock);
 218}
 219
 220static __poll_t
 221p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err)
 222{
 223	__poll_t ret;
 224	struct p9_trans_fd *ts = NULL;
 225
 226	if (client && client->status == Connected)
 227		ts = client->trans;
 228
 229	if (!ts) {
 230		if (err)
 231			*err = -EREMOTEIO;
 232		return EPOLLERR;
 233	}
 234
 235	ret = vfs_poll(ts->rd, pt);
 236	if (ts->rd != ts->wr)
 237		ret = (ret & ~EPOLLOUT) | (vfs_poll(ts->wr, pt) & ~EPOLLIN);
 238	return ret;
 239}
 240
 241/**
  242 * p9_fd_read - read from a fd
 243 * @client: client instance
 244 * @v: buffer to receive data into
 245 * @len: size of receive buffer
 246 *
 247 */
 248
 249static int p9_fd_read(struct p9_client *client, void *v, int len)
 250{
 251	int ret;
 252	struct p9_trans_fd *ts = NULL;
 253	loff_t pos;
 254
 255	if (client && client->status != Disconnected)
 256		ts = client->trans;
 257
 258	if (!ts)
 259		return -EREMOTEIO;
 260
 261	if (!(ts->rd->f_flags & O_NONBLOCK))
 262		p9_debug(P9_DEBUG_ERROR, "blocking read ...\n");
 263
 264	pos = ts->rd->f_pos;
 265	ret = kernel_read(ts->rd, v, len, &pos);
 266	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
 267		client->status = Disconnected;
 268	return ret;
 269}
 270
 271/**
 272 * p9_read_work - called when there is some data to be read from a transport
 273 * @work: container of work to be done
 274 *
 275 */
 276
 277static void p9_read_work(struct work_struct *work)
 278{
 279	__poll_t n;
 280	int err;
 281	struct p9_conn *m;
 282
 283	m = container_of(work, struct p9_conn, rq);
 284
 285	if (m->err < 0)
 286		return;
 287
 288	p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset);
 289
 290	if (!m->rc.sdata) {
 291		m->rc.sdata = m->tmp_buf;
 292		m->rc.offset = 0;
 293		m->rc.capacity = 7; /* start by reading header */
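		/*
		 * A 9P message begins with a fixed 7-byte header,
		 * size[4] type[1] tag[2] (little endian); reading just these
		 * bytes is enough for p9_parse_header() below to recover the
		 * full frame length and the tag used to match the reply.
		 */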
 294	}
 295
 296	clear_bit(Rpending, &m->wsched);
 297	p9_debug(P9_DEBUG_TRANS, "read mux %p pos %zd size: %zd = %zd\n",
 298		 m, m->rc.offset, m->rc.capacity,
 299		 m->rc.capacity - m->rc.offset);
 300	err = p9_fd_read(m->client, m->rc.sdata + m->rc.offset,
 301			 m->rc.capacity - m->rc.offset);
 302	p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
 303	if (err == -EAGAIN)
 304		goto end_clear;
 305
 306	if (err <= 0)
 307		goto error;
 308
 309	m->rc.offset += err;
 310
 311	/* header read in */
 312	if ((!m->rreq) && (m->rc.offset == m->rc.capacity)) {
 313		p9_debug(P9_DEBUG_TRANS, "got new header\n");
 314
 315		/* Header size */
 316		m->rc.size = 7;
 317		err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0);
 318		if (err) {
 319			p9_debug(P9_DEBUG_ERROR,
 320				 "error parsing header: %d\n", err);
 321			goto error;
 322		}
 323
 324		if (m->rc.size >= m->client->msize) {
 325			p9_debug(P9_DEBUG_ERROR,
 326				 "requested packet size too big: %d\n",
 327				 m->rc.size);
 328			err = -EIO;
 329			goto error;
 330		}
 331
 332		p9_debug(P9_DEBUG_TRANS,
 333			 "mux %p pkt: size: %d bytes tag: %d\n",
 334			 m, m->rc.size, m->rc.tag);
 335
 336		m->rreq = p9_tag_lookup(m->client, m->rc.tag);
 337		if (!m->rreq || (m->rreq->status != REQ_STATUS_SENT)) {
 338			p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
 339				 m->rc.tag);
 340			err = -EIO;
 341			goto error;
 342		}
 343
 344		if (!m->rreq->rc.sdata) {
 345			p9_debug(P9_DEBUG_ERROR,
 346				 "No recv fcall for tag %d (req %p), disconnecting!\n",
 347				 m->rc.tag, m->rreq);
 348			m->rreq = NULL;
 349			err = -EIO;
 350			goto error;
 351		}
 352		m->rc.sdata = m->rreq->rc.sdata;
 353		memcpy(m->rc.sdata, m->tmp_buf, m->rc.capacity);
 354		m->rc.capacity = m->rc.size;
 355	}
 356
 357	/* packet is read in
 358	 * not an else because some packets (like clunk) have no payload
 359	 */
 360	if ((m->rreq) && (m->rc.offset == m->rc.capacity)) {
 361		p9_debug(P9_DEBUG_TRANS, "got new packet\n");
 362		m->rreq->rc.size = m->rc.offset;
 363		spin_lock(&m->client->lock);
 364		if (m->rreq->status == REQ_STATUS_SENT) {
 365			list_del(&m->rreq->req_list);
 366			p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD);
 367		} else if (m->rreq->status == REQ_STATUS_FLSHD) {
 368			/* Ignore replies associated with a cancelled request. */
 369			p9_debug(P9_DEBUG_TRANS,
 370				 "Ignore replies associated with a cancelled request\n");
 371		} else {
 372			spin_unlock(&m->client->lock);
 373			p9_debug(P9_DEBUG_ERROR,
 374				 "Request tag %d errored out while we were reading the reply\n",
 375				 m->rc.tag);
 376			err = -EIO;
 377			goto error;
 378		}
 379		spin_unlock(&m->client->lock);
 380		m->rc.sdata = NULL;
 381		m->rc.offset = 0;
 382		m->rc.capacity = 0;
 383		p9_req_put(m->rreq);
 384		m->rreq = NULL;
 385	}
 386
 387end_clear:
 388	clear_bit(Rworksched, &m->wsched);
 389
 390	if (!list_empty(&m->req_list)) {
 391		if (test_and_clear_bit(Rpending, &m->wsched))
 392			n = EPOLLIN;
 393		else
 394			n = p9_fd_poll(m->client, NULL, NULL);
 395
 396		if ((n & EPOLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
 397			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
 398			schedule_work(&m->rq);
 399		}
 400	}
 401
 402	return;
 403error:
 404	p9_conn_cancel(m, err);
 405	clear_bit(Rworksched, &m->wsched);
 406}
 407
 408/**
 409 * p9_fd_write - write to a socket
 410 * @client: client instance
 411 * @v: buffer to send data from
 412 * @len: size of send buffer
 413 *
 414 */
 415
 416static int p9_fd_write(struct p9_client *client, void *v, int len)
 417{
 418	ssize_t ret;
 419	struct p9_trans_fd *ts = NULL;
 420
 421	if (client && client->status != Disconnected)
 422		ts = client->trans;
 423
 424	if (!ts)
 425		return -EREMOTEIO;
 426
 427	if (!(ts->wr->f_flags & O_NONBLOCK))
 428		p9_debug(P9_DEBUG_ERROR, "blocking write ...\n");
 429
 430	ret = kernel_write(ts->wr, v, len, &ts->wr->f_pos);
 431	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
 432		client->status = Disconnected;
 433	return ret;
 434}
 435
 436/**
 437 * p9_write_work - called when a transport can send some data
 438 * @work: container for work to be done
 439 *
 440 */
 441
 442static void p9_write_work(struct work_struct *work)
 443{
 444	__poll_t n;
 445	int err;
 446	struct p9_conn *m;
 447	struct p9_req_t *req;
 448
 449	m = container_of(work, struct p9_conn, wq);
 450
 451	if (m->err < 0) {
 452		clear_bit(Wworksched, &m->wsched);
 453		return;
 454	}
 455
 456	if (!m->wsize) {
 457		spin_lock(&m->client->lock);
 458		if (list_empty(&m->unsent_req_list)) {
 459			clear_bit(Wworksched, &m->wsched);
 460			spin_unlock(&m->client->lock);
 461			return;
 462		}
 463
 464		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
 465			       req_list);
 466		req->status = REQ_STATUS_SENT;
 467		p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
 468		list_move_tail(&req->req_list, &m->req_list);
 469
 470		m->wbuf = req->tc.sdata;
 471		m->wsize = req->tc.size;
 472		m->wpos = 0;
 473		p9_req_get(req);
 474		m->wreq = req;
 475		spin_unlock(&m->client->lock);
 476	}
 477
 478	p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n",
 479		 m, m->wpos, m->wsize);
 480	clear_bit(Wpending, &m->wsched);
 481	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
 482	p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
 483	if (err == -EAGAIN)
 484		goto end_clear;
 485
 486
 487	if (err < 0)
 488		goto error;
 489	else if (err == 0) {
 490		err = -EREMOTEIO;
 491		goto error;
 492	}
 493
 494	m->wpos += err;
 495	if (m->wpos == m->wsize) {
 496		m->wpos = m->wsize = 0;
 497		p9_req_put(m->wreq);
 498		m->wreq = NULL;
 499	}
 500
 501end_clear:
 502	clear_bit(Wworksched, &m->wsched);
 503
 504	if (m->wsize || !list_empty(&m->unsent_req_list)) {
 505		if (test_and_clear_bit(Wpending, &m->wsched))
 506			n = EPOLLOUT;
 507		else
 508			n = p9_fd_poll(m->client, NULL, NULL);
 509
 510		if ((n & EPOLLOUT) &&
 511		   !test_and_set_bit(Wworksched, &m->wsched)) {
 512			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
 513			schedule_work(&m->wq);
 514		}
 515	}
 516
 517	return;
 518
 519error:
 520	p9_conn_cancel(m, err);
 521	clear_bit(Wworksched, &m->wsched);
 522}
 523
 524static int p9_pollwake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
 525{
 526	struct p9_poll_wait *pwait =
 527		container_of(wait, struct p9_poll_wait, wait);
 528	struct p9_conn *m = pwait->conn;
 529	unsigned long flags;
 530
 531	spin_lock_irqsave(&p9_poll_lock, flags);
 532	if (list_empty(&m->poll_pending_link))
 533		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
 534	spin_unlock_irqrestore(&p9_poll_lock, flags);
 535
 536	schedule_work(&p9_poll_work);
 537	return 1;
 538}
 539
 540/**
 541 * p9_pollwait - add poll task to the wait queue
 542 * @filp: file pointer being polled
 543 * @wait_address: wait_q to block on
 544 * @p: poll state
 545 *
 546 * called by files poll operation to add v9fs-poll task to files wait queue
 547 */
 548
 549static void
 550p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
 551{
 552	struct p9_conn *m = container_of(p, struct p9_conn, pt);
 553	struct p9_poll_wait *pwait = NULL;
 554	int i;
 555
 556	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
 557		if (m->poll_wait[i].wait_addr == NULL) {
 558			pwait = &m->poll_wait[i];
 559			break;
 560		}
 561	}
 562
 563	if (!pwait) {
 564		p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n");
 565		return;
 566	}
 567
 568	pwait->conn = m;
 569	pwait->wait_addr = wait_address;
 570	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
 571	add_wait_queue(wait_address, &pwait->wait);
 572}
 573
 574/**
 575 * p9_conn_create - initialize the per-session mux data
 576 * @client: client instance
 577 *
 578 * Note: Creates the polling task if this is the first session.
 579 */
 580
 581static void p9_conn_create(struct p9_client *client)
 582{
 583	__poll_t n;
 584	struct p9_trans_fd *ts = client->trans;
 585	struct p9_conn *m = &ts->conn;
 586
 587	p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);
 588
 589	INIT_LIST_HEAD(&m->mux_list);
 590	m->client = client;
 591
 592	INIT_LIST_HEAD(&m->req_list);
 593	INIT_LIST_HEAD(&m->unsent_req_list);
 594	INIT_WORK(&m->rq, p9_read_work);
 595	INIT_WORK(&m->wq, p9_write_work);
 596	INIT_LIST_HEAD(&m->poll_pending_link);
 597	init_poll_funcptr(&m->pt, p9_pollwait);
 598
 599	n = p9_fd_poll(client, &m->pt, NULL);
 600	if (n & EPOLLIN) {
 601		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
 602		set_bit(Rpending, &m->wsched);
 603	}
 604
 605	if (n & EPOLLOUT) {
 606		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
 607		set_bit(Wpending, &m->wsched);
 608	}
 609}
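/*
 * The p9_fd_poll() call above does double duty: because m->pt was primed
 * with p9_pollwait() via init_poll_funcptr(), polling the fds here also
 * registers p9_pollwake() on their wait queues, so later readiness events
 * queue the connection on p9_poll_pending_list and are handled by
 * p9_poll_workfn().
 */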
 610
 611/**
 612 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 613 * @m: connection to poll
 614 *
 615 */
 616
 617static void p9_poll_mux(struct p9_conn *m)
 618{
 619	__poll_t n;
 620	int err = -ECONNRESET;
 621
 622	if (m->err < 0)
 623		return;
 624
 625	n = p9_fd_poll(m->client, NULL, &err);
 626	if (n & (EPOLLERR | EPOLLHUP | EPOLLNVAL)) {
 627		p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
 628		p9_conn_cancel(m, err);
 629	}
 630
 631	if (n & EPOLLIN) {
 632		set_bit(Rpending, &m->wsched);
 633		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
 634		if (!test_and_set_bit(Rworksched, &m->wsched)) {
 635			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
 636			schedule_work(&m->rq);
 637		}
 638	}
 639
 640	if (n & EPOLLOUT) {
 641		set_bit(Wpending, &m->wsched);
 642		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
 643		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
 644		    !test_and_set_bit(Wworksched, &m->wsched)) {
 645			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
 646			schedule_work(&m->wq);
 647		}
 648	}
 649}
 650
 651/**
 652 * p9_fd_request - send 9P request
 653 * The function can sleep until the request is scheduled for sending.
 654 * The function can be interrupted. Return from the function is not
 655 * a guarantee that the request is sent successfully.
 656 *
 657 * @client: client instance
 658 * @req: request to be sent
 659 *
 660 */
 661
 662static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 663{
 664	__poll_t n;
 665	struct p9_trans_fd *ts = client->trans;
 666	struct p9_conn *m = &ts->conn;
 667
 668	p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
 669		 m, current, &req->tc, req->tc.id);
 670	if (m->err < 0)
 671		return m->err;
 672
 673	spin_lock(&client->lock);
 674	req->status = REQ_STATUS_UNSENT;
 675	list_add_tail(&req->req_list, &m->unsent_req_list);
 676	spin_unlock(&client->lock);
 677
 678	if (test_and_clear_bit(Wpending, &m->wsched))
 679		n = EPOLLOUT;
 680	else
 681		n = p9_fd_poll(m->client, NULL, NULL);
 682
 683	if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
 684		schedule_work(&m->wq);
 685
 686	return 0;
 687}
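/*
 * Rough request lifecycle on this transport, as implemented above:
 * p9_fd_request() parks the request on unsent_req_list and kicks
 * p9_write_work(), which marks it REQ_STATUS_SENT, moves it to req_list
 * and pushes tc.sdata out through p9_fd_write(); p9_read_work() later
 * matches the incoming reply by tag via p9_tag_lookup() and completes it
 * with p9_client_cb(..., REQ_STATUS_RCVD).
 */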
 688
 689static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
 690{
 691	int ret = 1;
 692
 693	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
 694
 695	spin_lock(&client->lock);
 696
 697	if (req->status == REQ_STATUS_UNSENT) {
 698		list_del(&req->req_list);
 699		req->status = REQ_STATUS_FLSHD;
 700		p9_req_put(req);
 701		ret = 0;
 702	}
 703	spin_unlock(&client->lock);
 704
 705	return ret;
 706}
 707
 708static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
 709{
 710	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
 711
 712	spin_lock(&client->lock);
 713	/* Ignore cancelled request if message has been received
 714	 * before lock.
 715	 */
 716	if (req->status == REQ_STATUS_RCVD) {
 717		spin_unlock(&client->lock);
 718		return 0;
 719	}
 720
 721	/* we haven't received a response for oldreq,
 722	 * remove it from the list.
 723	 */
 724	list_del(&req->req_list);
 725	req->status = REQ_STATUS_FLSHD;
 726	spin_unlock(&client->lock);
 727	p9_req_put(req);
 728
 729	return 0;
 730}
 731
 732static int p9_fd_show_options(struct seq_file *m, struct p9_client *clnt)
 733{
 734	if (clnt->trans_mod == &p9_tcp_trans) {
 735		if (clnt->trans_opts.tcp.port != P9_PORT)
 736			seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port);
 737	} else if (clnt->trans_mod == &p9_fd_trans) {
 738		if (clnt->trans_opts.fd.rfd != ~0)
 739			seq_printf(m, ",rfd=%u", clnt->trans_opts.fd.rfd);
 740		if (clnt->trans_opts.fd.wfd != ~0)
 741			seq_printf(m, ",wfd=%u", clnt->trans_opts.fd.wfd);
 742	}
 743	return 0;
 744}
 745
 746/**
 747 * parse_opts - parse mount options into p9_fd_opts structure
 748 * @params: options string passed from mount
 749 * @opts: fd transport-specific structure to parse options into
 750 *
 751 * Returns 0 upon success, -ERRNO upon failure
 752 */
 753
 754static int parse_opts(char *params, struct p9_fd_opts *opts)
 755{
 756	char *p;
 757	substring_t args[MAX_OPT_ARGS];
 758	int option;
 759	char *options, *tmp_options;
 760
 761	opts->port = P9_PORT;
 762	opts->rfd = ~0;
 763	opts->wfd = ~0;
 764	opts->privport = false;
 765
 766	if (!params)
 767		return 0;
 768
 769	tmp_options = kstrdup(params, GFP_KERNEL);
 770	if (!tmp_options) {
 771		p9_debug(P9_DEBUG_ERROR,
 772			 "failed to allocate copy of option string\n");
 773		return -ENOMEM;
 774	}
 775	options = tmp_options;
 776
 777	while ((p = strsep(&options, ",")) != NULL) {
 778		int token;
 779		int r;
 780		if (!*p)
 781			continue;
 782		token = match_token(p, tokens, args);
 783		if ((token != Opt_err) && (token != Opt_privport)) {
 784			r = match_int(&args[0], &option);
 785			if (r < 0) {
 786				p9_debug(P9_DEBUG_ERROR,
 787					 "integer field, but no integer?\n");
 788				continue;
 789			}
 790		}
 791		switch (token) {
 792		case Opt_port:
 793			opts->port = option;
 794			break;
 795		case Opt_rfdno:
 796			opts->rfd = option;
 797			break;
 798		case Opt_wfdno:
 799			opts->wfd = option;
 800			break;
 801		case Opt_privport:
 802			opts->privport = true;
 803			break;
 804		default:
 805			continue;
 806		}
 807	}
 808
 809	kfree(tmp_options);
 810	return 0;
 811}
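/*
 * Example (illustrative only): for args = "rfdno=3,wfdno=4,privport",
 * parse_opts() returns 0 with opts->rfd = 3, opts->wfd = 4,
 * opts->privport = true and opts->port left at the default P9_PORT (564);
 * tokens it does not recognize are simply skipped.
 */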
 812
 813static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
 814{
 815	struct p9_trans_fd *ts = kzalloc(sizeof(struct p9_trans_fd),
 816					   GFP_KERNEL);
 817	if (!ts)
 818		return -ENOMEM;
 819
 820	ts->rd = fget(rfd);
 821	if (!ts->rd)
 822		goto out_free_ts;
 823	if (!(ts->rd->f_mode & FMODE_READ))
 824		goto out_put_rd;
 825	ts->wr = fget(wfd);
 826	if (!ts->wr)
 827		goto out_put_rd;
 828	if (!(ts->wr->f_mode & FMODE_WRITE))
 829		goto out_put_wr;
 830
 831	client->trans = ts;
 832	client->status = Connected;
 833
 834	return 0;
 835
 836out_put_wr:
 837	fput(ts->wr);
 838out_put_rd:
 839	fput(ts->rd);
 840out_free_ts:
 841	kfree(ts);
 842	return -EIO;
 843}
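/*
 * p9_fd_open() backs "trans=fd": the descriptors are expected to already
 * be open in the mounting process, e.g. the two ends of a pipe or socket
 * pair connected to a user space 9P server, and are only checked here for
 * read/write mode before being wrapped in a p9_trans_fd.
 */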
 844
 845static int p9_socket_open(struct p9_client *client, struct socket *csocket)
 846{
 847	struct p9_trans_fd *p;
 848	struct file *file;
 849
 850	p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
 851	if (!p)
 852		return -ENOMEM;
 853
 854	csocket->sk->sk_allocation = GFP_NOIO;
 855	file = sock_alloc_file(csocket, 0, NULL);
 856	if (IS_ERR(file)) {
 857		pr_err("%s (%d): failed to map fd\n",
 858		       __func__, task_pid_nr(current));
 859		kfree(p);
 860		return PTR_ERR(file);
 861	}
 862
 863	get_file(file);
 864	p->wr = p->rd = file;
 865	client->trans = p;
 866	client->status = Connected;
 867
 868	p->rd->f_flags |= O_NONBLOCK;
 869
 870	p9_conn_create(client);
 871	return 0;
 872}
 873
 874/**
 875 * p9_conn_destroy - cancels all pending requests of mux
 876 * @m: mux to destroy
 877 *
 878 */
 879
 880static void p9_conn_destroy(struct p9_conn *m)
 881{
 882	p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n",
 883		 m, m->mux_list.prev, m->mux_list.next);
 884
 885	p9_mux_poll_stop(m);
 886	cancel_work_sync(&m->rq);
 887	if (m->rreq) {
 888		p9_req_put(m->rreq);
 889		m->rreq = NULL;
 890	}
 891	cancel_work_sync(&m->wq);
 892	if (m->wreq) {
 893		p9_req_put(m->wreq);
 894		m->wreq = NULL;
 895	}
 896
 897	p9_conn_cancel(m, -ECONNRESET);
 898
 899	m->client = NULL;
 900}
 901
 902/**
 903 * p9_fd_close - shutdown file descriptor transport
 904 * @client: client instance
 905 *
 906 */
 907
 908static void p9_fd_close(struct p9_client *client)
 909{
 910	struct p9_trans_fd *ts;
 911
 912	if (!client)
 913		return;
 914
 915	ts = client->trans;
 916	if (!ts)
 917		return;
 918
 919	client->status = Disconnected;
 920
 921	p9_conn_destroy(&ts->conn);
 922
 923	if (ts->rd)
 924		fput(ts->rd);
 925	if (ts->wr)
 926		fput(ts->wr);
 927
 928	kfree(ts);
 929}
 930
 931/*
 932 * stolen from NFS - maybe should be made a generic function?
 933 */
 934static inline int valid_ipaddr4(const char *buf)
 935{
 936	int rc, count, in[4];
 937
 938	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
 939	if (rc != 4)
 940		return -EINVAL;
 941	for (count = 0; count < 4; count++) {
 942		if (in[count] > 255)
 943			return -EINVAL;
 944	}
 945	return 0;
 946}
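/*
 * Only dotted-quad IPv4 strings are accepted here ("10.0.0.1" passes,
 * "fe80::1" does not), so the tcp transport below is effectively
 * IPv4-only in this version.
 */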
 947
 948static int p9_bind_privport(struct socket *sock)
 949{
 950	struct sockaddr_in cl;
 951	int port, err = -EINVAL;
 952
 953	memset(&cl, 0, sizeof(cl));
 954	cl.sin_family = AF_INET;
 955	cl.sin_addr.s_addr = htonl(INADDR_ANY);
 956	for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) {
 957		cl.sin_port = htons((ushort)port);
 958		err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));
 959		if (err != -EADDRINUSE)
 960			break;
 961	}
 962	return err;
 963}
 964
 965
 966static int
 967p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 968{
 969	int err;
 970	struct socket *csocket;
 971	struct sockaddr_in sin_server;
 972	struct p9_fd_opts opts;
 973
 974	err = parse_opts(args, &opts);
 975	if (err < 0)
 976		return err;
 977
 978	if (addr == NULL || valid_ipaddr4(addr) < 0)
 979		return -EINVAL;
 980
 981	csocket = NULL;
 982
 983	client->trans_opts.tcp.port = opts.port;
 984	client->trans_opts.tcp.privport = opts.privport;
 985	sin_server.sin_family = AF_INET;
 986	sin_server.sin_addr.s_addr = in_aton(addr);
 987	sin_server.sin_port = htons(opts.port);
 988	err = __sock_create(current->nsproxy->net_ns, PF_INET,
 989			    SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
 990	if (err) {
 991		pr_err("%s (%d): problem creating socket\n",
 992		       __func__, task_pid_nr(current));
 993		return err;
 994	}
 995
 996	if (opts.privport) {
 997		err = p9_bind_privport(csocket);
 998		if (err < 0) {
 999			pr_err("%s (%d): problem binding to privport\n",
1000			       __func__, task_pid_nr(current));
1001			sock_release(csocket);
1002			return err;
1003		}
1004	}
1005
1006	err = csocket->ops->connect(csocket,
1007				    (struct sockaddr *)&sin_server,
1008				    sizeof(struct sockaddr_in), 0);
1009	if (err < 0) {
1010		pr_err("%s (%d): problem connecting socket to %s\n",
1011		       __func__, task_pid_nr(current), addr);
1012		sock_release(csocket);
1013		return err;
1014	}
1015
1016	return p9_socket_open(client, csocket);
1017}
1018
1019static int
1020p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
1021{
1022	int err;
1023	struct socket *csocket;
1024	struct sockaddr_un sun_server;
1025
1026	csocket = NULL;
1027
1028	if (!addr || !strlen(addr))
1029		return -EINVAL;
1030
1031	if (strlen(addr) >= UNIX_PATH_MAX) {
1032		pr_err("%s (%d): address too long: %s\n",
1033		       __func__, task_pid_nr(current), addr);
1034		return -ENAMETOOLONG;
1035	}
1036
1037	sun_server.sun_family = PF_UNIX;
1038	strcpy(sun_server.sun_path, addr);
1039	err = __sock_create(current->nsproxy->net_ns, PF_UNIX,
1040			    SOCK_STREAM, 0, &csocket, 1);
1041	if (err < 0) {
1042		pr_err("%s (%d): problem creating socket\n",
1043		       __func__, task_pid_nr(current));
1044
1045		return err;
1046	}
1047	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
1048			sizeof(struct sockaddr_un) - 1, 0);
1049	if (err < 0) {
1050		pr_err("%s (%d): problem connecting socket: %s: %d\n",
1051		       __func__, task_pid_nr(current), addr, err);
1052		sock_release(csocket);
1053		return err;
1054	}
1055
1056	return p9_socket_open(client, csocket);
1057}
1058
1059static int
1060p9_fd_create(struct p9_client *client, const char *addr, char *args)
1061{
1062	int err;
1063	struct p9_fd_opts opts;
1064
1065	parse_opts(args, &opts);
1066	client->trans_opts.fd.rfd = opts.rfd;
1067	client->trans_opts.fd.wfd = opts.wfd;
1068
1069	if (opts.rfd == ~0 || opts.wfd == ~0) {
1070		pr_err("Insufficient options for proto=fd\n");
1071		return -ENOPROTOOPT;
1072	}
1073
1074	err = p9_fd_open(client, opts.rfd, opts.wfd);
1075	if (err < 0)
1076		return err;
1077
1078	p9_conn_create(client);
1079
1080	return 0;
1081}
1082
1083static struct p9_trans_module p9_tcp_trans = {
1084	.name = "tcp",
1085	.maxsize = MAX_SOCK_BUF,
1086	.def = 0,
1087	.create = p9_fd_create_tcp,
1088	.close = p9_fd_close,
1089	.request = p9_fd_request,
1090	.cancel = p9_fd_cancel,
1091	.cancelled = p9_fd_cancelled,
1092	.show_options = p9_fd_show_options,
1093	.owner = THIS_MODULE,
1094};
1095
1096static struct p9_trans_module p9_unix_trans = {
1097	.name = "unix",
1098	.maxsize = MAX_SOCK_BUF,
1099	.def = 0,
1100	.create = p9_fd_create_unix,
1101	.close = p9_fd_close,
1102	.request = p9_fd_request,
1103	.cancel = p9_fd_cancel,
1104	.cancelled = p9_fd_cancelled,
1105	.show_options = p9_fd_show_options,
1106	.owner = THIS_MODULE,
1107};
1108
1109static struct p9_trans_module p9_fd_trans = {
1110	.name = "fd",
1111	.maxsize = MAX_SOCK_BUF,
1112	.def = 0,
1113	.create = p9_fd_create,
1114	.close = p9_fd_close,
1115	.request = p9_fd_request,
1116	.cancel = p9_fd_cancel,
1117	.cancelled = p9_fd_cancelled,
1118	.show_options = p9_fd_show_options,
1119	.owner = THIS_MODULE,
1120};
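/*
 * The three transports above are selected with the trans= mount option;
 * illustrative command lines (not from the source):
 *
 *     mount -t 9p -o trans=tcp,port=564 192.168.1.2 /mnt/9
 *     mount -t 9p -o trans=fd,rfdno=3,wfdno=4 nodev /mnt/9
 *     mount -t 9p -o trans=unix /tmp/ns.user/acme /mnt/9
 */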
1121
1122/**
1123 * p9_poll_workfn - poll worker thread
1124 * @work: work queue
1125 *
1126 * polls all v9fs transports for new events and queues the appropriate
1127 * work to the work queue
1128 *
1129 */
1130
1131static void p9_poll_workfn(struct work_struct *work)
1132{
1133	unsigned long flags;
1134
1135	p9_debug(P9_DEBUG_TRANS, "start %p\n", current);
1136
1137	spin_lock_irqsave(&p9_poll_lock, flags);
1138	while (!list_empty(&p9_poll_pending_list)) {
1139		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
1140							struct p9_conn,
1141							poll_pending_link);
1142		list_del_init(&conn->poll_pending_link);
1143		spin_unlock_irqrestore(&p9_poll_lock, flags);
1144
1145		p9_poll_mux(conn);
1146
1147		spin_lock_irqsave(&p9_poll_lock, flags);
1148	}
1149	spin_unlock_irqrestore(&p9_poll_lock, flags);
1150
1151	p9_debug(P9_DEBUG_TRANS, "finish\n");
1152}
1153
1154int p9_trans_fd_init(void)
1155{
1156	v9fs_register_trans(&p9_tcp_trans);
1157	v9fs_register_trans(&p9_unix_trans);
1158	v9fs_register_trans(&p9_fd_trans);
1159
1160	return 0;
1161}
1162
1163void p9_trans_fd_exit(void)
1164{
1165	flush_work(&p9_poll_work);
1166	v9fs_unregister_trans(&p9_tcp_trans);
1167	v9fs_unregister_trans(&p9_unix_trans);
1168	v9fs_unregister_trans(&p9_fd_trans);
1169}
v4.17
 
   1/*
   2 * linux/fs/9p/trans_fd.c
   3 *
   4 * Fd transport layer.  Includes deprecated socket layer.
   5 *
   6 *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
   7 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
   8 *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
   9 *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
  10 *
  11 *  This program is free software; you can redistribute it and/or modify
  12 *  it under the terms of the GNU General Public License version 2
  13 *  as published by the Free Software Foundation.
  14 *
  15 *  This program is distributed in the hope that it will be useful,
  16 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 *  GNU General Public License for more details.
  19 *
  20 *  You should have received a copy of the GNU General Public License
  21 *  along with this program; if not, write to:
  22 *  Free Software Foundation
  23 *  51 Franklin Street, Fifth Floor
  24 *  Boston, MA  02111-1301  USA
  25 *
  26 */
  27
  28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  29
  30#include <linux/in.h>
  31#include <linux/module.h>
  32#include <linux/net.h>
  33#include <linux/ipv6.h>
  34#include <linux/kthread.h>
  35#include <linux/errno.h>
  36#include <linux/kernel.h>
  37#include <linux/un.h>
  38#include <linux/uaccess.h>
  39#include <linux/inet.h>
  40#include <linux/idr.h>
  41#include <linux/file.h>
  42#include <linux/parser.h>
  43#include <linux/slab.h>
  44#include <linux/seq_file.h>
  45#include <net/9p/9p.h>
  46#include <net/9p/client.h>
  47#include <net/9p/transport.h>
  48
  49#include <linux/syscalls.h> /* killme */
  50
  51#define P9_PORT 564
  52#define MAX_SOCK_BUF (64*1024)
  53#define MAXPOLLWADDR	2
  54
  55static struct p9_trans_module p9_tcp_trans;
  56static struct p9_trans_module p9_fd_trans;
  57
  58/**
  59 * struct p9_fd_opts - per-transport options
  60 * @rfd: file descriptor for reading (trans=fd)
  61 * @wfd: file descriptor for writing (trans=fd)
  62 * @port: port to connect to (trans=tcp)
  63 *
  64 */
  65
  66struct p9_fd_opts {
  67	int rfd;
  68	int wfd;
  69	u16 port;
  70	bool privport;
  71};
  72
  73/*
  74  * Option Parsing (code inspired by NFS code)
  75  *  - a little lazy - parse all fd-transport options
  76  */
  77
  78enum {
  79	/* Options that take integer arguments */
  80	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
  81	/* Options that take no arguments */
  82	Opt_privport,
  83};
  84
  85static const match_table_t tokens = {
  86	{Opt_port, "port=%u"},
  87	{Opt_rfdno, "rfdno=%u"},
  88	{Opt_wfdno, "wfdno=%u"},
  89	{Opt_privport, "privport"},
  90	{Opt_err, NULL},
  91};
  92
  93enum {
  94	Rworksched = 1,		/* read work scheduled or running */
  95	Rpending = 2,		/* can read */
  96	Wworksched = 4,		/* write work scheduled or running */
  97	Wpending = 8,		/* can write */
  98};
  99
 100struct p9_poll_wait {
 101	struct p9_conn *conn;
 102	wait_queue_entry_t wait;
 103	wait_queue_head_t *wait_addr;
 104};
 105
 106/**
 107 * struct p9_conn - fd mux connection state information
 108 * @mux_list: list link for mux to manage multiple connections (?)
 109 * @client: reference to client instance for this connection
 110 * @err: error state
 111 * @req_list: accounting for requests which have been sent
 112 * @unsent_req_list: accounting for requests that haven't been sent
 113 * @req: current request being processed (if any)
 114 * @tmp_buf: temporary buffer to read in header
 115 * @rc: temporary fcall for reading current frame
 116 * @wpos: write position for current frame
 117 * @wsize: amount of data to write for current frame
 118 * @wbuf: current write buffer
 119 * @poll_pending_link: pending links to be polled per conn
 120 * @poll_wait: array of wait_q's for various worker threads
 121 * @pt: poll state
 122 * @rq: current read work
 123 * @wq: current write work
  124 * @wsched: scheduling state bits: Rworksched, Rpending, Wworksched, Wpending
 125 *
 126 */
 127
 128struct p9_conn {
 129	struct list_head mux_list;
 130	struct p9_client *client;
 131	int err;
 132	struct list_head req_list;
 133	struct list_head unsent_req_list;
 134	struct p9_req_t *req;
 135	char tmp_buf[7];
 136	struct p9_fcall rc;
 137	int wpos;
 138	int wsize;
 139	char *wbuf;
 140	struct list_head poll_pending_link;
 141	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
 142	poll_table pt;
 143	struct work_struct rq;
 144	struct work_struct wq;
 145	unsigned long wsched;
 146};
 147
 148/**
 149 * struct p9_trans_fd - transport state
 150 * @rd: reference to file to read from
 151 * @wr: reference of file to write to
 152 * @conn: connection state reference
 153 *
 154 */
 155
 156struct p9_trans_fd {
 157	struct file *rd;
 158	struct file *wr;
 159	struct p9_conn conn;
 160};
 161
 162static void p9_poll_workfn(struct work_struct *work);
 163
 164static DEFINE_SPINLOCK(p9_poll_lock);
 165static LIST_HEAD(p9_poll_pending_list);
 166static DECLARE_WORK(p9_poll_work, p9_poll_workfn);
 167
 168static unsigned int p9_ipport_resv_min = P9_DEF_MIN_RESVPORT;
 169static unsigned int p9_ipport_resv_max = P9_DEF_MAX_RESVPORT;
 170
 171static void p9_mux_poll_stop(struct p9_conn *m)
 172{
 173	unsigned long flags;
 174	int i;
 175
 176	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
 177		struct p9_poll_wait *pwait = &m->poll_wait[i];
 178
 179		if (pwait->wait_addr) {
 180			remove_wait_queue(pwait->wait_addr, &pwait->wait);
 181			pwait->wait_addr = NULL;
 182		}
 183	}
 184
 185	spin_lock_irqsave(&p9_poll_lock, flags);
 186	list_del_init(&m->poll_pending_link);
 187	spin_unlock_irqrestore(&p9_poll_lock, flags);
 188}
 189
 190/**
 191 * p9_conn_cancel - cancel all pending requests with error
 192 * @m: mux data
 193 * @err: error code
 194 *
 195 */
 196
 197static void p9_conn_cancel(struct p9_conn *m, int err)
 198{
 199	struct p9_req_t *req, *rtmp;
 200	unsigned long flags;
 201	LIST_HEAD(cancel_list);
 202
 203	p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
 204
 205	spin_lock_irqsave(&m->client->lock, flags);
 206
 207	if (m->err) {
 208		spin_unlock_irqrestore(&m->client->lock, flags);
 209		return;
 210	}
 211
 212	m->err = err;
 213
 214	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
 215		list_move(&req->req_list, &cancel_list);
 216	}
 217	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
 218		list_move(&req->req_list, &cancel_list);
 219	}
 220	spin_unlock_irqrestore(&m->client->lock, flags);
 221
 222	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
 223		p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
 224		list_del(&req->req_list);
 225		if (!req->t_err)
 226			req->t_err = err;
 227		p9_client_cb(m->client, req, REQ_STATUS_ERROR);
 228	}
 229}
 230
 231static __poll_t
 232p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err)
 233{
 234	__poll_t ret, n;
 235	struct p9_trans_fd *ts = NULL;
 236
 237	if (client && client->status == Connected)
 238		ts = client->trans;
 239
 240	if (!ts) {
 241		if (err)
 242			*err = -EREMOTEIO;
 243		return EPOLLERR;
 244	}
 245
 246	if (!ts->rd->f_op->poll)
 247		ret = DEFAULT_POLLMASK;
 248	else
 249		ret = ts->rd->f_op->poll(ts->rd, pt);
 250
 251	if (ts->rd != ts->wr) {
 252		if (!ts->wr->f_op->poll)
 253			n = DEFAULT_POLLMASK;
 254		else
 255			n = ts->wr->f_op->poll(ts->wr, pt);
 256		ret = (ret & ~EPOLLOUT) | (n & ~EPOLLIN);
 257	}
 258
 259	return ret;
 260}
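/*
 * The open-coded f_op->poll / DEFAULT_POLLMASK fallback above is what the
 * vfs_poll() helper does in later kernels; compare the v5.14.15 copy of
 * this function earlier in this document.
 */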
 261
 262/**
  263 * p9_fd_read - read from a fd
 264 * @client: client instance
 265 * @v: buffer to receive data into
 266 * @len: size of receive buffer
 267 *
 268 */
 269
 270static int p9_fd_read(struct p9_client *client, void *v, int len)
 271{
 272	int ret;
 273	struct p9_trans_fd *ts = NULL;
 274	loff_t pos;
 275
 276	if (client && client->status != Disconnected)
 277		ts = client->trans;
 278
 279	if (!ts)
 280		return -EREMOTEIO;
 281
 282	if (!(ts->rd->f_flags & O_NONBLOCK))
 283		p9_debug(P9_DEBUG_ERROR, "blocking read ...\n");
 284
 285	pos = ts->rd->f_pos;
 286	ret = kernel_read(ts->rd, v, len, &pos);
 287	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
 288		client->status = Disconnected;
 289	return ret;
 290}
 291
 292/**
 293 * p9_read_work - called when there is some data to be read from a transport
 294 * @work: container of work to be done
 295 *
 296 */
 297
 298static void p9_read_work(struct work_struct *work)
 299{
 300	__poll_t n;
 301	int err;
 302	struct p9_conn *m;
 303	int status = REQ_STATUS_ERROR;
 304
 305	m = container_of(work, struct p9_conn, rq);
 306
 307	if (m->err < 0)
 308		return;
 309
 310	p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset);
 311
 312	if (!m->rc.sdata) {
 313		m->rc.sdata = m->tmp_buf;
 314		m->rc.offset = 0;
 315		m->rc.capacity = 7; /* start by reading header */
 316	}
 317
 318	clear_bit(Rpending, &m->wsched);
 319	p9_debug(P9_DEBUG_TRANS, "read mux %p pos %zd size: %zd = %zd\n",
 320		 m, m->rc.offset, m->rc.capacity,
 321		 m->rc.capacity - m->rc.offset);
 322	err = p9_fd_read(m->client, m->rc.sdata + m->rc.offset,
 323			 m->rc.capacity - m->rc.offset);
 324	p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
 325	if (err == -EAGAIN)
 326		goto end_clear;
 327
 328	if (err <= 0)
 329		goto error;
 330
 331	m->rc.offset += err;
 332
 333	/* header read in */
 334	if ((!m->req) && (m->rc.offset == m->rc.capacity)) {
 335		p9_debug(P9_DEBUG_TRANS, "got new header\n");
 336
 337		err = p9_parse_header(&m->rc, NULL, NULL, NULL, 0);
 338		if (err) {
 339			p9_debug(P9_DEBUG_ERROR,
 340				 "error parsing header: %d\n", err);
 341			goto error;
 342		}
 343
 344		if (m->rc.size >= m->client->msize) {
 345			p9_debug(P9_DEBUG_ERROR,
 346				 "requested packet size too big: %d\n",
 347				 m->rc.size);
 348			err = -EIO;
 349			goto error;
 350		}
 351
 352		p9_debug(P9_DEBUG_TRANS,
 353			 "mux %p pkt: size: %d bytes tag: %d\n",
 354			 m, m->rc.size, m->rc.tag);
 355
 356		m->req = p9_tag_lookup(m->client, m->rc.tag);
 357		if (!m->req || (m->req->status != REQ_STATUS_SENT)) {
 358			p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
 359				 m->rc.tag);
 360			err = -EIO;
 361			goto error;
 362		}
 363
 364		if (m->req->rc == NULL) {
 365			p9_debug(P9_DEBUG_ERROR,
 366				 "No recv fcall for tag %d (req %p), disconnecting!\n",
 367				 m->rc.tag, m->req);
 368			m->req = NULL;
 369			err = -EIO;
 370			goto error;
 371		}
 372		m->rc.sdata = (char *)m->req->rc + sizeof(struct p9_fcall);
 373		memcpy(m->rc.sdata, m->tmp_buf, m->rc.capacity);
 374		m->rc.capacity = m->rc.size;
 375	}
 376
 377	/* packet is read in
 378	 * not an else because some packets (like clunk) have no payload
 379	 */
 380	if ((m->req) && (m->rc.offset == m->rc.capacity)) {
 381		p9_debug(P9_DEBUG_TRANS, "got new packet\n");
 382		spin_lock(&m->client->lock);
 383		if (m->req->status != REQ_STATUS_ERROR)
 384			status = REQ_STATUS_RCVD;
 385		list_del(&m->req->req_list);
 386		spin_unlock(&m->client->lock);
 387		p9_client_cb(m->client, m->req, status);
 388		m->rc.sdata = NULL;
 389		m->rc.offset = 0;
 390		m->rc.capacity = 0;
 391		m->req = NULL;
 392	}
 393
 394end_clear:
 395	clear_bit(Rworksched, &m->wsched);
 396
 397	if (!list_empty(&m->req_list)) {
 398		if (test_and_clear_bit(Rpending, &m->wsched))
 399			n = EPOLLIN;
 400		else
 401			n = p9_fd_poll(m->client, NULL, NULL);
 402
 403		if ((n & EPOLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
 404			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
 405			schedule_work(&m->rq);
 406		}
 407	}
 408
 409	return;
 410error:
 411	p9_conn_cancel(m, err);
 412	clear_bit(Rworksched, &m->wsched);
 413}
 414
 415/**
 416 * p9_fd_write - write to a socket
 417 * @client: client instance
 418 * @v: buffer to send data from
 419 * @len: size of send buffer
 420 *
 421 */
 422
 423static int p9_fd_write(struct p9_client *client, void *v, int len)
 424{
 425	ssize_t ret;
 426	struct p9_trans_fd *ts = NULL;
 427
 428	if (client && client->status != Disconnected)
 429		ts = client->trans;
 430
 431	if (!ts)
 432		return -EREMOTEIO;
 433
 434	if (!(ts->wr->f_flags & O_NONBLOCK))
 435		p9_debug(P9_DEBUG_ERROR, "blocking write ...\n");
 436
 437	ret = kernel_write(ts->wr, v, len, &ts->wr->f_pos);
 438	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
 439		client->status = Disconnected;
 440	return ret;
 441}
 442
 443/**
 444 * p9_write_work - called when a transport can send some data
 445 * @work: container for work to be done
 446 *
 447 */
 448
 449static void p9_write_work(struct work_struct *work)
 450{
 451	__poll_t n;
 452	int err;
 453	struct p9_conn *m;
 454	struct p9_req_t *req;
 455
 456	m = container_of(work, struct p9_conn, wq);
 457
 458	if (m->err < 0) {
 459		clear_bit(Wworksched, &m->wsched);
 460		return;
 461	}
 462
 463	if (!m->wsize) {
 464		spin_lock(&m->client->lock);
 465		if (list_empty(&m->unsent_req_list)) {
 466			clear_bit(Wworksched, &m->wsched);
 467			spin_unlock(&m->client->lock);
 468			return;
 469		}
 470
 471		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
 472			       req_list);
 473		req->status = REQ_STATUS_SENT;
 474		p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
 475		list_move_tail(&req->req_list, &m->req_list);
 476
 477		m->wbuf = req->tc->sdata;
 478		m->wsize = req->tc->size;
 479		m->wpos = 0;
 480		spin_unlock(&m->client->lock);
 481	}
 482
 483	p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n",
 484		 m, m->wpos, m->wsize);
 485	clear_bit(Wpending, &m->wsched);
 486	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
 487	p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
 488	if (err == -EAGAIN)
 489		goto end_clear;
 490
 491
 492	if (err < 0)
 493		goto error;
 494	else if (err == 0) {
 495		err = -EREMOTEIO;
 496		goto error;
 497	}
 498
 499	m->wpos += err;
 500	if (m->wpos == m->wsize)
 501		m->wpos = m->wsize = 0;
 502
 503end_clear:
 504	clear_bit(Wworksched, &m->wsched);
 505
 506	if (m->wsize || !list_empty(&m->unsent_req_list)) {
 507		if (test_and_clear_bit(Wpending, &m->wsched))
 508			n = EPOLLOUT;
 509		else
 510			n = p9_fd_poll(m->client, NULL, NULL);
 511
 512		if ((n & EPOLLOUT) &&
 513		   !test_and_set_bit(Wworksched, &m->wsched)) {
 514			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
 515			schedule_work(&m->wq);
 516		}
 517	}
 518
 519	return;
 520
 521error:
 522	p9_conn_cancel(m, err);
 523	clear_bit(Wworksched, &m->wsched);
 524}
 525
 526static int p9_pollwake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
 527{
 528	struct p9_poll_wait *pwait =
 529		container_of(wait, struct p9_poll_wait, wait);
 530	struct p9_conn *m = pwait->conn;
 531	unsigned long flags;
 532
 533	spin_lock_irqsave(&p9_poll_lock, flags);
 534	if (list_empty(&m->poll_pending_link))
 535		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
 536	spin_unlock_irqrestore(&p9_poll_lock, flags);
 537
 538	schedule_work(&p9_poll_work);
 539	return 1;
 540}
 541
 542/**
 543 * p9_pollwait - add poll task to the wait queue
 544 * @filp: file pointer being polled
 545 * @wait_address: wait_q to block on
 546 * @p: poll state
 547 *
 548 * called by files poll operation to add v9fs-poll task to files wait queue
 549 */
 550
 551static void
 552p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
 553{
 554	struct p9_conn *m = container_of(p, struct p9_conn, pt);
 555	struct p9_poll_wait *pwait = NULL;
 556	int i;
 557
 558	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
 559		if (m->poll_wait[i].wait_addr == NULL) {
 560			pwait = &m->poll_wait[i];
 561			break;
 562		}
 563	}
 564
 565	if (!pwait) {
 566		p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n");
 567		return;
 568	}
 569
 570	pwait->conn = m;
 571	pwait->wait_addr = wait_address;
 572	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
 573	add_wait_queue(wait_address, &pwait->wait);
 574}
 575
 576/**
 577 * p9_conn_create - initialize the per-session mux data
 578 * @client: client instance
 579 *
 580 * Note: Creates the polling task if this is the first session.
 581 */
 582
 583static void p9_conn_create(struct p9_client *client)
 584{
 585	__poll_t n;
 586	struct p9_trans_fd *ts = client->trans;
 587	struct p9_conn *m = &ts->conn;
 588
 589	p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);
 590
 591	INIT_LIST_HEAD(&m->mux_list);
 592	m->client = client;
 593
 594	INIT_LIST_HEAD(&m->req_list);
 595	INIT_LIST_HEAD(&m->unsent_req_list);
 596	INIT_WORK(&m->rq, p9_read_work);
 597	INIT_WORK(&m->wq, p9_write_work);
 598	INIT_LIST_HEAD(&m->poll_pending_link);
 599	init_poll_funcptr(&m->pt, p9_pollwait);
 600
 601	n = p9_fd_poll(client, &m->pt, NULL);
 602	if (n & EPOLLIN) {
 603		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
 604		set_bit(Rpending, &m->wsched);
 605	}
 606
 607	if (n & EPOLLOUT) {
 608		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
 609		set_bit(Wpending, &m->wsched);
 610	}
 611}
 612
 613/**
 614 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 615 * @m: connection to poll
 616 *
 617 */
 618
 619static void p9_poll_mux(struct p9_conn *m)
 620{
 621	__poll_t n;
 622	int err = -ECONNRESET;
 623
 624	if (m->err < 0)
 625		return;
 626
 627	n = p9_fd_poll(m->client, NULL, &err);
 628	if (n & (EPOLLERR | EPOLLHUP | EPOLLNVAL)) {
 629		p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
 630		p9_conn_cancel(m, err);
 631	}
 632
 633	if (n & EPOLLIN) {
 634		set_bit(Rpending, &m->wsched);
 635		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
 636		if (!test_and_set_bit(Rworksched, &m->wsched)) {
 637			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
 638			schedule_work(&m->rq);
 639		}
 640	}
 641
 642	if (n & EPOLLOUT) {
 643		set_bit(Wpending, &m->wsched);
 644		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
 645		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
 646		    !test_and_set_bit(Wworksched, &m->wsched)) {
 647			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
 648			schedule_work(&m->wq);
 649		}
 650	}
 651}
 652
 653/**
 654 * p9_fd_request - send 9P request
 655 * The function can sleep until the request is scheduled for sending.
 656 * The function can be interrupted. Return from the function is not
 657 * a guarantee that the request is sent successfully.
 658 *
 659 * @client: client instance
 660 * @req: request to be sent
 661 *
 662 */
 663
 664static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 665{
 666	__poll_t n;
 667	struct p9_trans_fd *ts = client->trans;
 668	struct p9_conn *m = &ts->conn;
 669
 670	p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
 671		 m, current, req->tc, req->tc->id);
 672	if (m->err < 0)
 673		return m->err;
 674
 675	spin_lock(&client->lock);
 676	req->status = REQ_STATUS_UNSENT;
 677	list_add_tail(&req->req_list, &m->unsent_req_list);
 678	spin_unlock(&client->lock);
 679
 680	if (test_and_clear_bit(Wpending, &m->wsched))
 681		n = EPOLLOUT;
 682	else
 683		n = p9_fd_poll(m->client, NULL, NULL);
 684
 685	if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
 686		schedule_work(&m->wq);
 687
 688	return 0;
 689}
 690
 691static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
 692{
 693	int ret = 1;
 694
 695	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
 696
 697	spin_lock(&client->lock);
 698
 699	if (req->status == REQ_STATUS_UNSENT) {
 700		list_del(&req->req_list);
 701		req->status = REQ_STATUS_FLSHD;
 702		ret = 0;
 703	}
 704	spin_unlock(&client->lock);
 705
 706	return ret;
 707}
 708
 709static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
 710{
 711	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
 712
 713	/* we haven't received a response for oldreq,
 714	 * remove it from the list.
 715	 */
 716	spin_lock(&client->lock);
 717	list_del(&req->req_list);
 718	spin_unlock(&client->lock);
 719
 720	return 0;
 721}
 722
 723static int p9_fd_show_options(struct seq_file *m, struct p9_client *clnt)
 724{
 725	if (clnt->trans_mod == &p9_tcp_trans) {
 726		if (clnt->trans_opts.tcp.port != P9_PORT)
 727			seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port);
 728	} else if (clnt->trans_mod == &p9_fd_trans) {
 729		if (clnt->trans_opts.fd.rfd != ~0)
 730			seq_printf(m, ",rfd=%u", clnt->trans_opts.fd.rfd);
 731		if (clnt->trans_opts.fd.wfd != ~0)
 732			seq_printf(m, ",wfd=%u", clnt->trans_opts.fd.wfd);
 733	}
 734	return 0;
 735}
 736
 737/**
 738 * parse_opts - parse mount options into p9_fd_opts structure
 739 * @params: options string passed from mount
 740 * @opts: fd transport-specific structure to parse options into
 741 *
 742 * Returns 0 upon success, -ERRNO upon failure
 743 */
 744
 745static int parse_opts(char *params, struct p9_fd_opts *opts)
 746{
 747	char *p;
 748	substring_t args[MAX_OPT_ARGS];
 749	int option;
 750	char *options, *tmp_options;
 751
 752	opts->port = P9_PORT;
 753	opts->rfd = ~0;
 754	opts->wfd = ~0;
 755	opts->privport = false;
 756
 757	if (!params)
 758		return 0;
 759
 760	tmp_options = kstrdup(params, GFP_KERNEL);
 761	if (!tmp_options) {
 762		p9_debug(P9_DEBUG_ERROR,
 763			 "failed to allocate copy of option string\n");
 764		return -ENOMEM;
 765	}
 766	options = tmp_options;
 767
 768	while ((p = strsep(&options, ",")) != NULL) {
 769		int token;
 770		int r;
 771		if (!*p)
 772			continue;
 773		token = match_token(p, tokens, args);
 774		if ((token != Opt_err) && (token != Opt_privport)) {
 775			r = match_int(&args[0], &option);
 776			if (r < 0) {
 777				p9_debug(P9_DEBUG_ERROR,
 778					 "integer field, but no integer?\n");
 779				continue;
 780			}
 781		}
 782		switch (token) {
 783		case Opt_port:
 784			opts->port = option;
 785			break;
 786		case Opt_rfdno:
 787			opts->rfd = option;
 788			break;
 789		case Opt_wfdno:
 790			opts->wfd = option;
 791			break;
 792		case Opt_privport:
 793			opts->privport = true;
 794			break;
 795		default:
 796			continue;
 797		}
 798	}
 799
 800	kfree(tmp_options);
 801	return 0;
 802}
 803
 804static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
 805{
 806	struct p9_trans_fd *ts = kzalloc(sizeof(struct p9_trans_fd),
 807					   GFP_KERNEL);
 808	if (!ts)
 809		return -ENOMEM;
 810
 811	ts->rd = fget(rfd);
 812	ts->wr = fget(wfd);
 813	if (!ts->rd || !ts->wr) {
 814		if (ts->rd)
 815			fput(ts->rd);
 816		if (ts->wr)
 817			fput(ts->wr);
 818		kfree(ts);
 819		return -EIO;
 820	}
 821
 822	client->trans = ts;
 823	client->status = Connected;
 824
 825	return 0;
 826}
 827
 828static int p9_socket_open(struct p9_client *client, struct socket *csocket)
 829{
 830	struct p9_trans_fd *p;
 831	struct file *file;
 832
 833	p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
 834	if (!p)
 835		return -ENOMEM;
 836
 837	csocket->sk->sk_allocation = GFP_NOIO;
 838	file = sock_alloc_file(csocket, 0, NULL);
 839	if (IS_ERR(file)) {
 840		pr_err("%s (%d): failed to map fd\n",
 841		       __func__, task_pid_nr(current));
 842		kfree(p);
 843		return PTR_ERR(file);
 844	}
 845
 846	get_file(file);
 847	p->wr = p->rd = file;
 848	client->trans = p;
 849	client->status = Connected;
 850
 851	p->rd->f_flags |= O_NONBLOCK;
 852
 853	p9_conn_create(client);
 854	return 0;
 855}
 856
 857/**
  858 * p9_conn_destroy - cancels all pending requests of mux
 859 * @m: mux to destroy
 860 *
 861 */
 862
 863static void p9_conn_destroy(struct p9_conn *m)
 864{
 865	p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n",
 866		 m, m->mux_list.prev, m->mux_list.next);
 867
 868	p9_mux_poll_stop(m);
 869	cancel_work_sync(&m->rq);
 870	cancel_work_sync(&m->wq);
 871
 872	p9_conn_cancel(m, -ECONNRESET);
 873
 874	m->client = NULL;
 875}
 876
 877/**
 878 * p9_fd_close - shutdown file descriptor transport
 879 * @client: client instance
 880 *
 881 */
 882
 883static void p9_fd_close(struct p9_client *client)
 884{
 885	struct p9_trans_fd *ts;
 886
 887	if (!client)
 888		return;
 889
 890	ts = client->trans;
 891	if (!ts)
 892		return;
 893
 894	client->status = Disconnected;
 895
 896	p9_conn_destroy(&ts->conn);
 897
 898	if (ts->rd)
 899		fput(ts->rd);
 900	if (ts->wr)
 901		fput(ts->wr);
 902
 903	kfree(ts);
 904}
 905
 906/*
 907 * stolen from NFS - maybe should be made a generic function?
 908 */
 909static inline int valid_ipaddr4(const char *buf)
 910{
 911	int rc, count, in[4];
 912
 913	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
 914	if (rc != 4)
 915		return -EINVAL;
 916	for (count = 0; count < 4; count++) {
 917		if (in[count] > 255)
 918			return -EINVAL;
 919	}
 920	return 0;
 921}
 922
 923static int p9_bind_privport(struct socket *sock)
 924{
 925	struct sockaddr_in cl;
 926	int port, err = -EINVAL;
 927
 928	memset(&cl, 0, sizeof(cl));
 929	cl.sin_family = AF_INET;
 930	cl.sin_addr.s_addr = INADDR_ANY;
 931	for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) {
 932		cl.sin_port = htons((ushort)port);
 933		err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));
 934		if (err != -EADDRINUSE)
 935			break;
 936	}
 937	return err;
 938}
 939
 940
 941static int
 942p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 943{
 944	int err;
 945	struct socket *csocket;
 946	struct sockaddr_in sin_server;
 947	struct p9_fd_opts opts;
 948
 949	err = parse_opts(args, &opts);
 950	if (err < 0)
 951		return err;
 952
 953	if (valid_ipaddr4(addr) < 0)
 954		return -EINVAL;
 955
 956	csocket = NULL;
 957
 958	client->trans_opts.tcp.port = opts.port;
 959	client->trans_opts.tcp.privport = opts.privport;
 960	sin_server.sin_family = AF_INET;
 961	sin_server.sin_addr.s_addr = in_aton(addr);
 962	sin_server.sin_port = htons(opts.port);
 963	err = __sock_create(current->nsproxy->net_ns, PF_INET,
 964			    SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
 965	if (err) {
 966		pr_err("%s (%d): problem creating socket\n",
 967		       __func__, task_pid_nr(current));
 968		return err;
 969	}
 970
 971	if (opts.privport) {
 972		err = p9_bind_privport(csocket);
 973		if (err < 0) {
 974			pr_err("%s (%d): problem binding to privport\n",
 975			       __func__, task_pid_nr(current));
 976			sock_release(csocket);
 977			return err;
 978		}
 979	}
 980
 981	err = csocket->ops->connect(csocket,
 982				    (struct sockaddr *)&sin_server,
 983				    sizeof(struct sockaddr_in), 0);
 984	if (err < 0) {
 985		pr_err("%s (%d): problem connecting socket to %s\n",
 986		       __func__, task_pid_nr(current), addr);
 987		sock_release(csocket);
 988		return err;
 989	}
 990
 991	return p9_socket_open(client, csocket);
 992}
 993
 994static int
 995p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
 996{
 997	int err;
 998	struct socket *csocket;
 999	struct sockaddr_un sun_server;
1000
1001	csocket = NULL;
1002
1003	if (strlen(addr) >= UNIX_PATH_MAX) {
1004		pr_err("%s (%d): address too long: %s\n",
1005		       __func__, task_pid_nr(current), addr);
1006		return -ENAMETOOLONG;
1007	}
1008
1009	sun_server.sun_family = PF_UNIX;
1010	strcpy(sun_server.sun_path, addr);
1011	err = __sock_create(current->nsproxy->net_ns, PF_UNIX,
1012			    SOCK_STREAM, 0, &csocket, 1);
1013	if (err < 0) {
1014		pr_err("%s (%d): problem creating socket\n",
1015		       __func__, task_pid_nr(current));
1016
1017		return err;
1018	}
1019	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
1020			sizeof(struct sockaddr_un) - 1, 0);
1021	if (err < 0) {
1022		pr_err("%s (%d): problem connecting socket: %s: %d\n",
1023		       __func__, task_pid_nr(current), addr, err);
1024		sock_release(csocket);
1025		return err;
1026	}
1027
1028	return p9_socket_open(client, csocket);
1029}
1030
1031static int
1032p9_fd_create(struct p9_client *client, const char *addr, char *args)
1033{
1034	int err;
1035	struct p9_fd_opts opts;
1036
1037	parse_opts(args, &opts);
1038	client->trans_opts.fd.rfd = opts.rfd;
1039	client->trans_opts.fd.wfd = opts.wfd;
1040
1041	if (opts.rfd == ~0 || opts.wfd == ~0) {
1042		pr_err("Insufficient options for proto=fd\n");
1043		return -ENOPROTOOPT;
1044	}
1045
1046	err = p9_fd_open(client, opts.rfd, opts.wfd);
1047	if (err < 0)
1048		return err;
1049
1050	p9_conn_create(client);
1051
1052	return 0;
1053}
1054
1055static struct p9_trans_module p9_tcp_trans = {
1056	.name = "tcp",
1057	.maxsize = MAX_SOCK_BUF,
1058	.def = 0,
1059	.create = p9_fd_create_tcp,
1060	.close = p9_fd_close,
1061	.request = p9_fd_request,
1062	.cancel = p9_fd_cancel,
1063	.cancelled = p9_fd_cancelled,
1064	.show_options = p9_fd_show_options,
1065	.owner = THIS_MODULE,
1066};
1067
1068static struct p9_trans_module p9_unix_trans = {
1069	.name = "unix",
1070	.maxsize = MAX_SOCK_BUF,
1071	.def = 0,
1072	.create = p9_fd_create_unix,
1073	.close = p9_fd_close,
1074	.request = p9_fd_request,
1075	.cancel = p9_fd_cancel,
1076	.cancelled = p9_fd_cancelled,
1077	.show_options = p9_fd_show_options,
1078	.owner = THIS_MODULE,
1079};
1080
1081static struct p9_trans_module p9_fd_trans = {
1082	.name = "fd",
1083	.maxsize = MAX_SOCK_BUF,
1084	.def = 0,
1085	.create = p9_fd_create,
1086	.close = p9_fd_close,
1087	.request = p9_fd_request,
1088	.cancel = p9_fd_cancel,
1089	.cancelled = p9_fd_cancelled,
1090	.show_options = p9_fd_show_options,
1091	.owner = THIS_MODULE,
1092};
1093
1094/**
1095 * p9_poll_workfn - poll worker thread
1096 * @work: work queue
1097 *
1098 * polls all v9fs transports for new events and queues the appropriate
1099 * work to the work queue
1100 *
1101 */
1102
1103static void p9_poll_workfn(struct work_struct *work)
1104{
1105	unsigned long flags;
1106
1107	p9_debug(P9_DEBUG_TRANS, "start %p\n", current);
1108
1109	spin_lock_irqsave(&p9_poll_lock, flags);
1110	while (!list_empty(&p9_poll_pending_list)) {
1111		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
1112							struct p9_conn,
1113							poll_pending_link);
1114		list_del_init(&conn->poll_pending_link);
1115		spin_unlock_irqrestore(&p9_poll_lock, flags);
1116
1117		p9_poll_mux(conn);
1118
1119		spin_lock_irqsave(&p9_poll_lock, flags);
1120	}
1121	spin_unlock_irqrestore(&p9_poll_lock, flags);
1122
1123	p9_debug(P9_DEBUG_TRANS, "finish\n");
1124}
1125
1126int p9_trans_fd_init(void)
1127{
1128	v9fs_register_trans(&p9_tcp_trans);
1129	v9fs_register_trans(&p9_unix_trans);
1130	v9fs_register_trans(&p9_fd_trans);
1131
1132	return 0;
1133}
1134
1135void p9_trans_fd_exit(void)
1136{
1137	flush_work(&p9_poll_work);
1138	v9fs_unregister_trans(&p9_tcp_trans);
1139	v9fs_unregister_trans(&p9_unix_trans);
1140	v9fs_unregister_trans(&p9_fd_trans);
1141}