// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
 */

#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>
#include <trace/events/sock.h>

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/pvcalls.h>

#define PVCALLS_VERSIONS "1"
#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER

static struct pvcalls_back_global {
	struct list_head frontends;
	struct semaphore frontends_lock;
} pvcalls_back_global;

/*
 * Per-frontend data structure. It contains pointers to the command
 * ring, its event channel, a list of active sockets and a tree of
 * passive sockets.
 */
struct pvcalls_fedata {
	struct list_head list;
	struct xenbus_device *dev;
	struct xen_pvcalls_sring *sring;
	struct xen_pvcalls_back_ring ring;
	int irq;
	struct list_head socket_mappings;
	struct radix_tree_root socketpass_mappings;
	struct semaphore socket_lock;
};

struct pvcalls_ioworker {
	struct work_struct register_work;
	struct workqueue_struct *wq;
};

struct sock_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *sockpass;
	struct socket *sock;
	uint64_t id;
	grant_ref_t ref;
	struct pvcalls_data_intf *ring;
	void *bytes;
	struct pvcalls_data data;
	uint32_t ring_order;
	int irq;
	atomic_t read;
	atomic_t write;
	atomic_t io;
	atomic_t release;
	atomic_t eoi;
	void (*saved_data_ready)(struct sock *sk);
	struct pvcalls_ioworker ioworker;
};

struct sockpass_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	uint64_t id;
	struct xen_pvcalls_request reqcopy;
	spinlock_t copy_lock;
	struct workqueue_struct *wq;
	struct work_struct register_work;
	void (*saved_data_ready)(struct sock *sk);
};

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map);

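/*
 * Move data from the kernel socket into the shared "in" data ring.
 * Called from the ioworker. Returns false if there is nothing to do
 * (a pending error or a full ring), true otherwise.
 */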
static bool pvcalls_conn_back_read(void *opaque)
{
	struct sock_mapping *map = (struct sock_mapping *)opaque;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, wanted, array_size, masked_prod, masked_cons;
	int32_t error;
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	unsigned long flags;
	int ret;

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	cons = intf->in_cons;
	prod = intf->in_prod;
	error = intf->in_error;
	/* read the indexes first, then deal with the data */
	virt_mb();

	if (error)
		return false;

	size = pvcalls_queued(prod, cons, array_size);
	if (size >= array_size)
		return false;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
		atomic_set(&map->read, 0);
		spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
				flags);
		return true;
	}
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
	wanted = array_size - size;
	masked_prod = pvcalls_mask(prod, array_size);
	masked_cons = pvcalls_mask(cons, array_size);

	memset(&msg, 0, sizeof(msg));
	if (masked_prod < masked_cons) {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = wanted;
		iov_iter_kvec(&msg.msg_iter, ITER_DEST, vec, 1, wanted);
	} else {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = array_size - masked_prod;
		vec[1].iov_base = data->in;
		vec[1].iov_len = wanted - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, ITER_DEST, vec, 2, wanted);
	}

	atomic_set(&map->read, 0);
	ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
	WARN_ON(ret > wanted);
	if (ret == -EAGAIN) /* shouldn't happen */
		return true;
	if (!ret)
		ret = -ENOTCONN;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
		atomic_inc(&map->read);
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);

	/* write the data, then modify the indexes */
	virt_wmb();
	if (ret < 0) {
		atomic_set(&map->read, 0);
		intf->in_error = ret;
	} else
		intf->in_prod = prod + ret;
	/* update the indexes, then notify the other end */
	virt_wmb();
	notify_remote_via_irq(map->irq);

	return true;
}

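/*
 * Move data queued in the shared "out" data ring to the kernel socket.
 * Returns false if the ring is empty. On -EAGAIN, or when data is
 * still left in the ring, the write/io counters are bumped so the
 * ioworker tries again.
 */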
static bool pvcalls_conn_back_write(struct sock_mapping *map)
{
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, array_size;
	int ret;

	atomic_set(&map->write, 0);

	cons = intf->out_cons;
	prod = intf->out_prod;
	/* read the indexes before dealing with the data */
	virt_mb();

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	size = pvcalls_queued(prod, cons, array_size);
	if (size == 0)
		return false;

	memset(&msg, 0, sizeof(msg));
	msg.msg_flags |= MSG_DONTWAIT;
	if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = size;
		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 1, size);
	} else {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
		vec[1].iov_base = data->out;
		vec[1].iov_len = size - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 2, size);
	}

	ret = inet_sendmsg(map->sock, &msg, size);
	if (ret == -EAGAIN) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
		return true;
	}

	/* write the data, then update the indexes */
	virt_wmb();
	if (ret < 0) {
		intf->out_error = ret;
	} else {
		intf->out_error = 0;
		intf->out_cons = cons + ret;
		prod = intf->out_prod;
	}
	/* update the indexes, then notify the other end */
	virt_wmb();
	if (prod != cons + ret) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
	}
	notify_remote_via_irq(map->irq);

	return true;
}

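/*
 * Per-connection I/O worker: as long as the "io" counter is non-zero,
 * service pending reads and writes, and send the delayed EOI for the
 * connection's event channel once no more writes are pending.
 */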
static void pvcalls_back_ioworker(struct work_struct *work)
{
	struct pvcalls_ioworker *ioworker = container_of(work,
		struct pvcalls_ioworker, register_work);
	struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
		ioworker);
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	while (atomic_read(&map->io) > 0) {
		if (atomic_read(&map->release) > 0) {
			atomic_set(&map->release, 0);
			return;
		}

		if (atomic_read(&map->read) > 0 &&
		    pvcalls_conn_back_read(map))
			eoi_flags = 0;
		if (atomic_read(&map->write) > 0 &&
		    pvcalls_conn_back_write(map))
			eoi_flags = 0;

		if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) {
			atomic_set(&map->eoi, 0);
			xen_irq_lateeoi(map->irq, eoi_flags);
			eoi_flags = XEN_EOI_FLAG_SPURIOUS;
		}

		atomic_dec(&map->io);
	}
}

static int pvcalls_back_socket(struct xenbus_device *dev,
		struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.socket.domain != AF_INET ||
	    req->u.socket.type != SOCK_STREAM ||
	    (req->u.socket.protocol != IPPROTO_IP &&
	     req->u.socket.protocol != AF_INET))
		ret = -EAFNOSUPPORT;
	else
		ret = 0;

	/* leave the actual socket allocation for later */

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.socket.id = req->u.socket.id;
	rsp->ret = ret;

	return 0;
}

static void pvcalls_sk_state_change(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;

	if (map == NULL)
		return;

	atomic_inc(&map->read);
	notify_remote_via_irq(map->irq);
}

static void pvcalls_sk_data_ready(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;
	struct pvcalls_ioworker *iow;

	trace_sk_data_ready(sock);

	if (map == NULL)
		return;

	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);
}

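/*
 * Set up the backend side of one active socket: map the indexes page
 * and the data ring granted by the frontend, bind the per-connection
 * event channel, and install the socket callbacks that wake up the
 * ioworker. The socket is consumed: it is released on any failure.
 */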
static struct sock_mapping *pvcalls_new_active_socket(
		struct pvcalls_fedata *fedata,
		uint64_t id,
		grant_ref_t ref,
		evtchn_port_t evtchn,
		struct socket *sock)
{
	int ret;
	struct sock_mapping *map;
	void *page;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		sock_release(sock);
		return NULL;
	}

	map->fedata = fedata;
	map->sock = sock;
	map->id = id;
	map->ref = ref;

	ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
	if (ret < 0)
		goto out;
	map->ring = page;
	map->ring_order = map->ring->ring_order;
	/* first read the order, then map the data ring */
	virt_rmb();
	if (map->ring_order > MAX_RING_ORDER) {
		pr_warn("%s frontend requested ring_order %u, which is > MAX (%u)\n",
				__func__, map->ring_order, MAX_RING_ORDER);
		goto out;
	}
	ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
				     (1 << map->ring_order), &page);
	if (ret < 0)
		goto out;
	map->bytes = page;

	ret = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			fedata->dev, evtchn,
			pvcalls_back_conn_event, 0, "pvcalls-backend", map);
	if (ret < 0)
		goto out;
	map->irq = ret;

	map->data.in = map->bytes;
	map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);

	map->ioworker.wq = alloc_ordered_workqueue("pvcalls_io", 0);
	if (!map->ioworker.wq)
		goto out;
	atomic_set(&map->io, 1);
	INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);

	down(&fedata->socket_lock);
	list_add_tail(&map->list, &fedata->socket_mappings);
	up(&fedata->socket_lock);

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
	map->sock->sk->sk_state_change = pvcalls_sk_state_change;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

	return map;
out:
	down(&fedata->socket_lock);
	list_del(&map->list);
	pvcalls_back_release_active(fedata->dev, fedata, map);
	up(&fedata->socket_lock);
	return NULL;
}

static int pvcalls_back_connect(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct socket *sock;
	struct sock_mapping *map;
	struct xen_pvcalls_response *rsp;
	struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.connect.len < sizeof(sa->sa_family) ||
	    req->u.connect.len > sizeof(req->u.connect.addr) ||
	    sa->sa_family != AF_INET)
		goto out;

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
	if (ret < 0)
		goto out;
	ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
	if (ret < 0) {
		sock_release(sock);
		goto out;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.connect.id,
					req->u.connect.ref,
					req->u.connect.evtchn,
					sock);
	if (!map)
		ret = -EFAULT;

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.connect.id = req->u.connect.id;
	rsp->ret = ret;

	return 0;
}

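/*
 * Tear down one active socket: restore the saved socket callbacks,
 * stop the ioworker, unmap the rings and unbind the event channel.
 */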
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map)
{
	disable_irq(map->irq);
	if (map->sock->sk != NULL) {
		write_lock_bh(&map->sock->sk->sk_callback_lock);
		map->sock->sk->sk_user_data = NULL;
		map->sock->sk->sk_data_ready = map->saved_data_ready;
		write_unlock_bh(&map->sock->sk->sk_callback_lock);
	}

	atomic_set(&map->release, 1);
	flush_work(&map->ioworker.register_work);

	xenbus_unmap_ring_vfree(dev, map->bytes);
	xenbus_unmap_ring_vfree(dev, (void *)map->ring);
	unbind_from_irqhandler(map->irq, map);

	sock_release(map->sock);
	kfree(map);

	return 0;
}

static int pvcalls_back_release_passive(struct xenbus_device *dev,
					struct pvcalls_fedata *fedata,
					struct sockpass_mapping *mappass)
{
	if (mappass->sock->sk != NULL) {
		write_lock_bh(&mappass->sock->sk->sk_callback_lock);
		mappass->sock->sk->sk_user_data = NULL;
		mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
		write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
	}
	sock_release(mappass->sock);
	destroy_workqueue(mappass->wq);
	kfree(mappass);

	return 0;
}

static int pvcalls_back_release(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	int ret = 0;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		if (map->id == req->u.release.id) {
			list_del(&map->list);
			up(&fedata->socket_lock);
			ret = pvcalls_back_release_active(dev, fedata, map);
			goto out;
		}
	}
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.release.id);
	if (mappass != NULL) {
		radix_tree_delete(&fedata->socketpass_mappings, mappass->id);
		up(&fedata->socket_lock);
		ret = pvcalls_back_release_passive(dev, fedata, mappass);
	} else
		up(&fedata->socket_lock);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->u.release.id = req->u.release.id;
	rsp->cmd = req->cmd;
	rsp->ret = ret;
	return 0;
}

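/*
 * Deferred accept work, queued by pvcalls_back_accept() and retriggered
 * by pvcalls_pass_sk_data_ready(): accept a pending connection without
 * blocking, wrap it in a new active socket mapping and push the
 * response to the frontend.
 */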
static void __pvcalls_back_accept(struct work_struct *work)
{
	struct sockpass_mapping *mappass = container_of(
		work, struct sockpass_mapping, register_work);
	struct proto_accept_arg arg = {
		.flags = O_NONBLOCK,
		.kern = true,
	};
	struct sock_mapping *map;
	struct pvcalls_ioworker *iow;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	struct xen_pvcalls_response *rsp;
	struct xen_pvcalls_request *req;
	int notify;
	int ret = -EINVAL;
	unsigned long flags;

	fedata = mappass->fedata;
	/*
	 * __pvcalls_back_accept can race against pvcalls_back_accept.
	 * We only need to check the value of "cmd" on read. It could be
	 * done atomically, but to simplify the code on the write side, we
	 * use a spinlock.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	req = &mappass->reqcopy;
	if (req->cmd != PVCALLS_ACCEPT) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	sock = sock_alloc();
	if (sock == NULL)
		goto out_error;
	sock->type = mappass->sock->type;
	sock->ops = mappass->sock->ops;

	ret = inet_accept(mappass->sock, sock, &arg);
	if (ret == -EAGAIN) {
		sock_release(sock);
		return;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.accept.id_new,
					req->u.accept.ref,
					req->u.accept.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		goto out_error;
	}

	map->sockpass = mappass;
	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
	if (notify)
		notify_remote_via_irq(fedata->irq);

	mappass->reqcopy.cmd = 0;
}

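/*
 * data_ready callback of a passive socket: answer an outstanding POLL
 * request in place if there is one, otherwise kick the accept worker.
 */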
static void pvcalls_pass_sk_data_ready(struct sock *sock)
{
	struct sockpass_mapping *mappass = sock->sk_user_data;
	struct pvcalls_fedata *fedata;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;
	int notify;

	trace_sk_data_ready(sock);

	if (mappass == NULL)
		return;

	fedata = mappass->fedata;
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd == PVCALLS_POLL) {
		rsp = RING_GET_RESPONSE(&fedata->ring,
					fedata->ring.rsp_prod_pvt++);
		rsp->req_id = mappass->reqcopy.req_id;
		rsp->u.poll.id = mappass->reqcopy.u.poll.id;
		rsp->cmd = mappass->reqcopy.cmd;
		rsp->ret = 0;

		mappass->reqcopy.cmd = 0;
		spin_unlock_irqrestore(&mappass->copy_lock, flags);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
		if (notify)
			notify_remote_via_irq(mappass->fedata->irq);
	} else {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		queue_work(mappass->wq, &mappass->register_work);
	}
}

static int pvcalls_back_bind(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&map->register_work, __pvcalls_back_accept);
	spin_lock_init(&map->copy_lock);
	map->wq = alloc_ordered_workqueue("pvcalls_wq", 0);
	if (!map->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
	if (ret < 0)
		goto out;

	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
			req->u.bind.len);
	if (ret < 0)
		goto out;

	map->fedata = fedata;
	map->id = req->u.bind.id;

	down(&fedata->socket_lock);
	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
				map);
	up(&fedata->socket_lock);
	if (ret)
		goto out;

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

out:
	if (ret) {
		if (map && map->sock)
			sock_release(map->sock);
		if (map && map->wq)
			destroy_workqueue(map->wq);
		kfree(map);
	}
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.bind.id = req->u.bind.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_listen(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
	up(&fedata->socket_lock);
	if (map == NULL)
		goto out;

	ret = inet_listen(map->sock, req->u.listen.backlog);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.listen.id = req->u.listen.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_accept(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	int ret = -EINVAL;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
		req->u.accept.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		goto out_error;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		ret = -EINTR;
		goto out_error;
	}

	mappass->reqcopy = *req;
	spin_unlock_irqrestore(&mappass->copy_lock, flags);
	queue_work(mappass->wq, &mappass->register_work);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_poll(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	struct xen_pvcalls_response *rsp;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	unsigned long flags;
	int ret;
	bool data;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.poll.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		return -EINVAL;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		ret = -EINTR;
		goto out;
	}

	mappass->reqcopy = *req;
	icsk = inet_csk(mappass->sock->sk);
	queue = &icsk->icsk_accept_queue;
	data = READ_ONCE(queue->rskq_accept_head) != NULL;
	if (data) {
		mappass->reqcopy.cmd = 0;
		ret = 0;
		goto out;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out:
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.poll.id = req->u.poll.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
				   struct xen_pvcalls_request *req)
{
	int ret = 0;

	switch (req->cmd) {
	case PVCALLS_SOCKET:
		ret = pvcalls_back_socket(dev, req);
		break;
	case PVCALLS_CONNECT:
		ret = pvcalls_back_connect(dev, req);
		break;
	case PVCALLS_RELEASE:
		ret = pvcalls_back_release(dev, req);
		break;
	case PVCALLS_BIND:
		ret = pvcalls_back_bind(dev, req);
		break;
	case PVCALLS_LISTEN:
		ret = pvcalls_back_listen(dev, req);
		break;
	case PVCALLS_ACCEPT:
		ret = pvcalls_back_accept(dev, req);
		break;
	case PVCALLS_POLL:
		ret = pvcalls_back_poll(dev, req);
		break;
	default:
	{
		struct pvcalls_fedata *fedata;
		struct xen_pvcalls_response *rsp;

		fedata = dev_get_drvdata(&dev->dev);
		rsp = RING_GET_RESPONSE(
				&fedata->ring, fedata->ring.rsp_prod_pvt++);
		rsp->req_id = req->req_id;
		rsp->cmd = req->cmd;
		rsp->ret = -ENOTSUPP;
		break;
	}
	}
	return ret;
}

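/*
 * Consume and dispatch requests from the command ring. A non-zero
 * return from pvcalls_back_handle_cmd() means the response will be
 * pushed later (deferred accept/poll), so nothing is notified for it.
 */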
static void pvcalls_back_work(struct pvcalls_fedata *fedata)
{
	int notify, notify_all = 0, more = 1;
	struct xen_pvcalls_request req;
	struct xenbus_device *dev = fedata->dev;

	while (more) {
		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
			RING_COPY_REQUEST(&fedata->ring,
					  fedata->ring.req_cons++,
					  &req);

			if (!pvcalls_back_handle_cmd(dev, &req)) {
				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
					&fedata->ring, notify);
				notify_all += notify;
			}
		}

		if (notify_all) {
			notify_remote_via_irq(fedata->irq);
			notify_all = 0;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
	}
}

static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
{
	struct xenbus_device *dev = dev_id;
	struct pvcalls_fedata *fedata = NULL;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	if (dev) {
		fedata = dev_get_drvdata(&dev->dev);
		if (fedata) {
			pvcalls_back_work(fedata);
			eoi_flags = 0;
		}
	}

	xen_irq_lateeoi(irq, eoi_flags);

	return IRQ_HANDLED;
}

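/*
 * Interrupt handler of the per-connection event channel: let the
 * ioworker do the actual work (and issue the late EOI).
 */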
static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
{
	struct sock_mapping *map = sock_map;
	struct pvcalls_ioworker *iow;

	if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
		map->sock->sk->sk_user_data != map) {
		xen_irq_lateeoi(irq, 0);
		return IRQ_HANDLED;
	}

	iow = &map->ioworker;

	atomic_inc(&map->write);
	atomic_inc(&map->eoi);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

	return IRQ_HANDLED;
}

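/*
 * Called when a frontend moves to Connected: read the command ring
 * grant reference and event channel from xenstore, map the ring and
 * add the new frontend to the global list.
 */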
static int backend_connect(struct xenbus_device *dev)
{
	int err;
	evtchn_port_t evtchn;
	grant_ref_t ring_ref;
	struct pvcalls_fedata *fedata = NULL;

	fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
	if (!fedata)
		return -ENOMEM;

	fedata->irq = -1;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel",
				 dev->otherend);
		goto error;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
				 dev->otherend);
		goto error;
	}

	err = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
	if (err < 0)
		goto error;
	fedata->irq = err;

	err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
				   IRQF_ONESHOT, "pvcalls-back", dev);
	if (err < 0)
		goto error;

	err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
				     (void **)&fedata->sring);
	if (err < 0)
		goto error;

	BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
	fedata->dev = dev;

	INIT_LIST_HEAD(&fedata->socket_mappings);
	INIT_RADIX_TREE(&fedata->socketpass_mappings, GFP_KERNEL);
	sema_init(&fedata->socket_lock, 1);
	dev_set_drvdata(&dev->dev, fedata);

	down(&pvcalls_back_global.frontends_lock);
	list_add_tail(&fedata->list, &pvcalls_back_global.frontends);
	up(&pvcalls_back_global.frontends_lock);

	return 0;

 error:
	if (fedata->irq >= 0)
		unbind_from_irqhandler(fedata->irq, dev);
	if (fedata->sring != NULL)
		xenbus_unmap_ring_vfree(dev, fedata->sring);
	kfree(fedata);
	return err;
}

static int backend_disconnect(struct xenbus_device *dev)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	struct radix_tree_iter iter;
	void **slot;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		list_del(&map->list);
		pvcalls_back_release_active(dev, fedata, map);
	}

	radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
		mappass = radix_tree_deref_slot(slot);
		if (!mappass)
			continue;
		if (radix_tree_exception(mappass)) {
			if (radix_tree_deref_retry(mappass))
				slot = radix_tree_iter_retry(&iter);
		} else {
			radix_tree_delete(&fedata->socketpass_mappings,
					  mappass->id);
			pvcalls_back_release_passive(dev, fedata, mappass);
		}
	}
	up(&fedata->socket_lock);

	unbind_from_irqhandler(fedata->irq, dev);
	xenbus_unmap_ring_vfree(dev, fedata->sring);

	list_del(&fedata->list);
	kfree(fedata);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static int pvcalls_back_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	int err, abort;
	struct xenbus_transaction xbt;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("%s cannot create xenstore transaction\n", __func__);
		return err;
	}

	err = xenbus_printf(xbt, dev->nodename, "versions", "%s",
			    PVCALLS_VERSIONS);
	if (err) {
		pr_warn("%s write out 'versions' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
			    MAX_RING_ORDER);
	if (err) {
		pr_warn("%s write out 'max-page-order' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "function-calls",
			    XENBUS_FUNCTIONS_CALLS);
	if (err) {
		pr_warn("%s write out 'function-calls' failed\n", __func__);
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		pr_warn("%s cannot complete xenstore transaction\n", __func__);
		return err;
	}

	if (abort)
		return -EFAULT;

	xenbus_switch_state(dev, XenbusStateInitWait);

	return 0;
}

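/*
 * Step the device through the xenbus state machine, one transition at
 * a time, until the requested state is reached.
 */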
static void set_backend_state(struct xenbus_device *dev,
			      enum xenbus_state state)
{
	while (dev->state != state) {
		switch (dev->state) {
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				xenbus_switch_state(dev, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateInitWait:
		case XenbusStateInitialised:
			switch (state) {
			case XenbusStateConnected:
				if (backend_connect(dev))
					return;
				xenbus_switch_state(dev, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				down(&pvcalls_back_global.frontends_lock);
				backend_disconnect(dev);
				up(&pvcalls_back_global.frontends_lock);
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosed);
				break;
			default:
				WARN_ON(1);
			}
			break;
		default:
			WARN_ON(1);
		}
	}
}

static void pvcalls_back_changed(struct xenbus_device *dev,
				 enum xenbus_state frontend_state)
{
	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(dev, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		set_backend_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		set_backend_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		device_unregister(&dev->dev);
		break;
	case XenbusStateUnknown:
		set_backend_state(dev, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

static void pvcalls_back_remove(struct xenbus_device *dev)
{
}

static int pvcalls_back_uevent(const struct xenbus_device *xdev,
			       struct kobj_uevent_env *env)
{
	return 0;
}

static const struct xenbus_device_id pvcalls_back_ids[] = {
	{ "pvcalls" },
	{ "" }
};

static struct xenbus_driver pvcalls_back_driver = {
	.ids = pvcalls_back_ids,
	.probe = pvcalls_back_probe,
	.remove = pvcalls_back_remove,
	.uevent = pvcalls_back_uevent,
	.otherend_changed = pvcalls_back_changed,
};

static int __init pvcalls_back_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = xenbus_register_backend(&pvcalls_back_driver);
	if (ret < 0)
		return ret;

	sema_init(&pvcalls_back_global.frontends_lock, 1);
	INIT_LIST_HEAD(&pvcalls_back_global.frontends);
	return 0;
}
module_init(pvcalls_back_init);

static void __exit pvcalls_back_fin(void)
{
	struct pvcalls_fedata *fedata, *nfedata;

	down(&pvcalls_back_global.frontends_lock);
	list_for_each_entry_safe(fedata, nfedata,
				 &pvcalls_back_global.frontends, list) {
		backend_disconnect(fedata->dev);
	}
	up(&pvcalls_back_global.frontends_lock);

	xenbus_unregister_driver(&pvcalls_back_driver);
}

module_exit(pvcalls_back_fin);

MODULE_DESCRIPTION("Xen PV Calls backend driver");
MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
MODULE_LICENSE("GPL");