v3.1
   1/*
   2 * Char device for device raw access
   3 *
   4 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software Foundation,
  18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  19 */
  20
  21#include <linux/bug.h>
  22#include <linux/compat.h>
  23#include <linux/delay.h>
  24#include <linux/device.h>
  25#include <linux/errno.h>
  26#include <linux/firewire.h>
  27#include <linux/firewire-cdev.h>
  28#include <linux/idr.h>
  29#include <linux/irqflags.h>
  30#include <linux/jiffies.h>
  31#include <linux/kernel.h>
  32#include <linux/kref.h>
  33#include <linux/mm.h>
  34#include <linux/module.h>
  35#include <linux/mutex.h>
  36#include <linux/poll.h>
  37#include <linux/sched.h> /* required for linux/wait.h */
  38#include <linux/slab.h>
  39#include <linux/spinlock.h>
  40#include <linux/string.h>
  41#include <linux/time.h>
  42#include <linux/uaccess.h>
  43#include <linux/vmalloc.h>
  44#include <linux/wait.h>
  45#include <linux/workqueue.h>
  46
  47#include <asm/system.h>
  48
  49#include "core.h"
  50
  51/*
  52 * ABI version history is documented in linux/firewire-cdev.h.
  53 */
  54#define FW_CDEV_KERNEL_VERSION			4
  55#define FW_CDEV_VERSION_EVENT_REQUEST2		4
  56#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
  57
  58struct client {
  59	u32 version;
  60	struct fw_device *device;
  61
  62	spinlock_t lock;
  63	bool in_shutdown;
  64	struct idr resource_idr;
  65	struct list_head event_list;
  66	wait_queue_head_t wait;
  67	wait_queue_head_t tx_flush_wait;
  68	u64 bus_reset_closure;
  69
  70	struct fw_iso_context *iso_context;
  71	u64 iso_closure;
  72	struct fw_iso_buffer buffer;
  73	unsigned long vm_start;
  74
  75	struct list_head phy_receiver_link;
  76	u64 phy_receiver_closure;
  77
  78	struct list_head link;
  79	struct kref kref;
  80};
  81
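/*
 * Client lifetime: the kref is taken once per resource added to resource_idr
 * and once per scheduled iso_resource work item, and dropped again when the
 * resource is released or the work has run.  client_release() finally drops
 * the fw_device reference and frees the client.
 */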
  82static inline void client_get(struct client *client)
  83{
  84	kref_get(&client->kref);
  85}
  86
  87static void client_release(struct kref *kref)
  88{
  89	struct client *client = container_of(kref, struct client, kref);
  90
  91	fw_device_put(client->device);
  92	kfree(client);
  93}
  94
  95static void client_put(struct client *client)
  96{
  97	kref_put(&client->kref, client_release);
  98}
  99
 100struct client_resource;
 101typedef void (*client_resource_release_fn_t)(struct client *,
 102					     struct client_resource *);
 103struct client_resource {
 104	client_resource_release_fn_t release;
 105	int handle;
 106};
 107
 108struct address_handler_resource {
 109	struct client_resource resource;
 110	struct fw_address_handler handler;
 111	__u64 closure;
 112	struct client *client;
 113};
 114
 115struct outbound_transaction_resource {
 116	struct client_resource resource;
 117	struct fw_transaction transaction;
 118};
 119
 120struct inbound_transaction_resource {
 121	struct client_resource resource;
 122	struct fw_card *card;
 123	struct fw_request *request;
 124	void *data;
 125	size_t length;
 126};
 127
 128struct descriptor_resource {
 129	struct client_resource resource;
 130	struct fw_descriptor descriptor;
 131	u32 data[0];
 132};
 133
 134struct iso_resource {
 135	struct client_resource resource;
 136	struct client *client;
 137	/* Schedule work and access todo only with client->lock held. */
 138	struct delayed_work work;
 139	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
 140	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
 141	int generation;
 142	u64 channels;
 143	s32 bandwidth;
 144	struct iso_resource_event *e_alloc, *e_dealloc;
 145};
 146
 147static void release_iso_resource(struct client *, struct client_resource *);
 148
 149static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
 150{
 151	client_get(r->client);
 152	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
 153		client_put(r->client);
 154}
 155
 156static void schedule_if_iso_resource(struct client_resource *resource)
 157{
 158	if (resource->release == release_iso_resource)
 159		schedule_iso_resource(container_of(resource,
 160					struct iso_resource, resource), 0);
 161}
 162
 163/*
 164 * dequeue_event() just kfree()'s the event, so the event has to be
 165 * the first field in a struct XYZ_event.
 166 */
 167struct event {
 168	struct { void *data; size_t size; } v[2];
 169	struct list_head link;
 170};
 171
 172struct bus_reset_event {
 173	struct event event;
 174	struct fw_cdev_event_bus_reset reset;
 175};
 176
 177struct outbound_transaction_event {
 178	struct event event;
 179	struct client *client;
 180	struct outbound_transaction_resource r;
 181	struct fw_cdev_event_response response;
 182};
 183
 184struct inbound_transaction_event {
 185	struct event event;
 186	union {
 187		struct fw_cdev_event_request request;
 188		struct fw_cdev_event_request2 request2;
 189	} req;
 190};
 191
 192struct iso_interrupt_event {
 193	struct event event;
 194	struct fw_cdev_event_iso_interrupt interrupt;
 195};
 196
 197struct iso_interrupt_mc_event {
 198	struct event event;
 199	struct fw_cdev_event_iso_interrupt_mc interrupt;
 200};
 201
 202struct iso_resource_event {
 203	struct event event;
 204	struct fw_cdev_event_iso_resource iso_resource;
 205};
 206
 207struct outbound_phy_packet_event {
 208	struct event event;
 209	struct client *client;
 210	struct fw_packet p;
 211	struct fw_cdev_event_phy_packet phy_packet;
 212};
 213
 214struct inbound_phy_packet_event {
 215	struct event event;
 216	struct fw_cdev_event_phy_packet phy_packet;
 217};
 218
 219#ifdef CONFIG_COMPAT
 220static void __user *u64_to_uptr(u64 value)
 221{
 222	if (is_compat_task())
 223		return compat_ptr(value);
 224	else
 225		return (void __user *)(unsigned long)value;
 226}
 227
 228static u64 uptr_to_u64(void __user *ptr)
 229{
 230	if (is_compat_task())
 231		return ptr_to_compat(ptr);
 232	else
 233		return (u64)(unsigned long)ptr;
 234}
 235#else
 236static inline void __user *u64_to_uptr(u64 value)
 237{
 238	return (void __user *)(unsigned long)value;
 239}
 240
 241static inline u64 uptr_to_u64(void __user *ptr)
 242{
 243	return (u64)(unsigned long)ptr;
 244}
 245#endif /* CONFIG_COMPAT */
 246
 247static int fw_device_op_open(struct inode *inode, struct file *file)
 248{
 249	struct fw_device *device;
 250	struct client *client;
 251
 252	device = fw_device_get_by_devt(inode->i_rdev);
 253	if (device == NULL)
 254		return -ENODEV;
 255
 256	if (fw_device_is_shutdown(device)) {
 257		fw_device_put(device);
 258		return -ENODEV;
 259	}
 260
 261	client = kzalloc(sizeof(*client), GFP_KERNEL);
 262	if (client == NULL) {
 263		fw_device_put(device);
 264		return -ENOMEM;
 265	}
 266
 267	client->device = device;
 268	spin_lock_init(&client->lock);
 269	idr_init(&client->resource_idr);
 270	INIT_LIST_HEAD(&client->event_list);
 271	init_waitqueue_head(&client->wait);
 272	init_waitqueue_head(&client->tx_flush_wait);
 273	INIT_LIST_HEAD(&client->phy_receiver_link);
 274	INIT_LIST_HEAD(&client->link);
 275	kref_init(&client->kref);
 276
 277	file->private_data = client;
 278
 279	return nonseekable_open(inode, file);
 280}
 281
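/*
 * queue_event() - append an event to the client's event list and wake up any
 * reader.  Up to two data chunks (v[0] and v[1]) are later copied back to
 * back to user space by dequeue_event().  Events arriving after shutdown has
 * begun are freed immediately.
 */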
 282static void queue_event(struct client *client, struct event *event,
 283			void *data0, size_t size0, void *data1, size_t size1)
 284{
 285	unsigned long flags;
 286
 287	event->v[0].data = data0;
 288	event->v[0].size = size0;
 289	event->v[1].data = data1;
 290	event->v[1].size = size1;
 291
 292	spin_lock_irqsave(&client->lock, flags);
 293	if (client->in_shutdown)
 294		kfree(event);
 295	else
 296		list_add_tail(&event->link, &client->event_list);
 297	spin_unlock_irqrestore(&client->lock, flags);
 298
 299	wake_up_interruptible(&client->wait);
 300}
 301
 302static int dequeue_event(struct client *client,
 303			 char __user *buffer, size_t count)
 304{
 305	struct event *event;
 306	size_t size, total;
 307	int i, ret;
 308
 309	ret = wait_event_interruptible(client->wait,
 310			!list_empty(&client->event_list) ||
 311			fw_device_is_shutdown(client->device));
 312	if (ret < 0)
 313		return ret;
 314
 315	if (list_empty(&client->event_list) &&
 316		       fw_device_is_shutdown(client->device))
 317		return -ENODEV;
 318
 319	spin_lock_irq(&client->lock);
 320	event = list_first_entry(&client->event_list, struct event, link);
 321	list_del(&event->link);
 322	spin_unlock_irq(&client->lock);
 323
 324	total = 0;
 325	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
 326		size = min(event->v[i].size, count - total);
 327		if (copy_to_user(buffer + total, event->v[i].data, size)) {
 328			ret = -EFAULT;
 329			goto out;
 330		}
 331		total += size;
 332	}
 333	ret = total;
 334
 335 out:
 336	kfree(event);
 337
 338	return ret;
 339}
 340
 341static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
 342				 size_t count, loff_t *offset)
 343{
 344	struct client *client = file->private_data;
 345
 346	return dequeue_event(client, buffer, count);
 347}
 348
 349static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
 350				 struct client *client)
 351{
 352	struct fw_card *card = client->device->card;
 353
 354	spin_lock_irq(&card->lock);
 355
 356	event->closure	     = client->bus_reset_closure;
 357	event->type          = FW_CDEV_EVENT_BUS_RESET;
 358	event->generation    = client->device->generation;
 359	event->node_id       = client->device->node_id;
 360	event->local_node_id = card->local_node->node_id;
 361	event->bm_node_id    = card->bm_node_id;
 362	event->irm_node_id   = card->irm_node->node_id;
 363	event->root_node_id  = card->root_node->node_id;
 364
 365	spin_unlock_irq(&card->lock);
 366}
 367
 368static void for_each_client(struct fw_device *device,
 369			    void (*callback)(struct client *client))
 370{
 371	struct client *c;
 372
 373	mutex_lock(&device->client_list_mutex);
 374	list_for_each_entry(c, &device->client_list, link)
 375		callback(c);
 376	mutex_unlock(&device->client_list_mutex);
 377}
 378
 379static int schedule_reallocations(int id, void *p, void *data)
 380{
 381	schedule_if_iso_resource(p);
 382
 383	return 0;
 384}
 385
 386static void queue_bus_reset_event(struct client *client)
 387{
 388	struct bus_reset_event *e;
 389
 390	e = kzalloc(sizeof(*e), GFP_KERNEL);
 391	if (e == NULL) {
 392		fw_notify("Out of memory when allocating event\n");
 393		return;
 394	}
 395
 396	fill_bus_reset_event(&e->reset, client);
 397
 398	queue_event(client, &e->event,
 399		    &e->reset, sizeof(e->reset), NULL, 0);
 400
 401	spin_lock_irq(&client->lock);
 402	idr_for_each(&client->resource_idr, schedule_reallocations, client);
 403	spin_unlock_irq(&client->lock);
 404}
 405
 406void fw_device_cdev_update(struct fw_device *device)
 407{
 408	for_each_client(device, queue_bus_reset_event);
 409}
 410
 411static void wake_up_client(struct client *client)
 412{
 413	wake_up_interruptible(&client->wait);
 414}
 415
 416void fw_device_cdev_remove(struct fw_device *device)
 417{
 418	for_each_client(device, wake_up_client);
 419}
 420
 421union ioctl_arg {
 422	struct fw_cdev_get_info			get_info;
 423	struct fw_cdev_send_request		send_request;
 424	struct fw_cdev_allocate			allocate;
 425	struct fw_cdev_deallocate		deallocate;
 426	struct fw_cdev_send_response		send_response;
 427	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
 428	struct fw_cdev_add_descriptor		add_descriptor;
 429	struct fw_cdev_remove_descriptor	remove_descriptor;
 430	struct fw_cdev_create_iso_context	create_iso_context;
 431	struct fw_cdev_queue_iso		queue_iso;
 432	struct fw_cdev_start_iso		start_iso;
 433	struct fw_cdev_stop_iso			stop_iso;
 434	struct fw_cdev_get_cycle_timer		get_cycle_timer;
 435	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
 436	struct fw_cdev_send_stream_packet	send_stream_packet;
 437	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
 438	struct fw_cdev_send_phy_packet		send_phy_packet;
 439	struct fw_cdev_receive_phy_packets	receive_phy_packets;
 440	struct fw_cdev_set_iso_channels		set_iso_channels;
 441};
 442
 443static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
 444{
 445	struct fw_cdev_get_info *a = &arg->get_info;
 446	struct fw_cdev_event_bus_reset bus_reset;
 447	unsigned long ret = 0;
 448
 449	client->version = a->version;
 450	a->version = FW_CDEV_KERNEL_VERSION;
 451	a->card = client->device->card->index;
 452
 453	down_read(&fw_device_rwsem);
 454
 455	if (a->rom != 0) {
 456		size_t want = a->rom_length;
 457		size_t have = client->device->config_rom_length * 4;
 458
 459		ret = copy_to_user(u64_to_uptr(a->rom),
 460				   client->device->config_rom, min(want, have));
 461	}
 462	a->rom_length = client->device->config_rom_length * 4;
 463
 464	up_read(&fw_device_rwsem);
 465
 466	if (ret != 0)
 467		return -EFAULT;
 468
 469	mutex_lock(&client->device->client_list_mutex);
 470
 471	client->bus_reset_closure = a->bus_reset_closure;
 472	if (a->bus_reset != 0) {
 473		fill_bus_reset_event(&bus_reset, client);
 474		ret = copy_to_user(u64_to_uptr(a->bus_reset),
 475				   &bus_reset, sizeof(bus_reset));
 476	}
 477	if (ret == 0 && list_empty(&client->link))
 478		list_add_tail(&client->link, &client->device->client_list);
 479
 480	mutex_unlock(&client->device->client_list_mutex);
 481
 482	return ret ? -EFAULT : 0;
 483}
 484
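/*
 * add_client_resource() - allocate an idr handle for the resource and take a
 * client reference for it.  The idr_pre_get()/idr_get_new() pair is retried
 * on -EAGAIN because the preallocation is done outside of client->lock.
 */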
 485static int add_client_resource(struct client *client,
 486			       struct client_resource *resource, gfp_t gfp_mask)
 487{
 488	unsigned long flags;
 489	int ret;
 490
 491 retry:
 492	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
 493		return -ENOMEM;
 494
 495	spin_lock_irqsave(&client->lock, flags);
 496	if (client->in_shutdown)
 497		ret = -ECANCELED;
 498	else
 499		ret = idr_get_new(&client->resource_idr, resource,
 500				  &resource->handle);
 501	if (ret >= 0) {
 502		client_get(client);
 503		schedule_if_iso_resource(resource);
 504	}
 505	spin_unlock_irqrestore(&client->lock, flags);
 506
 507	if (ret == -EAGAIN)
 508		goto retry;
 509
 510	return ret < 0 ? ret : 0;
 511}
 512
 513static int release_client_resource(struct client *client, u32 handle,
 514				   client_resource_release_fn_t release,
 515				   struct client_resource **return_resource)
 516{
 517	struct client_resource *resource;
 518
 519	spin_lock_irq(&client->lock);
 520	if (client->in_shutdown)
 521		resource = NULL;
 522	else
 523		resource = idr_find(&client->resource_idr, handle);
 524	if (resource && resource->release == release)
 525		idr_remove(&client->resource_idr, handle);
 526	spin_unlock_irq(&client->lock);
 527
 528	if (!(resource && resource->release == release))
 529		return -EINVAL;
 530
 531	if (return_resource)
 532		*return_resource = resource;
 533	else
 534		resource->release(client, resource);
 535
 536	client_put(client);
 537
 538	return 0;
 539}
 540
 541static void release_transaction(struct client *client,
 542				struct client_resource *resource)
 543{
 544}
 545
 546static void complete_transaction(struct fw_card *card, int rcode,
 547				 void *payload, size_t length, void *data)
 548{
 549	struct outbound_transaction_event *e = data;
 550	struct fw_cdev_event_response *rsp = &e->response;
 551	struct client *client = e->client;
 552	unsigned long flags;
 553
 554	if (length < rsp->length)
 555		rsp->length = length;
 556	if (rcode == RCODE_COMPLETE)
 557		memcpy(rsp->data, payload, rsp->length);
 558
 559	spin_lock_irqsave(&client->lock, flags);
 560	idr_remove(&client->resource_idr, e->r.resource.handle);
 561	if (client->in_shutdown)
 562		wake_up(&client->tx_flush_wait);
 563	spin_unlock_irqrestore(&client->lock, flags);
 564
 565	rsp->type = FW_CDEV_EVENT_RESPONSE;
 566	rsp->rcode = rcode;
 567
 568	/*
 569	 * In the case that sizeof(*rsp) doesn't align with the position of the
 570	 * data, and the read is short, preserve an extra copy of the data
 571	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
 572	 * for short reads and some apps depended on it, this is both safe
 573	 * and prudent for compatibility.
 574	 */
 575	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
 576		queue_event(client, &e->event, rsp, sizeof(*rsp),
 577			    rsp->data, rsp->length);
 578	else
 579		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
 580			    NULL, 0);
 581
 582	/* Drop the idr's reference */
 583	client_put(client);
 584}
 585
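/*
 * init_request() - common helper for the send_request, send_broadcast_request
 * and send_stream_packet ioctls.  The outbound payload is copied into
 * e->response.data, which doubles as the buffer for the response payload, and
 * the transaction is tracked as a client resource until it completes.
 */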
 586static int init_request(struct client *client,
 587			struct fw_cdev_send_request *request,
 588			int destination_id, int speed)
 589{
 590	struct outbound_transaction_event *e;
 591	int ret;
 592
 593	if (request->tcode != TCODE_STREAM_DATA &&
 594	    (request->length > 4096 || request->length > 512 << speed))
 595		return -EIO;
 596
 597	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
 598	    request->length < 4)
 599		return -EINVAL;
 600
 601	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
 602	if (e == NULL)
 603		return -ENOMEM;
 604
 605	e->client = client;
 606	e->response.length = request->length;
 607	e->response.closure = request->closure;
 608
 609	if (request->data &&
 610	    copy_from_user(e->response.data,
 611			   u64_to_uptr(request->data), request->length)) {
 612		ret = -EFAULT;
 613		goto failed;
 614	}
 615
 616	e->r.resource.release = release_transaction;
 617	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
 618	if (ret < 0)
 619		goto failed;
 620
 621	fw_send_request(client->device->card, &e->r.transaction,
 622			request->tcode, destination_id, request->generation,
 623			speed, request->offset, e->response.data,
 624			request->length, complete_transaction, e);
 625	return 0;
 626
 627 failed:
 628	kfree(e);
 629
 630	return ret;
 631}
 632
 633static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
 634{
 635	switch (arg->send_request.tcode) {
 636	case TCODE_WRITE_QUADLET_REQUEST:
 637	case TCODE_WRITE_BLOCK_REQUEST:
 638	case TCODE_READ_QUADLET_REQUEST:
 639	case TCODE_READ_BLOCK_REQUEST:
 640	case TCODE_LOCK_MASK_SWAP:
 641	case TCODE_LOCK_COMPARE_SWAP:
 642	case TCODE_LOCK_FETCH_ADD:
 643	case TCODE_LOCK_LITTLE_ADD:
 644	case TCODE_LOCK_BOUNDED_ADD:
 645	case TCODE_LOCK_WRAP_ADD:
 646	case TCODE_LOCK_VENDOR_DEPENDENT:
 647		break;
 648	default:
 649		return -EINVAL;
 650	}
 651
 652	return init_request(client, &arg->send_request, client->device->node_id,
 653			    client->device->max_speed);
 654}
 655
 656static inline bool is_fcp_request(struct fw_request *request)
 657{
 658	return request == NULL;
 659}
 660
 661static void release_request(struct client *client,
 662			    struct client_resource *resource)
 663{
 664	struct inbound_transaction_resource *r = container_of(resource,
 665			struct inbound_transaction_resource, resource);
 666
 667	if (is_fcp_request(r->request))
 668		kfree(r->data);
 669	else
 670		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);
 671
 672	fw_card_put(r->card);
 673	kfree(r);
 674}
 675
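/*
 * handle_request() - fw_address_handler callback for inbound requests.  FCP
 * requests (request == NULL) get a private copy of the payload; all other
 * requests stay pending until the client answers them via the send_response
 * ioctl, or are answered with RCODE_CONFLICT_ERROR on error and on release.
 */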
 676static void handle_request(struct fw_card *card, struct fw_request *request,
 677			   int tcode, int destination, int source,
 678			   int generation, unsigned long long offset,
 679			   void *payload, size_t length, void *callback_data)
 680{
 681	struct address_handler_resource *handler = callback_data;
 682	struct inbound_transaction_resource *r;
 683	struct inbound_transaction_event *e;
 684	size_t event_size0;
 685	void *fcp_frame = NULL;
 686	int ret;
 687
 688	/* card may be different from handler->client->device->card */
 689	fw_card_get(card);
 690
 691	r = kmalloc(sizeof(*r), GFP_ATOMIC);
 692	e = kmalloc(sizeof(*e), GFP_ATOMIC);
 693	if (r == NULL || e == NULL) {
 694		fw_notify("Out of memory when allocating event\n");
 695		goto failed;
 696	}
 697	r->card    = card;
 698	r->request = request;
 699	r->data    = payload;
 700	r->length  = length;
 701
 702	if (is_fcp_request(request)) {
 703		/*
 704		 * FIXME: Let core-transaction.c manage a
 705		 * single reference-counted copy?
 706		 */
 707		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
 708		if (fcp_frame == NULL)
 709			goto failed;
 710
 711		r->data = fcp_frame;
 712	}
 713
 714	r->resource.release = release_request;
 715	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
 716	if (ret < 0)
 717		goto failed;
 718
 719	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
 720		struct fw_cdev_event_request *req = &e->req.request;
 721
 722		if (tcode & 0x10)
 723			tcode = TCODE_LOCK_REQUEST;
 724
 725		req->type	= FW_CDEV_EVENT_REQUEST;
 726		req->tcode	= tcode;
 727		req->offset	= offset;
 728		req->length	= length;
 729		req->handle	= r->resource.handle;
 730		req->closure	= handler->closure;
 731		event_size0	= sizeof(*req);
 732	} else {
 733		struct fw_cdev_event_request2 *req = &e->req.request2;
 734
 735		req->type	= FW_CDEV_EVENT_REQUEST2;
 736		req->tcode	= tcode;
 737		req->offset	= offset;
 738		req->source_node_id = source;
 739		req->destination_node_id = destination;
 740		req->card	= card->index;
 741		req->generation	= generation;
 742		req->length	= length;
 743		req->handle	= r->resource.handle;
 744		req->closure	= handler->closure;
 745		event_size0	= sizeof(*req);
 746	}
 747
 748	queue_event(handler->client, &e->event,
 749		    &e->req, event_size0, r->data, length);
 750	return;
 751
 752 failed:
 753	kfree(r);
 754	kfree(e);
 755	kfree(fcp_frame);
 756
 757	if (!is_fcp_request(request))
 758		fw_send_response(card, request, RCODE_CONFLICT_ERROR);
 759
 760	fw_card_put(card);
 761}
 762
 763static void release_address_handler(struct client *client,
 764				    struct client_resource *resource)
 765{
 766	struct address_handler_resource *r =
 767	    container_of(resource, struct address_handler_resource, resource);
 768
 769	fw_core_remove_address_handler(&r->handler);
 770	kfree(r);
 771}
 772
 773static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
 774{
 775	struct fw_cdev_allocate *a = &arg->allocate;
 776	struct address_handler_resource *r;
 777	struct fw_address_region region;
 778	int ret;
 779
 780	r = kmalloc(sizeof(*r), GFP_KERNEL);
 781	if (r == NULL)
 782		return -ENOMEM;
 783
 784	region.start = a->offset;
 785	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
 786		region.end = a->offset + a->length;
 787	else
 788		region.end = a->region_end;
 789
 790	r->handler.length           = a->length;
 791	r->handler.address_callback = handle_request;
 792	r->handler.callback_data    = r;
 793	r->closure   = a->closure;
 794	r->client    = client;
 795
 796	ret = fw_core_add_address_handler(&r->handler, &region);
 797	if (ret < 0) {
 798		kfree(r);
 799		return ret;
 800	}
 801	a->offset = r->handler.offset;
 802
 803	r->resource.release = release_address_handler;
 804	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
 805	if (ret < 0) {
 806		release_address_handler(client, &r->resource);
 807		return ret;
 808	}
 809	a->handle = r->resource.handle;
 810
 811	return 0;
 812}
 813
 814static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
 815{
 816	return release_client_resource(client, arg->deallocate.handle,
 817				       release_address_handler, NULL);
 818}
 819
 820static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
 821{
 822	struct fw_cdev_send_response *a = &arg->send_response;
 823	struct client_resource *resource;
 824	struct inbound_transaction_resource *r;
 825	int ret = 0;
 826
 827	if (release_client_resource(client, a->handle,
 828				    release_request, &resource) < 0)
 829		return -EINVAL;
 830
 831	r = container_of(resource, struct inbound_transaction_resource,
 832			 resource);
 833	if (is_fcp_request(r->request))
 834		goto out;
 835
 836	if (a->length != fw_get_response_length(r->request)) {
 837		ret = -EINVAL;
 838		kfree(r->request);
 839		goto out;
 840	}
 841	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
 842		ret = -EFAULT;
 843		kfree(r->request);
 844		goto out;
 845	}
 846	fw_send_response(r->card, r->request, a->rcode);
 847 out:
 848	fw_card_put(r->card);
 849	kfree(r);
 850
 851	return ret;
 852}
 853
 854static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
 855{
 856	fw_schedule_bus_reset(client->device->card, true,
 857			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
 858	return 0;
 859}
 860
 861static void release_descriptor(struct client *client,
 862			       struct client_resource *resource)
 863{
 864	struct descriptor_resource *r =
 865		container_of(resource, struct descriptor_resource, resource);
 866
 867	fw_core_remove_descriptor(&r->descriptor);
 868	kfree(r);
 869}
 870
 871static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
 872{
 873	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
 874	struct descriptor_resource *r;
 875	int ret;
 876
 877	/* Access policy: Allow this ioctl only on local nodes' device files. */
 878	if (!client->device->is_local)
 879		return -ENOSYS;
 880
 881	if (a->length > 256)
 882		return -EINVAL;
 883
 884	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
 885	if (r == NULL)
 886		return -ENOMEM;
 887
 888	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
 889		ret = -EFAULT;
 890		goto failed;
 891	}
 892
 893	r->descriptor.length    = a->length;
 894	r->descriptor.immediate = a->immediate;
 895	r->descriptor.key       = a->key;
 896	r->descriptor.data      = r->data;
 897
 898	ret = fw_core_add_descriptor(&r->descriptor);
 899	if (ret < 0)
 900		goto failed;
 901
 902	r->resource.release = release_descriptor;
 903	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
 904	if (ret < 0) {
 905		fw_core_remove_descriptor(&r->descriptor);
 906		goto failed;
 907	}
 908	a->handle = r->resource.handle;
 909
 910	return 0;
 911 failed:
 912	kfree(r);
 913
 914	return ret;
 915}
 916
 917static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
 918{
 919	return release_client_resource(client, arg->remove_descriptor.handle,
 920				       release_descriptor, NULL);
 921}
 922
 923static void iso_callback(struct fw_iso_context *context, u32 cycle,
 924			 size_t header_length, void *header, void *data)
 925{
 926	struct client *client = data;
 927	struct iso_interrupt_event *e;
 928
 929	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
 930	if (e == NULL) {
 931		fw_notify("Out of memory when allocating event\n");
 932		return;
 933	}
 934	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
 935	e->interrupt.closure   = client->iso_closure;
 936	e->interrupt.cycle     = cycle;
 937	e->interrupt.header_length = header_length;
 938	memcpy(e->interrupt.header, header, header_length);
 939	queue_event(client, &e->event, &e->interrupt,
 940		    sizeof(e->interrupt) + header_length, NULL, 0);
 941}
 942
 943static void iso_mc_callback(struct fw_iso_context *context,
 944			    dma_addr_t completed, void *data)
 945{
 946	struct client *client = data;
 947	struct iso_interrupt_mc_event *e;
 948
 949	e = kmalloc(sizeof(*e), GFP_ATOMIC);
 950	if (e == NULL) {
 951		fw_notify("Out of memory when allocating event\n");
 952		return;
 953	}
 954	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
 955	e->interrupt.closure   = client->iso_closure;
 956	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
 957						      completed);
 958	queue_event(client, &e->event, &e->interrupt,
 959		    sizeof(e->interrupt), NULL, 0);
 960}
 961
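/*
 * ioctl_create_iso_context() - create the client's single isochronous
 * context (transmit, receive or multichannel receive).  The BUILD_BUG_ON
 * below relies on the FW_CDEV_ISO_CONTEXT_* constants matching the
 * in-kernel FW_ISO_CONTEXT_* values.
 */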
 962static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 963{
 964	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
 965	struct fw_iso_context *context;
 966	fw_iso_callback_t cb;
 967
 968	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
 969		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
 970		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
 971					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);
 972
 973	switch (a->type) {
 974	case FW_ISO_CONTEXT_TRANSMIT:
 975		if (a->speed > SCODE_3200 || a->channel > 63)
 976			return -EINVAL;
 977
 978		cb = iso_callback;
 979		break;
 980
 981	case FW_ISO_CONTEXT_RECEIVE:
 982		if (a->header_size < 4 || (a->header_size & 3) ||
 983		    a->channel > 63)
 984			return -EINVAL;
 985
 986		cb = iso_callback;
 987		break;
 988
 989	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
 990		cb = (fw_iso_callback_t)iso_mc_callback;
 991		break;
 992
 993	default:
 994		return -EINVAL;
 995	}
 996
 997	context = fw_iso_context_create(client->device->card, a->type,
 998			a->channel, a->speed, a->header_size, cb, client);
 999	if (IS_ERR(context))
1000		return PTR_ERR(context);
1001
1002	/* We only support one context at this time. */
1003	spin_lock_irq(&client->lock);
1004	if (client->iso_context != NULL) {
1005		spin_unlock_irq(&client->lock);
1006		fw_iso_context_destroy(context);
1007		return -EBUSY;
1008	}
1009	client->iso_closure = a->closure;
1010	client->iso_context = context;
1011	spin_unlock_irq(&client->lock);
1012
1013	a->handle = 0;
1014
1015	return 0;
1016}
1017
1018static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
1019{
1020	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
1021	struct fw_iso_context *ctx = client->iso_context;
1022
1023	if (ctx == NULL || a->handle != 0)
1024		return -EINVAL;
1025
1026	return fw_iso_context_set_channels(ctx, &a->channels);
1027}
1028
1029/* Macros for decoding the iso packet control header. */
1030#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
1031#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
1032#define GET_SKIP(v)		(((v) >> 17) & 0x01)
1033#define GET_TAG(v)		(((v) >> 18) & 0x03)
1034#define GET_SY(v)		(((v) >> 20) & 0x0f)
1035#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
1036
1037static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
1038{
1039	struct fw_cdev_queue_iso *a = &arg->queue_iso;
1040	struct fw_cdev_iso_packet __user *p, *end, *next;
1041	struct fw_iso_context *ctx = client->iso_context;
1042	unsigned long payload, buffer_end, transmit_header_bytes = 0;
1043	u32 control;
1044	int count;
1045	struct {
1046		struct fw_iso_packet packet;
1047		u8 header[256];
1048	} u;
1049
1050	if (ctx == NULL || a->handle != 0)
1051		return -EINVAL;
1052
1053	/*
1054	 * If the user passes a non-NULL data pointer, has mmap()'ed
1055	 * the iso buffer, and the pointer points inside the buffer,
1056	 * we setup the payload pointers accordingly.  Otherwise we
1057	 * set them both to 0, which will still let packets with
1058	 * payload_length == 0 through.  In other words, if no packets
1059	 * use the indirect payload, the iso buffer need not be mapped
1060	 * and the a->data pointer is ignored.
1061	 */
1062	payload = (unsigned long)a->data - client->vm_start;
1063	buffer_end = client->buffer.page_count << PAGE_SHIFT;
1064	if (a->data == 0 || client->buffer.pages == NULL ||
1065	    payload >= buffer_end) {
1066		payload = 0;
1067		buffer_end = 0;
1068	}
1069
1070	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
1071		return -EINVAL;
1072
1073	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
1074	if (!access_ok(VERIFY_READ, p, a->size))
1075		return -EFAULT;
1076
1077	end = (void __user *)p + a->size;
1078	count = 0;
1079	while (p < end) {
1080		if (get_user(control, &p->control))
1081			return -EFAULT;
1082		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
1083		u.packet.interrupt = GET_INTERRUPT(control);
1084		u.packet.skip = GET_SKIP(control);
1085		u.packet.tag = GET_TAG(control);
1086		u.packet.sy = GET_SY(control);
1087		u.packet.header_length = GET_HEADER_LENGTH(control);
1088
1089		switch (ctx->type) {
1090		case FW_ISO_CONTEXT_TRANSMIT:
1091			if (u.packet.header_length & 3)
1092				return -EINVAL;
1093			transmit_header_bytes = u.packet.header_length;
1094			break;
1095
1096		case FW_ISO_CONTEXT_RECEIVE:
1097			if (u.packet.header_length == 0 ||
1098			    u.packet.header_length % ctx->header_size != 0)
1099				return -EINVAL;
1100			break;
1101
1102		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
1103			if (u.packet.payload_length == 0 ||
1104			    u.packet.payload_length & 3)
1105				return -EINVAL;
1106			break;
1107		}
1108
1109		next = (struct fw_cdev_iso_packet __user *)
1110			&p->header[transmit_header_bytes / 4];
1111		if (next > end)
1112			return -EINVAL;
1113		if (__copy_from_user
1114		    (u.packet.header, p->header, transmit_header_bytes))
1115			return -EFAULT;
1116		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
1117		    u.packet.header_length + u.packet.payload_length > 0)
1118			return -EINVAL;
1119		if (payload + u.packet.payload_length > buffer_end)
1120			return -EINVAL;
1121
1122		if (fw_iso_context_queue(ctx, &u.packet,
1123					 &client->buffer, payload))
1124			break;
1125
1126		p = next;
1127		payload += u.packet.payload_length;
1128		count++;
1129	}
1130	fw_iso_context_queue_flush(ctx);
1131
1132	a->size    -= uptr_to_u64(p) - a->packets;
1133	a->packets  = uptr_to_u64(p);
1134	a->data     = client->vm_start + payload;
1135
1136	return count;
1137}
1138
1139static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
1140{
1141	struct fw_cdev_start_iso *a = &arg->start_iso;
1142
1143	BUILD_BUG_ON(
1144	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
1145	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
1146	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
1147	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
1148	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);
1149
1150	if (client->iso_context == NULL || a->handle != 0)
1151		return -EINVAL;
1152
1153	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
1154	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
1155		return -EINVAL;
1156
1157	return fw_iso_context_start(client->iso_context,
1158				    a->cycle, a->sync, a->tags);
1159}
1160
1161static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
1162{
1163	struct fw_cdev_stop_iso *a = &arg->stop_iso;
1164
1165	if (client->iso_context == NULL || a->handle != 0)
1166		return -EINVAL;
1167
1168	return fw_iso_context_stop(client->iso_context);
1169}
1170
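/*
 * ioctl_get_cycle_timer2() - read the bus cycle timer together with a system
 * clock timestamp.  Interrupts are disabled so that the CSR_CYCLE_TIME read
 * and the CLOCK_* timestamp are taken as close together as possible.
 */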
1171static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
1172{
1173	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
1174	struct fw_card *card = client->device->card;
1175	struct timespec ts = {0, 0};
1176	u32 cycle_time;
1177	int ret = 0;
1178
1179	local_irq_disable();
1180
1181	cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
1182
1183	switch (a->clk_id) {
1184	case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
1185	case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
1186	case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
1187	default:
1188		ret = -EINVAL;
1189	}
1190
1191	local_irq_enable();
1192
1193	a->tv_sec      = ts.tv_sec;
1194	a->tv_nsec     = ts.tv_nsec;
1195	a->cycle_timer = cycle_time;
1196
1197	return ret;
1198}
1199
1200static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
1201{
1202	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
1203	struct fw_cdev_get_cycle_timer2 ct2;
1204
1205	ct2.clk_id = CLOCK_REALTIME;
1206	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
1207
1208	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
1209	a->cycle_timer = ct2.cycle_timer;
1210
1211	return 0;
1212}
1213
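/*
 * iso_resource_work() - worker for allocation, reallocation and deallocation
 * of isochronous channels and bandwidth.  After a bus reset, ISO_RES_ALLOC
 * waits up to a second for other nodes' reallocations before allocating, and
 * successfully allocated resources are rescheduled as ISO_RES_REALLOC.
 */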
1214static void iso_resource_work(struct work_struct *work)
1215{
1216	struct iso_resource_event *e;
1217	struct iso_resource *r =
1218			container_of(work, struct iso_resource, work.work);
1219	struct client *client = r->client;
1220	int generation, channel, bandwidth, todo;
1221	bool skip, free, success;
1222
1223	spin_lock_irq(&client->lock);
1224	generation = client->device->generation;
1225	todo = r->todo;
1226	/* Allow 1000ms grace period for other reallocations. */
1227	if (todo == ISO_RES_ALLOC &&
1228	    time_before64(get_jiffies_64(),
1229			  client->device->card->reset_jiffies + HZ)) {
1230		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
1231		skip = true;
1232	} else {
1233		/* We could be called twice within the same generation. */
1234		skip = todo == ISO_RES_REALLOC &&
1235		       r->generation == generation;
1236	}
1237	free = todo == ISO_RES_DEALLOC ||
1238	       todo == ISO_RES_ALLOC_ONCE ||
1239	       todo == ISO_RES_DEALLOC_ONCE;
1240	r->generation = generation;
1241	spin_unlock_irq(&client->lock);
1242
1243	if (skip)
1244		goto out;
1245
1246	bandwidth = r->bandwidth;
1247
1248	fw_iso_resource_manage(client->device->card, generation,
1249			r->channels, &channel, &bandwidth,
1250			todo == ISO_RES_ALLOC ||
1251			todo == ISO_RES_REALLOC ||
1252			todo == ISO_RES_ALLOC_ONCE);
1253	/*
1254	 * Is this generation outdated already?  As long as this resource sticks
1255	 * in the idr, it will be scheduled again for a newer generation or at
1256	 * shutdown.
1257	 */
1258	if (channel == -EAGAIN &&
1259	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
1260		goto out;
1261
1262	success = channel >= 0 || bandwidth > 0;
1263
1264	spin_lock_irq(&client->lock);
1265	/*
1266	 * Transit from allocation to reallocation, except if the client
1267	 * requested deallocation in the meantime.
1268	 */
1269	if (r->todo == ISO_RES_ALLOC)
1270		r->todo = ISO_RES_REALLOC;
1271	/*
1272	 * Allocation or reallocation failure?  Pull this resource out of the
1273	 * idr and prepare for deletion, unless the client is shutting down.
1274	 */
1275	if (r->todo == ISO_RES_REALLOC && !success &&
1276	    !client->in_shutdown &&
1277	    idr_find(&client->resource_idr, r->resource.handle)) {
1278		idr_remove(&client->resource_idr, r->resource.handle);
1279		client_put(client);
1280		free = true;
1281	}
1282	spin_unlock_irq(&client->lock);
1283
1284	if (todo == ISO_RES_ALLOC && channel >= 0)
1285		r->channels = 1ULL << channel;
1286
1287	if (todo == ISO_RES_REALLOC && success)
1288		goto out;
1289
1290	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
1291		e = r->e_alloc;
1292		r->e_alloc = NULL;
1293	} else {
1294		e = r->e_dealloc;
1295		r->e_dealloc = NULL;
1296	}
1297	e->iso_resource.handle    = r->resource.handle;
1298	e->iso_resource.channel   = channel;
1299	e->iso_resource.bandwidth = bandwidth;
1300
1301	queue_event(client, &e->event,
1302		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);
1303
1304	if (free) {
1305		cancel_delayed_work(&r->work);
1306		kfree(r->e_alloc);
1307		kfree(r->e_dealloc);
1308		kfree(r);
1309	}
1310 out:
1311	client_put(client);
1312}
1313
1314static void release_iso_resource(struct client *client,
1315				 struct client_resource *resource)
1316{
1317	struct iso_resource *r =
1318		container_of(resource, struct iso_resource, resource);
1319
1320	spin_lock_irq(&client->lock);
1321	r->todo = ISO_RES_DEALLOC;
1322	schedule_iso_resource(r, 0);
1323	spin_unlock_irq(&client->lock);
1324}
1325
1326static int init_iso_resource(struct client *client,
1327		struct fw_cdev_allocate_iso_resource *request, int todo)
1328{
1329	struct iso_resource_event *e1, *e2;
1330	struct iso_resource *r;
1331	int ret;
1332
1333	if ((request->channels == 0 && request->bandwidth == 0) ||
1334	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
1335	    request->bandwidth < 0)
1336		return -EINVAL;
1337
1338	r  = kmalloc(sizeof(*r), GFP_KERNEL);
1339	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
1340	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
1341	if (r == NULL || e1 == NULL || e2 == NULL) {
1342		ret = -ENOMEM;
1343		goto fail;
1344	}
1345
1346	INIT_DELAYED_WORK(&r->work, iso_resource_work);
1347	r->client	= client;
1348	r->todo		= todo;
1349	r->generation	= -1;
1350	r->channels	= request->channels;
1351	r->bandwidth	= request->bandwidth;
1352	r->e_alloc	= e1;
1353	r->e_dealloc	= e2;
1354
1355	e1->iso_resource.closure = request->closure;
1356	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
1357	e2->iso_resource.closure = request->closure;
1358	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
1359
1360	if (todo == ISO_RES_ALLOC) {
1361		r->resource.release = release_iso_resource;
1362		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
1363		if (ret < 0)
1364			goto fail;
1365	} else {
1366		r->resource.release = NULL;
1367		r->resource.handle = -1;
1368		schedule_iso_resource(r, 0);
1369	}
1370	request->handle = r->resource.handle;
1371
1372	return 0;
1373 fail:
1374	kfree(r);
1375	kfree(e1);
1376	kfree(e2);
1377
1378	return ret;
1379}
1380
1381static int ioctl_allocate_iso_resource(struct client *client,
1382				       union ioctl_arg *arg)
1383{
1384	return init_iso_resource(client,
1385			&arg->allocate_iso_resource, ISO_RES_ALLOC);
1386}
1387
1388static int ioctl_deallocate_iso_resource(struct client *client,
1389					 union ioctl_arg *arg)
1390{
1391	return release_client_resource(client,
1392			arg->deallocate.handle, release_iso_resource, NULL);
1393}
1394
1395static int ioctl_allocate_iso_resource_once(struct client *client,
1396					    union ioctl_arg *arg)
1397{
1398	return init_iso_resource(client,
1399			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
1400}
1401
1402static int ioctl_deallocate_iso_resource_once(struct client *client,
1403					      union ioctl_arg *arg)
1404{
1405	return init_iso_resource(client,
1406			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
1407}
1408
1409/*
1410 * Returns a speed code:  Maximum speed to or from this device,
1411 * limited by the device's link speed, the local node's link speed,
1412 * and all PHY port speeds between the two links.
1413 */
1414static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
1415{
1416	return client->device->max_speed;
1417}
1418
1419static int ioctl_send_broadcast_request(struct client *client,
1420					union ioctl_arg *arg)
1421{
1422	struct fw_cdev_send_request *a = &arg->send_request;
1423
1424	switch (a->tcode) {
1425	case TCODE_WRITE_QUADLET_REQUEST:
1426	case TCODE_WRITE_BLOCK_REQUEST:
1427		break;
1428	default:
1429		return -EINVAL;
1430	}
1431
1432	/* Security policy: Only allow accesses to Units Space. */
1433	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1434		return -EACCES;
1435
1436	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
1437}
1438
1439static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
1440{
1441	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
1442	struct fw_cdev_send_request request;
1443	int dest;
1444
1445	if (a->speed > client->device->card->link_speed ||
1446	    a->length > 1024 << a->speed)
1447		return -EIO;
1448
1449	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
1450		return -EINVAL;
1451
1452	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
1453	request.tcode		= TCODE_STREAM_DATA;
1454	request.length		= a->length;
1455	request.closure		= a->closure;
1456	request.data		= a->data;
1457	request.generation	= a->generation;
1458
1459	return init_request(client, &request, dest, a->speed);
1460}
1461
1462static void outbound_phy_packet_callback(struct fw_packet *packet,
1463					 struct fw_card *card, int status)
1464{
1465	struct outbound_phy_packet_event *e =
1466		container_of(packet, struct outbound_phy_packet_event, p);
1467
1468	switch (status) {
1469	/* expected: */
1470	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
1471	/* should never happen with PHY packets: */
1472	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
1473	case ACK_BUSY_X:
1474	case ACK_BUSY_A:
1475	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
1476	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
1477	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
1478	/* stale generation; cancelled; on certain controllers: no ack */
1479	default:		e->phy_packet.rcode = status;		break;
1480	}
1481	e->phy_packet.data[0] = packet->timestamp;
1482
1483	queue_event(e->client, &e->event, &e->phy_packet,
1484		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
1485	client_put(e->client);
1486}
1487
1488static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
1489{
1490	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
1491	struct fw_card *card = client->device->card;
1492	struct outbound_phy_packet_event *e;
1493
1494	/* Access policy: Allow this ioctl only on local nodes' device files. */
1495	if (!client->device->is_local)
1496		return -ENOSYS;
1497
1498	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
1499	if (e == NULL)
1500		return -ENOMEM;
1501
1502	client_get(client);
1503	e->client		= client;
1504	e->p.speed		= SCODE_100;
1505	e->p.generation		= a->generation;
1506	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
1507	e->p.header[1]		= a->data[0];
1508	e->p.header[2]		= a->data[1];
1509	e->p.header_length	= 12;
1510	e->p.callback		= outbound_phy_packet_callback;
1511	e->phy_packet.closure	= a->closure;
1512	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
1513	if (is_ping_packet(a->data))
1514			e->phy_packet.length = 4;
1515
1516	card->driver->send_request(card, &e->p);
1517
1518	return 0;
1519}
1520
1521static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
1522{
1523	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
1524	struct fw_card *card = client->device->card;
1525
1526	/* Access policy: Allow this ioctl only on local nodes' device files. */
1527	if (!client->device->is_local)
1528		return -ENOSYS;
1529
1530	spin_lock_irq(&card->lock);
1531
1532	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
1533	client->phy_receiver_closure = a->closure;
1534
1535	spin_unlock_irq(&card->lock);
1536
1537	return 0;
1538}
1539
1540void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
1541{
1542	struct client *client;
1543	struct inbound_phy_packet_event *e;
1544	unsigned long flags;
1545
1546	spin_lock_irqsave(&card->lock, flags);
1547
1548	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
1549		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
1550		if (e == NULL) {
1551			fw_notify("Out of memory when allocating event\n");
1552			break;
1553		}
1554		e->phy_packet.closure	= client->phy_receiver_closure;
1555		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
1556		e->phy_packet.rcode	= RCODE_COMPLETE;
1557		e->phy_packet.length	= 8;
1558		e->phy_packet.data[0]	= p->header[1];
1559		e->phy_packet.data[1]	= p->header[2];
1560		queue_event(client, &e->event,
1561			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
1562	}
1563
1564	spin_unlock_irqrestore(&card->lock, flags);
1565}
1566
1567static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
1568	[0x00] = ioctl_get_info,
1569	[0x01] = ioctl_send_request,
1570	[0x02] = ioctl_allocate,
1571	[0x03] = ioctl_deallocate,
1572	[0x04] = ioctl_send_response,
1573	[0x05] = ioctl_initiate_bus_reset,
1574	[0x06] = ioctl_add_descriptor,
1575	[0x07] = ioctl_remove_descriptor,
1576	[0x08] = ioctl_create_iso_context,
1577	[0x09] = ioctl_queue_iso,
1578	[0x0a] = ioctl_start_iso,
1579	[0x0b] = ioctl_stop_iso,
1580	[0x0c] = ioctl_get_cycle_timer,
1581	[0x0d] = ioctl_allocate_iso_resource,
1582	[0x0e] = ioctl_deallocate_iso_resource,
1583	[0x0f] = ioctl_allocate_iso_resource_once,
1584	[0x10] = ioctl_deallocate_iso_resource_once,
1585	[0x11] = ioctl_get_speed,
1586	[0x12] = ioctl_send_broadcast_request,
1587	[0x13] = ioctl_send_stream_packet,
1588	[0x14] = ioctl_get_cycle_timer2,
1589	[0x15] = ioctl_send_phy_packet,
1590	[0x16] = ioctl_receive_phy_packets,
1591	[0x17] = ioctl_set_iso_channels,
1592};
1593
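/*
 * dispatch_ioctl() - validate the ioctl number ('#' type, NR indexes
 * ioctl_handlers[]), copy the argument into a kernel buffer according to
 * _IOC_DIR(), call the handler, and copy the result back for _IOC_READ.
 */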
1594static int dispatch_ioctl(struct client *client,
1595			  unsigned int cmd, void __user *arg)
1596{
1597	union ioctl_arg buffer;
1598	int ret;
1599
1600	if (fw_device_is_shutdown(client->device))
1601		return -ENODEV;
1602
1603	if (_IOC_TYPE(cmd) != '#' ||
1604	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
1605	    _IOC_SIZE(cmd) > sizeof(buffer))
1606		return -ENOTTY;
1607
1608	if (_IOC_DIR(cmd) == _IOC_READ)
1609		memset(&buffer, 0, _IOC_SIZE(cmd));
1610
1611	if (_IOC_DIR(cmd) & _IOC_WRITE)
1612		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
1613			return -EFAULT;
1614
1615	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
1616	if (ret < 0)
1617		return ret;
1618
1619	if (_IOC_DIR(cmd) & _IOC_READ)
1620		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
1621			return -EFAULT;
1622
1623	return ret;
1624}
1625
1626static long fw_device_op_ioctl(struct file *file,
1627			       unsigned int cmd, unsigned long arg)
1628{
1629	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
1630}
1631
1632#ifdef CONFIG_COMPAT
1633static long fw_device_op_compat_ioctl(struct file *file,
1634				      unsigned int cmd, unsigned long arg)
1635{
1636	return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
1637}
1638#endif
1639
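/*
 * fw_device_op_mmap() - map the client's single isochronous DMA buffer.
 * The mapping must be shared and page aligned; VM_WRITE selects the DMA
 * direction (writable mapping => DMA_TO_DEVICE, i.e. transmit).
 */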
1640static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
1641{
1642	struct client *client = file->private_data;
1643	enum dma_data_direction direction;
1644	unsigned long size;
1645	int page_count, ret;
1646
1647	if (fw_device_is_shutdown(client->device))
1648		return -ENODEV;
1649
1650	/* FIXME: We could support multiple buffers, but we don't. */
1651	if (client->buffer.pages != NULL)
1652		return -EBUSY;
1653
1654	if (!(vma->vm_flags & VM_SHARED))
1655		return -EINVAL;
1656
1657	if (vma->vm_start & ~PAGE_MASK)
1658		return -EINVAL;
1659
1660	client->vm_start = vma->vm_start;
1661	size = vma->vm_end - vma->vm_start;
1662	page_count = size >> PAGE_SHIFT;
1663	if (size & ~PAGE_MASK)
1664		return -EINVAL;
1665
1666	if (vma->vm_flags & VM_WRITE)
1667		direction = DMA_TO_DEVICE;
1668	else
1669		direction = DMA_FROM_DEVICE;
1670
1671	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
1672				 page_count, direction);
1673	if (ret < 0)
1674		return ret;
1675
1676	ret = fw_iso_buffer_map(&client->buffer, vma);
1677	if (ret < 0)
1678		fw_iso_buffer_destroy(&client->buffer, client->device->card);
1679
1680	return ret;
1681}
1682
1683static int is_outbound_transaction_resource(int id, void *p, void *data)
1684{
1685	struct client_resource *resource = p;
1686
1687	return resource->release == release_transaction;
1688}
1689
1690static int has_outbound_transactions(struct client *client)
1691{
1692	int ret;
1693
1694	spin_lock_irq(&client->lock);
1695	ret = idr_for_each(&client->resource_idr,
1696			   is_outbound_transaction_resource, NULL);
1697	spin_unlock_irq(&client->lock);
1698
1699	return ret;
1700}
1701
1702static int shutdown_resource(int id, void *p, void *data)
1703{
1704	struct client_resource *resource = p;
1705	struct client *client = data;
1706
1707	resource->release(client, resource);
1708	client_put(client);
1709
1710	return 0;
1711}
1712
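/*
 * fw_device_op_release() - client teardown: unlink from the PHY receiver and
 * client lists, destroy the iso context and buffer, wait for outstanding
 * outbound transactions to flush, then release all remaining resources and
 * free any queued events.
 */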
1713static int fw_device_op_release(struct inode *inode, struct file *file)
1714{
1715	struct client *client = file->private_data;
1716	struct event *event, *next_event;
1717
1718	spin_lock_irq(&client->device->card->lock);
1719	list_del(&client->phy_receiver_link);
1720	spin_unlock_irq(&client->device->card->lock);
1721
1722	mutex_lock(&client->device->client_list_mutex);
1723	list_del(&client->link);
1724	mutex_unlock(&client->device->client_list_mutex);
1725
1726	if (client->iso_context)
1727		fw_iso_context_destroy(client->iso_context);
1728
1729	if (client->buffer.pages)
1730		fw_iso_buffer_destroy(&client->buffer, client->device->card);
1731
1732	/* Freeze client->resource_idr and client->event_list */
1733	spin_lock_irq(&client->lock);
1734	client->in_shutdown = true;
1735	spin_unlock_irq(&client->lock);
1736
1737	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
1738
1739	idr_for_each(&client->resource_idr, shutdown_resource, client);
1740	idr_remove_all(&client->resource_idr);
1741	idr_destroy(&client->resource_idr);
1742
1743	list_for_each_entry_safe(event, next_event, &client->event_list, link)
1744		kfree(event);
1745
1746	client_put(client);
1747
1748	return 0;
1749}
1750
1751static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
1752{
1753	struct client *client = file->private_data;
1754	unsigned int mask = 0;
1755
1756	poll_wait(file, &client->wait, pt);
1757
1758	if (fw_device_is_shutdown(client->device))
1759		mask |= POLLHUP | POLLERR;
1760	if (!list_empty(&client->event_list))
1761		mask |= POLLIN | POLLRDNORM;
1762
1763	return mask;
1764}
1765
1766const struct file_operations fw_device_ops = {
1767	.owner		= THIS_MODULE,
1768	.llseek		= no_llseek,
1769	.open		= fw_device_op_open,
1770	.read		= fw_device_op_read,
1771	.unlocked_ioctl	= fw_device_op_ioctl,
1772	.mmap		= fw_device_op_mmap,
1773	.release	= fw_device_op_release,
1774	.poll		= fw_device_op_poll,
1775#ifdef CONFIG_COMPAT
1776	.compat_ioctl	= fw_device_op_compat_ioctl,
1777#endif
1778};
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Char device for device raw access
   4 *
   5 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
   6 */
   7
   8#include <linux/bug.h>
   9#include <linux/compat.h>
  10#include <linux/delay.h>
  11#include <linux/device.h>
  12#include <linux/dma-mapping.h>
  13#include <linux/errno.h>
  14#include <linux/firewire.h>
  15#include <linux/firewire-cdev.h>
  16#include <linux/idr.h>
  17#include <linux/irqflags.h>
  18#include <linux/jiffies.h>
  19#include <linux/kernel.h>
  20#include <linux/kref.h>
  21#include <linux/mm.h>
  22#include <linux/module.h>
  23#include <linux/mutex.h>
  24#include <linux/poll.h>
  25#include <linux/sched.h> /* required for linux/wait.h */
  26#include <linux/slab.h>
  27#include <linux/spinlock.h>
  28#include <linux/string.h>
  29#include <linux/time.h>
  30#include <linux/uaccess.h>
  31#include <linux/vmalloc.h>
  32#include <linux/wait.h>
  33#include <linux/workqueue.h>
  34
  35
  36#include "core.h"
  37
  38/*
  39 * ABI version history is documented in linux/firewire-cdev.h.
  40 */
  41#define FW_CDEV_KERNEL_VERSION			5
  42#define FW_CDEV_VERSION_EVENT_REQUEST2		4
  43#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
  44#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW	5
  45
  46struct client {
  47	u32 version;
  48	struct fw_device *device;
  49
  50	spinlock_t lock;
  51	bool in_shutdown;
  52	struct idr resource_idr;
  53	struct list_head event_list;
  54	wait_queue_head_t wait;
  55	wait_queue_head_t tx_flush_wait;
  56	u64 bus_reset_closure;
  57
  58	struct fw_iso_context *iso_context;
  59	u64 iso_closure;
  60	struct fw_iso_buffer buffer;
  61	unsigned long vm_start;
  62	bool buffer_is_mapped;
  63
  64	struct list_head phy_receiver_link;
  65	u64 phy_receiver_closure;
  66
  67	struct list_head link;
  68	struct kref kref;
  69};
  70
  71static inline void client_get(struct client *client)
  72{
  73	kref_get(&client->kref);
  74}
  75
  76static void client_release(struct kref *kref)
  77{
  78	struct client *client = container_of(kref, struct client, kref);
  79
  80	fw_device_put(client->device);
  81	kfree(client);
  82}
  83
  84static void client_put(struct client *client)
  85{
  86	kref_put(&client->kref, client_release);
  87}
  88
  89struct client_resource;
  90typedef void (*client_resource_release_fn_t)(struct client *,
  91					     struct client_resource *);
  92struct client_resource {
  93	client_resource_release_fn_t release;
  94	int handle;
  95};
  96
  97struct address_handler_resource {
  98	struct client_resource resource;
  99	struct fw_address_handler handler;
 100	__u64 closure;
 101	struct client *client;
 102};
 103
 104struct outbound_transaction_resource {
 105	struct client_resource resource;
 106	struct fw_transaction transaction;
 107};
 108
 109struct inbound_transaction_resource {
 110	struct client_resource resource;
 111	struct fw_card *card;
 112	struct fw_request *request;
 113	void *data;
 114	size_t length;
 115};
 116
 117struct descriptor_resource {
 118	struct client_resource resource;
 119	struct fw_descriptor descriptor;
 120	u32 data[];
 121};
 122
 123struct iso_resource {
 124	struct client_resource resource;
 125	struct client *client;
 126	/* Schedule work and access todo only with client->lock held. */
 127	struct delayed_work work;
 128	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
 129	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
 130	int generation;
 131	u64 channels;
 132	s32 bandwidth;
 133	struct iso_resource_event *e_alloc, *e_dealloc;
 134};
 135
 136static void release_iso_resource(struct client *, struct client_resource *);
 137
 138static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
 139{
 140	client_get(r->client);
 141	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
 142		client_put(r->client);
 143}
 144
 145static void schedule_if_iso_resource(struct client_resource *resource)
 146{
 147	if (resource->release == release_iso_resource)
 148		schedule_iso_resource(container_of(resource,
 149					struct iso_resource, resource), 0);
 150}
 151
 152/*
 153 * dequeue_event() just kfree()'s the event, so the event has to be
 154 * the first field in a struct XYZ_event.
 155 */
 156struct event {
 157	struct { void *data; size_t size; } v[2];
 158	struct list_head link;
 159};
 160
 161struct bus_reset_event {
 162	struct event event;
 163	struct fw_cdev_event_bus_reset reset;
 164};
 165
 166struct outbound_transaction_event {
 167	struct event event;
 168	struct client *client;
 169	struct outbound_transaction_resource r;
 170	struct fw_cdev_event_response response;
 171};
 172
 173struct inbound_transaction_event {
 174	struct event event;
 175	union {
 176		struct fw_cdev_event_request request;
 177		struct fw_cdev_event_request2 request2;
 178	} req;
 179};
 180
 181struct iso_interrupt_event {
 182	struct event event;
 183	struct fw_cdev_event_iso_interrupt interrupt;
 184};
 185
 186struct iso_interrupt_mc_event {
 187	struct event event;
 188	struct fw_cdev_event_iso_interrupt_mc interrupt;
 189};
 190
 191struct iso_resource_event {
 192	struct event event;
 193	struct fw_cdev_event_iso_resource iso_resource;
 194};
 195
 196struct outbound_phy_packet_event {
 197	struct event event;
 198	struct client *client;
 199	struct fw_packet p;
 200	struct fw_cdev_event_phy_packet phy_packet;
 201};
 202
 203struct inbound_phy_packet_event {
 204	struct event event;
 205	struct fw_cdev_event_phy_packet phy_packet;
 206};
 207
 208#ifdef CONFIG_COMPAT
 209static void __user *u64_to_uptr(u64 value)
 210{
 211	if (in_compat_syscall())
 212		return compat_ptr(value);
 213	else
 214		return (void __user *)(unsigned long)value;
 215}
 216
 217static u64 uptr_to_u64(void __user *ptr)
 218{
 219	if (in_compat_syscall())
 220		return ptr_to_compat(ptr);
 221	else
 222		return (u64)(unsigned long)ptr;
 223}
 224#else
 225static inline void __user *u64_to_uptr(u64 value)
 226{
 227	return (void __user *)(unsigned long)value;
 228}
 229
 230static inline u64 uptr_to_u64(void __user *ptr)
 231{
 232	return (u64)(unsigned long)ptr;
 233}
 234#endif /* CONFIG_COMPAT */
 235
 236static int fw_device_op_open(struct inode *inode, struct file *file)
 237{
 238	struct fw_device *device;
 239	struct client *client;
 240
 241	device = fw_device_get_by_devt(inode->i_rdev);
 242	if (device == NULL)
 243		return -ENODEV;
 244
 245	if (fw_device_is_shutdown(device)) {
 246		fw_device_put(device);
 247		return -ENODEV;
 248	}
 249
 250	client = kzalloc(sizeof(*client), GFP_KERNEL);
 251	if (client == NULL) {
 252		fw_device_put(device);
 253		return -ENOMEM;
 254	}
 255
 256	client->device = device;
 257	spin_lock_init(&client->lock);
 258	idr_init(&client->resource_idr);
 259	INIT_LIST_HEAD(&client->event_list);
 260	init_waitqueue_head(&client->wait);
 261	init_waitqueue_head(&client->tx_flush_wait);
 262	INIT_LIST_HEAD(&client->phy_receiver_link);
 263	INIT_LIST_HEAD(&client->link);
 264	kref_init(&client->kref);
 265
 266	file->private_data = client;
 267
 268	return nonseekable_open(inode, file);
 269}
 270
 271static void queue_event(struct client *client, struct event *event,
 272			void *data0, size_t size0, void *data1, size_t size1)
 273{
 274	unsigned long flags;
 275
 276	event->v[0].data = data0;
 277	event->v[0].size = size0;
 278	event->v[1].data = data1;
 279	event->v[1].size = size1;
 280
 281	spin_lock_irqsave(&client->lock, flags);
 282	if (client->in_shutdown)
 283		kfree(event);
 284	else
 285		list_add_tail(&event->link, &client->event_list);
 286	spin_unlock_irqrestore(&client->lock, flags);
 287
 288	wake_up_interruptible(&client->wait);
 289}
 290
 291static int dequeue_event(struct client *client,
 292			 char __user *buffer, size_t count)
 293{
 294	struct event *event;
 295	size_t size, total;
 296	int i, ret;
 297
 298	ret = wait_event_interruptible(client->wait,
 299			!list_empty(&client->event_list) ||
 300			fw_device_is_shutdown(client->device));
 301	if (ret < 0)
 302		return ret;
 303
 304	if (list_empty(&client->event_list) &&
 305		       fw_device_is_shutdown(client->device))
 306		return -ENODEV;
 307
 308	spin_lock_irq(&client->lock);
 309	event = list_first_entry(&client->event_list, struct event, link);
 310	list_del(&event->link);
 311	spin_unlock_irq(&client->lock);
 312
 313	total = 0;
 314	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
 315		size = min(event->v[i].size, count - total);
 316		if (copy_to_user(buffer + total, event->v[i].data, size)) {
 317			ret = -EFAULT;
 318			goto out;
 319		}
 320		total += size;
 321	}
 322	ret = total;
 323
 324 out:
 325	kfree(event);
 326
 327	return ret;
 328}
 329
 330static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
 331				 size_t count, loff_t *offset)
 332{
 333	struct client *client = file->private_data;
 334
 335	return dequeue_event(client, buffer, count);
 336}
 337
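/*
 * Editor's note (illustrative, not part of the kernel source): queue_event()
 * and dequeue_event() implement the event channel that userspace consumes
 * through read() on the /dev/fw* file.  Each read() returns at most one
 * event; the two segments v[0]/v[1] let an event carry a trailing payload
 * without an extra copy.  A minimal userspace sketch, assuming the installed
 * UAPI header <linux/firewire-cdev.h> and an open file descriptor fd:
 *
 *	char buf[16 * 1024];
 *	union fw_cdev_event *event = (union fw_cdev_event *)buf;
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *
 *	if (len >= (ssize_t)sizeof(struct fw_cdev_event_common)) {
 *		switch (event->common.type) {
 *		case FW_CDEV_EVENT_BUS_RESET:
 *			handle_bus_reset(&event->bus_reset);
 *			break;
 *		case FW_CDEV_EVENT_RESPONSE:
 *			handle_response(&event->response);
 *			break;
 *		}
 *	}
 *
 * handle_bus_reset() and handle_response() are hypothetical application hooks.
 */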
 338static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
 339				 struct client *client)
 340{
 341	struct fw_card *card = client->device->card;
 342
 343	spin_lock_irq(&card->lock);
 344
 345	event->closure	     = client->bus_reset_closure;
 346	event->type          = FW_CDEV_EVENT_BUS_RESET;
 347	event->generation    = client->device->generation;
 348	event->node_id       = client->device->node_id;
 349	event->local_node_id = card->local_node->node_id;
 350	event->bm_node_id    = card->bm_node_id;
 351	event->irm_node_id   = card->irm_node->node_id;
 352	event->root_node_id  = card->root_node->node_id;
 353
 354	spin_unlock_irq(&card->lock);
 355}
 356
 357static void for_each_client(struct fw_device *device,
 358			    void (*callback)(struct client *client))
 359{
 360	struct client *c;
 361
 362	mutex_lock(&device->client_list_mutex);
 363	list_for_each_entry(c, &device->client_list, link)
 364		callback(c);
 365	mutex_unlock(&device->client_list_mutex);
 366}
 367
 368static int schedule_reallocations(int id, void *p, void *data)
 369{
 370	schedule_if_iso_resource(p);
 371
 372	return 0;
 373}
 374
 375static void queue_bus_reset_event(struct client *client)
 376{
 377	struct bus_reset_event *e;
 378
 379	e = kzalloc(sizeof(*e), GFP_KERNEL);
 380	if (e == NULL)
 381		return;
 382
 383	fill_bus_reset_event(&e->reset, client);
 384
 385	queue_event(client, &e->event,
 386		    &e->reset, sizeof(e->reset), NULL, 0);
 387
 388	spin_lock_irq(&client->lock);
 389	idr_for_each(&client->resource_idr, schedule_reallocations, client);
 390	spin_unlock_irq(&client->lock);
 391}
 392
 393void fw_device_cdev_update(struct fw_device *device)
 394{
 395	for_each_client(device, queue_bus_reset_event);
 396}
 397
 398static void wake_up_client(struct client *client)
 399{
 400	wake_up_interruptible(&client->wait);
 401}
 402
 403void fw_device_cdev_remove(struct fw_device *device)
 404{
 405	for_each_client(device, wake_up_client);
 406}
 407
 408union ioctl_arg {
 409	struct fw_cdev_get_info			get_info;
 410	struct fw_cdev_send_request		send_request;
 411	struct fw_cdev_allocate			allocate;
 412	struct fw_cdev_deallocate		deallocate;
 413	struct fw_cdev_send_response		send_response;
 414	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
 415	struct fw_cdev_add_descriptor		add_descriptor;
 416	struct fw_cdev_remove_descriptor	remove_descriptor;
 417	struct fw_cdev_create_iso_context	create_iso_context;
 418	struct fw_cdev_queue_iso		queue_iso;
 419	struct fw_cdev_start_iso		start_iso;
 420	struct fw_cdev_stop_iso			stop_iso;
 421	struct fw_cdev_get_cycle_timer		get_cycle_timer;
 422	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
 423	struct fw_cdev_send_stream_packet	send_stream_packet;
 424	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
 425	struct fw_cdev_send_phy_packet		send_phy_packet;
 426	struct fw_cdev_receive_phy_packets	receive_phy_packets;
 427	struct fw_cdev_set_iso_channels		set_iso_channels;
 428	struct fw_cdev_flush_iso		flush_iso;
 429};
 430
 431static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
 432{
 433	struct fw_cdev_get_info *a = &arg->get_info;
 434	struct fw_cdev_event_bus_reset bus_reset;
 435	unsigned long ret = 0;
 436
 437	client->version = a->version;
 438	a->version = FW_CDEV_KERNEL_VERSION;
 439	a->card = client->device->card->index;
 440
 441	down_read(&fw_device_rwsem);
 442
 443	if (a->rom != 0) {
 444		size_t want = a->rom_length;
 445		size_t have = client->device->config_rom_length * 4;
 446
 447		ret = copy_to_user(u64_to_uptr(a->rom),
 448				   client->device->config_rom, min(want, have));
 449	}
 450	a->rom_length = client->device->config_rom_length * 4;
 451
 452	up_read(&fw_device_rwsem);
 453
 454	if (ret != 0)
 455		return -EFAULT;
 456
 457	mutex_lock(&client->device->client_list_mutex);
 458
 459	client->bus_reset_closure = a->bus_reset_closure;
 460	if (a->bus_reset != 0) {
 461		fill_bus_reset_event(&bus_reset, client);
 462		/* unaligned size of bus_reset is 36 bytes */
 463		ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
 464	}
 465	if (ret == 0 && list_empty(&client->link))
 466		list_add_tail(&client->link, &client->device->client_list);
 467
 468	mutex_unlock(&client->device->client_list_mutex);
 469
 470	return ret ? -EFAULT : 0;
 471}
 472
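/*
 * Editor's note (illustrative sketch, not part of the kernel source):
 * FW_CDEV_IOC_GET_INFO is normally the first ioctl a client issues after
 * opening /dev/fw*.  It negotiates the ABI version, optionally copies out
 * the config ROM and an initial bus reset event, and registers the client
 * for future bus reset events.  Roughly, with version set to the highest
 * ABI version the client understands (5 here, matching
 * FW_CDEV_KERNEL_VERSION above) and error handling omitted:
 *
 *	struct fw_cdev_event_bus_reset reset = {};
 *	struct fw_cdev_get_info info = {};
 *	int fd = open("/dev/fw1", O_RDWR);
 *
 *	info.version   = 5;
 *	info.bus_reset = (__u64)(uintptr_t)&reset;
 *	if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) == 0)
 *		printf("kernel ABI %u, card %u, node 0x%x\n",
 *		       info.version, info.card, reset.node_id);
 *
 * The device path is only an example; the kernel fills in its own ABI
 * version and the card index on return.
 */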
 473static int add_client_resource(struct client *client,
 474			       struct client_resource *resource, gfp_t gfp_mask)
 475{
 476	bool preload = gfpflags_allow_blocking(gfp_mask);
 477	unsigned long flags;
 478	int ret;
 479
 480	if (preload)
 481		idr_preload(gfp_mask);
 482	spin_lock_irqsave(&client->lock, flags);
 483
 484	if (client->in_shutdown)
 485		ret = -ECANCELED;
 486	else
 487		ret = idr_alloc(&client->resource_idr, resource, 0, 0,
 488				GFP_NOWAIT);
 489	if (ret >= 0) {
 490		resource->handle = ret;
 491		client_get(client);
 492		schedule_if_iso_resource(resource);
 493	}
 494
 495	spin_unlock_irqrestore(&client->lock, flags);
 496	if (preload)
 497		idr_preload_end();
 498
 499	return ret < 0 ? ret : 0;
 500}
 501
 502static int release_client_resource(struct client *client, u32 handle,
 503				   client_resource_release_fn_t release,
 504				   struct client_resource **return_resource)
 505{
 506	struct client_resource *resource;
 507
 508	spin_lock_irq(&client->lock);
 509	if (client->in_shutdown)
 510		resource = NULL;
 511	else
 512		resource = idr_find(&client->resource_idr, handle);
 513	if (resource && resource->release == release)
 514		idr_remove(&client->resource_idr, handle);
 515	spin_unlock_irq(&client->lock);
 516
 517	if (!(resource && resource->release == release))
 518		return -EINVAL;
 519
 520	if (return_resource)
 521		*return_resource = resource;
 522	else
 523		resource->release(client, resource);
 524
 525	client_put(client);
 526
 527	return 0;
 528}
 529
 530static void release_transaction(struct client *client,
 531				struct client_resource *resource)
 532{
 533}
 534
 535static void complete_transaction(struct fw_card *card, int rcode,
 536				 void *payload, size_t length, void *data)
 537{
 538	struct outbound_transaction_event *e = data;
 539	struct fw_cdev_event_response *rsp = &e->response;
 540	struct client *client = e->client;
 541	unsigned long flags;
 542
 543	if (length < rsp->length)
 544		rsp->length = length;
 545	if (rcode == RCODE_COMPLETE)
 546		memcpy(rsp->data, payload, rsp->length);
 547
 548	spin_lock_irqsave(&client->lock, flags);
 549	idr_remove(&client->resource_idr, e->r.resource.handle);
 550	if (client->in_shutdown)
 551		wake_up(&client->tx_flush_wait);
 552	spin_unlock_irqrestore(&client->lock, flags);
 553
 554	rsp->type = FW_CDEV_EVENT_RESPONSE;
 555	rsp->rcode = rcode;
 556
 557	/*
 558	 * In the case that sizeof(*rsp) doesn't align with the position of the
 559	 * data, and the read is short, preserve an extra copy of the data
 560	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
 561	 * for short reads and some apps depended on it, this is both safe
 562	 * and prudent for compatibility.
 563	 */
 564	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
 565		queue_event(client, &e->event, rsp, sizeof(*rsp),
 566			    rsp->data, rsp->length);
 567	else
 568		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
 569			    NULL, 0);
 570
 571	/* Drop the idr's reference */
 572	client_put(client);
 573}
 574
 575static int init_request(struct client *client,
 576			struct fw_cdev_send_request *request,
 577			int destination_id, int speed)
 578{
 579	struct outbound_transaction_event *e;
 580	int ret;
 581
 582	if (request->tcode != TCODE_STREAM_DATA &&
 583	    (request->length > 4096 || request->length > 512 << speed))
 584		return -EIO;
 585
 586	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
 587	    request->length < 4)
 588		return -EINVAL;
 589
 590	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
 591	if (e == NULL)
 592		return -ENOMEM;
 593
 594	e->client = client;
 595	e->response.length = request->length;
 596	e->response.closure = request->closure;
 597
 598	if (request->data &&
 599	    copy_from_user(e->response.data,
 600			   u64_to_uptr(request->data), request->length)) {
 601		ret = -EFAULT;
 602		goto failed;
 603	}
 604
 605	e->r.resource.release = release_transaction;
 606	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
 607	if (ret < 0)
 608		goto failed;
 609
 610	fw_send_request(client->device->card, &e->r.transaction,
 611			request->tcode, destination_id, request->generation,
 612			speed, request->offset, e->response.data,
 613			request->length, complete_transaction, e);
 614	return 0;
 615
 616 failed:
 617	kfree(e);
 618
 619	return ret;
 620}
 621
 622static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
 623{
 624	switch (arg->send_request.tcode) {
 625	case TCODE_WRITE_QUADLET_REQUEST:
 626	case TCODE_WRITE_BLOCK_REQUEST:
 627	case TCODE_READ_QUADLET_REQUEST:
 628	case TCODE_READ_BLOCK_REQUEST:
 629	case TCODE_LOCK_MASK_SWAP:
 630	case TCODE_LOCK_COMPARE_SWAP:
 631	case TCODE_LOCK_FETCH_ADD:
 632	case TCODE_LOCK_LITTLE_ADD:
 633	case TCODE_LOCK_BOUNDED_ADD:
 634	case TCODE_LOCK_WRAP_ADD:
 635	case TCODE_LOCK_VENDOR_DEPENDENT:
 636		break;
 637	default:
 638		return -EINVAL;
 639	}
 640
 641	return init_request(client, &arg->send_request, client->device->node_id,
 642			    client->device->max_speed);
 643}
 644
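/*
 * Editor's note (illustrative sketch, not part of the kernel source): an
 * asynchronous transaction is started with FW_CDEV_IOC_SEND_REQUEST and
 * completes later as an FW_CDEV_EVENT_RESPONSE event carrying the closure
 * passed in here.  A quadlet read of the first Config ROM word, assuming
 * fd, my_cookie and the most recent bus reset event "reset" come from the
 * surrounding application, might look like:
 *
 *	struct fw_cdev_send_request req = {};
 *
 *	req.tcode      = TCODE_READ_QUADLET_REQUEST;
 *	req.length     = 4;
 *	req.offset     = 0xfffff0000400ULL;
 *	req.closure    = (__u64)(uintptr_t)my_cookie;
 *	req.generation = reset.generation;
 *	ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &req);
 *
 * A stale generation makes the transaction fail with an error rcode in the
 * response event instead of being sent to a possibly renumbered node.
 */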
 645static inline bool is_fcp_request(struct fw_request *request)
 646{
 647	return request == NULL;
 648}
 649
 650static void release_request(struct client *client,
 651			    struct client_resource *resource)
 652{
 653	struct inbound_transaction_resource *r = container_of(resource,
 654			struct inbound_transaction_resource, resource);
 655
 656	if (is_fcp_request(r->request))
 657		kfree(r->data);
 658	else
 659		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);
 660
 661	fw_card_put(r->card);
 662	kfree(r);
 663}
 664
 665static void handle_request(struct fw_card *card, struct fw_request *request,
 666			   int tcode, int destination, int source,
 667			   int generation, unsigned long long offset,
 668			   void *payload, size_t length, void *callback_data)
 669{
 670	struct address_handler_resource *handler = callback_data;
 671	struct inbound_transaction_resource *r;
 672	struct inbound_transaction_event *e;
 673	size_t event_size0;
 674	void *fcp_frame = NULL;
 675	int ret;
 676
 677	/* card may be different from handler->client->device->card */
 678	fw_card_get(card);
 679
 680	r = kmalloc(sizeof(*r), GFP_ATOMIC);
 681	e = kmalloc(sizeof(*e), GFP_ATOMIC);
 682	if (r == NULL || e == NULL)
 683		goto failed;
 684
 685	r->card    = card;
 686	r->request = request;
 687	r->data    = payload;
 688	r->length  = length;
 689
 690	if (is_fcp_request(request)) {
 691		/*
 692		 * FIXME: Let core-transaction.c manage a
 693		 * single reference-counted copy?
 694		 */
 695		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
 696		if (fcp_frame == NULL)
 697			goto failed;
 698
 699		r->data = fcp_frame;
 700	}
 701
 702	r->resource.release = release_request;
 703	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
 704	if (ret < 0)
 705		goto failed;
 706
 707	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
 708		struct fw_cdev_event_request *req = &e->req.request;
 709
 710		if (tcode & 0x10)
 711			tcode = TCODE_LOCK_REQUEST;
 712
 713		req->type	= FW_CDEV_EVENT_REQUEST;
 714		req->tcode	= tcode;
 715		req->offset	= offset;
 716		req->length	= length;
 717		req->handle	= r->resource.handle;
 718		req->closure	= handler->closure;
 719		event_size0	= sizeof(*req);
 720	} else {
 721		struct fw_cdev_event_request2 *req = &e->req.request2;
 722
 723		req->type	= FW_CDEV_EVENT_REQUEST2;
 724		req->tcode	= tcode;
 725		req->offset	= offset;
 726		req->source_node_id = source;
 727		req->destination_node_id = destination;
 728		req->card	= card->index;
 729		req->generation	= generation;
 730		req->length	= length;
 731		req->handle	= r->resource.handle;
 732		req->closure	= handler->closure;
 733		event_size0	= sizeof(*req);
 734	}
 735
 736	queue_event(handler->client, &e->event,
 737		    &e->req, event_size0, r->data, length);
 738	return;
 739
 740 failed:
 741	kfree(r);
 742	kfree(e);
 743	kfree(fcp_frame);
 744
 745	if (!is_fcp_request(request))
 746		fw_send_response(card, request, RCODE_CONFLICT_ERROR);
 747
 748	fw_card_put(card);
 749}
 750
 751static void release_address_handler(struct client *client,
 752				    struct client_resource *resource)
 753{
 754	struct address_handler_resource *r =
 755	    container_of(resource, struct address_handler_resource, resource);
 756
 757	fw_core_remove_address_handler(&r->handler);
 758	kfree(r);
 759}
 760
 761static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
 762{
 763	struct fw_cdev_allocate *a = &arg->allocate;
 764	struct address_handler_resource *r;
 765	struct fw_address_region region;
 766	int ret;
 767
 768	r = kmalloc(sizeof(*r), GFP_KERNEL);
 769	if (r == NULL)
 770		return -ENOMEM;
 771
 772	region.start = a->offset;
 773	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
 774		region.end = a->offset + a->length;
 775	else
 776		region.end = a->region_end;
 777
 778	r->handler.length           = a->length;
 779	r->handler.address_callback = handle_request;
 780	r->handler.callback_data    = r;
 781	r->closure   = a->closure;
 782	r->client    = client;
 783
 784	ret = fw_core_add_address_handler(&r->handler, &region);
 785	if (ret < 0) {
 786		kfree(r);
 787		return ret;
 788	}
 789	a->offset = r->handler.offset;
 790
 791	r->resource.release = release_address_handler;
 792	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
 793	if (ret < 0) {
 794		release_address_handler(client, &r->resource);
 795		return ret;
 796	}
 797	a->handle = r->resource.handle;
 798
 799	return 0;
 800}
 801
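/*
 * Editor's note (illustrative sketch, not part of the kernel source): with
 * FW_CDEV_IOC_ALLOCATE a client claims a range of 1394 address space; the
 * handle_request() handler above then turns incoming requests into
 * FW_CDEV_EVENT_REQUEST2 events, which the client answers with
 * FW_CDEV_IOC_SEND_RESPONSE.  Roughly:
 *
 *	struct fw_cdev_allocate alloc = {};
 *
 *	alloc.offset  = 0xffff00000000ULL;
 *	alloc.length  = 0x1000;
 *	alloc.closure = (__u64)(uintptr_t)my_region;
 *	ioctl(fd, FW_CDEV_IOC_ALLOCATE, &alloc);
 *
 * On ABI version 4 and later, region_end may be set as well so that the
 * kernel picks a free spot within [offset, region_end).  my_region is a
 * hypothetical application object; the offset actually granted is returned
 * in alloc.offset and the resource handle in alloc.handle.
 */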
 802static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
 803{
 804	return release_client_resource(client, arg->deallocate.handle,
 805				       release_address_handler, NULL);
 806}
 807
 808static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
 809{
 810	struct fw_cdev_send_response *a = &arg->send_response;
 811	struct client_resource *resource;
 812	struct inbound_transaction_resource *r;
 813	int ret = 0;
 814
 815	if (release_client_resource(client, a->handle,
 816				    release_request, &resource) < 0)
 817		return -EINVAL;
 818
 819	r = container_of(resource, struct inbound_transaction_resource,
 820			 resource);
 821	if (is_fcp_request(r->request))
 822		goto out;
 823
 824	if (a->length != fw_get_response_length(r->request)) {
 825		ret = -EINVAL;
 826		kfree(r->request);
 827		goto out;
 828	}
 829	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
 830		ret = -EFAULT;
 831		kfree(r->request);
 832		goto out;
 833	}
 834	fw_send_response(r->card, r->request, a->rcode);
 835 out:
 836	fw_card_put(r->card);
 837	kfree(r);
 838
 839	return ret;
 840}
 841
 842static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
 843{
 844	fw_schedule_bus_reset(client->device->card, true,
 845			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
 846	return 0;
 847}
 848
 849static void release_descriptor(struct client *client,
 850			       struct client_resource *resource)
 851{
 852	struct descriptor_resource *r =
 853		container_of(resource, struct descriptor_resource, resource);
 854
 855	fw_core_remove_descriptor(&r->descriptor);
 856	kfree(r);
 857}
 858
 859static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
 860{
 861	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
 862	struct descriptor_resource *r;
 863	int ret;
 864
 865	/* Access policy: Allow this ioctl only on local nodes' device files. */
 866	if (!client->device->is_local)
 867		return -ENOSYS;
 868
 869	if (a->length > 256)
 870		return -EINVAL;
 871
 872	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
 873	if (r == NULL)
 874		return -ENOMEM;
 875
 876	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
 877		ret = -EFAULT;
 878		goto failed;
 879	}
 880
 881	r->descriptor.length    = a->length;
 882	r->descriptor.immediate = a->immediate;
 883	r->descriptor.key       = a->key;
 884	r->descriptor.data      = r->data;
 885
 886	ret = fw_core_add_descriptor(&r->descriptor);
 887	if (ret < 0)
 888		goto failed;
 889
 890	r->resource.release = release_descriptor;
 891	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
 892	if (ret < 0) {
 893		fw_core_remove_descriptor(&r->descriptor);
 894		goto failed;
 895	}
 896	a->handle = r->resource.handle;
 897
 898	return 0;
 899 failed:
 900	kfree(r);
 901
 902	return ret;
 903}
 904
 905static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
 906{
 907	return release_client_resource(client, arg->remove_descriptor.handle,
 908				       release_descriptor, NULL);
 909}
 910
 911static void iso_callback(struct fw_iso_context *context, u32 cycle,
 912			 size_t header_length, void *header, void *data)
 913{
 914	struct client *client = data;
 915	struct iso_interrupt_event *e;
 916
 917	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
 918	if (e == NULL)
 919		return;
 920
 921	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
 922	e->interrupt.closure   = client->iso_closure;
 923	e->interrupt.cycle     = cycle;
 924	e->interrupt.header_length = header_length;
 925	memcpy(e->interrupt.header, header, header_length);
 926	queue_event(client, &e->event, &e->interrupt,
 927		    sizeof(e->interrupt) + header_length, NULL, 0);
 928}
 929
 930static void iso_mc_callback(struct fw_iso_context *context,
 931			    dma_addr_t completed, void *data)
 932{
 933	struct client *client = data;
 934	struct iso_interrupt_mc_event *e;
 935
 936	e = kmalloc(sizeof(*e), GFP_ATOMIC);
 937	if (e == NULL)
 938		return;
 939
 940	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
 941	e->interrupt.closure   = client->iso_closure;
 942	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
 943						      completed);
 944	queue_event(client, &e->event, &e->interrupt,
 945		    sizeof(e->interrupt), NULL, 0);
 946}
 947
 948static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
 949{
  950	if (context->type == FW_ISO_CONTEXT_TRANSMIT)
  951		return DMA_TO_DEVICE;
  952	else
  953		return DMA_FROM_DEVICE;
 954}
 955
 956static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 957{
 958	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
 959	struct fw_iso_context *context;
 960	fw_iso_callback_t cb;
 961	int ret;
 962
 963	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
 964		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
 965		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
 966					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);
 967
 968	switch (a->type) {
 969	case FW_ISO_CONTEXT_TRANSMIT:
 970		if (a->speed > SCODE_3200 || a->channel > 63)
 971			return -EINVAL;
 972
 973		cb = iso_callback;
 974		break;
 975
 976	case FW_ISO_CONTEXT_RECEIVE:
 977		if (a->header_size < 4 || (a->header_size & 3) ||
 978		    a->channel > 63)
 979			return -EINVAL;
 980
 981		cb = iso_callback;
 982		break;
 983
 984	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
 985		cb = (fw_iso_callback_t)iso_mc_callback;
 986		break;
 987
 988	default:
 989		return -EINVAL;
 990	}
 991
 992	context = fw_iso_context_create(client->device->card, a->type,
 993			a->channel, a->speed, a->header_size, cb, client);
 994	if (IS_ERR(context))
 995		return PTR_ERR(context);
 996	if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
 997		context->drop_overflow_headers = true;
 998
 999	/* We only support one context at this time. */
1000	spin_lock_irq(&client->lock);
1001	if (client->iso_context != NULL) {
1002		spin_unlock_irq(&client->lock);
1003		fw_iso_context_destroy(context);
1004
1005		return -EBUSY;
1006	}
1007	if (!client->buffer_is_mapped) {
1008		ret = fw_iso_buffer_map_dma(&client->buffer,
1009					    client->device->card,
1010					    iso_dma_direction(context));
1011		if (ret < 0) {
1012			spin_unlock_irq(&client->lock);
1013			fw_iso_context_destroy(context);
1014
1015			return ret;
1016		}
1017		client->buffer_is_mapped = true;
1018	}
1019	client->iso_closure = a->closure;
1020	client->iso_context = context;
1021	spin_unlock_irq(&client->lock);
1022
1023	a->handle = 0;
1024
1025	return 0;
1026}
1027
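/*
 * Editor's note (illustrative sketch, not part of the kernel source): the
 * usual isochronous setup is create context, mmap a payload buffer, queue
 * packets, then start the context.  Creating a receive context for channel
 * 5 with a 4-byte per-packet header might look like:
 *
 *	struct fw_cdev_create_iso_context create = {};
 *
 *	create.type        = FW_CDEV_ISO_CONTEXT_RECEIVE;
 *	create.channel     = 5;
 *	create.header_size = 4;
 *	create.closure     = (__u64)(uintptr_t)my_ctx;
 *	ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &create);
 *
 * my_ctx is a hypothetical application object.  Only one context per open
 * file descriptor is supported, as enforced above.
 */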
1028static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
1029{
1030	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
1031	struct fw_iso_context *ctx = client->iso_context;
1032
1033	if (ctx == NULL || a->handle != 0)
1034		return -EINVAL;
1035
1036	return fw_iso_context_set_channels(ctx, &a->channels);
1037}
1038
1039/* Macros for decoding the iso packet control header. */
1040#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
1041#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
1042#define GET_SKIP(v)		(((v) >> 17) & 0x01)
1043#define GET_TAG(v)		(((v) >> 18) & 0x03)
1044#define GET_SY(v)		(((v) >> 20) & 0x0f)
1045#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
1046
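/*
 * Editor's note (illustrative, not part of the kernel source): userspace
 * packs the same fields into fw_cdev_iso_packet.control before calling
 * FW_CDEV_IOC_QUEUE_ISO; the bit layout mirrors the GET_* macros above
 * (the UAPI header also offers FW_CDEV_ISO_* helpers for this).  For
 * example, a transmit packet with an 8-byte header, a 488-byte payload
 * and an interrupt on completion could be encoded as:
 *
 *	unsigned int payload_length = 488, interrupt = 1, skip = 0;
 *	unsigned int tag = 1, sy = 0, header_length = 8;
 *	struct fw_cdev_iso_packet p = {};
 *
 *	p.control = payload_length | (interrupt << 16) | (skip << 17) |
 *		    (tag << 18) | (sy << 20) | (header_length << 24);
 */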
1047static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
1048{
1049	struct fw_cdev_queue_iso *a = &arg->queue_iso;
1050	struct fw_cdev_iso_packet __user *p, *end, *next;
1051	struct fw_iso_context *ctx = client->iso_context;
1052	unsigned long payload, buffer_end, transmit_header_bytes = 0;
1053	u32 control;
1054	int count;
1055	struct {
1056		struct fw_iso_packet packet;
1057		u8 header[256];
1058	} u;
1059
1060	if (ctx == NULL || a->handle != 0)
1061		return -EINVAL;
1062
1063	/*
1064	 * If the user passes a non-NULL data pointer, has mmap()'ed
1065	 * the iso buffer, and the pointer points inside the buffer,
 1066	 * we set up the payload pointers accordingly.  Otherwise we
1067	 * set them both to 0, which will still let packets with
1068	 * payload_length == 0 through.  In other words, if no packets
1069	 * use the indirect payload, the iso buffer need not be mapped
1070	 * and the a->data pointer is ignored.
1071	 */
1072	payload = (unsigned long)a->data - client->vm_start;
1073	buffer_end = client->buffer.page_count << PAGE_SHIFT;
1074	if (a->data == 0 || client->buffer.pages == NULL ||
1075	    payload >= buffer_end) {
1076		payload = 0;
1077		buffer_end = 0;
1078	}
1079
1080	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
1081		return -EINVAL;
1082
1083	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
1084
1085	end = (void __user *)p + a->size;
1086	count = 0;
1087	while (p < end) {
1088		if (get_user(control, &p->control))
1089			return -EFAULT;
1090		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
1091		u.packet.interrupt = GET_INTERRUPT(control);
1092		u.packet.skip = GET_SKIP(control);
1093		u.packet.tag = GET_TAG(control);
1094		u.packet.sy = GET_SY(control);
1095		u.packet.header_length = GET_HEADER_LENGTH(control);
1096
1097		switch (ctx->type) {
1098		case FW_ISO_CONTEXT_TRANSMIT:
1099			if (u.packet.header_length & 3)
1100				return -EINVAL;
1101			transmit_header_bytes = u.packet.header_length;
1102			break;
1103
1104		case FW_ISO_CONTEXT_RECEIVE:
1105			if (u.packet.header_length == 0 ||
1106			    u.packet.header_length % ctx->header_size != 0)
1107				return -EINVAL;
1108			break;
1109
1110		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
1111			if (u.packet.payload_length == 0 ||
1112			    u.packet.payload_length & 3)
1113				return -EINVAL;
1114			break;
1115		}
1116
1117		next = (struct fw_cdev_iso_packet __user *)
1118			&p->header[transmit_header_bytes / 4];
1119		if (next > end)
1120			return -EINVAL;
1121		if (copy_from_user
1122		    (u.packet.header, p->header, transmit_header_bytes))
1123			return -EFAULT;
1124		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
1125		    u.packet.header_length + u.packet.payload_length > 0)
1126			return -EINVAL;
1127		if (payload + u.packet.payload_length > buffer_end)
1128			return -EINVAL;
1129
1130		if (fw_iso_context_queue(ctx, &u.packet,
1131					 &client->buffer, payload))
1132			break;
1133
1134		p = next;
1135		payload += u.packet.payload_length;
1136		count++;
1137	}
1138	fw_iso_context_queue_flush(ctx);
1139
1140	a->size    -= uptr_to_u64(p) - a->packets;
1141	a->packets  = uptr_to_u64(p);
1142	a->data     = client->vm_start + payload;
1143
1144	return count;
1145}
1146
1147static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
1148{
1149	struct fw_cdev_start_iso *a = &arg->start_iso;
1150
1151	BUILD_BUG_ON(
1152	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
1153	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
1154	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
1155	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
1156	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);
1157
1158	if (client->iso_context == NULL || a->handle != 0)
1159		return -EINVAL;
1160
1161	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
1162	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
1163		return -EINVAL;
1164
1165	return fw_iso_context_start(client->iso_context,
1166				    a->cycle, a->sync, a->tags);
1167}
1168
1169static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
1170{
1171	struct fw_cdev_stop_iso *a = &arg->stop_iso;
1172
1173	if (client->iso_context == NULL || a->handle != 0)
1174		return -EINVAL;
1175
1176	return fw_iso_context_stop(client->iso_context);
1177}
1178
1179static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
1180{
1181	struct fw_cdev_flush_iso *a = &arg->flush_iso;
1182
1183	if (client->iso_context == NULL || a->handle != 0)
1184		return -EINVAL;
1185
1186	return fw_iso_context_flush_completions(client->iso_context);
1187}
1188
1189static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
1190{
1191	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
1192	struct fw_card *card = client->device->card;
1193	struct timespec64 ts = {0, 0};
1194	u32 cycle_time;
1195	int ret = 0;
1196
1197	local_irq_disable();
1198
1199	cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
1200
1201	switch (a->clk_id) {
1202	case CLOCK_REALTIME:      ktime_get_real_ts64(&ts);	break;
1203	case CLOCK_MONOTONIC:     ktime_get_ts64(&ts);		break;
1204	case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts);	break;
1205	default:
1206		ret = -EINVAL;
1207	}
1208
1209	local_irq_enable();
1210
1211	a->tv_sec      = ts.tv_sec;
1212	a->tv_nsec     = ts.tv_nsec;
1213	a->cycle_timer = cycle_time;
1214
1215	return ret;
1216}
1217
1218static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
1219{
1220	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
1221	struct fw_cdev_get_cycle_timer2 ct2;
1222
1223	ct2.clk_id = CLOCK_REALTIME;
1224	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
1225
1226	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
1227	a->cycle_timer = ct2.cycle_timer;
1228
1229	return 0;
1230}
1231
1232static void iso_resource_work(struct work_struct *work)
1233{
1234	struct iso_resource_event *e;
1235	struct iso_resource *r =
1236			container_of(work, struct iso_resource, work.work);
1237	struct client *client = r->client;
1238	int generation, channel, bandwidth, todo;
1239	bool skip, free, success;
1240
1241	spin_lock_irq(&client->lock);
1242	generation = client->device->generation;
1243	todo = r->todo;
1244	/* Allow 1000ms grace period for other reallocations. */
1245	if (todo == ISO_RES_ALLOC &&
1246	    time_before64(get_jiffies_64(),
1247			  client->device->card->reset_jiffies + HZ)) {
1248		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
1249		skip = true;
1250	} else {
1251		/* We could be called twice within the same generation. */
1252		skip = todo == ISO_RES_REALLOC &&
1253		       r->generation == generation;
1254	}
1255	free = todo == ISO_RES_DEALLOC ||
1256	       todo == ISO_RES_ALLOC_ONCE ||
1257	       todo == ISO_RES_DEALLOC_ONCE;
1258	r->generation = generation;
1259	spin_unlock_irq(&client->lock);
1260
1261	if (skip)
1262		goto out;
1263
1264	bandwidth = r->bandwidth;
1265
1266	fw_iso_resource_manage(client->device->card, generation,
1267			r->channels, &channel, &bandwidth,
1268			todo == ISO_RES_ALLOC ||
1269			todo == ISO_RES_REALLOC ||
1270			todo == ISO_RES_ALLOC_ONCE);
1271	/*
1272	 * Is this generation outdated already?  As long as this resource sticks
1273	 * in the idr, it will be scheduled again for a newer generation or at
1274	 * shutdown.
1275	 */
1276	if (channel == -EAGAIN &&
1277	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
1278		goto out;
1279
1280	success = channel >= 0 || bandwidth > 0;
1281
1282	spin_lock_irq(&client->lock);
1283	/*
 1284	 * Transition from allocation to reallocation, except if the client
1285	 * requested deallocation in the meantime.
1286	 */
1287	if (r->todo == ISO_RES_ALLOC)
1288		r->todo = ISO_RES_REALLOC;
1289	/*
1290	 * Allocation or reallocation failure?  Pull this resource out of the
1291	 * idr and prepare for deletion, unless the client is shutting down.
1292	 */
1293	if (r->todo == ISO_RES_REALLOC && !success &&
1294	    !client->in_shutdown &&
1295	    idr_remove(&client->resource_idr, r->resource.handle)) {
1296		client_put(client);
1297		free = true;
1298	}
1299	spin_unlock_irq(&client->lock);
1300
1301	if (todo == ISO_RES_ALLOC && channel >= 0)
1302		r->channels = 1ULL << channel;
1303
1304	if (todo == ISO_RES_REALLOC && success)
1305		goto out;
1306
1307	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
1308		e = r->e_alloc;
1309		r->e_alloc = NULL;
1310	} else {
1311		e = r->e_dealloc;
1312		r->e_dealloc = NULL;
1313	}
1314	e->iso_resource.handle    = r->resource.handle;
1315	e->iso_resource.channel   = channel;
1316	e->iso_resource.bandwidth = bandwidth;
1317
1318	queue_event(client, &e->event,
1319		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);
1320
1321	if (free) {
1322		cancel_delayed_work(&r->work);
1323		kfree(r->e_alloc);
1324		kfree(r->e_dealloc);
1325		kfree(r);
1326	}
1327 out:
1328	client_put(client);
1329}
1330
1331static void release_iso_resource(struct client *client,
1332				 struct client_resource *resource)
1333{
1334	struct iso_resource *r =
1335		container_of(resource, struct iso_resource, resource);
1336
1337	spin_lock_irq(&client->lock);
1338	r->todo = ISO_RES_DEALLOC;
1339	schedule_iso_resource(r, 0);
1340	spin_unlock_irq(&client->lock);
1341}
1342
1343static int init_iso_resource(struct client *client,
1344		struct fw_cdev_allocate_iso_resource *request, int todo)
1345{
1346	struct iso_resource_event *e1, *e2;
1347	struct iso_resource *r;
1348	int ret;
1349
1350	if ((request->channels == 0 && request->bandwidth == 0) ||
1351	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
1352		return -EINVAL;
1353
1354	r  = kmalloc(sizeof(*r), GFP_KERNEL);
1355	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
1356	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
1357	if (r == NULL || e1 == NULL || e2 == NULL) {
1358		ret = -ENOMEM;
1359		goto fail;
1360	}
1361
1362	INIT_DELAYED_WORK(&r->work, iso_resource_work);
1363	r->client	= client;
1364	r->todo		= todo;
1365	r->generation	= -1;
1366	r->channels	= request->channels;
1367	r->bandwidth	= request->bandwidth;
1368	r->e_alloc	= e1;
1369	r->e_dealloc	= e2;
1370
1371	e1->iso_resource.closure = request->closure;
1372	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
1373	e2->iso_resource.closure = request->closure;
1374	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
1375
1376	if (todo == ISO_RES_ALLOC) {
1377		r->resource.release = release_iso_resource;
1378		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
1379		if (ret < 0)
1380			goto fail;
1381	} else {
1382		r->resource.release = NULL;
1383		r->resource.handle = -1;
1384		schedule_iso_resource(r, 0);
1385	}
1386	request->handle = r->resource.handle;
1387
1388	return 0;
1389 fail:
1390	kfree(r);
1391	kfree(e1);
1392	kfree(e2);
1393
1394	return ret;
1395}
1396
1397static int ioctl_allocate_iso_resource(struct client *client,
1398				       union ioctl_arg *arg)
1399{
1400	return init_iso_resource(client,
1401			&arg->allocate_iso_resource, ISO_RES_ALLOC);
1402}
1403
1404static int ioctl_deallocate_iso_resource(struct client *client,
1405					 union ioctl_arg *arg)
1406{
1407	return release_client_resource(client,
1408			arg->deallocate.handle, release_iso_resource, NULL);
1409}
1410
1411static int ioctl_allocate_iso_resource_once(struct client *client,
1412					    union ioctl_arg *arg)
1413{
1414	return init_iso_resource(client,
1415			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
1416}
1417
1418static int ioctl_deallocate_iso_resource_once(struct client *client,
1419					      union ioctl_arg *arg)
1420{
1421	return init_iso_resource(client,
1422			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
1423}
1424
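/*
 * Editor's note (illustrative sketch, not part of the kernel source): the
 * four ioctls above differ only in the todo value passed to
 * init_iso_resource().  ISO_RES_ALLOC keeps the channel/bandwidth allocated
 * across bus resets (reallocated by iso_resource_work()), while the *_ONCE
 * variants touch the IRM a single time and leave the result to the caller.
 * A typical allocation request:
 *
 *	struct fw_cdev_allocate_iso_resource res = {};
 *
 *	res.channels  = 1ULL << 5;
 *	res.bandwidth = 2048;
 *	res.closure   = (__u64)(uintptr_t)my_ctx;
 *	ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &res);
 *
 * The outcome arrives later as an FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED
 * event; channels is a bit mask of acceptable channels, bandwidth is in
 * bandwidth allocation units, and my_ctx is a hypothetical cookie.
 */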
1425/*
1426 * Returns a speed code:  Maximum speed to or from this device,
1427 * limited by the device's link speed, the local node's link speed,
1428 * and all PHY port speeds between the two links.
1429 */
1430static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
1431{
1432	return client->device->max_speed;
1433}
1434
1435static int ioctl_send_broadcast_request(struct client *client,
1436					union ioctl_arg *arg)
1437{
1438	struct fw_cdev_send_request *a = &arg->send_request;
1439
1440	switch (a->tcode) {
1441	case TCODE_WRITE_QUADLET_REQUEST:
1442	case TCODE_WRITE_BLOCK_REQUEST:
1443		break;
1444	default:
1445		return -EINVAL;
1446	}
1447
1448	/* Security policy: Only allow accesses to Units Space. */
1449	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1450		return -EACCES;
1451
1452	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
1453}
1454
1455static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
1456{
1457	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
1458	struct fw_cdev_send_request request;
1459	int dest;
1460
1461	if (a->speed > client->device->card->link_speed ||
1462	    a->length > 1024 << a->speed)
1463		return -EIO;
1464
1465	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
1466		return -EINVAL;
1467
1468	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
1469	request.tcode		= TCODE_STREAM_DATA;
1470	request.length		= a->length;
1471	request.closure		= a->closure;
1472	request.data		= a->data;
1473	request.generation	= a->generation;
1474
1475	return init_request(client, &request, dest, a->speed);
1476}
1477
1478static void outbound_phy_packet_callback(struct fw_packet *packet,
1479					 struct fw_card *card, int status)
1480{
1481	struct outbound_phy_packet_event *e =
1482		container_of(packet, struct outbound_phy_packet_event, p);
1483
1484	switch (status) {
1485	/* expected: */
1486	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
1487	/* should never happen with PHY packets: */
1488	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
1489	case ACK_BUSY_X:
1490	case ACK_BUSY_A:
1491	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
1492	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
1493	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
1494	/* stale generation; cancelled; on certain controllers: no ack */
1495	default:		e->phy_packet.rcode = status;		break;
1496	}
1497	e->phy_packet.data[0] = packet->timestamp;
1498
1499	queue_event(e->client, &e->event, &e->phy_packet,
1500		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
1501	client_put(e->client);
1502}
1503
1504static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
1505{
1506	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
1507	struct fw_card *card = client->device->card;
1508	struct outbound_phy_packet_event *e;
1509
1510	/* Access policy: Allow this ioctl only on local nodes' device files. */
1511	if (!client->device->is_local)
1512		return -ENOSYS;
1513
1514	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
1515	if (e == NULL)
1516		return -ENOMEM;
1517
1518	client_get(client);
1519	e->client		= client;
1520	e->p.speed		= SCODE_100;
1521	e->p.generation		= a->generation;
1522	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
1523	e->p.header[1]		= a->data[0];
1524	e->p.header[2]		= a->data[1];
1525	e->p.header_length	= 12;
1526	e->p.callback		= outbound_phy_packet_callback;
1527	e->phy_packet.closure	= a->closure;
1528	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
1529	if (is_ping_packet(a->data))
 1530		e->phy_packet.length = 4;
1531
1532	card->driver->send_request(card, &e->p);
1533
1534	return 0;
1535}
1536
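/*
 * Editor's note (illustrative sketch, not part of the kernel source): PHY
 * packets may only be sent through the local node's device file.  The two
 * data quadlets are transmitted as given; for example, a userspace bus
 * manager could send a ping packet and, for ping packets, read timing data
 * back from the FW_CDEV_EVENT_PHY_PACKET_SENT event (see
 * outbound_phy_packet_callback() above):
 *
 *	struct fw_cdev_send_phy_packet phy = {};
 *
 *	phy.generation = reset.generation;
 *	phy.data[0]    = ping_quadlet;
 *	phy.data[1]    = ~ping_quadlet;
 *	ioctl(fd, FW_CDEV_IOC_SEND_PHY_PACKET, &phy);
 *
 * ping_quadlet and reset are assumed from the surrounding application; the
 * second quadlet of a PHY packet is the bitwise inverse of the first.
 */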
1537static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
1538{
1539	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
1540	struct fw_card *card = client->device->card;
1541
1542	/* Access policy: Allow this ioctl only on local nodes' device files. */
1543	if (!client->device->is_local)
1544		return -ENOSYS;
1545
1546	spin_lock_irq(&card->lock);
1547
1548	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
1549	client->phy_receiver_closure = a->closure;
1550
1551	spin_unlock_irq(&card->lock);
1552
1553	return 0;
1554}
1555
1556void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
1557{
1558	struct client *client;
1559	struct inbound_phy_packet_event *e;
1560	unsigned long flags;
1561
1562	spin_lock_irqsave(&card->lock, flags);
1563
1564	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
1565		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
1566		if (e == NULL)
1567			break;
1568
1569		e->phy_packet.closure	= client->phy_receiver_closure;
1570		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
1571		e->phy_packet.rcode	= RCODE_COMPLETE;
1572		e->phy_packet.length	= 8;
1573		e->phy_packet.data[0]	= p->header[1];
1574		e->phy_packet.data[1]	= p->header[2];
1575		queue_event(client, &e->event,
1576			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
1577	}
1578
1579	spin_unlock_irqrestore(&card->lock, flags);
1580}
1581
1582static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
1583	[0x00] = ioctl_get_info,
1584	[0x01] = ioctl_send_request,
1585	[0x02] = ioctl_allocate,
1586	[0x03] = ioctl_deallocate,
1587	[0x04] = ioctl_send_response,
1588	[0x05] = ioctl_initiate_bus_reset,
1589	[0x06] = ioctl_add_descriptor,
1590	[0x07] = ioctl_remove_descriptor,
1591	[0x08] = ioctl_create_iso_context,
1592	[0x09] = ioctl_queue_iso,
1593	[0x0a] = ioctl_start_iso,
1594	[0x0b] = ioctl_stop_iso,
1595	[0x0c] = ioctl_get_cycle_timer,
1596	[0x0d] = ioctl_allocate_iso_resource,
1597	[0x0e] = ioctl_deallocate_iso_resource,
1598	[0x0f] = ioctl_allocate_iso_resource_once,
1599	[0x10] = ioctl_deallocate_iso_resource_once,
1600	[0x11] = ioctl_get_speed,
1601	[0x12] = ioctl_send_broadcast_request,
1602	[0x13] = ioctl_send_stream_packet,
1603	[0x14] = ioctl_get_cycle_timer2,
1604	[0x15] = ioctl_send_phy_packet,
1605	[0x16] = ioctl_receive_phy_packets,
1606	[0x17] = ioctl_set_iso_channels,
1607	[0x18] = ioctl_flush_iso,
1608};
1609
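/*
 * Editor's note (illustrative, not part of the kernel source): the table
 * index is the ioctl ordinal, so dispatch_ioctl() below works for any
 * command of type '#' whose _IOC_NR() is in range.  For instance,
 * FW_CDEV_IOC_GET_INFO is defined in the UAPI header as
 * _IOWR('#', 0x00, struct fw_cdev_get_info) and therefore lands on
 * ioctl_get_info above; a new command only needs a new table slot and a
 * matching _IO*('#', nr, ...) definition.
 */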
1610static int dispatch_ioctl(struct client *client,
1611			  unsigned int cmd, void __user *arg)
1612{
1613	union ioctl_arg buffer;
1614	int ret;
1615
1616	if (fw_device_is_shutdown(client->device))
1617		return -ENODEV;
1618
1619	if (_IOC_TYPE(cmd) != '#' ||
1620	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
1621	    _IOC_SIZE(cmd) > sizeof(buffer))
1622		return -ENOTTY;
1623
1624	memset(&buffer, 0, sizeof(buffer));
1625
1626	if (_IOC_DIR(cmd) & _IOC_WRITE)
1627		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
1628			return -EFAULT;
1629
1630	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
1631	if (ret < 0)
1632		return ret;
1633
1634	if (_IOC_DIR(cmd) & _IOC_READ)
1635		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
1636			return -EFAULT;
1637
1638	return ret;
1639}
1640
1641static long fw_device_op_ioctl(struct file *file,
1642			       unsigned int cmd, unsigned long arg)
1643{
1644	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
1645}
1646
1647static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
1648{
1649	struct client *client = file->private_data;
1650	unsigned long size;
1651	int page_count, ret;
1652
1653	if (fw_device_is_shutdown(client->device))
1654		return -ENODEV;
1655
1656	/* FIXME: We could support multiple buffers, but we don't. */
1657	if (client->buffer.pages != NULL)
1658		return -EBUSY;
1659
1660	if (!(vma->vm_flags & VM_SHARED))
1661		return -EINVAL;
1662
1663	if (vma->vm_start & ~PAGE_MASK)
1664		return -EINVAL;
1665
1666	client->vm_start = vma->vm_start;
1667	size = vma->vm_end - vma->vm_start;
1668	page_count = size >> PAGE_SHIFT;
1669	if (size & ~PAGE_MASK)
1670		return -EINVAL;
1671
1672	ret = fw_iso_buffer_alloc(&client->buffer, page_count);
1673	if (ret < 0)
1674		return ret;
1675
1676	spin_lock_irq(&client->lock);
1677	if (client->iso_context) {
1678		ret = fw_iso_buffer_map_dma(&client->buffer,
1679				client->device->card,
1680				iso_dma_direction(client->iso_context));
1681		client->buffer_is_mapped = (ret == 0);
1682	}
1683	spin_unlock_irq(&client->lock);
1684	if (ret < 0)
1685		goto fail;
1686
1687	ret = vm_map_pages_zero(vma, client->buffer.pages,
1688				client->buffer.page_count);
1689	if (ret < 0)
1690		goto fail;
1691
1692	return 0;
1693 fail:
1694	fw_iso_buffer_destroy(&client->buffer, client->device->card);
1695	return ret;
1696}
1697
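/*
 * Editor's note (illustrative sketch, not part of the kernel source): the
 * isochronous payload buffer is shared with userspace by mmap()ing the
 * /dev/fw* file descriptor; data pointers passed to FW_CDEV_IOC_QUEUE_ISO
 * must then point into this mapping, which is translated back via
 * client->vm_start in ioctl_queue_iso() above.  Roughly:
 *
 *	size_t size = 16 * 4096;
 *	void *buffer = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, 0);
 *
 *	if (buffer == MAP_FAILED)
 *		return -1;
 *
 * The mapping must be MAP_SHARED and a whole number of pages, as checked
 * above; only a single buffer per file descriptor is supported.
 */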
1698static int is_outbound_transaction_resource(int id, void *p, void *data)
1699{
1700	struct client_resource *resource = p;
1701
1702	return resource->release == release_transaction;
1703}
1704
1705static int has_outbound_transactions(struct client *client)
1706{
1707	int ret;
1708
1709	spin_lock_irq(&client->lock);
1710	ret = idr_for_each(&client->resource_idr,
1711			   is_outbound_transaction_resource, NULL);
1712	spin_unlock_irq(&client->lock);
1713
1714	return ret;
1715}
1716
1717static int shutdown_resource(int id, void *p, void *data)
1718{
1719	struct client_resource *resource = p;
1720	struct client *client = data;
1721
1722	resource->release(client, resource);
1723	client_put(client);
1724
1725	return 0;
1726}
1727
1728static int fw_device_op_release(struct inode *inode, struct file *file)
1729{
1730	struct client *client = file->private_data;
1731	struct event *event, *next_event;
1732
1733	spin_lock_irq(&client->device->card->lock);
1734	list_del(&client->phy_receiver_link);
1735	spin_unlock_irq(&client->device->card->lock);
1736
1737	mutex_lock(&client->device->client_list_mutex);
1738	list_del(&client->link);
1739	mutex_unlock(&client->device->client_list_mutex);
1740
1741	if (client->iso_context)
1742		fw_iso_context_destroy(client->iso_context);
1743
1744	if (client->buffer.pages)
1745		fw_iso_buffer_destroy(&client->buffer, client->device->card);
1746
1747	/* Freeze client->resource_idr and client->event_list */
1748	spin_lock_irq(&client->lock);
1749	client->in_shutdown = true;
1750	spin_unlock_irq(&client->lock);
1751
1752	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
1753
1754	idr_for_each(&client->resource_idr, shutdown_resource, client);
1755	idr_destroy(&client->resource_idr);
1756
1757	list_for_each_entry_safe(event, next_event, &client->event_list, link)
1758		kfree(event);
1759
1760	client_put(client);
1761
1762	return 0;
1763}
1764
1765static __poll_t fw_device_op_poll(struct file *file, poll_table * pt)
1766{
1767	struct client *client = file->private_data;
1768	__poll_t mask = 0;
1769
1770	poll_wait(file, &client->wait, pt);
1771
1772	if (fw_device_is_shutdown(client->device))
1773		mask |= EPOLLHUP | EPOLLERR;
1774	if (!list_empty(&client->event_list))
1775		mask |= EPOLLIN | EPOLLRDNORM;
1776
1777	return mask;
1778}
1779
1780const struct file_operations fw_device_ops = {
1781	.owner		= THIS_MODULE,
1782	.llseek		= no_llseek,
1783	.open		= fw_device_op_open,
1784	.read		= fw_device_op_read,
1785	.unlocked_ioctl	= fw_device_op_ioctl,
1786	.mmap		= fw_device_op_mmap,
1787	.release	= fw_device_op_release,
1788	.poll		= fw_device_op_poll,
1789	.compat_ioctl	= compat_ptr_ioctl,
1790};