v3.5.6
   1/*
   2 * Char device for device raw access
   3 *
   4 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software Foundation,
  18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  19 */
  20
  21#include <linux/bug.h>
  22#include <linux/compat.h>
  23#include <linux/delay.h>
  24#include <linux/device.h>
  25#include <linux/dma-mapping.h>
  26#include <linux/errno.h>
  27#include <linux/firewire.h>
  28#include <linux/firewire-cdev.h>
  29#include <linux/idr.h>
  30#include <linux/irqflags.h>
  31#include <linux/jiffies.h>
  32#include <linux/kernel.h>
  33#include <linux/kref.h>
  34#include <linux/mm.h>
  35#include <linux/module.h>
  36#include <linux/mutex.h>
  37#include <linux/poll.h>
  38#include <linux/sched.h> /* required for linux/wait.h */
  39#include <linux/slab.h>
  40#include <linux/spinlock.h>
  41#include <linux/string.h>
  42#include <linux/time.h>
  43#include <linux/uaccess.h>
  44#include <linux/vmalloc.h>
  45#include <linux/wait.h>
  46#include <linux/workqueue.h>
  47
  48
  49#include "core.h"
  50
  51/*
  52 * ABI version history is documented in linux/firewire-cdev.h.
  53 */
  54#define FW_CDEV_KERNEL_VERSION			5
  55#define FW_CDEV_VERSION_EVENT_REQUEST2		4
  56#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
  57
  58struct client {
  59	u32 version;
  60	struct fw_device *device;
  61
  62	spinlock_t lock;
  63	bool in_shutdown;
  64	struct idr resource_idr;
  65	struct list_head event_list;
  66	wait_queue_head_t wait;
  67	wait_queue_head_t tx_flush_wait;
  68	u64 bus_reset_closure;
  69
  70	struct fw_iso_context *iso_context;
  71	u64 iso_closure;
  72	struct fw_iso_buffer buffer;
  73	unsigned long vm_start;
  74	bool buffer_is_mapped;
  75
  76	struct list_head phy_receiver_link;
  77	u64 phy_receiver_closure;
  78
  79	struct list_head link;
  80	struct kref kref;
  81};
  82
  83static inline void client_get(struct client *client)
  84{
  85	kref_get(&client->kref);
  86}
  87
  88static void client_release(struct kref *kref)
  89{
  90	struct client *client = container_of(kref, struct client, kref);
  91
  92	fw_device_put(client->device);
  93	kfree(client);
  94}
  95
  96static void client_put(struct client *client)
  97{
  98	kref_put(&client->kref, client_release);
  99}
 100
 101struct client_resource;
 102typedef void (*client_resource_release_fn_t)(struct client *,
 103					     struct client_resource *);
 104struct client_resource {
 105	client_resource_release_fn_t release;
 106	int handle;
 107};
 108
 109struct address_handler_resource {
 110	struct client_resource resource;
 111	struct fw_address_handler handler;
 112	__u64 closure;
 113	struct client *client;
 114};
 115
 116struct outbound_transaction_resource {
 117	struct client_resource resource;
 118	struct fw_transaction transaction;
 119};
 120
 121struct inbound_transaction_resource {
 122	struct client_resource resource;
 123	struct fw_card *card;
 124	struct fw_request *request;
 125	void *data;
 126	size_t length;
 127};
 128
 129struct descriptor_resource {
 130	struct client_resource resource;
 131	struct fw_descriptor descriptor;
 132	u32 data[0];
 133};
 134
 135struct iso_resource {
 136	struct client_resource resource;
 137	struct client *client;
 138	/* Schedule work and access todo only with client->lock held. */
 139	struct delayed_work work;
 140	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
 141	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
 142	int generation;
 143	u64 channels;
 144	s32 bandwidth;
 145	struct iso_resource_event *e_alloc, *e_dealloc;
 146};
 147
 148static void release_iso_resource(struct client *, struct client_resource *);
 149
 150static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
 151{
 152	client_get(r->client);
 153	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
 154		client_put(r->client);
 155}
 156
 157static void schedule_if_iso_resource(struct client_resource *resource)
 158{
 159	if (resource->release == release_iso_resource)
 160		schedule_iso_resource(container_of(resource,
 161					struct iso_resource, resource), 0);
 162}
 163
 164/*
 165 * dequeue_event() just kfree()'s the event, so the event has to be
 166 * the first field in a struct XYZ_event.
 167 */
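/*
 * E.g. in struct bus_reset_event below, &e->event and e are the same address,
 * so dequeue_event()'s kfree(event) releases the whole containing structure.
 */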
 168struct event {
 169	struct { void *data; size_t size; } v[2];
 170	struct list_head link;
 171};
 172
 173struct bus_reset_event {
 174	struct event event;
 175	struct fw_cdev_event_bus_reset reset;
 176};
 177
 178struct outbound_transaction_event {
 179	struct event event;
 180	struct client *client;
 181	struct outbound_transaction_resource r;
 182	struct fw_cdev_event_response response;
 183};
 184
 185struct inbound_transaction_event {
 186	struct event event;
 187	union {
 188		struct fw_cdev_event_request request;
 189		struct fw_cdev_event_request2 request2;
 190	} req;
 191};
 192
 193struct iso_interrupt_event {
 194	struct event event;
 195	struct fw_cdev_event_iso_interrupt interrupt;
 196};
 197
 198struct iso_interrupt_mc_event {
 199	struct event event;
 200	struct fw_cdev_event_iso_interrupt_mc interrupt;
 201};
 202
 203struct iso_resource_event {
 204	struct event event;
 205	struct fw_cdev_event_iso_resource iso_resource;
 206};
 207
 208struct outbound_phy_packet_event {
 209	struct event event;
 210	struct client *client;
 211	struct fw_packet p;
 212	struct fw_cdev_event_phy_packet phy_packet;
 213};
 214
 215struct inbound_phy_packet_event {
 216	struct event event;
 217	struct fw_cdev_event_phy_packet phy_packet;
 218};
 219
 220#ifdef CONFIG_COMPAT
 221static void __user *u64_to_uptr(u64 value)
 222{
 223	if (is_compat_task())
 224		return compat_ptr(value);
 225	else
 226		return (void __user *)(unsigned long)value;
 227}
 228
 229static u64 uptr_to_u64(void __user *ptr)
 230{
 231	if (is_compat_task())
 232		return ptr_to_compat(ptr);
 233	else
 234		return (u64)(unsigned long)ptr;
 235}
 236#else
 237static inline void __user *u64_to_uptr(u64 value)
 238{
 239	return (void __user *)(unsigned long)value;
 240}
 241
 242static inline u64 uptr_to_u64(void __user *ptr)
 243{
 244	return (u64)(unsigned long)ptr;
 245}
 246#endif /* CONFIG_COMPAT */
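/*
 * Illustrative userspace sketch (assuming the definitions in
 * <linux/firewire-cdev.h>): pointers travel through the ABI as u64 values,
 * which the helpers above convert back to __user pointers, via compat_ptr()
 * for 32-bit tasks.
 *
 *	struct fw_cdev_get_info info = {0};
 *	__u32 rom[256];
 *
 *	info.rom        = (__u64)(uintptr_t)rom;
 *	info.rom_length = sizeof(rom);
 *	ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
 */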
 247
 248static int fw_device_op_open(struct inode *inode, struct file *file)
 249{
 250	struct fw_device *device;
 251	struct client *client;
 252
 253	device = fw_device_get_by_devt(inode->i_rdev);
 254	if (device == NULL)
 255		return -ENODEV;
 256
 257	if (fw_device_is_shutdown(device)) {
 258		fw_device_put(device);
 259		return -ENODEV;
 260	}
 261
 262	client = kzalloc(sizeof(*client), GFP_KERNEL);
 263	if (client == NULL) {
 264		fw_device_put(device);
 265		return -ENOMEM;
 266	}
 267
 268	client->device = device;
 269	spin_lock_init(&client->lock);
 270	idr_init(&client->resource_idr);
 271	INIT_LIST_HEAD(&client->event_list);
 272	init_waitqueue_head(&client->wait);
 273	init_waitqueue_head(&client->tx_flush_wait);
 274	INIT_LIST_HEAD(&client->phy_receiver_link);
 275	INIT_LIST_HEAD(&client->link);
 276	kref_init(&client->kref);
 277
 278	file->private_data = client;
 279
 280	return nonseekable_open(inode, file);
 281}
 282
 283static void queue_event(struct client *client, struct event *event,
 284			void *data0, size_t size0, void *data1, size_t size1)
 285{
 286	unsigned long flags;
 287
 288	event->v[0].data = data0;
 289	event->v[0].size = size0;
 290	event->v[1].data = data1;
 291	event->v[1].size = size1;
 292
 293	spin_lock_irqsave(&client->lock, flags);
 294	if (client->in_shutdown)
 295		kfree(event);
 296	else
 297		list_add_tail(&event->link, &client->event_list);
 298	spin_unlock_irqrestore(&client->lock, flags);
 299
 300	wake_up_interruptible(&client->wait);
 301}
 302
 303static int dequeue_event(struct client *client,
 304			 char __user *buffer, size_t count)
 305{
 306	struct event *event;
 307	size_t size, total;
 308	int i, ret;
 309
 310	ret = wait_event_interruptible(client->wait,
 311			!list_empty(&client->event_list) ||
 312			fw_device_is_shutdown(client->device));
 313	if (ret < 0)
 314		return ret;
 315
 316	if (list_empty(&client->event_list) &&
 317		       fw_device_is_shutdown(client->device))
 318		return -ENODEV;
 319
 320	spin_lock_irq(&client->lock);
 321	event = list_first_entry(&client->event_list, struct event, link);
 322	list_del(&event->link);
 323	spin_unlock_irq(&client->lock);
 324
 325	total = 0;
 326	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
 327		size = min(event->v[i].size, count - total);
 328		if (copy_to_user(buffer + total, event->v[i].data, size)) {
 329			ret = -EFAULT;
 330			goto out;
 331		}
 332		total += size;
 333	}
 334	ret = total;
 335
 336 out:
 337	kfree(event);
 338
 339	return ret;
 340}
 341
 342static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
 343				 size_t count, loff_t *offset)
 344{
 345	struct client *client = file->private_data;
 346
 347	return dequeue_event(client, buffer, count);
 348}
 349
 350static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
 351				 struct client *client)
 352{
 353	struct fw_card *card = client->device->card;
 354
 355	spin_lock_irq(&card->lock);
 356
 357	event->closure	     = client->bus_reset_closure;
 358	event->type          = FW_CDEV_EVENT_BUS_RESET;
 359	event->generation    = client->device->generation;
 360	event->node_id       = client->device->node_id;
 361	event->local_node_id = card->local_node->node_id;
 362	event->bm_node_id    = card->bm_node_id;
 363	event->irm_node_id   = card->irm_node->node_id;
 364	event->root_node_id  = card->root_node->node_id;
 365
 366	spin_unlock_irq(&card->lock);
 367}
 368
 369static void for_each_client(struct fw_device *device,
 370			    void (*callback)(struct client *client))
 371{
 372	struct client *c;
 373
 374	mutex_lock(&device->client_list_mutex);
 375	list_for_each_entry(c, &device->client_list, link)
 376		callback(c);
 377	mutex_unlock(&device->client_list_mutex);
 378}
 379
 380static int schedule_reallocations(int id, void *p, void *data)
 381{
 382	schedule_if_iso_resource(p);
 383
 384	return 0;
 385}
 386
 387static void queue_bus_reset_event(struct client *client)
 388{
 389	struct bus_reset_event *e;
 390
 391	e = kzalloc(sizeof(*e), GFP_KERNEL);
 392	if (e == NULL) {
 393		fw_notice(client->device->card, "out of memory when allocating event\n");
 394		return;
 395	}
 396
 397	fill_bus_reset_event(&e->reset, client);
 398
 399	queue_event(client, &e->event,
 400		    &e->reset, sizeof(e->reset), NULL, 0);
 401
 402	spin_lock_irq(&client->lock);
 403	idr_for_each(&client->resource_idr, schedule_reallocations, client);
 404	spin_unlock_irq(&client->lock);
 405}
 406
 407void fw_device_cdev_update(struct fw_device *device)
 408{
 409	for_each_client(device, queue_bus_reset_event);
 410}
 411
 412static void wake_up_client(struct client *client)
 413{
 414	wake_up_interruptible(&client->wait);
 415}
 416
 417void fw_device_cdev_remove(struct fw_device *device)
 418{
 419	for_each_client(device, wake_up_client);
 420}
 421
 422union ioctl_arg {
 423	struct fw_cdev_get_info			get_info;
 424	struct fw_cdev_send_request		send_request;
 425	struct fw_cdev_allocate			allocate;
 426	struct fw_cdev_deallocate		deallocate;
 427	struct fw_cdev_send_response		send_response;
 428	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
 429	struct fw_cdev_add_descriptor		add_descriptor;
 430	struct fw_cdev_remove_descriptor	remove_descriptor;
 431	struct fw_cdev_create_iso_context	create_iso_context;
 432	struct fw_cdev_queue_iso		queue_iso;
 433	struct fw_cdev_start_iso		start_iso;
 434	struct fw_cdev_stop_iso			stop_iso;
 435	struct fw_cdev_get_cycle_timer		get_cycle_timer;
 436	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
 437	struct fw_cdev_send_stream_packet	send_stream_packet;
 438	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
 439	struct fw_cdev_send_phy_packet		send_phy_packet;
 440	struct fw_cdev_receive_phy_packets	receive_phy_packets;
 441	struct fw_cdev_set_iso_channels		set_iso_channels;
 442	struct fw_cdev_flush_iso		flush_iso;
 443};
 444
 445static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
 446{
 447	struct fw_cdev_get_info *a = &arg->get_info;
 448	struct fw_cdev_event_bus_reset bus_reset;
 449	unsigned long ret = 0;
 450
 451	client->version = a->version;
 452	a->version = FW_CDEV_KERNEL_VERSION;
 453	a->card = client->device->card->index;
 454
 455	down_read(&fw_device_rwsem);
 456
 457	if (a->rom != 0) {
 458		size_t want = a->rom_length;
 459		size_t have = client->device->config_rom_length * 4;
 460
 461		ret = copy_to_user(u64_to_uptr(a->rom),
 462				   client->device->config_rom, min(want, have));
 463	}
 464	a->rom_length = client->device->config_rom_length * 4;
 465
 466	up_read(&fw_device_rwsem);
 467
 468	if (ret != 0)
 469		return -EFAULT;
 470
 471	mutex_lock(&client->device->client_list_mutex);
 472
 473	client->bus_reset_closure = a->bus_reset_closure;
 474	if (a->bus_reset != 0) {
 475		fill_bus_reset_event(&bus_reset, client);
 476		ret = copy_to_user(u64_to_uptr(a->bus_reset),
 477				   &bus_reset, sizeof(bus_reset));
 478	}
 479	if (ret == 0 && list_empty(&client->link))
 480		list_add_tail(&client->link, &client->device->client_list);
 481
 482	mutex_unlock(&client->device->client_list_mutex);
 483
 484	return ret ? -EFAULT : 0;
 485}
 486
 487static int add_client_resource(struct client *client,
 488			       struct client_resource *resource, gfp_t gfp_mask)
 489{
 490	unsigned long flags;
 491	int ret;
 492
 493 retry:
 494	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
 495		return -ENOMEM;
 496
 497	spin_lock_irqsave(&client->lock, flags);
 498	if (client->in_shutdown)
 499		ret = -ECANCELED;
 500	else
 501		ret = idr_get_new(&client->resource_idr, resource,
 502				  &resource->handle);
 503	if (ret >= 0) {
 504		client_get(client);
 505		schedule_if_iso_resource(resource);
 506	}
 507	spin_unlock_irqrestore(&client->lock, flags);
 508
 509	if (ret == -EAGAIN)
 510		goto retry;
 511
 512	return ret < 0 ? ret : 0;
 513}
 514
 515static int release_client_resource(struct client *client, u32 handle,
 516				   client_resource_release_fn_t release,
 517				   struct client_resource **return_resource)
 518{
 519	struct client_resource *resource;
 520
 521	spin_lock_irq(&client->lock);
 522	if (client->in_shutdown)
 523		resource = NULL;
 524	else
 525		resource = idr_find(&client->resource_idr, handle);
 526	if (resource && resource->release == release)
 527		idr_remove(&client->resource_idr, handle);
 528	spin_unlock_irq(&client->lock);
 529
 530	if (!(resource && resource->release == release))
 531		return -EINVAL;
 532
 533	if (return_resource)
 534		*return_resource = resource;
 535	else
 536		resource->release(client, resource);
 537
 538	client_put(client);
 539
 540	return 0;
 541}
 542
 543static void release_transaction(struct client *client,
 544				struct client_resource *resource)
 545{
 546}
 547
 548static void complete_transaction(struct fw_card *card, int rcode,
 549				 void *payload, size_t length, void *data)
 550{
 551	struct outbound_transaction_event *e = data;
 552	struct fw_cdev_event_response *rsp = &e->response;
 553	struct client *client = e->client;
 554	unsigned long flags;
 555
 556	if (length < rsp->length)
 557		rsp->length = length;
 558	if (rcode == RCODE_COMPLETE)
 559		memcpy(rsp->data, payload, rsp->length);
 560
 561	spin_lock_irqsave(&client->lock, flags);
 562	idr_remove(&client->resource_idr, e->r.resource.handle);
 563	if (client->in_shutdown)
 564		wake_up(&client->tx_flush_wait);
 565	spin_unlock_irqrestore(&client->lock, flags);
 566
 567	rsp->type = FW_CDEV_EVENT_RESPONSE;
 568	rsp->rcode = rcode;
 569
 570	/*
 571	 * In the case that sizeof(*rsp) doesn't align with the position of the
 572	 * data, and the read is short, preserve an extra copy of the data
 573	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
 574	 * for short reads and some apps depended on it, this is both safe
 575	 * and prudent for compatibility.
 576	 */
 577	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
 578		queue_event(client, &e->event, rsp, sizeof(*rsp),
 579			    rsp->data, rsp->length);
 580	else
 581		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
 582			    NULL, 0);
 583
 584	/* Drop the idr's reference */
 585	client_put(client);
 586}
 587
 588static int init_request(struct client *client,
 589			struct fw_cdev_send_request *request,
 590			int destination_id, int speed)
 591{
 592	struct outbound_transaction_event *e;
 593	int ret;
 594
 595	if (request->tcode != TCODE_STREAM_DATA &&
 596	    (request->length > 4096 || request->length > 512 << speed))
 597		return -EIO;
 598
 599	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
 600	    request->length < 4)
 601		return -EINVAL;
 602
 603	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
 604	if (e == NULL)
 605		return -ENOMEM;
 606
 607	e->client = client;
 608	e->response.length = request->length;
 609	e->response.closure = request->closure;
 610
 611	if (request->data &&
 612	    copy_from_user(e->response.data,
 613			   u64_to_uptr(request->data), request->length)) {
 614		ret = -EFAULT;
 615		goto failed;
 616	}
 617
 618	e->r.resource.release = release_transaction;
 619	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
 620	if (ret < 0)
 621		goto failed;
 622
 623	fw_send_request(client->device->card, &e->r.transaction,
 624			request->tcode, destination_id, request->generation,
 625			speed, request->offset, e->response.data,
 626			request->length, complete_transaction, e);
 627	return 0;
 628
 629 failed:
 630	kfree(e);
 631
 632	return ret;
 633}
 634
 635static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
 636{
 637	switch (arg->send_request.tcode) {
 638	case TCODE_WRITE_QUADLET_REQUEST:
 639	case TCODE_WRITE_BLOCK_REQUEST:
 640	case TCODE_READ_QUADLET_REQUEST:
 641	case TCODE_READ_BLOCK_REQUEST:
 642	case TCODE_LOCK_MASK_SWAP:
 643	case TCODE_LOCK_COMPARE_SWAP:
 644	case TCODE_LOCK_FETCH_ADD:
 645	case TCODE_LOCK_LITTLE_ADD:
 646	case TCODE_LOCK_BOUNDED_ADD:
 647	case TCODE_LOCK_WRAP_ADD:
 648	case TCODE_LOCK_VENDOR_DEPENDENT:
 649		break;
 650	default:
 651		return -EINVAL;
 652	}
 653
 654	return init_request(client, &arg->send_request, client->device->node_id,
 655			    client->device->max_speed);
 656}
 657
 658static inline bool is_fcp_request(struct fw_request *request)
 659{
 660	return request == NULL;
 661}
 662
 663static void release_request(struct client *client,
 664			    struct client_resource *resource)
 665{
 666	struct inbound_transaction_resource *r = container_of(resource,
 667			struct inbound_transaction_resource, resource);
 668
 669	if (is_fcp_request(r->request))
 670		kfree(r->data);
 671	else
 672		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);
 673
 674	fw_card_put(r->card);
 675	kfree(r);
 676}
 677
 678static void handle_request(struct fw_card *card, struct fw_request *request,
 679			   int tcode, int destination, int source,
 680			   int generation, unsigned long long offset,
 681			   void *payload, size_t length, void *callback_data)
 682{
 683	struct address_handler_resource *handler = callback_data;
 684	struct inbound_transaction_resource *r;
 685	struct inbound_transaction_event *e;
 686	size_t event_size0;
 687	void *fcp_frame = NULL;
 688	int ret;
 689
 690	/* card may be different from handler->client->device->card */
 691	fw_card_get(card);
 692
 693	r = kmalloc(sizeof(*r), GFP_ATOMIC);
 694	e = kmalloc(sizeof(*e), GFP_ATOMIC);
 695	if (r == NULL || e == NULL) {
 696		fw_notice(card, "out of memory when allocating event\n");
 697		goto failed;
 698	}
 699	r->card    = card;
 700	r->request = request;
 701	r->data    = payload;
 702	r->length  = length;
 703
 704	if (is_fcp_request(request)) {
 705		/*
 706		 * FIXME: Let core-transaction.c manage a
 707		 * single reference-counted copy?
 708		 */
 709		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
 710		if (fcp_frame == NULL)
 711			goto failed;
 712
 713		r->data = fcp_frame;
 714	}
 715
 716	r->resource.release = release_request;
 717	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
 718	if (ret < 0)
 719		goto failed;
 720
 721	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
 722		struct fw_cdev_event_request *req = &e->req.request;
 723
 724		if (tcode & 0x10)
 725			tcode = TCODE_LOCK_REQUEST;
 726
 727		req->type	= FW_CDEV_EVENT_REQUEST;
 728		req->tcode	= tcode;
 729		req->offset	= offset;
 730		req->length	= length;
 731		req->handle	= r->resource.handle;
 732		req->closure	= handler->closure;
 733		event_size0	= sizeof(*req);
 734	} else {
 735		struct fw_cdev_event_request2 *req = &e->req.request2;
 736
 737		req->type	= FW_CDEV_EVENT_REQUEST2;
 738		req->tcode	= tcode;
 739		req->offset	= offset;
 740		req->source_node_id = source;
 741		req->destination_node_id = destination;
 742		req->card	= card->index;
 743		req->generation	= generation;
 744		req->length	= length;
 745		req->handle	= r->resource.handle;
 746		req->closure	= handler->closure;
 747		event_size0	= sizeof(*req);
 748	}
 749
 750	queue_event(handler->client, &e->event,
 751		    &e->req, event_size0, r->data, length);
 752	return;
 753
 754 failed:
 755	kfree(r);
 756	kfree(e);
 757	kfree(fcp_frame);
 758
 759	if (!is_fcp_request(request))
 760		fw_send_response(card, request, RCODE_CONFLICT_ERROR);
 761
 762	fw_card_put(card);
 763}
 764
 765static void release_address_handler(struct client *client,
 766				    struct client_resource *resource)
 767{
 768	struct address_handler_resource *r =
 769	    container_of(resource, struct address_handler_resource, resource);
 770
 771	fw_core_remove_address_handler(&r->handler);
 772	kfree(r);
 773}
 774
 775static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
 776{
 777	struct fw_cdev_allocate *a = &arg->allocate;
 778	struct address_handler_resource *r;
 779	struct fw_address_region region;
 780	int ret;
 781
 782	r = kmalloc(sizeof(*r), GFP_KERNEL);
 783	if (r == NULL)
 784		return -ENOMEM;
 785
 786	region.start = a->offset;
 787	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
 788		region.end = a->offset + a->length;
 789	else
 790		region.end = a->region_end;
 791
 792	r->handler.length           = a->length;
 793	r->handler.address_callback = handle_request;
 794	r->handler.callback_data    = r;
 795	r->closure   = a->closure;
 796	r->client    = client;
 797
 798	ret = fw_core_add_address_handler(&r->handler, &region);
 799	if (ret < 0) {
 800		kfree(r);
 801		return ret;
 802	}
 803	a->offset = r->handler.offset;
 804
 805	r->resource.release = release_address_handler;
 806	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
 807	if (ret < 0) {
 808		release_address_handler(client, &r->resource);
 809		return ret;
 810	}
 811	a->handle = r->resource.handle;
 812
 813	return 0;
 814}
 815
 816static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
 817{
 818	return release_client_resource(client, arg->deallocate.handle,
 819				       release_address_handler, NULL);
 820}
 821
 822static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
 823{
 824	struct fw_cdev_send_response *a = &arg->send_response;
 825	struct client_resource *resource;
 826	struct inbound_transaction_resource *r;
 827	int ret = 0;
 828
 829	if (release_client_resource(client, a->handle,
 830				    release_request, &resource) < 0)
 831		return -EINVAL;
 832
 833	r = container_of(resource, struct inbound_transaction_resource,
 834			 resource);
 835	if (is_fcp_request(r->request))
 836		goto out;
 837
 838	if (a->length != fw_get_response_length(r->request)) {
 839		ret = -EINVAL;
 840		kfree(r->request);
 841		goto out;
 842	}
 843	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
 844		ret = -EFAULT;
 845		kfree(r->request);
 846		goto out;
 847	}
 848	fw_send_response(r->card, r->request, a->rcode);
 849 out:
 850	fw_card_put(r->card);
 851	kfree(r);
 852
 853	return ret;
 854}
 855
 856static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
 857{
 858	fw_schedule_bus_reset(client->device->card, true,
 859			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
 860	return 0;
 861}
 862
 863static void release_descriptor(struct client *client,
 864			       struct client_resource *resource)
 865{
 866	struct descriptor_resource *r =
 867		container_of(resource, struct descriptor_resource, resource);
 868
 869	fw_core_remove_descriptor(&r->descriptor);
 870	kfree(r);
 871}
 872
 873static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
 874{
 875	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
 876	struct descriptor_resource *r;
 877	int ret;
 878
 879	/* Access policy: Allow this ioctl only on local nodes' device files. */
 880	if (!client->device->is_local)
 881		return -ENOSYS;
 882
 883	if (a->length > 256)
 884		return -EINVAL;
 885
 886	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
 887	if (r == NULL)
 888		return -ENOMEM;
 889
 890	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
 891		ret = -EFAULT;
 892		goto failed;
 893	}
 894
 895	r->descriptor.length    = a->length;
 896	r->descriptor.immediate = a->immediate;
 897	r->descriptor.key       = a->key;
 898	r->descriptor.data      = r->data;
 899
 900	ret = fw_core_add_descriptor(&r->descriptor);
 901	if (ret < 0)
 902		goto failed;
 903
 904	r->resource.release = release_descriptor;
 905	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
 906	if (ret < 0) {
 907		fw_core_remove_descriptor(&r->descriptor);
 908		goto failed;
 909	}
 910	a->handle = r->resource.handle;
 911
 912	return 0;
 913 failed:
 914	kfree(r);
 915
 916	return ret;
 917}
 918
 919static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
 920{
 921	return release_client_resource(client, arg->remove_descriptor.handle,
 922				       release_descriptor, NULL);
 923}
 924
 925static void iso_callback(struct fw_iso_context *context, u32 cycle,
 926			 size_t header_length, void *header, void *data)
 927{
 928	struct client *client = data;
 929	struct iso_interrupt_event *e;
 930
 931	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
 932	if (e == NULL) {
 933		fw_notice(context->card, "out of memory when allocating event\n");
 934		return;
 935	}
 936	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
 937	e->interrupt.closure   = client->iso_closure;
 938	e->interrupt.cycle     = cycle;
 939	e->interrupt.header_length = header_length;
 940	memcpy(e->interrupt.header, header, header_length);
 941	queue_event(client, &e->event, &e->interrupt,
 942		    sizeof(e->interrupt) + header_length, NULL, 0);
 943}
 944
 945static void iso_mc_callback(struct fw_iso_context *context,
 946			    dma_addr_t completed, void *data)
 947{
 948	struct client *client = data;
 949	struct iso_interrupt_mc_event *e;
 950
 951	e = kmalloc(sizeof(*e), GFP_ATOMIC);
 952	if (e == NULL) {
 953		fw_notice(context->card, "out of memory when allocating event\n");
 954		return;
 955	}
 956	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
 957	e->interrupt.closure   = client->iso_closure;
 958	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
 959						      completed);
 960	queue_event(client, &e->event, &e->interrupt,
 961		    sizeof(e->interrupt), NULL, 0);
 962}
 963
 964static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
 965{
 966		if (context->type == FW_ISO_CONTEXT_TRANSMIT)
 967			return DMA_TO_DEVICE;
 968		else
 969			return DMA_FROM_DEVICE;
 970}
 971
 972static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 973{
 974	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
 975	struct fw_iso_context *context;
 976	fw_iso_callback_t cb;
 977	int ret;
 978
 979	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
 980		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
 981		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
 982					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);
 983
 984	switch (a->type) {
 985	case FW_ISO_CONTEXT_TRANSMIT:
 986		if (a->speed > SCODE_3200 || a->channel > 63)
 987			return -EINVAL;
 988
 989		cb = iso_callback;
 990		break;
 991
 992	case FW_ISO_CONTEXT_RECEIVE:
 993		if (a->header_size < 4 || (a->header_size & 3) ||
 994		    a->channel > 63)
 995			return -EINVAL;
 996
 997		cb = iso_callback;
 998		break;
 999
1000	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
1001		cb = (fw_iso_callback_t)iso_mc_callback;
1002		break;
1003
1004	default:
1005		return -EINVAL;
1006	}
1007
1008	context = fw_iso_context_create(client->device->card, a->type,
1009			a->channel, a->speed, a->header_size, cb, client);
1010	if (IS_ERR(context))
1011		return PTR_ERR(context);
1012
1013	/* We only support one context at this time. */
1014	spin_lock_irq(&client->lock);
1015	if (client->iso_context != NULL) {
1016		spin_unlock_irq(&client->lock);
1017		fw_iso_context_destroy(context);
1018
1019		return -EBUSY;
1020	}
1021	if (!client->buffer_is_mapped) {
1022		ret = fw_iso_buffer_map_dma(&client->buffer,
1023					    client->device->card,
1024					    iso_dma_direction(context));
1025		if (ret < 0) {
1026			spin_unlock_irq(&client->lock);
1027			fw_iso_context_destroy(context);
1028
1029			return ret;
1030		}
1031		client->buffer_is_mapped = true;
1032	}
1033	client->iso_closure = a->closure;
1034	client->iso_context = context;
1035	spin_unlock_irq(&client->lock);
1036
1037	a->handle = 0;
1038
1039	return 0;
1040}
1041
1042static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
1043{
1044	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
1045	struct fw_iso_context *ctx = client->iso_context;
1046
1047	if (ctx == NULL || a->handle != 0)
1048		return -EINVAL;
1049
1050	return fw_iso_context_set_channels(ctx, &a->channels);
1051}
1052
1053/* Macros for decoding the iso packet control header. */
1054#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
1055#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
1056#define GET_SKIP(v)		(((v) >> 17) & 0x01)
1057#define GET_TAG(v)		(((v) >> 18) & 0x03)
1058#define GET_SY(v)		(((v) >> 20) & 0x0f)
1059#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
1060
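/*
 * Illustrative sketch (assuming the FW_CDEV_ISO_* helpers from
 * <linux/firewire-cdev.h>): userspace builds the control word that the
 * GET_* macros above decode, e.g.
 *
 *	struct fw_cdev_iso_packet p;
 *
 *	p.control = FW_CDEV_ISO_PAYLOAD_LENGTH(payload_bytes)
 *		  | FW_CDEV_ISO_INTERRUPT
 *		  | FW_CDEV_ISO_TAG(1)
 *		  | FW_CDEV_ISO_SY(0)
 *		  | FW_CDEV_ISO_HEADER_LENGTH(header_bytes);
 */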
1061static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
1062{
1063	struct fw_cdev_queue_iso *a = &arg->queue_iso;
1064	struct fw_cdev_iso_packet __user *p, *end, *next;
1065	struct fw_iso_context *ctx = client->iso_context;
1066	unsigned long payload, buffer_end, transmit_header_bytes = 0;
1067	u32 control;
1068	int count;
1069	struct {
1070		struct fw_iso_packet packet;
1071		u8 header[256];
1072	} u;
1073
1074	if (ctx == NULL || a->handle != 0)
1075		return -EINVAL;
1076
1077	/*
1078	 * If the user passes a non-NULL data pointer, has mmap()'ed
1079	 * the iso buffer, and the pointer points inside the buffer,
1080	 * we setup the payload pointers accordingly.  Otherwise we
1081	 * set them both to 0, which will still let packets with
1082	 * payload_length == 0 through.  In other words, if no packets
1083	 * use the indirect payload, the iso buffer need not be mapped
1084	 * and the a->data pointer is ignored.
1085	 */
1086	payload = (unsigned long)a->data - client->vm_start;
1087	buffer_end = client->buffer.page_count << PAGE_SHIFT;
1088	if (a->data == 0 || client->buffer.pages == NULL ||
1089	    payload >= buffer_end) {
1090		payload = 0;
1091		buffer_end = 0;
1092	}
1093
1094	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
1095		return -EINVAL;
1096
1097	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
1098	if (!access_ok(VERIFY_READ, p, a->size))
1099		return -EFAULT;
1100
1101	end = (void __user *)p + a->size;
1102	count = 0;
1103	while (p < end) {
1104		if (get_user(control, &p->control))
1105			return -EFAULT;
1106		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
1107		u.packet.interrupt = GET_INTERRUPT(control);
1108		u.packet.skip = GET_SKIP(control);
1109		u.packet.tag = GET_TAG(control);
1110		u.packet.sy = GET_SY(control);
1111		u.packet.header_length = GET_HEADER_LENGTH(control);
1112
1113		switch (ctx->type) {
1114		case FW_ISO_CONTEXT_TRANSMIT:
1115			if (u.packet.header_length & 3)
1116				return -EINVAL;
1117			transmit_header_bytes = u.packet.header_length;
1118			break;
1119
1120		case FW_ISO_CONTEXT_RECEIVE:
1121			if (u.packet.header_length == 0 ||
1122			    u.packet.header_length % ctx->header_size != 0)
1123				return -EINVAL;
1124			break;
1125
1126		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
1127			if (u.packet.payload_length == 0 ||
1128			    u.packet.payload_length & 3)
1129				return -EINVAL;
1130			break;
1131		}
1132
1133		next = (struct fw_cdev_iso_packet __user *)
1134			&p->header[transmit_header_bytes / 4];
1135		if (next > end)
1136			return -EINVAL;
1137		if (__copy_from_user
1138		    (u.packet.header, p->header, transmit_header_bytes))
1139			return -EFAULT;
1140		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
1141		    u.packet.header_length + u.packet.payload_length > 0)
1142			return -EINVAL;
1143		if (payload + u.packet.payload_length > buffer_end)
1144			return -EINVAL;
1145
1146		if (fw_iso_context_queue(ctx, &u.packet,
1147					 &client->buffer, payload))
1148			break;
1149
1150		p = next;
1151		payload += u.packet.payload_length;
1152		count++;
1153	}
1154	fw_iso_context_queue_flush(ctx);
1155
1156	a->size    -= uptr_to_u64(p) - a->packets;
1157	a->packets  = uptr_to_u64(p);
1158	a->data     = client->vm_start + payload;
1159
1160	return count;
1161}
1162
1163static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
1164{
1165	struct fw_cdev_start_iso *a = &arg->start_iso;
1166
1167	BUILD_BUG_ON(
1168	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
1169	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
1170	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
1171	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
1172	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);
1173
1174	if (client->iso_context == NULL || a->handle != 0)
1175		return -EINVAL;
1176
1177	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
1178	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
1179		return -EINVAL;
1180
1181	return fw_iso_context_start(client->iso_context,
1182				    a->cycle, a->sync, a->tags);
1183}
1184
1185static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
1186{
1187	struct fw_cdev_stop_iso *a = &arg->stop_iso;
1188
1189	if (client->iso_context == NULL || a->handle != 0)
1190		return -EINVAL;
1191
1192	return fw_iso_context_stop(client->iso_context);
1193}
1194
1195static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
1196{
1197	struct fw_cdev_flush_iso *a = &arg->flush_iso;
1198
1199	if (client->iso_context == NULL || a->handle != 0)
1200		return -EINVAL;
1201
1202	return fw_iso_context_flush_completions(client->iso_context);
1203}
1204
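/*
 * The cycle_timer value returned below is the raw IEEE 1394 CYCLE_TIME
 * register: bits 31..25 cycleSeconds, bits 24..12 cycleCount (0..7999),
 * bits 11..0 cycleOffset (0..3071 at 24.576 MHz).
 */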
1205static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
1206{
1207	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
1208	struct fw_card *card = client->device->card;
1209	struct timespec ts = {0, 0};
1210	u32 cycle_time;
1211	int ret = 0;
1212
1213	local_irq_disable();
1214
1215	cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
1216
1217	switch (a->clk_id) {
1218	case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
1219	case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
1220	case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
1221	default:
1222		ret = -EINVAL;
1223	}
1224
1225	local_irq_enable();
1226
1227	a->tv_sec      = ts.tv_sec;
1228	a->tv_nsec     = ts.tv_nsec;
1229	a->cycle_timer = cycle_time;
1230
1231	return ret;
1232}
1233
1234static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
1235{
1236	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
1237	struct fw_cdev_get_cycle_timer2 ct2;
1238
1239	ct2.clk_id = CLOCK_REALTIME;
1240	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
1241
1242	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
1243	a->cycle_timer = ct2.cycle_timer;
1244
1245	return 0;
1246}
1247
1248static void iso_resource_work(struct work_struct *work)
1249{
1250	struct iso_resource_event *e;
1251	struct iso_resource *r =
1252			container_of(work, struct iso_resource, work.work);
1253	struct client *client = r->client;
1254	int generation, channel, bandwidth, todo;
1255	bool skip, free, success;
1256
1257	spin_lock_irq(&client->lock);
1258	generation = client->device->generation;
1259	todo = r->todo;
1260	/* Allow 1000ms grace period for other reallocations. */
1261	if (todo == ISO_RES_ALLOC &&
1262	    time_before64(get_jiffies_64(),
1263			  client->device->card->reset_jiffies + HZ)) {
1264		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
1265		skip = true;
1266	} else {
1267		/* We could be called twice within the same generation. */
1268		skip = todo == ISO_RES_REALLOC &&
1269		       r->generation == generation;
1270	}
1271	free = todo == ISO_RES_DEALLOC ||
1272	       todo == ISO_RES_ALLOC_ONCE ||
1273	       todo == ISO_RES_DEALLOC_ONCE;
1274	r->generation = generation;
1275	spin_unlock_irq(&client->lock);
1276
1277	if (skip)
1278		goto out;
1279
1280	bandwidth = r->bandwidth;
1281
1282	fw_iso_resource_manage(client->device->card, generation,
1283			r->channels, &channel, &bandwidth,
1284			todo == ISO_RES_ALLOC ||
1285			todo == ISO_RES_REALLOC ||
1286			todo == ISO_RES_ALLOC_ONCE);
1287	/*
1288	 * Is this generation outdated already?  As long as this resource sticks
1289	 * in the idr, it will be scheduled again for a newer generation or at
1290	 * shutdown.
1291	 */
1292	if (channel == -EAGAIN &&
1293	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
1294		goto out;
1295
1296	success = channel >= 0 || bandwidth > 0;
1297
1298	spin_lock_irq(&client->lock);
1299	/*
1300	 * Transit from allocation to reallocation, except if the client
1301	 * requested deallocation in the meantime.
1302	 */
1303	if (r->todo == ISO_RES_ALLOC)
1304		r->todo = ISO_RES_REALLOC;
1305	/*
1306	 * Allocation or reallocation failure?  Pull this resource out of the
1307	 * idr and prepare for deletion, unless the client is shutting down.
1308	 */
1309	if (r->todo == ISO_RES_REALLOC && !success &&
1310	    !client->in_shutdown &&
1311	    idr_find(&client->resource_idr, r->resource.handle)) {
1312		idr_remove(&client->resource_idr, r->resource.handle);
1313		client_put(client);
1314		free = true;
1315	}
1316	spin_unlock_irq(&client->lock);
1317
1318	if (todo == ISO_RES_ALLOC && channel >= 0)
1319		r->channels = 1ULL << channel;
1320
1321	if (todo == ISO_RES_REALLOC && success)
1322		goto out;
1323
1324	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
1325		e = r->e_alloc;
1326		r->e_alloc = NULL;
1327	} else {
1328		e = r->e_dealloc;
1329		r->e_dealloc = NULL;
1330	}
1331	e->iso_resource.handle    = r->resource.handle;
1332	e->iso_resource.channel   = channel;
1333	e->iso_resource.bandwidth = bandwidth;
1334
1335	queue_event(client, &e->event,
1336		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);
1337
1338	if (free) {
1339		cancel_delayed_work(&r->work);
1340		kfree(r->e_alloc);
1341		kfree(r->e_dealloc);
1342		kfree(r);
1343	}
1344 out:
1345	client_put(client);
1346}
1347
1348static void release_iso_resource(struct client *client,
1349				 struct client_resource *resource)
1350{
1351	struct iso_resource *r =
1352		container_of(resource, struct iso_resource, resource);
1353
1354	spin_lock_irq(&client->lock);
1355	r->todo = ISO_RES_DEALLOC;
1356	schedule_iso_resource(r, 0);
1357	spin_unlock_irq(&client->lock);
1358}
1359
1360static int init_iso_resource(struct client *client,
1361		struct fw_cdev_allocate_iso_resource *request, int todo)
1362{
1363	struct iso_resource_event *e1, *e2;
1364	struct iso_resource *r;
1365	int ret;
1366
1367	if ((request->channels == 0 && request->bandwidth == 0) ||
1368	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
1369	    request->bandwidth < 0)
1370		return -EINVAL;
1371
1372	r  = kmalloc(sizeof(*r), GFP_KERNEL);
1373	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
1374	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
1375	if (r == NULL || e1 == NULL || e2 == NULL) {
1376		ret = -ENOMEM;
1377		goto fail;
1378	}
1379
1380	INIT_DELAYED_WORK(&r->work, iso_resource_work);
1381	r->client	= client;
1382	r->todo		= todo;
1383	r->generation	= -1;
1384	r->channels	= request->channels;
1385	r->bandwidth	= request->bandwidth;
1386	r->e_alloc	= e1;
1387	r->e_dealloc	= e2;
1388
1389	e1->iso_resource.closure = request->closure;
1390	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
1391	e2->iso_resource.closure = request->closure;
1392	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
1393
1394	if (todo == ISO_RES_ALLOC) {
1395		r->resource.release = release_iso_resource;
1396		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
1397		if (ret < 0)
1398			goto fail;
1399	} else {
1400		r->resource.release = NULL;
1401		r->resource.handle = -1;
1402		schedule_iso_resource(r, 0);
1403	}
1404	request->handle = r->resource.handle;
1405
1406	return 0;
1407 fail:
1408	kfree(r);
1409	kfree(e1);
1410	kfree(e2);
1411
1412	return ret;
1413}
1414
1415static int ioctl_allocate_iso_resource(struct client *client,
1416				       union ioctl_arg *arg)
1417{
1418	return init_iso_resource(client,
1419			&arg->allocate_iso_resource, ISO_RES_ALLOC);
1420}
1421
1422static int ioctl_deallocate_iso_resource(struct client *client,
1423					 union ioctl_arg *arg)
1424{
1425	return release_client_resource(client,
1426			arg->deallocate.handle, release_iso_resource, NULL);
1427}
1428
1429static int ioctl_allocate_iso_resource_once(struct client *client,
1430					    union ioctl_arg *arg)
1431{
1432	return init_iso_resource(client,
1433			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
1434}
1435
1436static int ioctl_deallocate_iso_resource_once(struct client *client,
1437					      union ioctl_arg *arg)
1438{
1439	return init_iso_resource(client,
1440			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
1441}
1442
1443/*
1444 * Returns a speed code:  Maximum speed to or from this device,
1445 * limited by the device's link speed, the local node's link speed,
1446 * and all PHY port speeds between the two links.
1447 */
1448static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
1449{
1450	return client->device->max_speed;
1451}
1452
1453static int ioctl_send_broadcast_request(struct client *client,
1454					union ioctl_arg *arg)
1455{
1456	struct fw_cdev_send_request *a = &arg->send_request;
1457
1458	switch (a->tcode) {
1459	case TCODE_WRITE_QUADLET_REQUEST:
1460	case TCODE_WRITE_BLOCK_REQUEST:
1461		break;
1462	default:
1463		return -EINVAL;
1464	}
1465
1466	/* Security policy: Only allow accesses to Units Space. */
1467	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1468		return -EACCES;
1469
1470	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
1471}
1472
1473static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
1474{
1475	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
1476	struct fw_cdev_send_request request;
1477	int dest;
1478
1479	if (a->speed > client->device->card->link_speed ||
1480	    a->length > 1024 << a->speed)
1481		return -EIO;
1482
1483	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
1484		return -EINVAL;
1485
1486	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
1487	request.tcode		= TCODE_STREAM_DATA;
1488	request.length		= a->length;
1489	request.closure		= a->closure;
1490	request.data		= a->data;
1491	request.generation	= a->generation;
1492
1493	return init_request(client, &request, dest, a->speed);
1494}
1495
1496static void outbound_phy_packet_callback(struct fw_packet *packet,
1497					 struct fw_card *card, int status)
1498{
1499	struct outbound_phy_packet_event *e =
1500		container_of(packet, struct outbound_phy_packet_event, p);
1501
1502	switch (status) {
1503	/* expected: */
1504	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
1505	/* should never happen with PHY packets: */
1506	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
1507	case ACK_BUSY_X:
1508	case ACK_BUSY_A:
1509	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
1510	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
1511	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
1512	/* stale generation; cancelled; on certain controllers: no ack */
1513	default:		e->phy_packet.rcode = status;		break;
1514	}
1515	e->phy_packet.data[0] = packet->timestamp;
1516
1517	queue_event(e->client, &e->event, &e->phy_packet,
1518		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
1519	client_put(e->client);
1520}
1521
1522static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
1523{
1524	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
1525	struct fw_card *card = client->device->card;
1526	struct outbound_phy_packet_event *e;
1527
1528	/* Access policy: Allow this ioctl only on local nodes' device files. */
1529	if (!client->device->is_local)
1530		return -ENOSYS;
1531
1532	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
1533	if (e == NULL)
1534		return -ENOMEM;
1535
1536	client_get(client);
1537	e->client		= client;
1538	e->p.speed		= SCODE_100;
1539	e->p.generation		= a->generation;
1540	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
1541	e->p.header[1]		= a->data[0];
1542	e->p.header[2]		= a->data[1];
1543	e->p.header_length	= 12;
1544	e->p.callback		= outbound_phy_packet_callback;
1545	e->phy_packet.closure	= a->closure;
1546	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
1547	if (is_ping_packet(a->data))
1548			e->phy_packet.length = 4;
1549
1550	card->driver->send_request(card, &e->p);
1551
1552	return 0;
1553}
1554
1555static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
1556{
1557	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
1558	struct fw_card *card = client->device->card;
1559
1560	/* Access policy: Allow this ioctl only on local nodes' device files. */
1561	if (!client->device->is_local)
1562		return -ENOSYS;
1563
1564	spin_lock_irq(&card->lock);
1565
1566	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
1567	client->phy_receiver_closure = a->closure;
1568
1569	spin_unlock_irq(&card->lock);
1570
1571	return 0;
1572}
1573
1574void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
1575{
1576	struct client *client;
1577	struct inbound_phy_packet_event *e;
1578	unsigned long flags;
1579
1580	spin_lock_irqsave(&card->lock, flags);
1581
1582	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
1583		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
1584		if (e == NULL) {
1585			fw_notice(card, "out of memory when allocating event\n");
1586			break;
1587		}
1588		e->phy_packet.closure	= client->phy_receiver_closure;
1589		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
1590		e->phy_packet.rcode	= RCODE_COMPLETE;
1591		e->phy_packet.length	= 8;
1592		e->phy_packet.data[0]	= p->header[1];
1593		e->phy_packet.data[1]	= p->header[2];
1594		queue_event(client, &e->event,
1595			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
1596	}
1597
1598	spin_unlock_irqrestore(&card->lock, flags);
1599}
1600
1601static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
1602	[0x00] = ioctl_get_info,
1603	[0x01] = ioctl_send_request,
1604	[0x02] = ioctl_allocate,
1605	[0x03] = ioctl_deallocate,
1606	[0x04] = ioctl_send_response,
1607	[0x05] = ioctl_initiate_bus_reset,
1608	[0x06] = ioctl_add_descriptor,
1609	[0x07] = ioctl_remove_descriptor,
1610	[0x08] = ioctl_create_iso_context,
1611	[0x09] = ioctl_queue_iso,
1612	[0x0a] = ioctl_start_iso,
1613	[0x0b] = ioctl_stop_iso,
1614	[0x0c] = ioctl_get_cycle_timer,
1615	[0x0d] = ioctl_allocate_iso_resource,
1616	[0x0e] = ioctl_deallocate_iso_resource,
1617	[0x0f] = ioctl_allocate_iso_resource_once,
1618	[0x10] = ioctl_deallocate_iso_resource_once,
1619	[0x11] = ioctl_get_speed,
1620	[0x12] = ioctl_send_broadcast_request,
1621	[0x13] = ioctl_send_stream_packet,
1622	[0x14] = ioctl_get_cycle_timer2,
1623	[0x15] = ioctl_send_phy_packet,
1624	[0x16] = ioctl_receive_phy_packets,
1625	[0x17] = ioctl_set_iso_channels,
1626	[0x18] = ioctl_flush_iso,
1627};
1628
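/*
 * The ioctl numbers in <linux/firewire-cdev.h> are all encoded with
 * _IO*('#', nr, ...), so _IOC_TYPE(cmd) is '#' and _IOC_NR(cmd) indexes
 * ioctl_handlers[] above; for example
 *
 *	#define FW_CDEV_IOC_GET_INFO  _IOWR('#', 0x00, struct fw_cdev_get_info)
 *
 * dispatches to ioctl_get_info().
 */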
1629static int dispatch_ioctl(struct client *client,
1630			  unsigned int cmd, void __user *arg)
1631{
1632	union ioctl_arg buffer;
1633	int ret;
1634
1635	if (fw_device_is_shutdown(client->device))
1636		return -ENODEV;
1637
1638	if (_IOC_TYPE(cmd) != '#' ||
1639	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
1640	    _IOC_SIZE(cmd) > sizeof(buffer))
1641		return -ENOTTY;
1642
1643	if (_IOC_DIR(cmd) == _IOC_READ)
1644		memset(&buffer, 0, _IOC_SIZE(cmd));
1645
1646	if (_IOC_DIR(cmd) & _IOC_WRITE)
1647		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
1648			return -EFAULT;
1649
1650	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
1651	if (ret < 0)
1652		return ret;
1653
1654	if (_IOC_DIR(cmd) & _IOC_READ)
1655		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
1656			return -EFAULT;
1657
1658	return ret;
1659}
1660
1661static long fw_device_op_ioctl(struct file *file,
1662			       unsigned int cmd, unsigned long arg)
1663{
1664	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
1665}
1666
1667#ifdef CONFIG_COMPAT
1668static long fw_device_op_compat_ioctl(struct file *file,
1669				      unsigned int cmd, unsigned long arg)
1670{
1671	return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
1672}
1673#endif
1674
1675static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
1676{
1677	struct client *client = file->private_data;
1678	unsigned long size;
1679	int page_count, ret;
1680
1681	if (fw_device_is_shutdown(client->device))
1682		return -ENODEV;
1683
1684	/* FIXME: We could support multiple buffers, but we don't. */
1685	if (client->buffer.pages != NULL)
1686		return -EBUSY;
1687
1688	if (!(vma->vm_flags & VM_SHARED))
1689		return -EINVAL;
1690
1691	if (vma->vm_start & ~PAGE_MASK)
1692		return -EINVAL;
1693
1694	client->vm_start = vma->vm_start;
1695	size = vma->vm_end - vma->vm_start;
1696	page_count = size >> PAGE_SHIFT;
1697	if (size & ~PAGE_MASK)
1698		return -EINVAL;
1699
1700	ret = fw_iso_buffer_alloc(&client->buffer, page_count);
1701	if (ret < 0)
1702		return ret;
1703
1704	spin_lock_irq(&client->lock);
1705	if (client->iso_context) {
1706		ret = fw_iso_buffer_map_dma(&client->buffer,
1707				client->device->card,
1708				iso_dma_direction(client->iso_context));
1709		client->buffer_is_mapped = (ret == 0);
1710	}
1711	spin_unlock_irq(&client->lock);
1712	if (ret < 0)
1713		goto fail;
1714
1715	ret = fw_iso_buffer_map_vma(&client->buffer, vma);
1716	if (ret < 0)
1717		goto fail;
1718
1719	return 0;
1720 fail:
1721	fw_iso_buffer_destroy(&client->buffer, client->device->card);
1722	return ret;
1723}
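/*
 * Illustrative userspace sketch: a client maps one buffer and then passes
 * addresses inside that mapping as packet payloads; ioctl_queue_iso() above
 * converts them back to buffer offsets via client->vm_start.
 *
 *	void *buf = mmap(NULL, 64 * 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	struct fw_cdev_queue_iso q = {0};
 *	q.data = (__u64)(uintptr_t)buf;    // first packet's payload address
 */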
1724
1725static int is_outbound_transaction_resource(int id, void *p, void *data)
1726{
1727	struct client_resource *resource = p;
1728
1729	return resource->release == release_transaction;
1730}
1731
1732static int has_outbound_transactions(struct client *client)
1733{
1734	int ret;
1735
1736	spin_lock_irq(&client->lock);
1737	ret = idr_for_each(&client->resource_idr,
1738			   is_outbound_transaction_resource, NULL);
1739	spin_unlock_irq(&client->lock);
1740
1741	return ret;
1742}
1743
1744static int shutdown_resource(int id, void *p, void *data)
1745{
1746	struct client_resource *resource = p;
1747	struct client *client = data;
1748
1749	resource->release(client, resource);
1750	client_put(client);
1751
1752	return 0;
1753}
1754
1755static int fw_device_op_release(struct inode *inode, struct file *file)
1756{
1757	struct client *client = file->private_data;
1758	struct event *event, *next_event;
1759
1760	spin_lock_irq(&client->device->card->lock);
1761	list_del(&client->phy_receiver_link);
1762	spin_unlock_irq(&client->device->card->lock);
1763
1764	mutex_lock(&client->device->client_list_mutex);
1765	list_del(&client->link);
1766	mutex_unlock(&client->device->client_list_mutex);
1767
1768	if (client->iso_context)
1769		fw_iso_context_destroy(client->iso_context);
1770
1771	if (client->buffer.pages)
1772		fw_iso_buffer_destroy(&client->buffer, client->device->card);
1773
1774	/* Freeze client->resource_idr and client->event_list */
1775	spin_lock_irq(&client->lock);
1776	client->in_shutdown = true;
1777	spin_unlock_irq(&client->lock);
1778
1779	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
1780
1781	idr_for_each(&client->resource_idr, shutdown_resource, client);
1782	idr_remove_all(&client->resource_idr);
1783	idr_destroy(&client->resource_idr);
1784
1785	list_for_each_entry_safe(event, next_event, &client->event_list, link)
1786		kfree(event);
1787
1788	client_put(client);
1789
1790	return 0;
1791}
1792
1793static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
1794{
1795	struct client *client = file->private_data;
1796	unsigned int mask = 0;
1797
1798	poll_wait(file, &client->wait, pt);
1799
1800	if (fw_device_is_shutdown(client->device))
1801		mask |= POLLHUP | POLLERR;
1802	if (!list_empty(&client->event_list))
1803		mask |= POLLIN | POLLRDNORM;
1804
1805	return mask;
1806}
1807
1808const struct file_operations fw_device_ops = {
1809	.owner		= THIS_MODULE,
1810	.llseek		= no_llseek,
1811	.open		= fw_device_op_open,
1812	.read		= fw_device_op_read,
1813	.unlocked_ioctl	= fw_device_op_ioctl,
1814	.mmap		= fw_device_op_mmap,
1815	.release	= fw_device_op_release,
1816	.poll		= fw_device_op_poll,
1817#ifdef CONFIG_COMPAT
1818	.compat_ioctl	= fw_device_op_compat_ioctl,
1819#endif
1820};
 201	struct fw_cdev_event_phy_packet phy_packet;
 202};
 203
 204struct inbound_phy_packet_event {
 205	struct event event;
 206	struct fw_cdev_event_phy_packet phy_packet;
 207};
 208
 209#ifdef CONFIG_COMPAT
 210static void __user *u64_to_uptr(u64 value)
 211{
 212	if (in_compat_syscall())
 213		return compat_ptr(value);
 214	else
 215		return (void __user *)(unsigned long)value;
 216}
 217
 218static u64 uptr_to_u64(void __user *ptr)
 219{
 220	if (in_compat_syscall())
 221		return ptr_to_compat(ptr);
 222	else
 223		return (u64)(unsigned long)ptr;
 224}
 225#else
 226static inline void __user *u64_to_uptr(u64 value)
 227{
 228	return (void __user *)(unsigned long)value;
 229}
 230
 231static inline u64 uptr_to_u64(void __user *ptr)
 232{
 233	return (u64)(unsigned long)ptr;
 234}
 235#endif /* CONFIG_COMPAT */
 236
 237static int fw_device_op_open(struct inode *inode, struct file *file)
 238{
 239	struct fw_device *device;
 240	struct client *client;
 241
 242	device = fw_device_get_by_devt(inode->i_rdev);
 243	if (device == NULL)
 244		return -ENODEV;
 245
 246	if (fw_device_is_shutdown(device)) {
 247		fw_device_put(device);
 248		return -ENODEV;
 249	}
 250
 251	client = kzalloc(sizeof(*client), GFP_KERNEL);
 252	if (client == NULL) {
 253		fw_device_put(device);
 254		return -ENOMEM;
 255	}
 256
 257	client->device = device;
 258	spin_lock_init(&client->lock);
 259	idr_init(&client->resource_idr);
 260	INIT_LIST_HEAD(&client->event_list);
 261	init_waitqueue_head(&client->wait);
 262	init_waitqueue_head(&client->tx_flush_wait);
 263	INIT_LIST_HEAD(&client->phy_receiver_link);
 264	INIT_LIST_HEAD(&client->link);
 265	kref_init(&client->kref);
 266
 267	file->private_data = client;
 268
 269	return nonseekable_open(inode, file);
 270}
 271
 272static void queue_event(struct client *client, struct event *event,
 273			void *data0, size_t size0, void *data1, size_t size1)
 274{
 275	unsigned long flags;
 276
 277	event->v[0].data = data0;
 278	event->v[0].size = size0;
 279	event->v[1].data = data1;
 280	event->v[1].size = size1;
 281
 282	spin_lock_irqsave(&client->lock, flags);
 283	if (client->in_shutdown)
 284		kfree(event);
 285	else
 286		list_add_tail(&event->link, &client->event_list);
 287	spin_unlock_irqrestore(&client->lock, flags);
 288
 289	wake_up_interruptible(&client->wait);
 290}
 291
 292static int dequeue_event(struct client *client,
 293			 char __user *buffer, size_t count)
 294{
 295	struct event *event;
 296	size_t size, total;
 297	int i, ret;
 298
 299	ret = wait_event_interruptible(client->wait,
 300			!list_empty(&client->event_list) ||
 301			fw_device_is_shutdown(client->device));
 302	if (ret < 0)
 303		return ret;
 304
 305	if (list_empty(&client->event_list) &&
 306		       fw_device_is_shutdown(client->device))
 307		return -ENODEV;
 308
 309	spin_lock_irq(&client->lock);
 310	event = list_first_entry(&client->event_list, struct event, link);
 311	list_del(&event->link);
 312	spin_unlock_irq(&client->lock);
 313
 314	total = 0;
 315	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
 316		size = min(event->v[i].size, count - total);
 317		if (copy_to_user(buffer + total, event->v[i].data, size)) {
 318			ret = -EFAULT;
 319			goto out;
 320		}
 321		total += size;
 322	}
 323	ret = total;
 324
 325 out:
 326	kfree(event);
 327
 328	return ret;
 329}
 330
 331static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
 332				 size_t count, loff_t *offset)
 333{
 334	struct client *client = file->private_data;
 335
 336	return dequeue_event(client, buffer, count);
 337}
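
/*
 * Illustrative userspace sketch (not part of this driver): a minimal event
 * loop over the read() interface served by dequeue_event() above.  Assumes an
 * already opened /dev/fw* file descriptor and the event types and
 * union fw_cdev_event layout from <linux/firewire-cdev.h>; the 16 KiB buffer
 * size is an arbitrary choice large enough for typical events.
 */
#if 0	/* example, not built as part of this file */
#include <stdio.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

static void read_events(int fd)
{
	__u64 buf[2048];	/* 16 KiB, suitably aligned for union fw_cdev_event */
	union fw_cdev_event *event = (union fw_cdev_event *)buf;
	ssize_t len;

	for (;;) {
		len = read(fd, buf, sizeof(buf));	/* blocks until an event is queued */
		if (len < 0)
			break;

		switch (event->common.type) {
		case FW_CDEV_EVENT_BUS_RESET:
			printf("bus reset, generation %u\n",
			       event->bus_reset.generation);
			break;
		case FW_CDEV_EVENT_RESPONSE:
			printf("response, rcode %u, %u byte(s) of payload\n",
			       event->response.rcode, event->response.length);
			break;
		default:
			break;
		}
	}
}
#endif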
 338
 339static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
 340				 struct client *client)
 341{
 342	struct fw_card *card = client->device->card;
 343
 344	spin_lock_irq(&card->lock);
 345
 346	event->closure	     = client->bus_reset_closure;
 347	event->type          = FW_CDEV_EVENT_BUS_RESET;
 348	event->generation    = client->device->generation;
 349	event->node_id       = client->device->node_id;
 350	event->local_node_id = card->local_node->node_id;
 351	event->bm_node_id    = card->bm_node_id;
 352	event->irm_node_id   = card->irm_node->node_id;
 353	event->root_node_id  = card->root_node->node_id;
 354
 355	spin_unlock_irq(&card->lock);
 356}
 357
 358static void for_each_client(struct fw_device *device,
 359			    void (*callback)(struct client *client))
 360{
 361	struct client *c;
 362
 363	mutex_lock(&device->client_list_mutex);
 364	list_for_each_entry(c, &device->client_list, link)
 365		callback(c);
 366	mutex_unlock(&device->client_list_mutex);
 367}
 368
 369static int schedule_reallocations(int id, void *p, void *data)
 370{
 371	schedule_if_iso_resource(p);
 372
 373	return 0;
 374}
 375
 376static void queue_bus_reset_event(struct client *client)
 377{
 378	struct bus_reset_event *e;
 379
 380	e = kzalloc(sizeof(*e), GFP_KERNEL);
 381	if (e == NULL)
 382		return;
 383
 384	fill_bus_reset_event(&e->reset, client);
 385
 386	queue_event(client, &e->event,
 387		    &e->reset, sizeof(e->reset), NULL, 0);
 388
 389	spin_lock_irq(&client->lock);
 390	idr_for_each(&client->resource_idr, schedule_reallocations, client);
 391	spin_unlock_irq(&client->lock);
 392}
 393
 394void fw_device_cdev_update(struct fw_device *device)
 395{
 396	for_each_client(device, queue_bus_reset_event);
 397}
 398
 399static void wake_up_client(struct client *client)
 400{
 401	wake_up_interruptible(&client->wait);
 402}
 403
 404void fw_device_cdev_remove(struct fw_device *device)
 405{
 406	for_each_client(device, wake_up_client);
 407}
 408
 409union ioctl_arg {
 410	struct fw_cdev_get_info			get_info;
 411	struct fw_cdev_send_request		send_request;
 412	struct fw_cdev_allocate			allocate;
 413	struct fw_cdev_deallocate		deallocate;
 414	struct fw_cdev_send_response		send_response;
 415	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
 416	struct fw_cdev_add_descriptor		add_descriptor;
 417	struct fw_cdev_remove_descriptor	remove_descriptor;
 418	struct fw_cdev_create_iso_context	create_iso_context;
 419	struct fw_cdev_queue_iso		queue_iso;
 420	struct fw_cdev_start_iso		start_iso;
 421	struct fw_cdev_stop_iso			stop_iso;
 422	struct fw_cdev_get_cycle_timer		get_cycle_timer;
 423	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
 424	struct fw_cdev_send_stream_packet	send_stream_packet;
 425	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
 426	struct fw_cdev_send_phy_packet		send_phy_packet;
 427	struct fw_cdev_receive_phy_packets	receive_phy_packets;
 428	struct fw_cdev_set_iso_channels		set_iso_channels;
 429	struct fw_cdev_flush_iso		flush_iso;
 430};
 431
 432static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
 433{
 434	struct fw_cdev_get_info *a = &arg->get_info;
 435	struct fw_cdev_event_bus_reset bus_reset;
 436	unsigned long ret = 0;
 437
 438	client->version = a->version;
 439	a->version = FW_CDEV_KERNEL_VERSION;
 440	a->card = client->device->card->index;
 441
 442	down_read(&fw_device_rwsem);
 443
 444	if (a->rom != 0) {
 445		size_t want = a->rom_length;
 446		size_t have = client->device->config_rom_length * 4;
 447
 448		ret = copy_to_user(u64_to_uptr(a->rom),
 449				   client->device->config_rom, min(want, have));
 450	}
 451	a->rom_length = client->device->config_rom_length * 4;
 452
 453	up_read(&fw_device_rwsem);
 454
 455	if (ret != 0)
 456		return -EFAULT;
 457
 458	mutex_lock(&client->device->client_list_mutex);
 459
 460	client->bus_reset_closure = a->bus_reset_closure;
 461	if (a->bus_reset != 0) {
 462		fill_bus_reset_event(&bus_reset, client);
 463		/* unaligned size of bus_reset is 36 bytes */
 464		ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
 465	}
 466	if (ret == 0 && list_empty(&client->link))
 467		list_add_tail(&client->link, &client->device->client_list);
 468
 469	mutex_unlock(&client->device->client_list_mutex);
 470
 471	return ret ? -EFAULT : 0;
 472}
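
/*
 * Illustrative userspace sketch (not part of this driver): negotiating the
 * ABI version and reading the Configuration ROM through the ioctl handled by
 * ioctl_get_info() above.  Assumes FW_CDEV_IOC_GET_INFO and the
 * struct fw_cdev_get_info layout from <linux/firewire-cdev.h>; the device
 * node name is just an example.
 */
#if 0	/* example, not built as part of this file */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	__u32 rom[256];				/* a config ROM is at most 1024 bytes */
	struct fw_cdev_get_info info = {
		.version    = 5,		/* ABI version implemented by this client */
		.rom        = (__u64)(unsigned long)rom,
		.rom_length = sizeof(rom),
		.bus_reset  = 0,		/* no initial bus reset event requested */
	};
	int fd = open("/dev/fw0", O_RDWR);

	if (fd < 0 || ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
		return 1;

	printf("kernel ABI version %u, card %u, ROM length %u bytes\n",
	       info.version, info.card, info.rom_length);
	return 0;
}
#endif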
 473
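/*
 * Resource handles come from client->resource_idr, which is only modified
 * under client->lock.  Callers that may block pass a sleeping gfp_mask, so
 * idr_preload() can pre-allocate idr nodes and the idr_alloc(..., GFP_NOWAIT)
 * below normally succeeds without sleeping inside the spinlock;
 * idr_preload_end() re-enables preemption afterwards.
 */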
 474static int add_client_resource(struct client *client,
 475			       struct client_resource *resource, gfp_t gfp_mask)
 476{
 477	bool preload = gfpflags_allow_blocking(gfp_mask);
 478	unsigned long flags;
 479	int ret;
 480
 481	if (preload)
 482		idr_preload(gfp_mask);
 483	spin_lock_irqsave(&client->lock, flags);
 484
 485	if (client->in_shutdown)
 486		ret = -ECANCELED;
 487	else
 488		ret = idr_alloc(&client->resource_idr, resource, 0, 0,
 489				GFP_NOWAIT);
 490	if (ret >= 0) {
 491		resource->handle = ret;
 492		client_get(client);
 493		schedule_if_iso_resource(resource);
 494	}
 495
 496	spin_unlock_irqrestore(&client->lock, flags);
 497	if (preload)
 498		idr_preload_end();
 499
 500	return ret < 0 ? ret : 0;
 501}
 502
 503static int release_client_resource(struct client *client, u32 handle,
 504				   client_resource_release_fn_t release,
 505				   struct client_resource **return_resource)
 506{
 507	struct client_resource *resource;
 508
 509	spin_lock_irq(&client->lock);
 510	if (client->in_shutdown)
 511		resource = NULL;
 512	else
 513		resource = idr_find(&client->resource_idr, handle);
 514	if (resource && resource->release == release)
 515		idr_remove(&client->resource_idr, handle);
 516	spin_unlock_irq(&client->lock);
 517
 518	if (!(resource && resource->release == release))
 519		return -EINVAL;
 520
 521	if (return_resource)
 522		*return_resource = resource;
 523	else
 524		resource->release(client, resource);
 525
 526	client_put(client);
 527
 528	return 0;
 529}
 530
 531static void release_transaction(struct client *client,
 532				struct client_resource *resource)
 533{
 534}
 535
 536static void complete_transaction(struct fw_card *card, int rcode,
 537				 void *payload, size_t length, void *data)
 538{
 539	struct outbound_transaction_event *e = data;
 540	struct fw_cdev_event_response *rsp = &e->response;
 541	struct client *client = e->client;
 542	unsigned long flags;
 543
 544	if (length < rsp->length)
 545		rsp->length = length;
 546	if (rcode == RCODE_COMPLETE)
 547		memcpy(rsp->data, payload, rsp->length);
 548
 549	spin_lock_irqsave(&client->lock, flags);
 550	idr_remove(&client->resource_idr, e->r.resource.handle);
 551	if (client->in_shutdown)
 552		wake_up(&client->tx_flush_wait);
 553	spin_unlock_irqrestore(&client->lock, flags);
 554
 555	rsp->type = FW_CDEV_EVENT_RESPONSE;
 556	rsp->rcode = rcode;
 557
 558	/*
 559	 * In the case that sizeof(*rsp) doesn't align with the position of the
 560	 * data, and the read is short, preserve an extra copy of the data
 561	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
 562	 * for short reads and some apps depended on it, this is both safe
 563	 * and prudent for compatibility.
 564	 */
 565	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
 566		queue_event(client, &e->event, rsp, sizeof(*rsp),
 567			    rsp->data, rsp->length);
 568	else
 569		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
 570			    NULL, 0);
 571
 572	/* Drop the idr's reference */
 573	client_put(client);
 574}
 575
 576static int init_request(struct client *client,
 577			struct fw_cdev_send_request *request,
 578			int destination_id, int speed)
 579{
 580	struct outbound_transaction_event *e;
 581	int ret;
 582
 583	if (request->tcode != TCODE_STREAM_DATA &&
 584	    (request->length > 4096 || request->length > 512 << speed))
 585		return -EIO;
 586
 587	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
 588	    request->length < 4)
 589		return -EINVAL;
 590
 591	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
 592	if (e == NULL)
 593		return -ENOMEM;
 594
 595	e->client = client;
 596	e->response.length = request->length;
 597	e->response.closure = request->closure;
 598
 599	if (request->data &&
 600	    copy_from_user(e->response.data,
 601			   u64_to_uptr(request->data), request->length)) {
 602		ret = -EFAULT;
 603		goto failed;
 604	}
 605
 606	e->r.resource.release = release_transaction;
 607	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
 608	if (ret < 0)
 609		goto failed;
 610
 611	fw_send_request(client->device->card, &e->r.transaction,
 612			request->tcode, destination_id, request->generation,
 613			speed, request->offset, e->response.data,
 614			request->length, complete_transaction, e);
 615	return 0;
 616
 617 failed:
 618	kfree(e);
 619
 620	return ret;
 621}
 622
 623static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
 624{
 625	switch (arg->send_request.tcode) {
 626	case TCODE_WRITE_QUADLET_REQUEST:
 627	case TCODE_WRITE_BLOCK_REQUEST:
 628	case TCODE_READ_QUADLET_REQUEST:
 629	case TCODE_READ_BLOCK_REQUEST:
 630	case TCODE_LOCK_MASK_SWAP:
 631	case TCODE_LOCK_COMPARE_SWAP:
 632	case TCODE_LOCK_FETCH_ADD:
 633	case TCODE_LOCK_LITTLE_ADD:
 634	case TCODE_LOCK_BOUNDED_ADD:
 635	case TCODE_LOCK_WRAP_ADD:
 636	case TCODE_LOCK_VENDOR_DEPENDENT:
 637		break;
 638	default:
 639		return -EINVAL;
 640	}
 641
 642	return init_request(client, &arg->send_request, client->device->node_id,
 643			    client->device->max_speed);
 644}
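
/*
 * Illustrative userspace sketch (not part of this driver): submitting an
 * outbound quadlet read through ioctl_send_request()/init_request() above.
 * Assumes FW_CDEV_IOC_SEND_REQUEST, struct fw_cdev_send_request and the
 * TCODE_* constants from <linux/firewire-cdev.h>; fd and generation are
 * expected to come from open() and a prior FW_CDEV_IOC_GET_INFO or
 * FW_CDEV_EVENT_BUS_RESET.  Completion is reported asynchronously as an
 * FW_CDEV_EVENT_RESPONSE event carrying the same closure.
 */
#if 0	/* example, not built as part of this file */
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

#define CSR_BASE	0xfffff0000000ULL	/* CSR register space per IEEE 1212 */

static int read_config_rom_quadlet(int fd, __u32 generation)
{
	struct fw_cdev_send_request req = {
		.tcode      = TCODE_READ_QUADLET_REQUEST,
		.length     = 4,
		.offset     = CSR_BASE + 0x400,	/* first quadlet of the config ROM */
		.closure    = 0xc0ffee,		/* echoed back in the response event */
		.data       = 0,		/* a read carries no outbound payload */
		.generation = generation,
	};

	return ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &req);
}
#endif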
 645
 646static inline bool is_fcp_request(struct fw_request *request)
 647{
 648	return request == NULL;
 649}
 650
 651static void release_request(struct client *client,
 652			    struct client_resource *resource)
 653{
 654	struct inbound_transaction_resource *r = container_of(resource,
 655			struct inbound_transaction_resource, resource);
 656
 657	if (is_fcp_request(r->request))
 658		kfree(r->data);
 659	else
 660		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);
 661
 662	fw_card_put(r->card);
 663	kfree(r);
 664}
 665
 666static void handle_request(struct fw_card *card, struct fw_request *request,
 667			   int tcode, int destination, int source,
 668			   int generation, unsigned long long offset,
 669			   void *payload, size_t length, void *callback_data)
 670{
 671	struct address_handler_resource *handler = callback_data;
 672	struct inbound_transaction_resource *r;
 673	struct inbound_transaction_event *e;
 674	size_t event_size0;
 675	void *fcp_frame = NULL;
 676	int ret;
 677
 678	/* card may be different from handler->client->device->card */
 679	fw_card_get(card);
 680
 681	r = kmalloc(sizeof(*r), GFP_ATOMIC);
 682	e = kmalloc(sizeof(*e), GFP_ATOMIC);
 683	if (r == NULL || e == NULL)
 684		goto failed;
 685
 686	r->card    = card;
 687	r->request = request;
 688	r->data    = payload;
 689	r->length  = length;
 690
 691	if (is_fcp_request(request)) {
 692		/*
 693		 * FIXME: Let core-transaction.c manage a
 694		 * single reference-counted copy?
 695		 */
 696		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
 697		if (fcp_frame == NULL)
 698			goto failed;
 699
 700		r->data = fcp_frame;
 701	}
 702
 703	r->resource.release = release_request;
 704	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
 705	if (ret < 0)
 706		goto failed;
 707
 708	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
 709		struct fw_cdev_event_request *req = &e->req.request;
 710
 711		if (tcode & 0x10)
 712			tcode = TCODE_LOCK_REQUEST;
 713
 714		req->type	= FW_CDEV_EVENT_REQUEST;
 715		req->tcode	= tcode;
 716		req->offset	= offset;
 717		req->length	= length;
 718		req->handle	= r->resource.handle;
 719		req->closure	= handler->closure;
 720		event_size0	= sizeof(*req);
 721	} else {
 722		struct fw_cdev_event_request2 *req = &e->req.request2;
 723
 724		req->type	= FW_CDEV_EVENT_REQUEST2;
 725		req->tcode	= tcode;
 726		req->offset	= offset;
 727		req->source_node_id = source;
 728		req->destination_node_id = destination;
 729		req->card	= card->index;
 730		req->generation	= generation;
 731		req->length	= length;
 732		req->handle	= r->resource.handle;
 733		req->closure	= handler->closure;
 734		event_size0	= sizeof(*req);
 735	}
 736
 737	queue_event(handler->client, &e->event,
 738		    &e->req, event_size0, r->data, length);
 739	return;
 740
 741 failed:
 742	kfree(r);
 743	kfree(e);
 744	kfree(fcp_frame);
 745
 746	if (!is_fcp_request(request))
 747		fw_send_response(card, request, RCODE_CONFLICT_ERROR);
 748
 749	fw_card_put(card);
 750}
 751
 752static void release_address_handler(struct client *client,
 753				    struct client_resource *resource)
 754{
 755	struct address_handler_resource *r =
 756	    container_of(resource, struct address_handler_resource, resource);
 757
 758	fw_core_remove_address_handler(&r->handler);
 759	kfree(r);
 760}
 761
 762static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
 763{
 764	struct fw_cdev_allocate *a = &arg->allocate;
 765	struct address_handler_resource *r;
 766	struct fw_address_region region;
 767	int ret;
 768
 769	r = kmalloc(sizeof(*r), GFP_KERNEL);
 770	if (r == NULL)
 771		return -ENOMEM;
 772
 773	region.start = a->offset;
 774	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
 775		region.end = a->offset + a->length;
 776	else
 777		region.end = a->region_end;
 778
 779	r->handler.length           = a->length;
 780	r->handler.address_callback = handle_request;
 781	r->handler.callback_data    = r;
 782	r->closure   = a->closure;
 783	r->client    = client;
 784
 785	ret = fw_core_add_address_handler(&r->handler, &region);
 786	if (ret < 0) {
 787		kfree(r);
 788		return ret;
 789	}
 790	a->offset = r->handler.offset;
 791
 792	r->resource.release = release_address_handler;
 793	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
 794	if (ret < 0) {
 795		release_address_handler(client, &r->resource);
 796		return ret;
 797	}
 798	a->handle = r->resource.handle;
 799
 800	return 0;
 801}
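
/*
 * Illustrative userspace sketch (not part of this driver): claiming a range
 * of 1394 address space via ioctl_allocate() above, so that incoming requests
 * to it are delivered as FW_CDEV_EVENT_REQUEST/REQUEST2 events.  Assumes
 * FW_CDEV_IOC_ALLOCATE and struct fw_cdev_allocate from
 * <linux/firewire-cdev.h>; the offsets are arbitrary example values and the
 * caller must check the offset actually returned by the kernel.
 */
#if 0	/* example, not built as part of this file */
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int claim_address_range(int fd)
{
	struct fw_cdev_allocate alloc = {
		.offset     = 0xfffff0010000ULL,	/* lowest acceptable start */
		.region_end = 0xfffff0020000ULL,	/* only used with ABI version >= 4 */
		.length     = 0x1000,			/* size of the handled range */
		.closure    = 0xfeed,			/* echoed in each request event */
	};

	if (ioctl(fd, FW_CDEV_IOC_ALLOCATE, &alloc) < 0)
		return -1;

	/*
	 * alloc.offset now holds the start address that was actually
	 * allocated; alloc.handle is later passed to FW_CDEV_IOC_DEALLOCATE.
	 */
	return alloc.handle;
}
#endif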
 802
 803static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
 804{
 805	return release_client_resource(client, arg->deallocate.handle,
 806				       release_address_handler, NULL);
 807}
 808
 809static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
 810{
 811	struct fw_cdev_send_response *a = &arg->send_response;
 812	struct client_resource *resource;
 813	struct inbound_transaction_resource *r;
 814	int ret = 0;
 815
 816	if (release_client_resource(client, a->handle,
 817				    release_request, &resource) < 0)
 818		return -EINVAL;
 819
 820	r = container_of(resource, struct inbound_transaction_resource,
 821			 resource);
 822	if (is_fcp_request(r->request)) {
 823		kfree(r->data);
 824		goto out;
 825	}
 826
 827	if (a->length != fw_get_response_length(r->request)) {
 828		ret = -EINVAL;
 829		kfree(r->request);
 830		goto out;
 831	}
 832	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
 833		ret = -EFAULT;
 834		kfree(r->request);
 835		goto out;
 836	}
 837	fw_send_response(r->card, r->request, a->rcode);
 838 out:
 839	fw_card_put(r->card);
 840	kfree(r);
 841
 842	return ret;
 843}
 844
 845static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
 846{
 847	fw_schedule_bus_reset(client->device->card, true,
 848			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
 849	return 0;
 850}
 851
 852static void release_descriptor(struct client *client,
 853			       struct client_resource *resource)
 854{
 855	struct descriptor_resource *r =
 856		container_of(resource, struct descriptor_resource, resource);
 857
 858	fw_core_remove_descriptor(&r->descriptor);
 859	kfree(r);
 860}
 861
 862static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
 863{
 864	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
 865	struct descriptor_resource *r;
 866	int ret;
 867
 868	/* Access policy: Allow this ioctl only on local nodes' device files. */
 869	if (!client->device->is_local)
 870		return -ENOSYS;
 871
 872	if (a->length > 256)
 873		return -EINVAL;
 874
 875	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
 876	if (r == NULL)
 877		return -ENOMEM;
 878
 879	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
 880		ret = -EFAULT;
 881		goto failed;
 882	}
 883
 884	r->descriptor.length    = a->length;
 885	r->descriptor.immediate = a->immediate;
 886	r->descriptor.key       = a->key;
 887	r->descriptor.data      = r->data;
 888
 889	ret = fw_core_add_descriptor(&r->descriptor);
 890	if (ret < 0)
 891		goto failed;
 892
 893	r->resource.release = release_descriptor;
 894	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
 895	if (ret < 0) {
 896		fw_core_remove_descriptor(&r->descriptor);
 897		goto failed;
 898	}
 899	a->handle = r->resource.handle;
 900
 901	return 0;
 902 failed:
 903	kfree(r);
 904
 905	return ret;
 906}
 907
 908static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
 909{
 910	return release_client_resource(client, arg->remove_descriptor.handle,
 911				       release_descriptor, NULL);
 912}
 913
 914static void iso_callback(struct fw_iso_context *context, u32 cycle,
 915			 size_t header_length, void *header, void *data)
 916{
 917	struct client *client = data;
 918	struct iso_interrupt_event *e;
 919
 920	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
 921	if (e == NULL)
 922		return;
 923
 924	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
 925	e->interrupt.closure   = client->iso_closure;
 926	e->interrupt.cycle     = cycle;
 927	e->interrupt.header_length = header_length;
 928	memcpy(e->interrupt.header, header, header_length);
 929	queue_event(client, &e->event, &e->interrupt,
 930		    sizeof(e->interrupt) + header_length, NULL, 0);
 931}
 932
 933static void iso_mc_callback(struct fw_iso_context *context,
 934			    dma_addr_t completed, void *data)
 935{
 936	struct client *client = data;
 937	struct iso_interrupt_mc_event *e;
 938
 939	e = kmalloc(sizeof(*e), GFP_ATOMIC);
 940	if (e == NULL)
 941		return;
 942
 943	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
 944	e->interrupt.closure   = client->iso_closure;
 945	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
 946						      completed);
 947	queue_event(client, &e->event, &e->interrupt,
 948		    sizeof(e->interrupt), NULL, 0);
 949}
 950
 951static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
 952{
  953	if (context->type == FW_ISO_CONTEXT_TRANSMIT)
  954		return DMA_TO_DEVICE;
  955	else
  956		return DMA_FROM_DEVICE;
 957}
 958
 959static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
 960						fw_iso_mc_callback_t callback,
 961						void *callback_data)
 962{
 963	struct fw_iso_context *ctx;
 964
 965	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
 966				    0, 0, 0, NULL, callback_data);
 967	if (!IS_ERR(ctx))
 968		ctx->callback.mc = callback;
 969
 970	return ctx;
 971}
 972
 973static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 974{
 975	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
 976	struct fw_iso_context *context;
 977	union fw_iso_callback cb;
 978	int ret;
 979
 980	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
 981		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
 982		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
 983					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);
 984
 985	switch (a->type) {
 986	case FW_ISO_CONTEXT_TRANSMIT:
 987		if (a->speed > SCODE_3200 || a->channel > 63)
 988			return -EINVAL;
 989
 990		cb.sc = iso_callback;
 991		break;
 992
 993	case FW_ISO_CONTEXT_RECEIVE:
 994		if (a->header_size < 4 || (a->header_size & 3) ||
 995		    a->channel > 63)
 996			return -EINVAL;
 997
 998		cb.sc = iso_callback;
 999		break;
1000
1001	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
1002		cb.mc = iso_mc_callback;
1003		break;
1004
1005	default:
1006		return -EINVAL;
1007	}
1008
1009	if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
1010		context = fw_iso_mc_context_create(client->device->card, cb.mc,
1011						   client);
1012	else
1013		context = fw_iso_context_create(client->device->card, a->type,
1014						a->channel, a->speed,
1015						a->header_size, cb.sc, client);
1016	if (IS_ERR(context))
1017		return PTR_ERR(context);
1018	if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
1019		context->drop_overflow_headers = true;
1020
1021	/* We only support one context at this time. */
1022	spin_lock_irq(&client->lock);
1023	if (client->iso_context != NULL) {
1024		spin_unlock_irq(&client->lock);
1025		fw_iso_context_destroy(context);
1026
1027		return -EBUSY;
1028	}
1029	if (!client->buffer_is_mapped) {
1030		ret = fw_iso_buffer_map_dma(&client->buffer,
1031					    client->device->card,
1032					    iso_dma_direction(context));
1033		if (ret < 0) {
1034			spin_unlock_irq(&client->lock);
1035			fw_iso_context_destroy(context);
1036
1037			return ret;
1038		}
1039		client->buffer_is_mapped = true;
1040	}
1041	client->iso_closure = a->closure;
1042	client->iso_context = context;
1043	spin_unlock_irq(&client->lock);
1044
1045	a->handle = 0;
1046
1047	return 0;
1048}
1049
1050static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
1051{
1052	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
1053	struct fw_iso_context *ctx = client->iso_context;
1054
1055	if (ctx == NULL || a->handle != 0)
1056		return -EINVAL;
1057
1058	return fw_iso_context_set_channels(ctx, &a->channels);
1059}
1060
1061/* Macros for decoding the iso packet control header. */
1062#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
1063#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
1064#define GET_SKIP(v)		(((v) >> 17) & 0x01)
1065#define GET_TAG(v)		(((v) >> 18) & 0x03)
1066#define GET_SY(v)		(((v) >> 20) & 0x0f)
1067#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
1068
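/*
 * The control word layout decoded above (payload length in bits 0-15,
 * interrupt flag in bit 16, skip flag in bit 17, tag in bits 18-19, sy in
 * bits 20-23, header length in bits 24-31) is what userspace packs into
 * struct fw_cdev_iso_packet.control; the UAPI header offers FW_CDEV_ISO_*
 * helper macros for it.  A sketch with open-coded shifts mirroring the
 * GET_* macros:
 */
#if 0	/* example, not built as part of this file */
#include <linux/types.h>

static __u32 pack_iso_control(__u32 payload_length, int interrupt, int skip,
			      __u32 tag, __u32 sy, __u32 header_length)
{
	return (payload_length & 0xffff) |
	       ((!!interrupt) << 16) |
	       ((!!skip) << 17) |
	       ((tag & 0x03) << 18) |
	       ((sy & 0x0f) << 20) |
	       ((header_length & 0xff) << 24);
}
#endif
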
1069static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
1070{
1071	struct fw_cdev_queue_iso *a = &arg->queue_iso;
1072	struct fw_cdev_iso_packet __user *p, *end, *next;
1073	struct fw_iso_context *ctx = client->iso_context;
1074	unsigned long payload, buffer_end, transmit_header_bytes = 0;
1075	u32 control;
1076	int count;
1077	struct {
1078		struct fw_iso_packet packet;
1079		u8 header[256];
1080	} u;
1081
1082	if (ctx == NULL || a->handle != 0)
1083		return -EINVAL;
1084
1085	/*
1086	 * If the user passes a non-NULL data pointer, has mmap()'ed
1087	 * the iso buffer, and the pointer points inside the buffer,
1088	 * we setup the payload pointers accordingly.  Otherwise we
1089	 * set them both to 0, which will still let packets with
1090	 * payload_length == 0 through.  In other words, if no packets
1091	 * use the indirect payload, the iso buffer need not be mapped
1092	 * and the a->data pointer is ignored.
1093	 */
1094	payload = (unsigned long)a->data - client->vm_start;
1095	buffer_end = client->buffer.page_count << PAGE_SHIFT;
1096	if (a->data == 0 || client->buffer.pages == NULL ||
1097	    payload >= buffer_end) {
1098		payload = 0;
1099		buffer_end = 0;
1100	}
1101
1102	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
1103		return -EINVAL;
1104
1105	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
1106
1107	end = (void __user *)p + a->size;
1108	count = 0;
1109	while (p < end) {
1110		if (get_user(control, &p->control))
1111			return -EFAULT;
1112		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
1113		u.packet.interrupt = GET_INTERRUPT(control);
1114		u.packet.skip = GET_SKIP(control);
1115		u.packet.tag = GET_TAG(control);
1116		u.packet.sy = GET_SY(control);
1117		u.packet.header_length = GET_HEADER_LENGTH(control);
1118
1119		switch (ctx->type) {
1120		case FW_ISO_CONTEXT_TRANSMIT:
1121			if (u.packet.header_length & 3)
1122				return -EINVAL;
1123			transmit_header_bytes = u.packet.header_length;
1124			break;
1125
1126		case FW_ISO_CONTEXT_RECEIVE:
1127			if (u.packet.header_length == 0 ||
1128			    u.packet.header_length % ctx->header_size != 0)
1129				return -EINVAL;
1130			break;
1131
1132		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
1133			if (u.packet.payload_length == 0 ||
1134			    u.packet.payload_length & 3)
1135				return -EINVAL;
1136			break;
1137		}
1138
1139		next = (struct fw_cdev_iso_packet __user *)
1140			&p->header[transmit_header_bytes / 4];
1141		if (next > end)
1142			return -EINVAL;
1143		if (copy_from_user
1144		    (u.packet.header, p->header, transmit_header_bytes))
1145			return -EFAULT;
1146		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
1147		    u.packet.header_length + u.packet.payload_length > 0)
1148			return -EINVAL;
1149		if (payload + u.packet.payload_length > buffer_end)
1150			return -EINVAL;
1151
1152		if (fw_iso_context_queue(ctx, &u.packet,
1153					 &client->buffer, payload))
1154			break;
1155
1156		p = next;
1157		payload += u.packet.payload_length;
1158		count++;
1159	}
1160	fw_iso_context_queue_flush(ctx);
1161
1162	a->size    -= uptr_to_u64(p) - a->packets;
1163	a->packets  = uptr_to_u64(p);
1164	a->data     = client->vm_start + payload;
1165
1166	return count;
1167}
1168
1169static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
1170{
1171	struct fw_cdev_start_iso *a = &arg->start_iso;
1172
1173	BUILD_BUG_ON(
1174	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
1175	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
1176	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
1177	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
1178	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);
1179
1180	if (client->iso_context == NULL || a->handle != 0)
1181		return -EINVAL;
1182
1183	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
1184	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
1185		return -EINVAL;
1186
1187	return fw_iso_context_start(client->iso_context,
1188				    a->cycle, a->sync, a->tags);
1189}
1190
1191static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
1192{
1193	struct fw_cdev_stop_iso *a = &arg->stop_iso;
1194
1195	if (client->iso_context == NULL || a->handle != 0)
1196		return -EINVAL;
1197
1198	return fw_iso_context_stop(client->iso_context);
1199}
1200
1201static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
1202{
1203	struct fw_cdev_flush_iso *a = &arg->flush_iso;
1204
1205	if (client->iso_context == NULL || a->handle != 0)
1206		return -EINVAL;
1207
1208	return fw_iso_context_flush_completions(client->iso_context);
1209}
1210
1211static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
1212{
1213	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
1214	struct fw_card *card = client->device->card;
1215	struct timespec64 ts = {0, 0};
1216	u32 cycle_time = 0;
1217	int ret = 0;
1218
1219	local_irq_disable();
1220
1221	ret = fw_card_read_cycle_time(card, &cycle_time);
1222	if (ret < 0)
1223		goto end;
1224
1225	switch (a->clk_id) {
1226	case CLOCK_REALTIME:      ktime_get_real_ts64(&ts);	break;
1227	case CLOCK_MONOTONIC:     ktime_get_ts64(&ts);		break;
1228	case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts);	break;
1229	default:
1230		ret = -EINVAL;
1231	}
1232end:
1233	local_irq_enable();
1234
1235	a->tv_sec      = ts.tv_sec;
1236	a->tv_nsec     = ts.tv_nsec;
1237	a->cycle_timer = cycle_time;
1238
1239	return ret;
1240}
1241
1242static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
1243{
1244	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
1245	struct fw_cdev_get_cycle_timer2 ct2;
1246
1247	ct2.clk_id = CLOCK_REALTIME;
1248	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
1249
1250	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
1251	a->cycle_timer = ct2.cycle_timer;
1252
1253	return 0;
1254}
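
/*
 * Illustrative note (not part of this driver): the cycle_timer value returned
 * by the two ioctls above is the raw 32-bit CYCLE_TIME register as defined by
 * IEEE 1394: 7 bits of seconds, 13 bits of 8000 Hz cycles, and 12 bits of
 * 24.576 MHz cycle offset.  A userspace sketch for splitting it:
 */
#if 0	/* example, not built as part of this file */
#include <linux/types.h>

static void split_cycle_time(__u32 cycle_timer, unsigned int *sec,
			     unsigned int *cycles, unsigned int *offset)
{
	*sec    = cycle_timer >> 25;			/* cycleSeconds, 0..127  */
	*cycles = (cycle_timer >> 12) & 0x1fff;		/* cycleCount,   0..7999 */
	*offset = cycle_timer & 0xfff;			/* cycleOffset,  0..3071 */
}
#endif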
1255
1256static void iso_resource_work(struct work_struct *work)
1257{
1258	struct iso_resource_event *e;
1259	struct iso_resource *r =
1260			container_of(work, struct iso_resource, work.work);
1261	struct client *client = r->client;
1262	int generation, channel, bandwidth, todo;
1263	bool skip, free, success;
1264
1265	spin_lock_irq(&client->lock);
1266	generation = client->device->generation;
1267	todo = r->todo;
1268	/* Allow 1000ms grace period for other reallocations. */
1269	if (todo == ISO_RES_ALLOC &&
1270	    time_before64(get_jiffies_64(),
1271			  client->device->card->reset_jiffies + HZ)) {
1272		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
1273		skip = true;
1274	} else {
1275		/* We could be called twice within the same generation. */
1276		skip = todo == ISO_RES_REALLOC &&
1277		       r->generation == generation;
1278	}
1279	free = todo == ISO_RES_DEALLOC ||
1280	       todo == ISO_RES_ALLOC_ONCE ||
1281	       todo == ISO_RES_DEALLOC_ONCE;
1282	r->generation = generation;
1283	spin_unlock_irq(&client->lock);
1284
1285	if (skip)
1286		goto out;
1287
1288	bandwidth = r->bandwidth;
1289
1290	fw_iso_resource_manage(client->device->card, generation,
1291			r->channels, &channel, &bandwidth,
1292			todo == ISO_RES_ALLOC ||
1293			todo == ISO_RES_REALLOC ||
1294			todo == ISO_RES_ALLOC_ONCE);
1295	/*
1296	 * Is this generation outdated already?  As long as this resource sticks
1297	 * in the idr, it will be scheduled again for a newer generation or at
1298	 * shutdown.
1299	 */
1300	if (channel == -EAGAIN &&
1301	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
1302		goto out;
1303
1304	success = channel >= 0 || bandwidth > 0;
1305
1306	spin_lock_irq(&client->lock);
1307	/*
1308	 * Transit from allocation to reallocation, except if the client
1309	 * requested deallocation in the meantime.
1310	 */
1311	if (r->todo == ISO_RES_ALLOC)
1312		r->todo = ISO_RES_REALLOC;
1313	/*
1314	 * Allocation or reallocation failure?  Pull this resource out of the
1315	 * idr and prepare for deletion, unless the client is shutting down.
1316	 */
1317	if (r->todo == ISO_RES_REALLOC && !success &&
1318	    !client->in_shutdown &&
1319	    idr_remove(&client->resource_idr, r->resource.handle)) {
1320		client_put(client);
1321		free = true;
1322	}
1323	spin_unlock_irq(&client->lock);
1324
1325	if (todo == ISO_RES_ALLOC && channel >= 0)
1326		r->channels = 1ULL << channel;
1327
1328	if (todo == ISO_RES_REALLOC && success)
1329		goto out;
1330
1331	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
1332		e = r->e_alloc;
1333		r->e_alloc = NULL;
1334	} else {
1335		e = r->e_dealloc;
1336		r->e_dealloc = NULL;
1337	}
1338	e->iso_resource.handle    = r->resource.handle;
1339	e->iso_resource.channel   = channel;
1340	e->iso_resource.bandwidth = bandwidth;
1341
1342	queue_event(client, &e->event,
1343		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);
1344
1345	if (free) {
1346		cancel_delayed_work(&r->work);
1347		kfree(r->e_alloc);
1348		kfree(r->e_dealloc);
1349		kfree(r);
1350	}
1351 out:
1352	client_put(client);
1353}
1354
1355static void release_iso_resource(struct client *client,
1356				 struct client_resource *resource)
1357{
1358	struct iso_resource *r =
1359		container_of(resource, struct iso_resource, resource);
1360
1361	spin_lock_irq(&client->lock);
1362	r->todo = ISO_RES_DEALLOC;
1363	schedule_iso_resource(r, 0);
1364	spin_unlock_irq(&client->lock);
1365}
1366
1367static int init_iso_resource(struct client *client,
1368		struct fw_cdev_allocate_iso_resource *request, int todo)
1369{
1370	struct iso_resource_event *e1, *e2;
1371	struct iso_resource *r;
1372	int ret;
1373
1374	if ((request->channels == 0 && request->bandwidth == 0) ||
1375	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
1376		return -EINVAL;
1377
1378	r  = kmalloc(sizeof(*r), GFP_KERNEL);
1379	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
1380	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
1381	if (r == NULL || e1 == NULL || e2 == NULL) {
1382		ret = -ENOMEM;
1383		goto fail;
1384	}
1385
1386	INIT_DELAYED_WORK(&r->work, iso_resource_work);
1387	r->client	= client;
1388	r->todo		= todo;
1389	r->generation	= -1;
1390	r->channels	= request->channels;
1391	r->bandwidth	= request->bandwidth;
1392	r->e_alloc	= e1;
1393	r->e_dealloc	= e2;
1394
1395	e1->iso_resource.closure = request->closure;
1396	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
1397	e2->iso_resource.closure = request->closure;
1398	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
1399
1400	if (todo == ISO_RES_ALLOC) {
1401		r->resource.release = release_iso_resource;
1402		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
1403		if (ret < 0)
1404			goto fail;
1405	} else {
1406		r->resource.release = NULL;
1407		r->resource.handle = -1;
1408		schedule_iso_resource(r, 0);
1409	}
1410	request->handle = r->resource.handle;
1411
1412	return 0;
1413 fail:
1414	kfree(r);
1415	kfree(e1);
1416	kfree(e2);
1417
1418	return ret;
1419}
1420
1421static int ioctl_allocate_iso_resource(struct client *client,
1422				       union ioctl_arg *arg)
1423{
1424	return init_iso_resource(client,
1425			&arg->allocate_iso_resource, ISO_RES_ALLOC);
1426}
1427
1428static int ioctl_deallocate_iso_resource(struct client *client,
1429					 union ioctl_arg *arg)
1430{
1431	return release_client_resource(client,
1432			arg->deallocate.handle, release_iso_resource, NULL);
1433}
1434
1435static int ioctl_allocate_iso_resource_once(struct client *client,
1436					    union ioctl_arg *arg)
1437{
1438	return init_iso_resource(client,
1439			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
1440}
1441
1442static int ioctl_deallocate_iso_resource_once(struct client *client,
1443					      union ioctl_arg *arg)
1444{
1445	return init_iso_resource(client,
1446			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
1447}
1448
1449/*
1450 * Returns a speed code:  Maximum speed to or from this device,
1451 * limited by the device's link speed, the local node's link speed,
1452 * and all PHY port speeds between the two links.
1453 */
1454static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
1455{
1456	return client->device->max_speed;
1457}
1458
1459static int ioctl_send_broadcast_request(struct client *client,
1460					union ioctl_arg *arg)
1461{
1462	struct fw_cdev_send_request *a = &arg->send_request;
1463
1464	switch (a->tcode) {
1465	case TCODE_WRITE_QUADLET_REQUEST:
1466	case TCODE_WRITE_BLOCK_REQUEST:
1467		break;
1468	default:
1469		return -EINVAL;
1470	}
1471
1472	/* Security policy: Only allow accesses to Units Space. */
1473	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1474		return -EACCES;
1475
1476	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
1477}
1478
1479static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
1480{
1481	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
1482	struct fw_cdev_send_request request;
1483	int dest;
1484
1485	if (a->speed > client->device->card->link_speed ||
1486	    a->length > 1024 << a->speed)
1487		return -EIO;
1488
1489	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
1490		return -EINVAL;
1491
1492	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
1493	request.tcode		= TCODE_STREAM_DATA;
1494	request.length		= a->length;
1495	request.closure		= a->closure;
1496	request.data		= a->data;
1497	request.generation	= a->generation;
1498
1499	return init_request(client, &request, dest, a->speed);
1500}
1501
1502static void outbound_phy_packet_callback(struct fw_packet *packet,
1503					 struct fw_card *card, int status)
1504{
1505	struct outbound_phy_packet_event *e =
1506		container_of(packet, struct outbound_phy_packet_event, p);
1507	struct client *e_client;
1508
1509	switch (status) {
1510	/* expected: */
1511	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
1512	/* should never happen with PHY packets: */
1513	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
1514	case ACK_BUSY_X:
1515	case ACK_BUSY_A:
1516	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
1517	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
1518	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
1519	/* stale generation; cancelled; on certain controllers: no ack */
1520	default:		e->phy_packet.rcode = status;		break;
1521	}
1522	e->phy_packet.data[0] = packet->timestamp;
1523
1524	e_client = e->client;
1525	queue_event(e->client, &e->event, &e->phy_packet,
1526		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
1527	client_put(e_client);
1528}
1529
1530static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
1531{
1532	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
1533	struct fw_card *card = client->device->card;
1534	struct outbound_phy_packet_event *e;
1535
1536	/* Access policy: Allow this ioctl only on local nodes' device files. */
1537	if (!client->device->is_local)
1538		return -ENOSYS;
1539
1540	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
1541	if (e == NULL)
1542		return -ENOMEM;
1543
1544	client_get(client);
1545	e->client		= client;
1546	e->p.speed		= SCODE_100;
1547	e->p.generation		= a->generation;
1548	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
1549	e->p.header[1]		= a->data[0];
1550	e->p.header[2]		= a->data[1];
1551	e->p.header_length	= 12;
1552	e->p.callback		= outbound_phy_packet_callback;
1553	e->phy_packet.closure	= a->closure;
1554	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
1555	if (is_ping_packet(a->data))
 1556		e->phy_packet.length = 4;
1557
1558	card->driver->send_request(card, &e->p);
1559
1560	return 0;
1561}
1562
1563static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
1564{
1565	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
1566	struct fw_card *card = client->device->card;
1567
1568	/* Access policy: Allow this ioctl only on local nodes' device files. */
1569	if (!client->device->is_local)
1570		return -ENOSYS;
1571
1572	spin_lock_irq(&card->lock);
1573
1574	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
1575	client->phy_receiver_closure = a->closure;
1576
1577	spin_unlock_irq(&card->lock);
1578
1579	return 0;
1580}
1581
1582void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
1583{
1584	struct client *client;
1585	struct inbound_phy_packet_event *e;
1586	unsigned long flags;
1587
1588	spin_lock_irqsave(&card->lock, flags);
1589
1590	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
1591		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
1592		if (e == NULL)
1593			break;
1594
1595		e->phy_packet.closure	= client->phy_receiver_closure;
1596		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
1597		e->phy_packet.rcode	= RCODE_COMPLETE;
1598		e->phy_packet.length	= 8;
1599		e->phy_packet.data[0]	= p->header[1];
1600		e->phy_packet.data[1]	= p->header[2];
1601		queue_event(client, &e->event,
1602			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
1603	}
1604
1605	spin_unlock_irqrestore(&card->lock, flags);
1606}
1607
1608static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
1609	[0x00] = ioctl_get_info,
1610	[0x01] = ioctl_send_request,
1611	[0x02] = ioctl_allocate,
1612	[0x03] = ioctl_deallocate,
1613	[0x04] = ioctl_send_response,
1614	[0x05] = ioctl_initiate_bus_reset,
1615	[0x06] = ioctl_add_descriptor,
1616	[0x07] = ioctl_remove_descriptor,
1617	[0x08] = ioctl_create_iso_context,
1618	[0x09] = ioctl_queue_iso,
1619	[0x0a] = ioctl_start_iso,
1620	[0x0b] = ioctl_stop_iso,
1621	[0x0c] = ioctl_get_cycle_timer,
1622	[0x0d] = ioctl_allocate_iso_resource,
1623	[0x0e] = ioctl_deallocate_iso_resource,
1624	[0x0f] = ioctl_allocate_iso_resource_once,
1625	[0x10] = ioctl_deallocate_iso_resource_once,
1626	[0x11] = ioctl_get_speed,
1627	[0x12] = ioctl_send_broadcast_request,
1628	[0x13] = ioctl_send_stream_packet,
1629	[0x14] = ioctl_get_cycle_timer2,
1630	[0x15] = ioctl_send_phy_packet,
1631	[0x16] = ioctl_receive_phy_packets,
1632	[0x17] = ioctl_set_iso_channels,
1633	[0x18] = ioctl_flush_iso,
1634};
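
/*
 * The index of each handler above equals _IOC_NR() of the corresponding
 * FW_CDEV_IOC_* command in <linux/firewire-cdev.h> (all of them use ioctl
 * type '#'); dispatch_ioctl() below relies on this when it bounds-checks
 * _IOC_NR(cmd) against this array.
 */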
1635
1636static int dispatch_ioctl(struct client *client,
1637			  unsigned int cmd, void __user *arg)
1638{
1639	union ioctl_arg buffer;
1640	int ret;
1641
1642	if (fw_device_is_shutdown(client->device))
1643		return -ENODEV;
1644
1645	if (_IOC_TYPE(cmd) != '#' ||
1646	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
1647	    _IOC_SIZE(cmd) > sizeof(buffer))
1648		return -ENOTTY;
1649
1650	memset(&buffer, 0, sizeof(buffer));
1651
1652	if (_IOC_DIR(cmd) & _IOC_WRITE)
1653		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
1654			return -EFAULT;
1655
1656	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
1657	if (ret < 0)
1658		return ret;
1659
1660	if (_IOC_DIR(cmd) & _IOC_READ)
1661		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
1662			return -EFAULT;
1663
1664	return ret;
1665}
1666
1667static long fw_device_op_ioctl(struct file *file,
1668			       unsigned int cmd, unsigned long arg)
1669{
1670	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
1671}
1672
1673static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
1674{
1675	struct client *client = file->private_data;
1676	unsigned long size;
1677	int page_count, ret;
1678
1679	if (fw_device_is_shutdown(client->device))
1680		return -ENODEV;
1681
1682	/* FIXME: We could support multiple buffers, but we don't. */
1683	if (client->buffer.pages != NULL)
1684		return -EBUSY;
1685
1686	if (!(vma->vm_flags & VM_SHARED))
1687		return -EINVAL;
1688
1689	if (vma->vm_start & ~PAGE_MASK)
1690		return -EINVAL;
1691
1692	client->vm_start = vma->vm_start;
1693	size = vma->vm_end - vma->vm_start;
1694	page_count = size >> PAGE_SHIFT;
1695	if (size & ~PAGE_MASK)
1696		return -EINVAL;
1697
1698	ret = fw_iso_buffer_alloc(&client->buffer, page_count);
1699	if (ret < 0)
1700		return ret;
1701
1702	spin_lock_irq(&client->lock);
1703	if (client->iso_context) {
1704		ret = fw_iso_buffer_map_dma(&client->buffer,
1705				client->device->card,
1706				iso_dma_direction(client->iso_context));
1707		client->buffer_is_mapped = (ret == 0);
1708	}
1709	spin_unlock_irq(&client->lock);
1710	if (ret < 0)
1711		goto fail;
1712
1713	ret = vm_map_pages_zero(vma, client->buffer.pages,
1714				client->buffer.page_count);
1715	if (ret < 0)
1716		goto fail;
1717
1718	return 0;
1719 fail:
1720	fw_iso_buffer_destroy(&client->buffer, client->device->card);
1721	return ret;
1722}
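
/*
 * Illustrative userspace sketch (not part of this driver): the intended use
 * of the mmap() handler above together with the iso context ioctls.  The
 * client maps a payload buffer, creates a single receive context, queues one
 * descriptor per payload chunk and starts the context; completed packets are
 * then reported as FW_CDEV_EVENT_ISO_INTERRUPT events.  Assumes the
 * FW_CDEV_IOC_* definitions, FW_CDEV_ISO_* macros and struct layouts from
 * <linux/firewire-cdev.h>; channel, sizes and the interrupt policy are
 * arbitrary example values.
 */
#if 0	/* example, not built as part of this file */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/firewire-cdev.h>

#define BUF_SIZE	(16 * 4096)		/* must be a multiple of the page size */
#define PKT_PAYLOAD	1024

static int start_iso_receive(int fd)
{
	/*
	 * For a receive context no header quadlets are passed in, so each
	 * struct fw_cdev_iso_packet descriptor degenerates to its control word.
	 */
	__u32 control[BUF_SIZE / PKT_PAYLOAD];
	unsigned int i;
	void *buf;

	buf = mmap(NULL, BUF_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED)
		return -1;

	struct fw_cdev_create_iso_context create = {
		.type        = FW_CDEV_ISO_CONTEXT_RECEIVE,
		.header_size = 4,		/* one stripped header quadlet per packet */
		.channel     = 10,
		.speed       = 0,		/* only transmit contexts use the speed */
		.closure     = 0xbeef,		/* echoed in ISO_INTERRUPT events */
	};
	if (ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &create) < 0)
		return -1;

	for (i = 0; i < BUF_SIZE / PKT_PAYLOAD; i++)
		control[i] = FW_CDEV_ISO_PAYLOAD_LENGTH(PKT_PAYLOAD) |
			     FW_CDEV_ISO_HEADER_LENGTH(4) |
			     FW_CDEV_ISO_INTERRUPT;

	struct fw_cdev_queue_iso queue = {
		.packets = (__u64)(unsigned long)control,
		.data    = (__u64)(unsigned long)buf,	/* payloads start at the mapping */
		.size    = sizeof(control),
		.handle  = create.handle,
	};
	if (ioctl(fd, FW_CDEV_IOC_QUEUE_ISO, &queue) < 0)
		return -1;

	struct fw_cdev_start_iso start = {
		.cycle  = -1,			/* start as soon as possible */
		.sync   = 0,
		.tags   = FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS,
		.handle = create.handle,
	};
	return ioctl(fd, FW_CDEV_IOC_START_ISO, &start);
}
#endif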
1723
1724static int is_outbound_transaction_resource(int id, void *p, void *data)
1725{
1726	struct client_resource *resource = p;
1727
1728	return resource->release == release_transaction;
1729}
1730
1731static int has_outbound_transactions(struct client *client)
1732{
1733	int ret;
1734
1735	spin_lock_irq(&client->lock);
1736	ret = idr_for_each(&client->resource_idr,
1737			   is_outbound_transaction_resource, NULL);
1738	spin_unlock_irq(&client->lock);
1739
1740	return ret;
1741}
1742
1743static int shutdown_resource(int id, void *p, void *data)
1744{
1745	struct client_resource *resource = p;
1746	struct client *client = data;
1747
1748	resource->release(client, resource);
1749	client_put(client);
1750
1751	return 0;
1752}
1753
1754static int fw_device_op_release(struct inode *inode, struct file *file)
1755{
1756	struct client *client = file->private_data;
1757	struct event *event, *next_event;
1758
1759	spin_lock_irq(&client->device->card->lock);
1760	list_del(&client->phy_receiver_link);
1761	spin_unlock_irq(&client->device->card->lock);
1762
1763	mutex_lock(&client->device->client_list_mutex);
1764	list_del(&client->link);
1765	mutex_unlock(&client->device->client_list_mutex);
1766
1767	if (client->iso_context)
1768		fw_iso_context_destroy(client->iso_context);
1769
1770	if (client->buffer.pages)
1771		fw_iso_buffer_destroy(&client->buffer, client->device->card);
1772
1773	/* Freeze client->resource_idr and client->event_list */
1774	spin_lock_irq(&client->lock);
1775	client->in_shutdown = true;
1776	spin_unlock_irq(&client->lock);
1777
1778	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
1779
1780	idr_for_each(&client->resource_idr, shutdown_resource, client);
1781	idr_destroy(&client->resource_idr);
1782
1783	list_for_each_entry_safe(event, next_event, &client->event_list, link)
1784		kfree(event);
1785
1786	client_put(client);
1787
1788	return 0;
1789}
1790
1791static __poll_t fw_device_op_poll(struct file *file, poll_table * pt)
1792{
1793	struct client *client = file->private_data;
1794	__poll_t mask = 0;
1795
1796	poll_wait(file, &client->wait, pt);
1797
1798	if (fw_device_is_shutdown(client->device))
1799		mask |= EPOLLHUP | EPOLLERR;
1800	if (!list_empty(&client->event_list))
1801		mask |= EPOLLIN | EPOLLRDNORM;
1802
1803	return mask;
1804}
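
/*
 * Illustrative userspace sketch (not part of this driver): waiting for events
 * with poll(2) instead of a blocking read(), matching the mask returned by
 * fw_device_op_poll() above; POLLHUP/POLLERR signal that the device has been
 * unplugged or shut down.
 */
#if 0	/* example, not built as part of this file */
#include <poll.h>

/* Returns 1 when an event can be read(), 0 on device removal, -1 on error. */
static int wait_for_event(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) < 0)
		return -1;
	if (pfd.revents & (POLLHUP | POLLERR))
		return 0;
	return 1;
}
#endif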
1805
1806const struct file_operations fw_device_ops = {
1807	.owner		= THIS_MODULE,
1808	.llseek		= no_llseek,
1809	.open		= fw_device_op_open,
1810	.read		= fw_device_op_read,
1811	.unlocked_ioctl	= fw_device_op_ioctl,
1812	.mmap		= fw_device_op_mmap,
1813	.release	= fw_device_op_release,
1814	.poll		= fw_device_op_poll,
1815	.compat_ioctl	= compat_ptr_ioctl,
1816};