/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "core.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			5
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4

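/*
 * Illustrative userspace usage (a rough sketch, not part of this driver;
 * the FW_CDEV_IOC_* numbers and argument structs are declared in
 * linux/firewire-cdev.h):
 *
 *	int fd = open("/dev/fw0", O_RDWR);
 *	struct fw_cdev_get_info info = { .version = FW_CDEV_KERNEL_VERSION };
 *	ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
 *
 * Events queued by the driver are then retrieved by read()ing from fd,
 * and iso payload buffers are provided by mmap()ing it.
 */

/*
 * One client instance exists per open file descriptor on a /dev/fw* node.
 * client->lock protects resource_idr, event_list, and in_shutdown.
 */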
struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	wait_queue_head_t tx_flush_wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;
	bool buffer_is_mapped;

	struct list_head phy_receiver_link;
	u64 phy_receiver_closure;

	struct list_head link;
	struct kref kref;
};

static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
		client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
	} req;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
	struct event event;
	struct client *client;
	struct fw_packet p;
	struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
	struct event event;
	struct fw_cdev_event_phy_packet phy_packet;
};

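/*
 * The character device ABI passes user pointers in 64-bit fields.  For a
 * 32-bit task on a 64-bit kernel, compat_ptr()/ptr_to_compat() perform the
 * architecture-correct conversion between the two pointer widths.
 */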
#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
	if (is_compat_task())
		return compat_ptr(value);
	else
		return (void __user *)(unsigned long)value;
}

static u64 uptr_to_u64(void __user *ptr)
{
	if (is_compat_task())
		return ptr_to_compat(ptr);
	else
		return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline u64 uptr_to_u64(void __user *ptr)
{
	return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	init_waitqueue_head(&client->tx_flush_wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	INIT_LIST_HEAD(&client->link);
	kref_init(&client->kref);

	file->private_data = client;

	return nonseekable_open(inode, file);
}

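/*
 * An event is queued as up to two segments:  a fixed-size event record
 * (data0) plus an optional variable-length payload (data1).
 * dequeue_event() copies the segments back-to-back into the buffer that
 * userspace passed to read().
 */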
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
	    fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure = client->bus_reset_closure;
	event->type = FW_CDEV_EVENT_BUS_RESET;
	event->generation = client->device->generation;
	event->node_id = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id = card->bm_node_id;
	event->irm_node_id = card->irm_node->node_id;
	event->root_node_id = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		fw_notice(client->device->card, "out of memory when allocating event\n");
		return;
	}

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

union ioctl_arg {
	struct fw_cdev_get_info get_info;
	struct fw_cdev_send_request send_request;
	struct fw_cdev_allocate allocate;
	struct fw_cdev_deallocate deallocate;
	struct fw_cdev_send_response send_response;
	struct fw_cdev_initiate_bus_reset initiate_bus_reset;
	struct fw_cdev_add_descriptor add_descriptor;
	struct fw_cdev_remove_descriptor remove_descriptor;
	struct fw_cdev_create_iso_context create_iso_context;
	struct fw_cdev_queue_iso queue_iso;
	struct fw_cdev_start_iso start_iso;
	struct fw_cdev_stop_iso stop_iso;
	struct fw_cdev_get_cycle_timer get_cycle_timer;
	struct fw_cdev_allocate_iso_resource allocate_iso_resource;
	struct fw_cdev_send_stream_packet send_stream_packet;
	struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
	struct fw_cdev_send_phy_packet send_phy_packet;
	struct fw_cdev_receive_phy_packets receive_phy_packets;
	struct fw_cdev_set_iso_channels set_iso_channels;
	struct fw_cdev_flush_iso flush_iso;
};

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_info *a = &arg->get_info;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = a->version;
	a->version = FW_CDEV_KERNEL_VERSION;
	a->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (a->rom != 0) {
		size_t want = a->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(u64_to_uptr(a->rom),
				   client->device->config_rom, min(want, have));
	}
	a->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	mutex_lock(&client->device->client_list_mutex);

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		ret = copy_to_user(u64_to_uptr(a->bus_reset),
				   &bus_reset, sizeof(bus_reset));
	}
	if (ret == 0 && list_empty(&client->link))
		list_add_tail(&client->link, &client->device->client_list);

	mutex_unlock(&client->device->client_list_mutex);

	return ret ? -EFAULT : 0;
}

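/*
 * idr_pre_get() preallocates outside of client->lock; idr_get_new() may
 * nevertheless return -EAGAIN under the lock if the preallocation was
 * consumed elsewhere in the meantime, in which case we simply retry.
 */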
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		schedule_if_iso_resource(resource);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(resource && resource->release == release))
		return -EINVAL;

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	idr_remove(&client->resource_idr, e->r.resource.handle);
	if (client->in_shutdown)
		wake_up(&client->tx_flush_wait);
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the idr's reference */
	client_put(client);
}

static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}

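/*
 * For writes to the FCP command and response registers, core-transaction.c
 * already sent the write response itself and passes a NULL fw_request;
 * only a copy of the payload is handed to the client.
 */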
static inline bool is_fcp_request(struct fw_request *request)
{
	return request == NULL;
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	if (is_fcp_request(r->request))
		kfree(r->data);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	void *fcp_frame = NULL;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL) {
		fw_notice(card, "out of memory when allocating event\n");
		goto failed;
	}
	r->card = card;
	r->request = request;
	r->data = payload;
	r->length = length;

	if (is_fcp_request(request)) {
		/*
		 * FIXME: Let core-transaction.c manage a
		 * single reference-counted copy?
		 */
		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
		if (fcp_frame == NULL)
			goto failed;

		r->data = fcp_frame;
	}

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type = FW_CDEV_EVENT_REQUEST;
		req->tcode = tcode;
		req->offset = offset;
		req->length = length;
		req->handle = r->resource.handle;
		req->closure = handler->closure;
		event_size0 = sizeof(*req);
	} else {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type = FW_CDEV_EVENT_REQUEST2;
		req->tcode = tcode;
		req->offset = offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card = card->index;
		req->generation = generation;
		req->length = length;
		req->handle = r->resource.handle;
		req->closure = handler->closure;
		event_size0 = sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	kfree(fcp_frame);

	if (!is_fcp_request(request))
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);

	fw_card_put(card);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
		container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data = r;
	r->closure = a->closure;
	r->client = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (is_fcp_request(r->request))
		goto out;

	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		kfree(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		kfree(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
	return 0;
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key = a->key;
	r->descriptor.data = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL) {
		fw_notice(context->card, "out of memory when allocating event\n");
		return;
	}
	e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure = client->iso_closure;
	e->interrupt.cycle = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static void iso_mc_callback(struct fw_iso_context *context,
			    dma_addr_t completed, void *data)
{
	struct client *client = data;
	struct iso_interrupt_mc_event *e;

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL) {
		fw_notice(context->card, "out of memory when allocating event\n");
		return;
	}
	e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
	e->interrupt.closure = client->iso_closure;
	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
						      completed);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt), NULL, 0);
}

static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
{
	if (context->type == FW_ISO_CONTEXT_TRANSMIT)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;
	fw_iso_callback_t cb;
	int ret;

	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

	switch (a->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200 || a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		if (a->header_size < 4 || (a->header_size & 3) ||
		    a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		cb = (fw_iso_callback_t)iso_mc_callback;
		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card, a->type,
			a->channel, a->speed, a->header_size, cb, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	/* We only support one context at this time. */
	spin_lock_irq(&client->lock);
	if (client->iso_context != NULL) {
		spin_unlock_irq(&client->lock);
		fw_iso_context_destroy(context);

		return -EBUSY;
	}
	if (!client->buffer_is_mapped) {
		ret = fw_iso_buffer_map_dma(&client->buffer,
					    client->device->card,
					    iso_dma_direction(context));
		if (ret < 0) {
			spin_unlock_irq(&client->lock);
			fw_iso_context_destroy(context);

			return ret;
		}
		client->buffer_is_mapped = true;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;
	spin_unlock_irq(&client->lock);

	a->handle = 0;

	return 0;
}

static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
	struct fw_iso_context *ctx = client->iso_context;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_set_channels(ctx, &a->channels);
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
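
/*
 * Resulting bit layout of fw_cdev_iso_packet.control:
 *	[15:0]	payload_length
 *	[16]	interrupt
 *	[17]	skip
 *	[19:18]	tag
 *	[23:20]	sy
 *	[31:24]	header_length
 */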

static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, transmit_header_bytes = 0;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */
	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
		return -EINVAL;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
	if (!access_ok(VERIFY_READ, p, a->size))
		return -EFAULT;

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		switch (ctx->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			if (u.packet.header_length & 3)
				return -EINVAL;
			transmit_header_bytes = u.packet.header_length;
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			if (u.packet.header_length == 0 ||
			    u.packet.header_length % ctx->header_size != 0)
				return -EINVAL;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (u.packet.payload_length == 0 ||
			    u.packet.payload_length & 3)
				return -EINVAL;
			break;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[transmit_header_bytes / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, transmit_header_bytes))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}
	fw_iso_context_queue_flush(ctx);

	a->size -= uptr_to_u64(p) - a->packets;
	a->packets = uptr_to_u64(p);
	a->data = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_start_iso *a = &arg->start_iso;

	BUILD_BUG_ON(
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
		return -EINVAL;

	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_stop_iso *a = &arg->stop_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_flush_iso *a = &arg->flush_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_flush_completions(client->iso_context);
}

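/*
 * The returned cycle_timer is the raw CSR CYCLE_TIME value:  7 bits
 * secondCount, 13 bits cycleCount (8000 cycles per second), 12 bits
 * cycleOffset (24.576 MHz).  It is sampled with local interrupts disabled
 * so that it stays as close as possible to the system clock reading
 * selected by clk_id.
 */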
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
	struct fw_card *card = client->device->card;
	struct timespec ts = {0, 0};
	u32 cycle_time;
	int ret = 0;

	local_irq_disable();

	cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

	switch (a->clk_id) {
	case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
	case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
	case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
	default:
		ret = -EINVAL;
	}

	local_irq_enable();

	a->tv_sec = ts.tv_sec;
	a->tv_nsec = ts.tv_nsec;
	a->cycle_timer = cycle_time;

	return ret;
}

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}

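/*
 * State machine of an iso_resource:  ISO_RES_ALLOC transits to
 * ISO_RES_REALLOC once the allocation succeeded; the resource is then
 * reallocated in each new bus generation.  ISO_RES_DEALLOC and the
 * *_ONCE variants are one-shot and free the iso_resource afterwards.
 */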
static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_before64(get_jiffies_64(),
			  client->device->card->reset_jiffies + HZ)) {
		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE);
	/*
	 * Is this generation outdated already?  As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle = r->resource.handle;
	e->iso_resource.channel = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
	spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client = client;
	r->todo = todo;
	r->generation = -1;
	r->channels = request->channels;
	r->bandwidth = request->bandwidth;
	r->e_alloc = e1;
	r->e_dealloc = e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client,
				       union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
					 union ioctl_arg *arg)
{
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
					    union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
					      union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client,
					union ioctl_arg *arg)
{
	struct fw_cdev_send_request *a = &arg->send_request;

	switch (a->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	request.tcode = TCODE_STREAM_DATA;
	request.length = a->length;
	request.closure = a->closure;
	request.data = a->data;
	request.generation = a->generation;

	return init_request(client, &request, dest, a->speed);
}

static void outbound_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	struct outbound_phy_packet_event *e =
		container_of(packet, struct outbound_phy_packet_event, p);

	switch (status) {
	/* expected: */
	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	/* should never happen with PHY packets: */
	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
	/* stale generation; cancelled; on certain controllers: no ack */
	default:		e->phy_packet.rcode = status;		break;
	}
	e->phy_packet.data[0] = packet->timestamp;

	queue_event(e->client, &e->event, &e->phy_packet,
		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
	client_put(e->client);
}

static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
	struct fw_card *card = client->device->card;
	struct outbound_phy_packet_event *e;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	client_get(client);
	e->client = client;
	e->p.speed = SCODE_100;
	e->p.generation = a->generation;
	e->p.header[0] = TCODE_LINK_INTERNAL << 4;
	e->p.header[1] = a->data[0];
	e->p.header[2] = a->data[1];
	e->p.header_length = 12;
	e->p.callback = outbound_phy_packet_callback;
	e->phy_packet.closure = a->closure;
	e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT;
	if (is_ping_packet(a->data))
		e->phy_packet.length = 4;

	card->driver->send_request(card, &e->p);

	return 0;
}

static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
	struct fw_card *card = client->device->card;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	spin_lock_irq(&card->lock);

	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
	client->phy_receiver_closure = a->closure;

	spin_unlock_irq(&card->lock);

	return 0;
}

void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
	struct client *client;
	struct inbound_phy_packet_event *e;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
		if (e == NULL) {
			fw_notice(card, "out of memory when allocating event\n");
			break;
		}
		e->phy_packet.closure = client->phy_receiver_closure;
		e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
		e->phy_packet.rcode = RCODE_COMPLETE;
		e->phy_packet.length = 8;
		e->phy_packet.data[0] = p->header[1];
		e->phy_packet.data[1] = p->header[2];
		queue_event(client, &e->event,
			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}

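/*
 * The table index is the _IOC_NR() of the corresponding FW_CDEV_IOC_*
 * command; all commands use the ioctl type '#', which dispatch_ioctl()
 * verifies.
 */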
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
	[0x17] = ioctl_set_iso_channels,
	[0x18] = ioctl_flush_iso,
};

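/*
 * Validates the command's type, number, and argument size, zeroes the
 * argument buffer for _IOC_READ commands so that no stale stack data can
 * leak to userspace, and copies the argument in and/or out around the
 * handler call according to _IOC_DIR().
 */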
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -ENOTTY;

	if (_IOC_DIR(cmd) == _IOC_READ)
		memset(&buffer, 0, _IOC_SIZE(cmd));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	ret = fw_iso_buffer_alloc(&client->buffer, page_count);
	if (ret < 0)
		return ret;

	spin_lock_irq(&client->lock);
	if (client->iso_context) {
		ret = fw_iso_buffer_map_dma(&client->buffer,
				client->device->card,
				iso_dma_direction(client->iso_context));
		client->buffer_is_mapped = (ret == 0);
	}
	spin_unlock_irq(&client->lock);
	if (ret < 0)
		goto fail;

	ret = fw_iso_buffer_map_vma(&client->buffer, vma);
	if (ret < 0)
		goto fail;

	return 0;
 fail:
	fw_iso_buffer_destroy(&client->buffer, client->device->card);
	return ret;
}

static int is_outbound_transaction_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;

	return resource->release == release_transaction;
}

static int has_outbound_transactions(struct client *client)
{
	int ret;

	spin_lock_irq(&client->lock);
	ret = idr_for_each(&client->resource_idr,
			   is_outbound_transaction_resource, NULL);
	spin_unlock_irq(&client->lock);

	return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	spin_lock_irq(&client->device->card->lock);
	list_del(&client->phy_receiver_link);
	spin_unlock_irq(&client->device->card->lock);

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.mmap		= fw_device_op_mmap,
	.release	= fw_device_op_release,
	.poll		= fw_device_op_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};
1/*
2 * Char device for device raw access
3 *
4 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/bug.h>
22#include <linux/compat.h>
23#include <linux/delay.h>
24#include <linux/device.h>
25#include <linux/errno.h>
26#include <linux/firewire.h>
27#include <linux/firewire-cdev.h>
28#include <linux/idr.h>
29#include <linux/irqflags.h>
30#include <linux/jiffies.h>
31#include <linux/kernel.h>
32#include <linux/kref.h>
33#include <linux/mm.h>
34#include <linux/module.h>
35#include <linux/mutex.h>
36#include <linux/poll.h>
37#include <linux/sched.h> /* required for linux/wait.h */
38#include <linux/slab.h>
39#include <linux/spinlock.h>
40#include <linux/string.h>
41#include <linux/time.h>
42#include <linux/uaccess.h>
43#include <linux/vmalloc.h>
44#include <linux/wait.h>
45#include <linux/workqueue.h>
46
47#include <asm/system.h>
48
49#include "core.h"
50
51/*
52 * ABI version history is documented in linux/firewire-cdev.h.
53 */
54#define FW_CDEV_KERNEL_VERSION 4
55#define FW_CDEV_VERSION_EVENT_REQUEST2 4
56#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
57
58struct client {
59 u32 version;
60 struct fw_device *device;
61
62 spinlock_t lock;
63 bool in_shutdown;
64 struct idr resource_idr;
65 struct list_head event_list;
66 wait_queue_head_t wait;
67 wait_queue_head_t tx_flush_wait;
68 u64 bus_reset_closure;
69
70 struct fw_iso_context *iso_context;
71 u64 iso_closure;
72 struct fw_iso_buffer buffer;
73 unsigned long vm_start;
74
75 struct list_head phy_receiver_link;
76 u64 phy_receiver_closure;
77
78 struct list_head link;
79 struct kref kref;
80};
81
82static inline void client_get(struct client *client)
83{
84 kref_get(&client->kref);
85}
86
87static void client_release(struct kref *kref)
88{
89 struct client *client = container_of(kref, struct client, kref);
90
91 fw_device_put(client->device);
92 kfree(client);
93}
94
95static void client_put(struct client *client)
96{
97 kref_put(&client->kref, client_release);
98}
99
100struct client_resource;
101typedef void (*client_resource_release_fn_t)(struct client *,
102 struct client_resource *);
103struct client_resource {
104 client_resource_release_fn_t release;
105 int handle;
106};
107
108struct address_handler_resource {
109 struct client_resource resource;
110 struct fw_address_handler handler;
111 __u64 closure;
112 struct client *client;
113};
114
115struct outbound_transaction_resource {
116 struct client_resource resource;
117 struct fw_transaction transaction;
118};
119
120struct inbound_transaction_resource {
121 struct client_resource resource;
122 struct fw_card *card;
123 struct fw_request *request;
124 void *data;
125 size_t length;
126};
127
128struct descriptor_resource {
129 struct client_resource resource;
130 struct fw_descriptor descriptor;
131 u32 data[0];
132};
133
134struct iso_resource {
135 struct client_resource resource;
136 struct client *client;
137 /* Schedule work and access todo only with client->lock held. */
138 struct delayed_work work;
139 enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
140 ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
141 int generation;
142 u64 channels;
143 s32 bandwidth;
144 struct iso_resource_event *e_alloc, *e_dealloc;
145};
146
147static void release_iso_resource(struct client *, struct client_resource *);
148
149static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
150{
151 client_get(r->client);
152 if (!queue_delayed_work(fw_workqueue, &r->work, delay))
153 client_put(r->client);
154}
155
156static void schedule_if_iso_resource(struct client_resource *resource)
157{
158 if (resource->release == release_iso_resource)
159 schedule_iso_resource(container_of(resource,
160 struct iso_resource, resource), 0);
161}
162
163/*
164 * dequeue_event() just kfree()'s the event, so the event has to be
165 * the first field in a struct XYZ_event.
166 */
167struct event {
168 struct { void *data; size_t size; } v[2];
169 struct list_head link;
170};
171
172struct bus_reset_event {
173 struct event event;
174 struct fw_cdev_event_bus_reset reset;
175};
176
177struct outbound_transaction_event {
178 struct event event;
179 struct client *client;
180 struct outbound_transaction_resource r;
181 struct fw_cdev_event_response response;
182};
183
184struct inbound_transaction_event {
185 struct event event;
186 union {
187 struct fw_cdev_event_request request;
188 struct fw_cdev_event_request2 request2;
189 } req;
190};
191
192struct iso_interrupt_event {
193 struct event event;
194 struct fw_cdev_event_iso_interrupt interrupt;
195};
196
197struct iso_interrupt_mc_event {
198 struct event event;
199 struct fw_cdev_event_iso_interrupt_mc interrupt;
200};
201
202struct iso_resource_event {
203 struct event event;
204 struct fw_cdev_event_iso_resource iso_resource;
205};
206
207struct outbound_phy_packet_event {
208 struct event event;
209 struct client *client;
210 struct fw_packet p;
211 struct fw_cdev_event_phy_packet phy_packet;
212};
213
214struct inbound_phy_packet_event {
215 struct event event;
216 struct fw_cdev_event_phy_packet phy_packet;
217};
218
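/*
 * Userspace pointers travel through the ioctl ABI as 64-bit integers.  For
 * 32-bit tasks on a 64-bit kernel they are converted with the compat
 * helpers so that the upper 32 bits are handled correctly.
 */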
219#ifdef CONFIG_COMPAT
220static void __user *u64_to_uptr(u64 value)
221{
222 if (is_compat_task())
223 return compat_ptr(value);
224 else
225 return (void __user *)(unsigned long)value;
226}
227
228static u64 uptr_to_u64(void __user *ptr)
229{
230 if (is_compat_task())
231 return ptr_to_compat(ptr);
232 else
233 return (u64)(unsigned long)ptr;
234}
235#else
236static inline void __user *u64_to_uptr(u64 value)
237{
238 return (void __user *)(unsigned long)value;
239}
240
241static inline u64 uptr_to_u64(void __user *ptr)
242{
243 return (u64)(unsigned long)ptr;
244}
245#endif /* CONFIG_COMPAT */
246
247static int fw_device_op_open(struct inode *inode, struct file *file)
248{
249 struct fw_device *device;
250 struct client *client;
251
252 device = fw_device_get_by_devt(inode->i_rdev);
253 if (device == NULL)
254 return -ENODEV;
255
256 if (fw_device_is_shutdown(device)) {
257 fw_device_put(device);
258 return -ENODEV;
259 }
260
261 client = kzalloc(sizeof(*client), GFP_KERNEL);
262 if (client == NULL) {
263 fw_device_put(device);
264 return -ENOMEM;
265 }
266
267 client->device = device;
268 spin_lock_init(&client->lock);
269 idr_init(&client->resource_idr);
270 INIT_LIST_HEAD(&client->event_list);
271 init_waitqueue_head(&client->wait);
272 init_waitqueue_head(&client->tx_flush_wait);
273 INIT_LIST_HEAD(&client->phy_receiver_link);
274 INIT_LIST_HEAD(&client->link);
275 kref_init(&client->kref);
276
277 file->private_data = client;
278
279 return nonseekable_open(inode, file);
280}
281
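/*
 * Append an event to the client's event list and wake up readers.  An event
 * consists of up to two segments, typically a fixed-size record and an
 * optional variable-length payload, which dequeue_event() copies back to
 * back into the read(2) buffer.
 */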
282static void queue_event(struct client *client, struct event *event,
283 void *data0, size_t size0, void *data1, size_t size1)
284{
285 unsigned long flags;
286
287 event->v[0].data = data0;
288 event->v[0].size = size0;
289 event->v[1].data = data1;
290 event->v[1].size = size1;
291
292 spin_lock_irqsave(&client->lock, flags);
293 if (client->in_shutdown)
294 kfree(event);
295 else
296 list_add_tail(&event->link, &client->event_list);
297 spin_unlock_irqrestore(&client->lock, flags);
298
299 wake_up_interruptible(&client->wait);
300}
301
302static int dequeue_event(struct client *client,
303 char __user *buffer, size_t count)
304{
305 struct event *event;
306 size_t size, total;
307 int i, ret;
308
309 ret = wait_event_interruptible(client->wait,
310 !list_empty(&client->event_list) ||
311 fw_device_is_shutdown(client->device));
312 if (ret < 0)
313 return ret;
314
315 if (list_empty(&client->event_list) &&
316 fw_device_is_shutdown(client->device))
317 return -ENODEV;
318
319 spin_lock_irq(&client->lock);
320 event = list_first_entry(&client->event_list, struct event, link);
321 list_del(&event->link);
322 spin_unlock_irq(&client->lock);
323
324 total = 0;
325 for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
326 size = min(event->v[i].size, count - total);
327 if (copy_to_user(buffer + total, event->v[i].data, size)) {
328 ret = -EFAULT;
329 goto out;
330 }
331 total += size;
332 }
333 ret = total;
334
335 out:
336 kfree(event);
337
338 return ret;
339}
340
341static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
342 size_t count, loff_t *offset)
343{
344 struct client *client = file->private_data;
345
346 return dequeue_event(client, buffer, count);
347}
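/*
 * Illustrative userspace sketch (not part of this driver): events are
 * consumed by read()ing into a buffer at least as large as the biggest
 * expected event, e.g.
 *
 *	union fw_cdev_event ev;
 *	ssize_t len = read(fd, &ev, sizeof(ev));
 *
 *	if (len > 0 && ev.common.type == FW_CDEV_EVENT_BUS_RESET)
 *		handle_bus_reset(&ev.bus_reset);
 *
 * where fd refers to an open /dev/fw* file and handle_bus_reset() is a
 * hypothetical application callback.
 */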
348
349static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
350 struct client *client)
351{
352 struct fw_card *card = client->device->card;
353
354 spin_lock_irq(&card->lock);
355
356 event->closure = client->bus_reset_closure;
357 event->type = FW_CDEV_EVENT_BUS_RESET;
358 event->generation = client->device->generation;
359 event->node_id = client->device->node_id;
360 event->local_node_id = card->local_node->node_id;
361 event->bm_node_id = card->bm_node_id;
362 event->irm_node_id = card->irm_node->node_id;
363 event->root_node_id = card->root_node->node_id;
364
365 spin_unlock_irq(&card->lock);
366}
367
368static void for_each_client(struct fw_device *device,
369 void (*callback)(struct client *client))
370{
371 struct client *c;
372
373 mutex_lock(&device->client_list_mutex);
374 list_for_each_entry(c, &device->client_list, link)
375 callback(c);
376 mutex_unlock(&device->client_list_mutex);
377}
378
379static int schedule_reallocations(int id, void *p, void *data)
380{
381 schedule_if_iso_resource(p);
382
383 return 0;
384}
385
386static void queue_bus_reset_event(struct client *client)
387{
388 struct bus_reset_event *e;
389
390 e = kzalloc(sizeof(*e), GFP_KERNEL);
391 if (e == NULL) {
392 fw_notify("Out of memory when allocating event\n");
393 return;
394 }
395
396 fill_bus_reset_event(&e->reset, client);
397
398 queue_event(client, &e->event,
399 &e->reset, sizeof(e->reset), NULL, 0);
400
401 spin_lock_irq(&client->lock);
402 idr_for_each(&client->resource_idr, schedule_reallocations, client);
403 spin_unlock_irq(&client->lock);
404}
405
406void fw_device_cdev_update(struct fw_device *device)
407{
408 for_each_client(device, queue_bus_reset_event);
409}
410
411static void wake_up_client(struct client *client)
412{
413 wake_up_interruptible(&client->wait);
414}
415
416void fw_device_cdev_remove(struct fw_device *device)
417{
418 for_each_client(device, wake_up_client);
419}
420
421union ioctl_arg {
422 struct fw_cdev_get_info get_info;
423 struct fw_cdev_send_request send_request;
424 struct fw_cdev_allocate allocate;
425 struct fw_cdev_deallocate deallocate;
426 struct fw_cdev_send_response send_response;
427 struct fw_cdev_initiate_bus_reset initiate_bus_reset;
428 struct fw_cdev_add_descriptor add_descriptor;
429 struct fw_cdev_remove_descriptor remove_descriptor;
430 struct fw_cdev_create_iso_context create_iso_context;
431 struct fw_cdev_queue_iso queue_iso;
432 struct fw_cdev_start_iso start_iso;
433 struct fw_cdev_stop_iso stop_iso;
434 struct fw_cdev_get_cycle_timer get_cycle_timer;
435 struct fw_cdev_allocate_iso_resource allocate_iso_resource;
436 struct fw_cdev_send_stream_packet send_stream_packet;
437 struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
438 struct fw_cdev_send_phy_packet send_phy_packet;
439 struct fw_cdev_receive_phy_packets receive_phy_packets;
440 struct fw_cdev_set_iso_channels set_iso_channels;
441};
442
443static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
444{
445 struct fw_cdev_get_info *a = &arg->get_info;
446 struct fw_cdev_event_bus_reset bus_reset;
447 unsigned long ret = 0;
448
449 client->version = a->version;
450 a->version = FW_CDEV_KERNEL_VERSION;
451 a->card = client->device->card->index;
452
453 down_read(&fw_device_rwsem);
454
455 if (a->rom != 0) {
456 size_t want = a->rom_length;
457 size_t have = client->device->config_rom_length * 4;
458
459 ret = copy_to_user(u64_to_uptr(a->rom),
460 client->device->config_rom, min(want, have));
461 }
462 a->rom_length = client->device->config_rom_length * 4;
463
464 up_read(&fw_device_rwsem);
465
466 if (ret != 0)
467 return -EFAULT;
468
469 mutex_lock(&client->device->client_list_mutex);
470
471 client->bus_reset_closure = a->bus_reset_closure;
472 if (a->bus_reset != 0) {
473 fill_bus_reset_event(&bus_reset, client);
474 ret = copy_to_user(u64_to_uptr(a->bus_reset),
475 &bus_reset, sizeof(bus_reset));
476 }
477 if (ret == 0 && list_empty(&client->link))
478 list_add_tail(&client->link, &client->device->client_list);
479
480 mutex_unlock(&client->device->client_list_mutex);
481
482 return ret ? -EFAULT : 0;
483}
484
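/*
 * Register a resource in the client's idr and hand a handle back to it.
 * With the idr_pre_get()/idr_get_new() API, the preallocation can be
 * consumed by a concurrent allocator, in which case idr_get_new() returns
 * -EAGAIN and the whole sequence is retried.
 */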
485static int add_client_resource(struct client *client,
486 struct client_resource *resource, gfp_t gfp_mask)
487{
488 unsigned long flags;
489 int ret;
490
491 retry:
492 if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
493 return -ENOMEM;
494
495 spin_lock_irqsave(&client->lock, flags);
496 if (client->in_shutdown)
497 ret = -ECANCELED;
498 else
499 ret = idr_get_new(&client->resource_idr, resource,
500 &resource->handle);
501 if (ret >= 0) {
502 client_get(client);
503 schedule_if_iso_resource(resource);
504 }
505 spin_unlock_irqrestore(&client->lock, flags);
506
507 if (ret == -EAGAIN)
508 goto retry;
509
510 return ret < 0 ? ret : 0;
511}
512
513static int release_client_resource(struct client *client, u32 handle,
514 client_resource_release_fn_t release,
515 struct client_resource **return_resource)
516{
517 struct client_resource *resource;
518
519 spin_lock_irq(&client->lock);
520 if (client->in_shutdown)
521 resource = NULL;
522 else
523 resource = idr_find(&client->resource_idr, handle);
524 if (resource && resource->release == release)
525 idr_remove(&client->resource_idr, handle);
526 spin_unlock_irq(&client->lock);
527
528 if (!(resource && resource->release == release))
529 return -EINVAL;
530
531 if (return_resource)
532 *return_resource = resource;
533 else
534 resource->release(client, resource);
535
536 client_put(client);
537
538 return 0;
539}
540
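/*
 * Intentionally empty:  outbound transactions are cleaned up entirely in
 * complete_transaction().  This function mostly serves as a tag which
 * identifies outbound transaction resources in the idr, see
 * is_outbound_transaction_resource().
 */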
541static void release_transaction(struct client *client,
542 struct client_resource *resource)
543{
544}
545
546static void complete_transaction(struct fw_card *card, int rcode,
547 void *payload, size_t length, void *data)
548{
549 struct outbound_transaction_event *e = data;
550 struct fw_cdev_event_response *rsp = &e->response;
551 struct client *client = e->client;
552 unsigned long flags;
553
554 if (length < rsp->length)
555 rsp->length = length;
556 if (rcode == RCODE_COMPLETE)
557 memcpy(rsp->data, payload, rsp->length);
558
559 spin_lock_irqsave(&client->lock, flags);
560 idr_remove(&client->resource_idr, e->r.resource.handle);
561 if (client->in_shutdown)
562 wake_up(&client->tx_flush_wait);
563 spin_unlock_irqrestore(&client->lock, flags);
564
565 rsp->type = FW_CDEV_EVENT_RESPONSE;
566 rsp->rcode = rcode;
567
568 /*
569 * In the case that sizeof(*rsp) doesn't align with the position of the
570 * data, and the read is short, preserve an extra copy of the data
571 * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
572 * for short reads and some apps depended on it, this is both safe
573 * and prudent for compatibility.
574 */
575 if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
576 queue_event(client, &e->event, rsp, sizeof(*rsp),
577 rsp->data, rsp->length);
578 else
579 queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
580 NULL, 0);
581
582 /* Drop the idr's reference */
583 client_put(client);
584}
585
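/*
 * Common setup for outbound requests:  validate the length against the
 * speed-dependent maximum payload (512 << speed, capped at 4096 bytes),
 * copy in the payload, register the transaction as a client resource, and
 * hand it to fw_send_request().
 */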
586static int init_request(struct client *client,
587 struct fw_cdev_send_request *request,
588 int destination_id, int speed)
589{
590 struct outbound_transaction_event *e;
591 int ret;
592
593 if (request->tcode != TCODE_STREAM_DATA &&
594 (request->length > 4096 || request->length > 512 << speed))
595 return -EIO;
596
597 if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
598 request->length < 4)
599 return -EINVAL;
600
601 e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
602 if (e == NULL)
603 return -ENOMEM;
604
605 e->client = client;
606 e->response.length = request->length;
607 e->response.closure = request->closure;
608
609 if (request->data &&
610 copy_from_user(e->response.data,
611 u64_to_uptr(request->data), request->length)) {
612 ret = -EFAULT;
613 goto failed;
614 }
615
616 e->r.resource.release = release_transaction;
617 ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
618 if (ret < 0)
619 goto failed;
620
621 fw_send_request(client->device->card, &e->r.transaction,
622 request->tcode, destination_id, request->generation,
623 speed, request->offset, e->response.data,
624 request->length, complete_transaction, e);
625 return 0;
626
627 failed:
628 kfree(e);
629
630 return ret;
631}
632
633static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
634{
635 switch (arg->send_request.tcode) {
636 case TCODE_WRITE_QUADLET_REQUEST:
637 case TCODE_WRITE_BLOCK_REQUEST:
638 case TCODE_READ_QUADLET_REQUEST:
639 case TCODE_READ_BLOCK_REQUEST:
640 case TCODE_LOCK_MASK_SWAP:
641 case TCODE_LOCK_COMPARE_SWAP:
642 case TCODE_LOCK_FETCH_ADD:
643 case TCODE_LOCK_LITTLE_ADD:
644 case TCODE_LOCK_BOUNDED_ADD:
645 case TCODE_LOCK_WRAP_ADD:
646 case TCODE_LOCK_VENDOR_DEPENDENT:
647 break;
648 default:
649 return -EINVAL;
650 }
651
652 return init_request(client, &arg->send_request, client->device->node_id,
653 client->device->max_speed);
654}
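/*
 * Illustrative userspace sketch (not part of this driver): a quadlet read
 * from a target's configuration ROM could be issued roughly like
 *
 *	struct fw_cdev_send_request rq = {
 *		.tcode      = TCODE_READ_QUADLET_REQUEST,
 *		.length     = 4,
 *		.offset     = 0xfffff0000400ULL,
 *		.generation = generation,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &rq);
 *
 * with generation taken from the most recent bus reset event.  The result
 * arrives asynchronously as an FW_CDEV_EVENT_RESPONSE event.
 */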
655
656static inline bool is_fcp_request(struct fw_request *request)
657{
658 return request == NULL;
659}
660
661static void release_request(struct client *client,
662 struct client_resource *resource)
663{
664 struct inbound_transaction_resource *r = container_of(resource,
665 struct inbound_transaction_resource, resource);
666
667 if (is_fcp_request(r->request))
668 kfree(r->data);
669 else
670 fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);
671
672 fw_card_put(r->card);
673 kfree(r);
674}
675
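/*
 * Dispatch an inbound request to the client which registered the address
 * range.  FCP requests arrive with request == NULL; their payload is
 * duplicated here because the core keeps ownership of the original buffer.
 */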
676static void handle_request(struct fw_card *card, struct fw_request *request,
677 int tcode, int destination, int source,
678 int generation, unsigned long long offset,
679 void *payload, size_t length, void *callback_data)
680{
681 struct address_handler_resource *handler = callback_data;
682 struct inbound_transaction_resource *r;
683 struct inbound_transaction_event *e;
684 size_t event_size0;
685 void *fcp_frame = NULL;
686 int ret;
687
688 /* card may be different from handler->client->device->card */
689 fw_card_get(card);
690
691 r = kmalloc(sizeof(*r), GFP_ATOMIC);
692 e = kmalloc(sizeof(*e), GFP_ATOMIC);
693 if (r == NULL || e == NULL) {
694 fw_notify("Out of memory when allocating event\n");
695 goto failed;
696 }
697 r->card = card;
698 r->request = request;
699 r->data = payload;
700 r->length = length;
701
702 if (is_fcp_request(request)) {
703 /*
704 * FIXME: Let core-transaction.c manage a
705 * single reference-counted copy?
706 */
707 fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
708 if (fcp_frame == NULL)
709 goto failed;
710
711 r->data = fcp_frame;
712 }
713
714 r->resource.release = release_request;
715 ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
716 if (ret < 0)
717 goto failed;
718
719 if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
720 struct fw_cdev_event_request *req = &e->req.request;
721
722 if (tcode & 0x10)
723 tcode = TCODE_LOCK_REQUEST;
724
725 req->type = FW_CDEV_EVENT_REQUEST;
726 req->tcode = tcode;
727 req->offset = offset;
728 req->length = length;
729 req->handle = r->resource.handle;
730 req->closure = handler->closure;
731 event_size0 = sizeof(*req);
732 } else {
733 struct fw_cdev_event_request2 *req = &e->req.request2;
734
735 req->type = FW_CDEV_EVENT_REQUEST2;
736 req->tcode = tcode;
737 req->offset = offset;
738 req->source_node_id = source;
739 req->destination_node_id = destination;
740 req->card = card->index;
741 req->generation = generation;
742 req->length = length;
743 req->handle = r->resource.handle;
744 req->closure = handler->closure;
745 event_size0 = sizeof(*req);
746 }
747
748 queue_event(handler->client, &e->event,
749 &e->req, event_size0, r->data, length);
750 return;
751
752 failed:
753 kfree(r);
754 kfree(e);
755 kfree(fcp_frame);
756
757 if (!is_fcp_request(request))
758 fw_send_response(card, request, RCODE_CONFLICT_ERROR);
759
760 fw_card_put(card);
761}
762
763static void release_address_handler(struct client *client,
764 struct client_resource *resource)
765{
766 struct address_handler_resource *r =
767 container_of(resource, struct address_handler_resource, resource);
768
769 fw_core_remove_address_handler(&r->handler);
770 kfree(r);
771}
772
773static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
774{
775 struct fw_cdev_allocate *a = &arg->allocate;
776 struct address_handler_resource *r;
777 struct fw_address_region region;
778 int ret;
779
780 r = kmalloc(sizeof(*r), GFP_KERNEL);
781 if (r == NULL)
782 return -ENOMEM;
783
784 region.start = a->offset;
785 if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
786 region.end = a->offset + a->length;
787 else
788 region.end = a->region_end;
789
790 r->handler.length = a->length;
791 r->handler.address_callback = handle_request;
792 r->handler.callback_data = r;
793 r->closure = a->closure;
794 r->client = client;
795
	ret = fw_core_add_address_handler(&r->handler, &region);
797 if (ret < 0) {
798 kfree(r);
799 return ret;
800 }
801 a->offset = r->handler.offset;
802
803 r->resource.release = release_address_handler;
804 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
805 if (ret < 0) {
806 release_address_handler(client, &r->resource);
807 return ret;
808 }
809 a->handle = r->resource.handle;
810
811 return 0;
812}
813
814static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
815{
816 return release_client_resource(client, arg->deallocate.handle,
817 release_address_handler, NULL);
818}
819
820static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
821{
822 struct fw_cdev_send_response *a = &arg->send_response;
823 struct client_resource *resource;
824 struct inbound_transaction_resource *r;
825 int ret = 0;
826
827 if (release_client_resource(client, a->handle,
828 release_request, &resource) < 0)
829 return -EINVAL;
830
831 r = container_of(resource, struct inbound_transaction_resource,
832 resource);
833 if (is_fcp_request(r->request))
834 goto out;
835
836 if (a->length != fw_get_response_length(r->request)) {
837 ret = -EINVAL;
838 kfree(r->request);
839 goto out;
840 }
841 if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
842 ret = -EFAULT;
843 kfree(r->request);
844 goto out;
845 }
846 fw_send_response(r->card, r->request, a->rcode);
847 out:
848 fw_card_put(r->card);
849 kfree(r);
850
851 return ret;
852}
853
854static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
855{
856 fw_schedule_bus_reset(client->device->card, true,
857 arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
858 return 0;
859}
860
861static void release_descriptor(struct client *client,
862 struct client_resource *resource)
863{
864 struct descriptor_resource *r =
865 container_of(resource, struct descriptor_resource, resource);
866
867 fw_core_remove_descriptor(&r->descriptor);
868 kfree(r);
869}
870
871static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
872{
873 struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
874 struct descriptor_resource *r;
875 int ret;
876
877 /* Access policy: Allow this ioctl only on local nodes' device files. */
878 if (!client->device->is_local)
879 return -ENOSYS;
880
881 if (a->length > 256)
882 return -EINVAL;
883
884 r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
885 if (r == NULL)
886 return -ENOMEM;
887
888 if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
889 ret = -EFAULT;
890 goto failed;
891 }
892
893 r->descriptor.length = a->length;
894 r->descriptor.immediate = a->immediate;
895 r->descriptor.key = a->key;
896 r->descriptor.data = r->data;
897
898 ret = fw_core_add_descriptor(&r->descriptor);
899 if (ret < 0)
900 goto failed;
901
902 r->resource.release = release_descriptor;
903 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
904 if (ret < 0) {
905 fw_core_remove_descriptor(&r->descriptor);
906 goto failed;
907 }
908 a->handle = r->resource.handle;
909
910 return 0;
911 failed:
912 kfree(r);
913
914 return ret;
915}
916
917static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
918{
919 return release_client_resource(client, arg->remove_descriptor.handle,
920 release_descriptor, NULL);
921}
922
923static void iso_callback(struct fw_iso_context *context, u32 cycle,
924 size_t header_length, void *header, void *data)
925{
926 struct client *client = data;
927 struct iso_interrupt_event *e;
928
929 e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
930 if (e == NULL) {
931 fw_notify("Out of memory when allocating event\n");
932 return;
933 }
934 e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
935 e->interrupt.closure = client->iso_closure;
936 e->interrupt.cycle = cycle;
937 e->interrupt.header_length = header_length;
938 memcpy(e->interrupt.header, header, header_length);
939 queue_event(client, &e->event, &e->interrupt,
940 sizeof(e->interrupt) + header_length, NULL, 0);
941}
942
943static void iso_mc_callback(struct fw_iso_context *context,
944 dma_addr_t completed, void *data)
945{
946 struct client *client = data;
947 struct iso_interrupt_mc_event *e;
948
949 e = kmalloc(sizeof(*e), GFP_ATOMIC);
950 if (e == NULL) {
951 fw_notify("Out of memory when allocating event\n");
952 return;
953 }
954 e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
955 e->interrupt.closure = client->iso_closure;
956 e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
957 completed);
958 queue_event(client, &e->event, &e->interrupt,
959 sizeof(e->interrupt), NULL, 0);
960}
961
962static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
963{
964 struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
965 struct fw_iso_context *context;
966 fw_iso_callback_t cb;
967
968 BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
969 FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
970 FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
971 FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);
972
973 switch (a->type) {
974 case FW_ISO_CONTEXT_TRANSMIT:
975 if (a->speed > SCODE_3200 || a->channel > 63)
976 return -EINVAL;
977
978 cb = iso_callback;
979 break;
980
981 case FW_ISO_CONTEXT_RECEIVE:
982 if (a->header_size < 4 || (a->header_size & 3) ||
983 a->channel > 63)
984 return -EINVAL;
985
986 cb = iso_callback;
987 break;
988
989 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
990 cb = (fw_iso_callback_t)iso_mc_callback;
991 break;
992
993 default:
994 return -EINVAL;
995 }
996
997 context = fw_iso_context_create(client->device->card, a->type,
998 a->channel, a->speed, a->header_size, cb, client);
999 if (IS_ERR(context))
1000 return PTR_ERR(context);
1001
1002 /* We only support one context at this time. */
1003 spin_lock_irq(&client->lock);
1004 if (client->iso_context != NULL) {
1005 spin_unlock_irq(&client->lock);
1006 fw_iso_context_destroy(context);
1007 return -EBUSY;
1008 }
1009 client->iso_closure = a->closure;
1010 client->iso_context = context;
1011 spin_unlock_irq(&client->lock);
1012
1013 a->handle = 0;
1014
1015 return 0;
1016}
1017
1018static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
1019{
1020 struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
1021 struct fw_iso_context *ctx = client->iso_context;
1022
1023 if (ctx == NULL || a->handle != 0)
1024 return -EINVAL;
1025
1026 return fw_iso_context_set_channels(ctx, &a->channels);
1027}
1028
1029/* Macros for decoding the iso packet control header. */
1030#define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff)
1031#define GET_INTERRUPT(v) (((v) >> 16) & 0x01)
1032#define GET_SKIP(v) (((v) >> 17) & 0x01)
1033#define GET_TAG(v) (((v) >> 18) & 0x03)
1034#define GET_SY(v) (((v) >> 20) & 0x0f)
1035#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
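/*
 * These decode the control word which userspace composes with the
 * FW_CDEV_ISO_* helpers from linux/firewire-cdev.h, e.g. roughly
 *
 *	p.control = FW_CDEV_ISO_PAYLOAD_LENGTH(len)
 *		  | FW_CDEV_ISO_HEADER_LENGTH(8)
 *		  | FW_CDEV_ISO_INTERRUPT;
 */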
1036
1037static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
1038{
1039 struct fw_cdev_queue_iso *a = &arg->queue_iso;
1040 struct fw_cdev_iso_packet __user *p, *end, *next;
1041 struct fw_iso_context *ctx = client->iso_context;
1042 unsigned long payload, buffer_end, transmit_header_bytes = 0;
1043 u32 control;
1044 int count;
1045 struct {
1046 struct fw_iso_packet packet;
1047 u8 header[256];
1048 } u;
1049
1050 if (ctx == NULL || a->handle != 0)
1051 return -EINVAL;
1052
1053 /*
1054 * If the user passes a non-NULL data pointer, has mmap()'ed
1055 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly. Otherwise we
1057 * set them both to 0, which will still let packets with
1058 * payload_length == 0 through. In other words, if no packets
1059 * use the indirect payload, the iso buffer need not be mapped
1060 * and the a->data pointer is ignored.
1061 */
1062 payload = (unsigned long)a->data - client->vm_start;
1063 buffer_end = client->buffer.page_count << PAGE_SHIFT;
1064 if (a->data == 0 || client->buffer.pages == NULL ||
1065 payload >= buffer_end) {
1066 payload = 0;
1067 buffer_end = 0;
1068 }
1069
1070 if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
1071 return -EINVAL;
1072
1073 p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
1074 if (!access_ok(VERIFY_READ, p, a->size))
1075 return -EFAULT;
1076
1077 end = (void __user *)p + a->size;
1078 count = 0;
1079 while (p < end) {
1080 if (get_user(control, &p->control))
1081 return -EFAULT;
1082 u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
1083 u.packet.interrupt = GET_INTERRUPT(control);
1084 u.packet.skip = GET_SKIP(control);
1085 u.packet.tag = GET_TAG(control);
1086 u.packet.sy = GET_SY(control);
1087 u.packet.header_length = GET_HEADER_LENGTH(control);
1088
1089 switch (ctx->type) {
1090 case FW_ISO_CONTEXT_TRANSMIT:
1091 if (u.packet.header_length & 3)
1092 return -EINVAL;
1093 transmit_header_bytes = u.packet.header_length;
1094 break;
1095
1096 case FW_ISO_CONTEXT_RECEIVE:
1097 if (u.packet.header_length == 0 ||
1098 u.packet.header_length % ctx->header_size != 0)
1099 return -EINVAL;
1100 break;
1101
1102 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
1103 if (u.packet.payload_length == 0 ||
1104 u.packet.payload_length & 3)
1105 return -EINVAL;
1106 break;
1107 }
1108
1109 next = (struct fw_cdev_iso_packet __user *)
1110 &p->header[transmit_header_bytes / 4];
1111 if (next > end)
1112 return -EINVAL;
1113 if (__copy_from_user
1114 (u.packet.header, p->header, transmit_header_bytes))
1115 return -EFAULT;
1116 if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
1117 u.packet.header_length + u.packet.payload_length > 0)
1118 return -EINVAL;
1119 if (payload + u.packet.payload_length > buffer_end)
1120 return -EINVAL;
1121
1122 if (fw_iso_context_queue(ctx, &u.packet,
1123 &client->buffer, payload))
1124 break;
1125
1126 p = next;
1127 payload += u.packet.payload_length;
1128 count++;
1129 }
1130 fw_iso_context_queue_flush(ctx);
1131
1132 a->size -= uptr_to_u64(p) - a->packets;
1133 a->packets = uptr_to_u64(p);
1134 a->data = client->vm_start + payload;
1135
1136 return count;
1137}
1138
1139static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
1140{
1141 struct fw_cdev_start_iso *a = &arg->start_iso;
1142
1143 BUILD_BUG_ON(
1144 FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
1145 FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
1146 FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
1147 FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
1148 FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);
1149
1150 if (client->iso_context == NULL || a->handle != 0)
1151 return -EINVAL;
1152
1153 if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
1154 (a->tags == 0 || a->tags > 15 || a->sync > 15))
1155 return -EINVAL;
1156
1157 return fw_iso_context_start(client->iso_context,
1158 a->cycle, a->sync, a->tags);
1159}
1160
1161static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
1162{
1163 struct fw_cdev_stop_iso *a = &arg->stop_iso;
1164
1165 if (client->iso_context == NULL || a->handle != 0)
1166 return -EINVAL;
1167
1168 return fw_iso_context_stop(client->iso_context);
1169}
1170
1171static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
1172{
1173 struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
1174 struct fw_card *card = client->device->card;
1175 struct timespec ts = {0, 0};
1176 u32 cycle_time;
1177 int ret = 0;
1178
1179 local_irq_disable();
1180
1181 cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
1182
1183 switch (a->clk_id) {
1184 case CLOCK_REALTIME: getnstimeofday(&ts); break;
1185 case CLOCK_MONOTONIC: do_posix_clock_monotonic_gettime(&ts); break;
1186 case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break;
1187 default:
1188 ret = -EINVAL;
1189 }
1190
1191 local_irq_enable();
1192
1193 a->tv_sec = ts.tv_sec;
1194 a->tv_nsec = ts.tv_nsec;
1195 a->cycle_timer = cycle_time;
1196
1197 return ret;
1198}
1199
1200static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
1201{
1202 struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
1203 struct fw_cdev_get_cycle_timer2 ct2;
1204
1205 ct2.clk_id = CLOCK_REALTIME;
1206 ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
1207
1208 a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
1209 a->cycle_timer = ct2.cycle_timer;
1210
1211 return 0;
1212}
1213
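/*
 * Worker for allocation, reallocation after bus reset, and deallocation of
 * isochronous channels and bandwidth.  Shortly after a bus reset it defers
 * new allocations by rescheduling itself, to give reallocations of
 * previously held resources a head start.
 */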
1214static void iso_resource_work(struct work_struct *work)
1215{
1216 struct iso_resource_event *e;
1217 struct iso_resource *r =
1218 container_of(work, struct iso_resource, work.work);
1219 struct client *client = r->client;
1220 int generation, channel, bandwidth, todo;
1221 bool skip, free, success;
1222
1223 spin_lock_irq(&client->lock);
1224 generation = client->device->generation;
1225 todo = r->todo;
1226 /* Allow 1000ms grace period for other reallocations. */
1227 if (todo == ISO_RES_ALLOC &&
1228 time_before64(get_jiffies_64(),
1229 client->device->card->reset_jiffies + HZ)) {
1230 schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
1231 skip = true;
1232 } else {
1233 /* We could be called twice within the same generation. */
1234 skip = todo == ISO_RES_REALLOC &&
1235 r->generation == generation;
1236 }
1237 free = todo == ISO_RES_DEALLOC ||
1238 todo == ISO_RES_ALLOC_ONCE ||
1239 todo == ISO_RES_DEALLOC_ONCE;
1240 r->generation = generation;
1241 spin_unlock_irq(&client->lock);
1242
1243 if (skip)
1244 goto out;
1245
1246 bandwidth = r->bandwidth;
1247
1248 fw_iso_resource_manage(client->device->card, generation,
1249 r->channels, &channel, &bandwidth,
1250 todo == ISO_RES_ALLOC ||
1251 todo == ISO_RES_REALLOC ||
1252 todo == ISO_RES_ALLOC_ONCE);
1253 /*
1254 * Is this generation outdated already? As long as this resource sticks
1255 * in the idr, it will be scheduled again for a newer generation or at
1256 * shutdown.
1257 */
1258 if (channel == -EAGAIN &&
1259 (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
1260 goto out;
1261
1262 success = channel >= 0 || bandwidth > 0;
1263
1264 spin_lock_irq(&client->lock);
1265 /*
	 * Transition from allocation to reallocation, unless the client
1267 * requested deallocation in the meantime.
1268 */
1269 if (r->todo == ISO_RES_ALLOC)
1270 r->todo = ISO_RES_REALLOC;
1271 /*
1272 * Allocation or reallocation failure? Pull this resource out of the
1273 * idr and prepare for deletion, unless the client is shutting down.
1274 */
1275 if (r->todo == ISO_RES_REALLOC && !success &&
1276 !client->in_shutdown &&
1277 idr_find(&client->resource_idr, r->resource.handle)) {
1278 idr_remove(&client->resource_idr, r->resource.handle);
1279 client_put(client);
1280 free = true;
1281 }
1282 spin_unlock_irq(&client->lock);
1283
1284 if (todo == ISO_RES_ALLOC && channel >= 0)
1285 r->channels = 1ULL << channel;
1286
1287 if (todo == ISO_RES_REALLOC && success)
1288 goto out;
1289
1290 if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
1291 e = r->e_alloc;
1292 r->e_alloc = NULL;
1293 } else {
1294 e = r->e_dealloc;
1295 r->e_dealloc = NULL;
1296 }
1297 e->iso_resource.handle = r->resource.handle;
1298 e->iso_resource.channel = channel;
1299 e->iso_resource.bandwidth = bandwidth;
1300
1301 queue_event(client, &e->event,
1302 &e->iso_resource, sizeof(e->iso_resource), NULL, 0);
1303
1304 if (free) {
1305 cancel_delayed_work(&r->work);
1306 kfree(r->e_alloc);
1307 kfree(r->e_dealloc);
1308 kfree(r);
1309 }
1310 out:
1311 client_put(client);
1312}
1313
1314static void release_iso_resource(struct client *client,
1315 struct client_resource *resource)
1316{
1317 struct iso_resource *r =
1318 container_of(resource, struct iso_resource, resource);
1319
1320 spin_lock_irq(&client->lock);
1321 r->todo = ISO_RES_DEALLOC;
1322 schedule_iso_resource(r, 0);
1323 spin_unlock_irq(&client->lock);
1324}
1325
1326static int init_iso_resource(struct client *client,
1327 struct fw_cdev_allocate_iso_resource *request, int todo)
1328{
1329 struct iso_resource_event *e1, *e2;
1330 struct iso_resource *r;
1331 int ret;
1332
1333 if ((request->channels == 0 && request->bandwidth == 0) ||
1334 request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
1335 request->bandwidth < 0)
1336 return -EINVAL;
1337
1338 r = kmalloc(sizeof(*r), GFP_KERNEL);
1339 e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
1340 e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
1341 if (r == NULL || e1 == NULL || e2 == NULL) {
1342 ret = -ENOMEM;
1343 goto fail;
1344 }
1345
1346 INIT_DELAYED_WORK(&r->work, iso_resource_work);
1347 r->client = client;
1348 r->todo = todo;
1349 r->generation = -1;
1350 r->channels = request->channels;
1351 r->bandwidth = request->bandwidth;
1352 r->e_alloc = e1;
1353 r->e_dealloc = e2;
1354
1355 e1->iso_resource.closure = request->closure;
1356 e1->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
1357 e2->iso_resource.closure = request->closure;
1358 e2->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
1359
1360 if (todo == ISO_RES_ALLOC) {
1361 r->resource.release = release_iso_resource;
1362 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
1363 if (ret < 0)
1364 goto fail;
1365 } else {
1366 r->resource.release = NULL;
1367 r->resource.handle = -1;
1368 schedule_iso_resource(r, 0);
1369 }
1370 request->handle = r->resource.handle;
1371
1372 return 0;
1373 fail:
1374 kfree(r);
1375 kfree(e1);
1376 kfree(e2);
1377
1378 return ret;
1379}
1380
1381static int ioctl_allocate_iso_resource(struct client *client,
1382 union ioctl_arg *arg)
1383{
1384 return init_iso_resource(client,
1385 &arg->allocate_iso_resource, ISO_RES_ALLOC);
1386}
1387
1388static int ioctl_deallocate_iso_resource(struct client *client,
1389 union ioctl_arg *arg)
1390{
1391 return release_client_resource(client,
1392 arg->deallocate.handle, release_iso_resource, NULL);
1393}
1394
1395static int ioctl_allocate_iso_resource_once(struct client *client,
1396 union ioctl_arg *arg)
1397{
1398 return init_iso_resource(client,
1399 &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
1400}
1401
1402static int ioctl_deallocate_iso_resource_once(struct client *client,
1403 union ioctl_arg *arg)
1404{
1405 return init_iso_resource(client,
1406 &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
1407}
1408
1409/*
1410 * Returns a speed code: Maximum speed to or from this device,
1411 * limited by the device's link speed, the local node's link speed,
1412 * and all PHY port speeds between the two links.
1413 */
1414static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
1415{
1416 return client->device->max_speed;
1417}
1418
1419static int ioctl_send_broadcast_request(struct client *client,
1420 union ioctl_arg *arg)
1421{
1422 struct fw_cdev_send_request *a = &arg->send_request;
1423
1424 switch (a->tcode) {
1425 case TCODE_WRITE_QUADLET_REQUEST:
1426 case TCODE_WRITE_BLOCK_REQUEST:
1427 break;
1428 default:
1429 return -EINVAL;
1430 }
1431
1432 /* Security policy: Only allow accesses to Units Space. */
1433 if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1434 return -EACCES;
1435
1436 return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
1437}
1438
1439static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
1440{
1441 struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
1442 struct fw_cdev_send_request request;
1443 int dest;
1444
1445 if (a->speed > client->device->card->link_speed ||
1446 a->length > 1024 << a->speed)
1447 return -EIO;
1448
1449 if (a->tag > 3 || a->channel > 63 || a->sy > 15)
1450 return -EINVAL;
1451
1452 dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
1453 request.tcode = TCODE_STREAM_DATA;
1454 request.length = a->length;
1455 request.closure = a->closure;
1456 request.data = a->data;
1457 request.generation = a->generation;
1458
1459 return init_request(client, &request, dest, a->speed);
1460}
1461
1462static void outbound_phy_packet_callback(struct fw_packet *packet,
1463 struct fw_card *card, int status)
1464{
1465 struct outbound_phy_packet_event *e =
1466 container_of(packet, struct outbound_phy_packet_event, p);
1467
1468 switch (status) {
1469 /* expected: */
1470 case ACK_COMPLETE: e->phy_packet.rcode = RCODE_COMPLETE; break;
1471 /* should never happen with PHY packets: */
1472 case ACK_PENDING: e->phy_packet.rcode = RCODE_COMPLETE; break;
1473 case ACK_BUSY_X:
1474 case ACK_BUSY_A:
1475 case ACK_BUSY_B: e->phy_packet.rcode = RCODE_BUSY; break;
1476 case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break;
1477 case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break;
1478 /* stale generation; cancelled; on certain controllers: no ack */
1479 default: e->phy_packet.rcode = status; break;
1480 }
1481 e->phy_packet.data[0] = packet->timestamp;
1482
1483 queue_event(e->client, &e->event, &e->phy_packet,
1484 sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
1485 client_put(e->client);
1486}
1487
1488static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
1489{
1490 struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
1491 struct fw_card *card = client->device->card;
1492 struct outbound_phy_packet_event *e;
1493
1494 /* Access policy: Allow this ioctl only on local nodes' device files. */
1495 if (!client->device->is_local)
1496 return -ENOSYS;
1497
1498 e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
1499 if (e == NULL)
1500 return -ENOMEM;
1501
1502 client_get(client);
1503 e->client = client;
1504 e->p.speed = SCODE_100;
1505 e->p.generation = a->generation;
1506 e->p.header[0] = TCODE_LINK_INTERNAL << 4;
1507 e->p.header[1] = a->data[0];
1508 e->p.header[2] = a->data[1];
1509 e->p.header_length = 12;
1510 e->p.callback = outbound_phy_packet_callback;
1511 e->phy_packet.closure = a->closure;
1512 e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT;
1513 if (is_ping_packet(a->data))
1514 e->phy_packet.length = 4;
1515
1516 card->driver->send_request(card, &e->p);
1517
1518 return 0;
1519}
1520
1521static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
1522{
1523 struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
1524 struct fw_card *card = client->device->card;
1525
1526 /* Access policy: Allow this ioctl only on local nodes' device files. */
1527 if (!client->device->is_local)
1528 return -ENOSYS;
1529
1530 spin_lock_irq(&card->lock);
1531
1532 list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
1533 client->phy_receiver_closure = a->closure;
1534
1535 spin_unlock_irq(&card->lock);
1536
1537 return 0;
1538}
1539
1540void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
1541{
1542 struct client *client;
1543 struct inbound_phy_packet_event *e;
1544 unsigned long flags;
1545
1546 spin_lock_irqsave(&card->lock, flags);
1547
1548 list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
1549 e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
1550 if (e == NULL) {
1551 fw_notify("Out of memory when allocating event\n");
1552 break;
1553 }
1554 e->phy_packet.closure = client->phy_receiver_closure;
1555 e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
1556 e->phy_packet.rcode = RCODE_COMPLETE;
1557 e->phy_packet.length = 8;
1558 e->phy_packet.data[0] = p->header[1];
1559 e->phy_packet.data[1] = p->header[2];
1560 queue_event(client, &e->event,
1561 &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
1562 }
1563
1564 spin_unlock_irqrestore(&card->lock, flags);
1565}
1566
1567static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
1568 [0x00] = ioctl_get_info,
1569 [0x01] = ioctl_send_request,
1570 [0x02] = ioctl_allocate,
1571 [0x03] = ioctl_deallocate,
1572 [0x04] = ioctl_send_response,
1573 [0x05] = ioctl_initiate_bus_reset,
1574 [0x06] = ioctl_add_descriptor,
1575 [0x07] = ioctl_remove_descriptor,
1576 [0x08] = ioctl_create_iso_context,
1577 [0x09] = ioctl_queue_iso,
1578 [0x0a] = ioctl_start_iso,
1579 [0x0b] = ioctl_stop_iso,
1580 [0x0c] = ioctl_get_cycle_timer,
1581 [0x0d] = ioctl_allocate_iso_resource,
1582 [0x0e] = ioctl_deallocate_iso_resource,
1583 [0x0f] = ioctl_allocate_iso_resource_once,
1584 [0x10] = ioctl_deallocate_iso_resource_once,
1585 [0x11] = ioctl_get_speed,
1586 [0x12] = ioctl_send_broadcast_request,
1587 [0x13] = ioctl_send_stream_packet,
1588 [0x14] = ioctl_get_cycle_timer2,
1589 [0x15] = ioctl_send_phy_packet,
1590 [0x16] = ioctl_receive_phy_packets,
1591 [0x17] = ioctl_set_iso_channels,
1592};
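/*
 * The table index equals _IOC_NR() of the corresponding FW_CDEV_IOC_*
 * command number; dispatch_ioctl() checks type, number, and argument size
 * before indexing into the table.
 */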
1593
1594static int dispatch_ioctl(struct client *client,
1595 unsigned int cmd, void __user *arg)
1596{
1597 union ioctl_arg buffer;
1598 int ret;
1599
1600 if (fw_device_is_shutdown(client->device))
1601 return -ENODEV;
1602
1603 if (_IOC_TYPE(cmd) != '#' ||
1604 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
1605 _IOC_SIZE(cmd) > sizeof(buffer))
1606 return -ENOTTY;
1607
1608 if (_IOC_DIR(cmd) == _IOC_READ)
1609 memset(&buffer, 0, _IOC_SIZE(cmd));
1610
1611 if (_IOC_DIR(cmd) & _IOC_WRITE)
1612 if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
1613 return -EFAULT;
1614
1615 ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
1616 if (ret < 0)
1617 return ret;
1618
1619 if (_IOC_DIR(cmd) & _IOC_READ)
1620 if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
1621 return -EFAULT;
1622
1623 return ret;
1624}
1625
1626static long fw_device_op_ioctl(struct file *file,
1627 unsigned int cmd, unsigned long arg)
1628{
1629 return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
1630}
1631
1632#ifdef CONFIG_COMPAT
1633static long fw_device_op_compat_ioctl(struct file *file,
1634 unsigned int cmd, unsigned long arg)
1635{
1636 return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
1637}
1638#endif
1639
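/*
 * mmap() of the device file maps the client's single isochronous DMA
 * buffer.  The DMA direction follows the page protection:  writable
 * mappings are meant for transmit contexts (DMA_TO_DEVICE), read-only
 * mappings for receive contexts (DMA_FROM_DEVICE).
 */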
1640static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
1641{
1642 struct client *client = file->private_data;
1643 enum dma_data_direction direction;
1644 unsigned long size;
1645 int page_count, ret;
1646
1647 if (fw_device_is_shutdown(client->device))
1648 return -ENODEV;
1649
1650 /* FIXME: We could support multiple buffers, but we don't. */
1651 if (client->buffer.pages != NULL)
1652 return -EBUSY;
1653
1654 if (!(vma->vm_flags & VM_SHARED))
1655 return -EINVAL;
1656
1657 if (vma->vm_start & ~PAGE_MASK)
1658 return -EINVAL;
1659
1660 client->vm_start = vma->vm_start;
1661 size = vma->vm_end - vma->vm_start;
1662 page_count = size >> PAGE_SHIFT;
1663 if (size & ~PAGE_MASK)
1664 return -EINVAL;
1665
1666 if (vma->vm_flags & VM_WRITE)
1667 direction = DMA_TO_DEVICE;
1668 else
1669 direction = DMA_FROM_DEVICE;
1670
1671 ret = fw_iso_buffer_init(&client->buffer, client->device->card,
1672 page_count, direction);
1673 if (ret < 0)
1674 return ret;
1675
1676 ret = fw_iso_buffer_map(&client->buffer, vma);
1677 if (ret < 0)
1678 fw_iso_buffer_destroy(&client->buffer, client->device->card);
1679
1680 return ret;
1681}
1682
1683static int is_outbound_transaction_resource(int id, void *p, void *data)
1684{
1685 struct client_resource *resource = p;
1686
1687 return resource->release == release_transaction;
1688}
1689
1690static int has_outbound_transactions(struct client *client)
1691{
1692 int ret;
1693
1694 spin_lock_irq(&client->lock);
1695 ret = idr_for_each(&client->resource_idr,
1696 is_outbound_transaction_resource, NULL);
1697 spin_unlock_irq(&client->lock);
1698
1699 return ret;
1700}
1701
1702static int shutdown_resource(int id, void *p, void *data)
1703{
1704 struct client_resource *resource = p;
1705 struct client *client = data;
1706
1707 resource->release(client, resource);
1708 client_put(client);
1709
1710 return 0;
1711}
1712
1713static int fw_device_op_release(struct inode *inode, struct file *file)
1714{
1715 struct client *client = file->private_data;
1716 struct event *event, *next_event;
1717
1718 spin_lock_irq(&client->device->card->lock);
1719 list_del(&client->phy_receiver_link);
1720 spin_unlock_irq(&client->device->card->lock);
1721
1722 mutex_lock(&client->device->client_list_mutex);
1723 list_del(&client->link);
1724 mutex_unlock(&client->device->client_list_mutex);
1725
1726 if (client->iso_context)
1727 fw_iso_context_destroy(client->iso_context);
1728
1729 if (client->buffer.pages)
1730 fw_iso_buffer_destroy(&client->buffer, client->device->card);
1731
1732 /* Freeze client->resource_idr and client->event_list */
1733 spin_lock_irq(&client->lock);
1734 client->in_shutdown = true;
1735 spin_unlock_irq(&client->lock);
1736
1737 wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
1738
1739 idr_for_each(&client->resource_idr, shutdown_resource, client);
1740 idr_remove_all(&client->resource_idr);
1741 idr_destroy(&client->resource_idr);
1742
1743 list_for_each_entry_safe(event, next_event, &client->event_list, link)
1744 kfree(event);
1745
1746 client_put(client);
1747
1748 return 0;
1749}
1750
static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
1752{
1753 struct client *client = file->private_data;
1754 unsigned int mask = 0;
1755
1756 poll_wait(file, &client->wait, pt);
1757
1758 if (fw_device_is_shutdown(client->device))
1759 mask |= POLLHUP | POLLERR;
1760 if (!list_empty(&client->event_list))
1761 mask |= POLLIN | POLLRDNORM;
1762
1763 return mask;
1764}
1765
1766const struct file_operations fw_device_ops = {
1767 .owner = THIS_MODULE,
1768 .llseek = no_llseek,
1769 .open = fw_device_op_open,
1770 .read = fw_device_op_read,
1771 .unlocked_ioctl = fw_device_op_ioctl,
1772 .mmap = fw_device_op_mmap,
1773 .release = fw_device_op_release,
1774 .poll = fw_device_op_poll,
1775#ifdef CONFIG_COMPAT
1776 .compat_ioctl = fw_device_op_compat_ioctl,
1777#endif
1778};