// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>


#include "core.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			5
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW	5

struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	wait_queue_head_t tx_flush_wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;
	bool buffer_is_mapped;

	struct list_head phy_receiver_link;
	u64 phy_receiver_closure;

	struct list_head link;
	struct kref kref;
};

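/*
 * A client holds one reference for its open file, one per resource in
 * resource_idr, one per scheduled iso_resource work item, and one per
 * in-flight outbound PHY packet.  It is freed when the last reference
 * is dropped via client_put().
 */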
static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

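/*
 * Each queued iso_resource work item owns a client reference.  If the work
 * was already pending, queue_delayed_work() returns false and the reference
 * taken here is dropped again right away.
 */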
static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
		client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
	} req;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
	struct event event;
	struct client *client;
	struct fw_packet p;
	struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
	struct event event;
	struct fw_cdev_event_phy_packet phy_packet;
};

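/*
 * Pointers passed in from 32-bit userland must be decoded with compat_ptr();
 * in_compat_syscall() lets a 64-bit kernel serve both ABIs through the same
 * pair of helpers.
 */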
#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
	if (in_compat_syscall())
		return compat_ptr(value);
	else
		return (void __user *)(unsigned long)value;
}

static u64 uptr_to_u64(void __user *ptr)
{
	if (in_compat_syscall())
		return ptr_to_compat(ptr);
	else
		return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline u64 uptr_to_u64(void __user *ptr)
{
	return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	init_waitqueue_head(&client->tx_flush_wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	INIT_LIST_HEAD(&client->link);
	kref_init(&client->kref);

	file->private_data = client;

	return nonseekable_open(inode, file);
}

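/*
 * An event carries up to two data chunks, typically a fixed-size event
 * structure in v[0] and an optional variable-length payload in v[1], which
 * dequeue_event() copies to userspace back to back.
 */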
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
	    fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

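/* Snapshot the current bus topology for the client under card->lock. */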
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure	     = client->bus_reset_closure;
	event->type	     = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id	     = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = card->bm_node_id;
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return;

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

union ioctl_arg {
	struct fw_cdev_get_info			get_info;
	struct fw_cdev_send_request		send_request;
	struct fw_cdev_allocate			allocate;
	struct fw_cdev_deallocate		deallocate;
	struct fw_cdev_send_response		send_response;
	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
	struct fw_cdev_add_descriptor		add_descriptor;
	struct fw_cdev_remove_descriptor	remove_descriptor;
	struct fw_cdev_create_iso_context	create_iso_context;
	struct fw_cdev_queue_iso		queue_iso;
	struct fw_cdev_start_iso		start_iso;
	struct fw_cdev_stop_iso			stop_iso;
	struct fw_cdev_get_cycle_timer		get_cycle_timer;
	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
	struct fw_cdev_send_stream_packet	send_stream_packet;
	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
	struct fw_cdev_send_phy_packet		send_phy_packet;
	struct fw_cdev_receive_phy_packets	receive_phy_packets;
	struct fw_cdev_set_iso_channels		set_iso_channels;
	struct fw_cdev_flush_iso		flush_iso;
};

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_info *a = &arg->get_info;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = a->version;
	a->version = FW_CDEV_KERNEL_VERSION;
	a->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (a->rom != 0) {
		size_t want = a->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(u64_to_uptr(a->rom),
				   client->device->config_rom, min(want, have));
	}
	a->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	mutex_lock(&client->device->client_list_mutex);

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		/* unaligned size of bus_reset is 36 bytes */
		ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
	}
	if (ret == 0 && list_empty(&client->link))
		list_add_tail(&client->link, &client->device->client_list);

	mutex_unlock(&client->device->client_list_mutex);

	return ret ? -EFAULT : 0;
}

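/*
 * Allocate an idr handle for the resource.  If the caller may block,
 * idr_preload() fills the idr cache up front so that the actual allocation
 * can be done with GFP_NOWAIT while client->lock is held.
 */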
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&client->lock, flags);

	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_alloc(&client->resource_idr, resource, 0, 0,
				GFP_NOWAIT);
	if (ret >= 0) {
		resource->handle = ret;
		client_get(client);
		schedule_if_iso_resource(resource);
	}

	spin_unlock_irqrestore(&client->lock, flags);
	if (preload)
		idr_preload_end();

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(resource && resource->release == release))
		return -EINVAL;

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	idr_remove(&client->resource_idr, e->r.resource.handle);
	if (client->in_shutdown)
		wake_up(&client->tx_flush_wait);
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the idr's reference */
	client_put(client);
}

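/*
 * Build and send an outbound transaction.  Non-stream payloads are limited
 * to 512 << speed bytes (the bus's per-speed maximum asynchronous payload)
 * and to 4096 bytes overall; stream packet lengths are checked by
 * ioctl_send_stream_packet() instead.
 */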
static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}

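/*
 * The core passes a NULL fw_request for inbound FCP writes, which it has
 * already responded to itself.  Userspace therefore only receives a copy of
 * the payload and neither can nor must send a response for them.
 */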
static inline bool is_fcp_request(struct fw_request *request)
{
	return request == NULL;
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	if (is_fcp_request(r->request))
		kfree(r->data);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	void *fcp_frame = NULL;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL)
		goto failed;

	r->card    = card;
	r->request = request;
	r->data    = payload;
	r->length  = length;

	if (is_fcp_request(request)) {
		/*
		 * FIXME: Let core-transaction.c manage a
		 * single reference-counted copy?
		 */
		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
		if (fcp_frame == NULL)
			goto failed;

		r->data = fcp_frame;
	}

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type	= FW_CDEV_EVENT_REQUEST;
		req->tcode	= tcode;
		req->offset	= offset;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	} else {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type	= FW_CDEV_EVENT_REQUEST2;
		req->tcode	= tcode;
		req->offset	= offset;
		req->source_node_id	 = source;
		req->destination_node_id = destination;
		req->card	= card->index;
		req->generation	= generation;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	kfree(fcp_frame);

	if (!is_fcp_request(request))
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);

	fw_card_put(card);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length           = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data    = r;
	r->closure = a->closure;
	r->client  = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (is_fcp_request(r->request)) {
		kfree(r->data);
		goto out;
	}

	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		kfree(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		kfree(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
	return 0;
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key       = a->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.cycle     = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static void iso_mc_callback(struct fw_iso_context *context,
			    dma_addr_t completed, void *data)
{
	struct client *client = data;
	struct iso_interrupt_mc_event *e;

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
						      completed);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt), NULL, 0);
}

static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
{
	if (context->type == FW_ISO_CONTEXT_TRANSMIT)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
						fw_iso_mc_callback_t callback,
						void *callback_data)
{
	struct fw_iso_context *ctx;

	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
				    0, 0, 0, NULL, callback_data);
	if (!IS_ERR(ctx))
		ctx->callback.mc = callback;

	return ctx;
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;
	union fw_iso_callback cb;
	int ret;

	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

	switch (a->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200 || a->channel > 63)
			return -EINVAL;

		cb.sc = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		if (a->header_size < 4 || (a->header_size & 3) ||
		    a->channel > 63)
			return -EINVAL;

		cb.sc = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		cb.mc = iso_mc_callback;
		break;

	default:
		return -EINVAL;
	}

	if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
		context = fw_iso_mc_context_create(client->device->card, cb.mc,
						   client);
	else
		context = fw_iso_context_create(client->device->card, a->type,
						a->channel, a->speed,
						a->header_size, cb.sc, client);
	if (IS_ERR(context))
		return PTR_ERR(context);
	if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
		context->drop_overflow_headers = true;

	/* We only support one context at this time. */
	spin_lock_irq(&client->lock);
	if (client->iso_context != NULL) {
		spin_unlock_irq(&client->lock);
		fw_iso_context_destroy(context);

		return -EBUSY;
	}
	if (!client->buffer_is_mapped) {
		ret = fw_iso_buffer_map_dma(&client->buffer,
					    client->device->card,
					    iso_dma_direction(context));
		if (ret < 0) {
			spin_unlock_irq(&client->lock);
			fw_iso_context_destroy(context);

			return ret;
		}
		client->buffer_is_mapped = true;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;
	spin_unlock_irq(&client->lock);

	a->handle = 0;

	return 0;
}

static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
	struct fw_iso_context *ctx = client->iso_context;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_set_channels(ctx, &a->channels);
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
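/*
 * That is, from the least significant bit up: payload_length in bits 0..15,
 * interrupt in bit 16, skip in bit 17, tag in bits 18..19, sy in bits 20..23,
 * and header_length in bits 24..31 of struct fw_cdev_iso_packet's control
 * word.
 */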

static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, transmit_header_bytes = 0;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */
	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
		return -EINVAL;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		switch (ctx->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			if (u.packet.header_length & 3)
				return -EINVAL;
			transmit_header_bytes = u.packet.header_length;
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			if (u.packet.header_length == 0 ||
			    u.packet.header_length % ctx->header_size != 0)
				return -EINVAL;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (u.packet.payload_length == 0 ||
			    u.packet.payload_length & 3)
				return -EINVAL;
			break;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[transmit_header_bytes / 4];
		if (next > end)
			return -EINVAL;
		if (copy_from_user
		    (u.packet.header, p->header, transmit_header_bytes))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}
	fw_iso_context_queue_flush(ctx);

	a->size    -= uptr_to_u64(p) - a->packets;
	a->packets  = uptr_to_u64(p);
	a->data     = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_start_iso *a = &arg->start_iso;

	BUILD_BUG_ON(
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
		return -EINVAL;

	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_stop_iso *a = &arg->stop_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_flush_iso *a = &arg->flush_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_flush_completions(client->iso_context);
}

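/*
 * Sample the bus cycle timer and the requested system clock as closely
 * together as possible; interrupts stay disabled in between so that no
 * interrupt handler can widen the gap between the two reads.
 */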
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
	struct fw_card *card = client->device->card;
	struct timespec64 ts = {0, 0};
	u32 cycle_time = 0;
	int ret = 0;

	local_irq_disable();

	ret = fw_card_read_cycle_time(card, &cycle_time);
	if (ret < 0)
		goto end;

	switch (a->clk_id) {
	case CLOCK_REALTIME:      ktime_get_real_ts64(&ts);	break;
	case CLOCK_MONOTONIC:     ktime_get_ts64(&ts);		break;
	case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts);	break;
	default:
		ret = -EINVAL;
	}
end:
	local_irq_enable();

	a->tv_sec      = ts.tv_sec;
	a->tv_nsec     = ts.tv_nsec;
	a->cycle_timer = cycle_time;

	return ret;
}

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}

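/*
 * State machine for r->todo: ISO_RES_ALLOC transitions to ISO_RES_REALLOC
 * after the first allocation attempt; reallocation is then retried after
 * every bus reset until it fails or the client requests ISO_RES_DEALLOC.
 * The *_ONCE variants are one-shot operations without an idr entry.
 */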
static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_before64(get_jiffies_64(),
			  client->device->card->reset_jiffies + HZ)) {
		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE);
	/*
	 * Is this generation outdated already?  As long as this resource
	 * sticks in the idr, it will be scheduled again for a newer
	 * generation or at shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_remove(&client->resource_idr, r->resource.handle)) {
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle    = r->resource.handle;
	e->iso_resource.channel   = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
	spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client	= client;
	r->todo		= todo;
	r->generation	= -1;
	r->channels	= request->channels;
	r->bandwidth	= request->bandwidth;
	r->e_alloc	= e1;
	r->e_dealloc	= e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client,
				       union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
					 union ioctl_arg *arg)
{
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
					    union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
					      union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client,
					union ioctl_arg *arg)
{
	struct fw_cdev_send_request *a = &arg->send_request;

	switch (a->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	request.tcode		= TCODE_STREAM_DATA;
	request.length		= a->length;
	request.closure		= a->closure;
	request.data		= a->data;
	request.generation	= a->generation;

	return init_request(client, &request, dest, a->speed);
}

static void outbound_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	struct outbound_phy_packet_event *e =
		container_of(packet, struct outbound_phy_packet_event, p);
	struct client *e_client;

	switch (status) {
	/* expected: */
	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	/* should never happen with PHY packets: */
	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
	/* stale generation; cancelled; on certain controllers: no ack */
	default:		e->phy_packet.rcode = status;		break;
	}
	e->phy_packet.data[0] = packet->timestamp;

	e_client = e->client;
	queue_event(e->client, &e->event, &e->phy_packet,
		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
	client_put(e_client);
}

static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
	struct fw_card *card = client->device->card;
	struct outbound_phy_packet_event *e;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	client_get(client);
	e->client		= client;
	e->p.speed		= SCODE_100;
	e->p.generation		= a->generation;
	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
	e->p.header[1]		= a->data[0];
	e->p.header[2]		= a->data[1];
	e->p.header_length	= 12;
	e->p.callback		= outbound_phy_packet_callback;
	e->phy_packet.closure	= a->closure;
	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
	if (is_ping_packet(a->data))
		e->phy_packet.length = 4;

	card->driver->send_request(card, &e->p);

	return 0;
}

static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
	struct fw_card *card = client->device->card;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	spin_lock_irq(&card->lock);

	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
	client->phy_receiver_closure = a->closure;

	spin_unlock_irq(&card->lock);

	return 0;
}

void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
	struct client *client;
	struct inbound_phy_packet_event *e;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
		if (e == NULL)
			break;

		e->phy_packet.closure	= client->phy_receiver_closure;
		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
		e->phy_packet.rcode	= RCODE_COMPLETE;
		e->phy_packet.length	= 8;
		e->phy_packet.data[0]	= p->header[1];
		e->phy_packet.data[1]	= p->header[2];
		queue_event(client, &e->event,
			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}

static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
	[0x17] = ioctl_set_iso_channels,
	[0x18] = ioctl_flush_iso,
};

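/*
 * All firewire-cdev ioctls use type '#'.  The command number selects the
 * handler, and _IOC_DIR()/_IOC_SIZE() drive the copy of the argument
 * structure into and out of the kernel.
 */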
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -ENOTTY;

	memset(&buffer, 0, sizeof(buffer));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

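/*
 * mmap() of the cdev provides the payload buffer for isochronous I/O.
 * The buffer is allocated here and, if an iso context already exists,
 * mapped for DMA right away; otherwise ioctl_create_iso_context() maps
 * it later.
 */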
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	ret = fw_iso_buffer_alloc(&client->buffer, page_count);
	if (ret < 0)
		return ret;

	spin_lock_irq(&client->lock);
	if (client->iso_context) {
		ret = fw_iso_buffer_map_dma(&client->buffer,
				client->device->card,
				iso_dma_direction(client->iso_context));
		client->buffer_is_mapped = (ret == 0);
	}
	spin_unlock_irq(&client->lock);
	if (ret < 0)
		goto fail;

	ret = vm_map_pages_zero(vma, client->buffer.pages,
				client->buffer.page_count);
	if (ret < 0)
		goto fail;

	return 0;
 fail:
	fw_iso_buffer_destroy(&client->buffer, client->device->card);
	return ret;
}

static int is_outbound_transaction_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;

	return resource->release == release_transaction;
}

static int has_outbound_transactions(struct client *client)
{
	int ret;

	spin_lock_irq(&client->lock);
	ret = idr_for_each(&client->resource_idr,
			   is_outbound_transaction_resource, NULL);
	spin_unlock_irq(&client->lock);

	return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}

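/*
 * Teardown order matters here: unlink the client, destroy the iso context
 * and buffer, freeze resource_idr and event_list by setting in_shutdown,
 * wait until all outbound transactions have completed, and only then
 * release the remaining resources and events.
 */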
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	spin_lock_irq(&client->device->card->lock);
	list_del(&client->phy_receiver_link);
	spin_unlock_irq(&client->device->card->lock);

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}

static __poll_t fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= EPOLLHUP | EPOLLERR;
	if (!list_empty(&client->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.mmap		= fw_device_op_mmap,
	.release	= fw_device_op_release,
	.poll		= fw_device_op_poll,
	.compat_ioctl	= compat_ptr_ioctl,
};
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Char device for device raw access
4 *
5 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
6 */
7
8#include <linux/bug.h>
9#include <linux/compat.h>
10#include <linux/delay.h>
11#include <linux/device.h>
12#include <linux/dma-mapping.h>
13#include <linux/errno.h>
14#include <linux/firewire.h>
15#include <linux/firewire-cdev.h>
16#include <linux/idr.h>
17#include <linux/irqflags.h>
18#include <linux/jiffies.h>
19#include <linux/kernel.h>
20#include <linux/kref.h>
21#include <linux/mm.h>
22#include <linux/module.h>
23#include <linux/mutex.h>
24#include <linux/poll.h>
25#include <linux/sched.h> /* required for linux/wait.h */
26#include <linux/slab.h>
27#include <linux/spinlock.h>
28#include <linux/string.h>
29#include <linux/time.h>
30#include <linux/uaccess.h>
31#include <linux/vmalloc.h>
32#include <linux/wait.h>
33#include <linux/workqueue.h>
34
35
36#include "core.h"
37
38/*
39 * ABI version history is documented in linux/firewire-cdev.h.
40 */
41#define FW_CDEV_KERNEL_VERSION 5
42#define FW_CDEV_VERSION_EVENT_REQUEST2 4
43#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
44#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
45
46struct client {
47 u32 version;
48 struct fw_device *device;
49
50 spinlock_t lock;
51 bool in_shutdown;
52 struct idr resource_idr;
53 struct list_head event_list;
54 wait_queue_head_t wait;
55 wait_queue_head_t tx_flush_wait;
56 u64 bus_reset_closure;
57
58 struct fw_iso_context *iso_context;
59 u64 iso_closure;
60 struct fw_iso_buffer buffer;
61 unsigned long vm_start;
62 bool buffer_is_mapped;
63
64 struct list_head phy_receiver_link;
65 u64 phy_receiver_closure;
66
67 struct list_head link;
68 struct kref kref;
69};
70
71static inline void client_get(struct client *client)
72{
73 kref_get(&client->kref);
74}
75
76static void client_release(struct kref *kref)
77{
78 struct client *client = container_of(kref, struct client, kref);
79
80 fw_device_put(client->device);
81 kfree(client);
82}
83
84static void client_put(struct client *client)
85{
86 kref_put(&client->kref, client_release);
87}
88
89struct client_resource;
90typedef void (*client_resource_release_fn_t)(struct client *,
91 struct client_resource *);
92struct client_resource {
93 client_resource_release_fn_t release;
94 int handle;
95};
96
97struct address_handler_resource {
98 struct client_resource resource;
99 struct fw_address_handler handler;
100 __u64 closure;
101 struct client *client;
102};
103
104struct outbound_transaction_resource {
105 struct client_resource resource;
106 struct fw_transaction transaction;
107};
108
109struct inbound_transaction_resource {
110 struct client_resource resource;
111 struct fw_card *card;
112 struct fw_request *request;
113 void *data;
114 size_t length;
115};
116
117struct descriptor_resource {
118 struct client_resource resource;
119 struct fw_descriptor descriptor;
120 u32 data[0];
121};
122
123struct iso_resource {
124 struct client_resource resource;
125 struct client *client;
126 /* Schedule work and access todo only with client->lock held. */
127 struct delayed_work work;
128 enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
129 ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
130 int generation;
131 u64 channels;
132 s32 bandwidth;
133 struct iso_resource_event *e_alloc, *e_dealloc;
134};
135
136static void release_iso_resource(struct client *, struct client_resource *);
137
138static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
139{
140 client_get(r->client);
141 if (!queue_delayed_work(fw_workqueue, &r->work, delay))
142 client_put(r->client);
143}
144
145static void schedule_if_iso_resource(struct client_resource *resource)
146{
147 if (resource->release == release_iso_resource)
148 schedule_iso_resource(container_of(resource,
149 struct iso_resource, resource), 0);
150}
151
/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};

struct bus_reset_event {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
        struct event event;
        struct client *client;
        struct outbound_transaction_resource r;
        struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
        struct event event;
        union {
                struct fw_cdev_event_request request;
                struct fw_cdev_event_request2 request2;
        } req;
};

struct iso_interrupt_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
        struct event event;
        struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
        struct event event;
        struct client *client;
        struct fw_packet p;
        struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
        struct event event;
        struct fw_cdev_event_phy_packet phy_packet;
};

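/*
 * ioctl arguments carry userspace pointers as u64.  For a 32-bit process
 * on a 64-bit kernel, compat_ptr() must be used for the conversion;
 * in_compat_syscall() distinguishes the two cases at runtime.
 */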
#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
        if (in_compat_syscall())
                return compat_ptr(value);
        else
                return (void __user *)(unsigned long)value;
}

static u64 uptr_to_u64(void __user *ptr)
{
        if (in_compat_syscall())
                return ptr_to_compat(ptr);
        else
                return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline u64 uptr_to_u64(void __user *ptr)
{
        return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */

static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;

        device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;

        if (fw_device_is_shutdown(device)) {
                fw_device_put(device);
                return -ENODEV;
        }

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL) {
                fw_device_put(device);
                return -ENOMEM;
        }

        client->device = device;
        spin_lock_init(&client->lock);
        idr_init(&client->resource_idr);
        INIT_LIST_HEAD(&client->event_list);
        init_waitqueue_head(&client->wait);
        init_waitqueue_head(&client->tx_flush_wait);
        INIT_LIST_HEAD(&client->phy_receiver_link);
        INIT_LIST_HEAD(&client->link);
        kref_init(&client->kref);

        file->private_data = client;

        return nonseekable_open(inode, file);
}

static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                kfree(event);
        else
                list_add_tail(&event->link, &client->event_list);
        spin_unlock_irqrestore(&client->lock, flags);

        wake_up_interruptible(&client->wait);
}

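/*
 * An event reaches userspace as a single read(): v[0] usually holds the
 * fixed-size event record and v[1] an optional payload.  A read shorter
 * than the event truncates it; the event is freed either way.
 */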
static int dequeue_event(struct client *client,
                         char __user *buffer, size_t count)
{
        struct event *event;
        size_t size, total;
        int i, ret;

        ret = wait_event_interruptible(client->wait,
                        !list_empty(&client->event_list) ||
                        fw_device_is_shutdown(client->device));
        if (ret < 0)
                return ret;

        if (list_empty(&client->event_list) &&
            fw_device_is_shutdown(client->device))
                return -ENODEV;

        spin_lock_irq(&client->lock);
        event = list_first_entry(&client->event_list, struct event, link);
        list_del(&event->link);
        spin_unlock_irq(&client->lock);

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
                        ret = -EFAULT;
                        goto out;
                }
                total += size;
        }
        ret = total;

 out:
        kfree(event);

        return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
                                 size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                                 struct client *client)
{
        struct fw_card *card = client->device->card;

        spin_lock_irq(&card->lock);

        event->closure       = client->bus_reset_closure;
        event->type          = FW_CDEV_EVENT_BUS_RESET;
        event->generation    = client->device->generation;
        event->node_id       = client->device->node_id;
        event->local_node_id = card->local_node->node_id;
        event->bm_node_id    = card->bm_node_id;
        event->irm_node_id   = card->irm_node->node_id;
        event->root_node_id  = card->root_node->node_id;

        spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
                            void (*callback)(struct client *client))
{
        struct client *c;

        mutex_lock(&device->client_list_mutex);
        list_for_each_entry(c, &device->client_list, link)
                callback(c);
        mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
        schedule_if_iso_resource(p);

        return 0;
}

static void queue_bus_reset_event(struct client *client)
{
        struct bus_reset_event *e;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return;

        fill_bus_reset_event(&e->reset, client);

        queue_event(client, &e->event,
                    &e->reset, sizeof(e->reset), NULL, 0);

        spin_lock_irq(&client->lock);
        idr_for_each(&client->resource_idr, schedule_reallocations, client);
        spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
        for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
        wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
        for_each_client(device, wake_up_client);
}

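/*
 * One member per ioctl; dispatch_ioctl() copies at most
 * sizeof(union ioctl_arg) bytes from and to userspace, according to the
 * size encoded in the ioctl number.
 */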
union ioctl_arg {
        struct fw_cdev_get_info                 get_info;
        struct fw_cdev_send_request             send_request;
        struct fw_cdev_allocate                 allocate;
        struct fw_cdev_deallocate               deallocate;
        struct fw_cdev_send_response            send_response;
        struct fw_cdev_initiate_bus_reset       initiate_bus_reset;
        struct fw_cdev_add_descriptor           add_descriptor;
        struct fw_cdev_remove_descriptor        remove_descriptor;
        struct fw_cdev_create_iso_context       create_iso_context;
        struct fw_cdev_queue_iso                queue_iso;
        struct fw_cdev_start_iso                start_iso;
        struct fw_cdev_stop_iso                 stop_iso;
        struct fw_cdev_get_cycle_timer          get_cycle_timer;
        struct fw_cdev_allocate_iso_resource    allocate_iso_resource;
        struct fw_cdev_send_stream_packet       send_stream_packet;
        struct fw_cdev_get_cycle_timer2         get_cycle_timer2;
        struct fw_cdev_send_phy_packet          send_phy_packet;
        struct fw_cdev_receive_phy_packets      receive_phy_packets;
        struct fw_cdev_set_iso_channels         set_iso_channels;
        struct fw_cdev_flush_iso                flush_iso;
};

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_info *a = &arg->get_info;
        struct fw_cdev_event_bus_reset bus_reset;
        unsigned long ret = 0;

        client->version = a->version;
        a->version = FW_CDEV_KERNEL_VERSION;
        a->card = client->device->card->index;

        down_read(&fw_device_rwsem);

        if (a->rom != 0) {
                size_t want = a->rom_length;
                size_t have = client->device->config_rom_length * 4;

                ret = copy_to_user(u64_to_uptr(a->rom),
                                   client->device->config_rom, min(want, have));
        }
        a->rom_length = client->device->config_rom_length * 4;

        up_read(&fw_device_rwsem);

        if (ret != 0)
                return -EFAULT;

        mutex_lock(&client->device->client_list_mutex);

        client->bus_reset_closure = a->bus_reset_closure;
        if (a->bus_reset != 0) {
                fill_bus_reset_event(&bus_reset, client);
                /* unaligned size of bus_reset is 36 bytes */
                ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
        }
        if (ret == 0 && list_empty(&client->link))
                list_add_tail(&client->link, &client->device->client_list);

        mutex_unlock(&client->device->client_list_mutex);

        return ret ? -EFAULT : 0;
}

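/*
 * Resource handles live in an IDR protected by client->lock.  When the
 * gfp mask allows blocking, idr_preload() pre-allocates memory outside
 * the spinlock so that the allocation under the lock can be GFP_NOWAIT.
 */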
static int add_client_resource(struct client *client,
                               struct client_resource *resource, gfp_t gfp_mask)
{
        bool preload = gfpflags_allow_blocking(gfp_mask);
        unsigned long flags;
        int ret;

        if (preload)
                idr_preload(gfp_mask);
        spin_lock_irqsave(&client->lock, flags);

        if (client->in_shutdown)
                ret = -ECANCELED;
        else
                ret = idr_alloc(&client->resource_idr, resource, 0, 0,
                                GFP_NOWAIT);
        if (ret >= 0) {
                resource->handle = ret;
                client_get(client);
                schedule_if_iso_resource(resource);
        }

        spin_unlock_irqrestore(&client->lock, flags);
        if (preload)
                idr_preload_end();

        return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
                                   client_resource_release_fn_t release,
                                   struct client_resource **return_resource)
{
        struct client_resource *resource;

        spin_lock_irq(&client->lock);
        if (client->in_shutdown)
                resource = NULL;
        else
                resource = idr_find(&client->resource_idr, handle);
        if (resource && resource->release == release)
                idr_remove(&client->resource_idr, handle);
        spin_unlock_irq(&client->lock);

        if (!(resource && resource->release == release))
                return -EINVAL;

        if (return_resource)
                *return_resource = resource;
        else
                resource->release(client, resource);

        client_put(client);

        return 0;
}

static void release_transaction(struct client *client,
                                struct client_resource *resource)
{
}

static void complete_transaction(struct fw_card *card, int rcode,
                                 void *payload, size_t length, void *data)
{
        struct outbound_transaction_event *e = data;
        struct fw_cdev_event_response *rsp = &e->response;
        struct client *client = e->client;
        unsigned long flags;

        if (length < rsp->length)
                rsp->length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(rsp->data, payload, rsp->length);

        spin_lock_irqsave(&client->lock, flags);
        idr_remove(&client->resource_idr, e->r.resource.handle);
        if (client->in_shutdown)
                wake_up(&client->tx_flush_wait);
        spin_unlock_irqrestore(&client->lock, flags);

        rsp->type = FW_CDEV_EVENT_RESPONSE;
        rsp->rcode = rcode;

        /*
         * In the case that sizeof(*rsp) doesn't align with the position of the
         * data, and the read is short, preserve an extra copy of the data
         * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
         * for short reads and some apps depended on it, this is both safe
         * and prudent for compatibility.
         */
        if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
                queue_event(client, &e->event, rsp, sizeof(*rsp),
                            rsp->data, rsp->length);
        else
                queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
                            NULL, 0);

        /* Drop the idr's reference */
        client_put(client);
}

static int init_request(struct client *client,
                        struct fw_cdev_send_request *request,
                        int destination_id, int speed)
{
        struct outbound_transaction_event *e;
        int ret;

        if (request->tcode != TCODE_STREAM_DATA &&
            (request->length > 4096 || request->length > 512 << speed))
                return -EIO;

        if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
            request->length < 4)
                return -EINVAL;

        e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        e->client = client;
        e->response.length = request->length;
        e->response.closure = request->closure;

        if (request->data &&
            copy_from_user(e->response.data,
                           u64_to_uptr(request->data), request->length)) {
                ret = -EFAULT;
                goto failed;
        }

        e->r.resource.release = release_transaction;
        ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
        if (ret < 0)
                goto failed;

        fw_send_request(client->device->card, &e->r.transaction,
                        request->tcode, destination_id, request->generation,
                        speed, request->offset, e->response.data,
                        request->length, complete_transaction, e);
        return 0;

 failed:
        kfree(e);

        return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
        switch (arg->send_request.tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_READ_QUADLET_REQUEST:
        case TCODE_READ_BLOCK_REQUEST:
        case TCODE_LOCK_MASK_SWAP:
        case TCODE_LOCK_COMPARE_SWAP:
        case TCODE_LOCK_FETCH_ADD:
        case TCODE_LOCK_LITTLE_ADD:
        case TCODE_LOCK_BOUNDED_ADD:
        case TCODE_LOCK_WRAP_ADD:
        case TCODE_LOCK_VENDOR_DEPENDENT:
                break;
        default:
                return -EINVAL;
        }

        return init_request(client, &arg->send_request, client->device->node_id,
                            client->device->max_speed);
}

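/*
 * FCP requests arrive without a struct fw_request: the transaction core
 * has already sent the response on our behalf, so there is nothing left
 * for this layer to respond to.
 */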
static inline bool is_fcp_request(struct fw_request *request)
{
        return request == NULL;
}

static void release_request(struct client *client,
                            struct client_resource *resource)
{
        struct inbound_transaction_resource *r = container_of(resource,
                        struct inbound_transaction_resource, resource);

        if (is_fcp_request(r->request))
                kfree(r->data);
        else
                fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

        fw_card_put(r->card);
        kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
                           int tcode, int destination, int source,
                           int generation, unsigned long long offset,
                           void *payload, size_t length, void *callback_data)
{
        struct address_handler_resource *handler = callback_data;
        struct inbound_transaction_resource *r;
        struct inbound_transaction_event *e;
        size_t event_size0;
        void *fcp_frame = NULL;
        int ret;

        /* card may be different from handler->client->device->card */
        fw_card_get(card);

        r = kmalloc(sizeof(*r), GFP_ATOMIC);
        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (r == NULL || e == NULL)
                goto failed;

        r->card    = card;
        r->request = request;
        r->data    = payload;
        r->length  = length;

        if (is_fcp_request(request)) {
                /*
                 * FIXME: Let core-transaction.c manage a
                 * single reference-counted copy?
                 */
                fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
                if (fcp_frame == NULL)
                        goto failed;

                r->data = fcp_frame;
        }

        r->resource.release = release_request;
        ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
        if (ret < 0)
                goto failed;

        if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
                struct fw_cdev_event_request *req = &e->req.request;

                if (tcode & 0x10)
                        tcode = TCODE_LOCK_REQUEST;

                req->type    = FW_CDEV_EVENT_REQUEST;
                req->tcode   = tcode;
                req->offset  = offset;
                req->length  = length;
                req->handle  = r->resource.handle;
                req->closure = handler->closure;
                event_size0  = sizeof(*req);
        } else {
                struct fw_cdev_event_request2 *req = &e->req.request2;

                req->type    = FW_CDEV_EVENT_REQUEST2;
                req->tcode   = tcode;
                req->offset  = offset;
                req->source_node_id = source;
                req->destination_node_id = destination;
                req->card    = card->index;
                req->generation = generation;
                req->length  = length;
                req->handle  = r->resource.handle;
                req->closure = handler->closure;
                event_size0  = sizeof(*req);
        }

        queue_event(handler->client, &e->event,
                    &e->req, event_size0, r->data, length);
        return;

 failed:
        kfree(r);
        kfree(e);
        kfree(fcp_frame);

        if (!is_fcp_request(request))
                fw_send_response(card, request, RCODE_CONFLICT_ERROR);

        fw_card_put(card);
}

static void release_address_handler(struct client *client,
                                    struct client_resource *resource)
{
        struct address_handler_resource *r =
                container_of(resource, struct address_handler_resource, resource);

        fw_core_remove_address_handler(&r->handler);
        kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_allocate *a = &arg->allocate;
        struct address_handler_resource *r;
        struct fw_address_region region;
        int ret;

        r = kmalloc(sizeof(*r), GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        region.start = a->offset;
        if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
                region.end = a->offset + a->length;
        else
                region.end = a->region_end;

        r->handler.length = a->length;
        r->handler.address_callback = handle_request;
        r->handler.callback_data = r;
        r->closure = a->closure;
        r->client = client;

        ret = fw_core_add_address_handler(&r->handler, &region);
        if (ret < 0) {
                kfree(r);
                return ret;
        }
        a->offset = r->handler.offset;

        r->resource.release = release_address_handler;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                release_address_handler(client, &r->resource);
                return ret;
        }
        a->handle = r->resource.handle;

        return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->deallocate.handle,
                                       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_response *a = &arg->send_response;
        struct client_resource *resource;
        struct inbound_transaction_resource *r;
        int ret = 0;

        if (release_client_resource(client, a->handle,
                                    release_request, &resource) < 0)
                return -EINVAL;

        r = container_of(resource, struct inbound_transaction_resource,
                         resource);
        if (is_fcp_request(r->request))
                goto out;

        if (a->length != fw_get_response_length(r->request)) {
                ret = -EINVAL;
                kfree(r->request);
                goto out;
        }
        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
                ret = -EFAULT;
                kfree(r->request);
                goto out;
        }
        fw_send_response(r->card, r->request, a->rcode);
 out:
        fw_card_put(r->card);
        kfree(r);

        return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
        fw_schedule_bus_reset(client->device->card, true,
                        arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
        return 0;
}

static void release_descriptor(struct client *client,
                               struct client_resource *resource)
{
        struct descriptor_resource *r =
                container_of(resource, struct descriptor_resource, resource);

        fw_core_remove_descriptor(&r->descriptor);
        kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
        struct descriptor_resource *r;
        int ret;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        if (a->length > 256)
                return -EINVAL;

        r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
                ret = -EFAULT;
                goto failed;
        }

        r->descriptor.length    = a->length;
        r->descriptor.immediate = a->immediate;
        r->descriptor.key       = a->key;
        r->descriptor.data      = r->data;

        ret = fw_core_add_descriptor(&r->descriptor);
        if (ret < 0)
                goto failed;

        r->resource.release = release_descriptor;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                fw_core_remove_descriptor(&r->descriptor);
                goto failed;
        }
        a->handle = r->resource.handle;

        return 0;
 failed:
        kfree(r);

        return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->remove_descriptor.handle,
                                       release_descriptor, NULL);
}

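/*
 * The isochronous callbacks below are invoked from the card driver's
 * completion handling, potentially in interrupt context: allocations
 * must be GFP_ATOMIC, and an event is silently dropped if memory is
 * tight.
 */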
static void iso_callback(struct fw_iso_context *context, u32 cycle,
                         size_t header_length, void *header, void *data)
{
        struct client *client = data;
        struct iso_interrupt_event *e;

        e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
        if (e == NULL)
                return;

        e->interrupt.type    = FW_CDEV_EVENT_ISO_INTERRUPT;
        e->interrupt.closure = client->iso_closure;
        e->interrupt.cycle   = cycle;
        e->interrupt.header_length = header_length;
        memcpy(e->interrupt.header, header, header_length);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt) + header_length, NULL, 0);
}

static void iso_mc_callback(struct fw_iso_context *context,
                            dma_addr_t completed, void *data)
{
        struct client *client = data;
        struct iso_interrupt_mc_event *e;

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (e == NULL)
                return;

        e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
        e->interrupt.closure   = client->iso_closure;
        e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
                                                      completed);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt), NULL, 0);
}

static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
{
        if (context->type == FW_ISO_CONTEXT_TRANSMIT)
                return DMA_TO_DEVICE;
        else
                return DMA_FROM_DEVICE;
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
        struct fw_iso_context *context;
        fw_iso_callback_t cb;
        int ret;

        BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
                                        FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

        switch (a->type) {
        case FW_ISO_CONTEXT_TRANSMIT:
                if (a->speed > SCODE_3200 || a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE:
                if (a->header_size < 4 || (a->header_size & 3) ||
                    a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                cb = (fw_iso_callback_t)iso_mc_callback;
                break;

        default:
                return -EINVAL;
        }

        context = fw_iso_context_create(client->device->card, a->type,
                        a->channel, a->speed, a->header_size, cb, client);
        if (IS_ERR(context))
                return PTR_ERR(context);
        if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
                context->drop_overflow_headers = true;

        /* We only support one context at this time. */
        spin_lock_irq(&client->lock);
        if (client->iso_context != NULL) {
                spin_unlock_irq(&client->lock);
                fw_iso_context_destroy(context);

                return -EBUSY;
        }
        if (!client->buffer_is_mapped) {
                ret = fw_iso_buffer_map_dma(&client->buffer,
                                            client->device->card,
                                            iso_dma_direction(context));
                if (ret < 0) {
                        spin_unlock_irq(&client->lock);
                        fw_iso_context_destroy(context);

                        return ret;
                }
                client->buffer_is_mapped = true;
        }
        client->iso_closure = a->closure;
        client->iso_context = context;
        spin_unlock_irq(&client->lock);

        a->handle = 0;

        return 0;
}

static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
        struct fw_iso_context *ctx = client->iso_context;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_set_channels(ctx, &a->channels);
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)   ((v) & 0xffff)
#define GET_INTERRUPT(v)        (((v) >> 16) & 0x01)
#define GET_SKIP(v)             (((v) >> 17) & 0x01)
#define GET_TAG(v)              (((v) >> 18) & 0x03)
#define GET_SY(v)               (((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)    (((v) >> 24) & 0xff)
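/*
 * Layout of the control quadlet of struct fw_cdev_iso_packet, as decoded
 * by the macros above:
 *   bits  0..15: payload_length
 *   bit      16: interrupt
 *   bit      17: skip
 *   bits 18..19: tag
 *   bits 20..23: sy
 *   bits 24..31: header_length
 */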

static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_queue_iso *a = &arg->queue_iso;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        struct fw_iso_context *ctx = client->iso_context;
        unsigned long payload, buffer_end, transmit_header_bytes = 0;
        u32 control;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        /*
         * If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we setup the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the a->data pointer is ignored.
         */
        payload = (unsigned long)a->data - client->vm_start;
        buffer_end = client->buffer.page_count << PAGE_SHIFT;
        if (a->data == 0 || client->buffer.pages == NULL ||
            payload >= buffer_end) {
                payload = 0;
                buffer_end = 0;
        }

        if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
                return -EINVAL;

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
        if (!access_ok(p, a->size))
                return -EFAULT;

        end = (void __user *)p + a->size;
        count = 0;
        while (p < end) {
                if (get_user(control, &p->control))
                        return -EFAULT;
                u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
                u.packet.interrupt = GET_INTERRUPT(control);
                u.packet.skip = GET_SKIP(control);
                u.packet.tag = GET_TAG(control);
                u.packet.sy = GET_SY(control);
                u.packet.header_length = GET_HEADER_LENGTH(control);

                switch (ctx->type) {
                case FW_ISO_CONTEXT_TRANSMIT:
                        if (u.packet.header_length & 3)
                                return -EINVAL;
                        transmit_header_bytes = u.packet.header_length;
                        break;

                case FW_ISO_CONTEXT_RECEIVE:
                        if (u.packet.header_length == 0 ||
                            u.packet.header_length % ctx->header_size != 0)
                                return -EINVAL;
                        break;

                case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                        if (u.packet.payload_length == 0 ||
                            u.packet.payload_length & 3)
                                return -EINVAL;
                        break;
                }

                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[transmit_header_bytes / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user
                    (u.packet.header, p->header, transmit_header_bytes))
                        return -EFAULT;
                if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > buffer_end)
                        return -EINVAL;

                if (fw_iso_context_queue(ctx, &u.packet,
                                         &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }
        fw_iso_context_queue_flush(ctx);

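        /*
         * Advance the ioctl argument past the packets actually queued;
         * userspace can retry with the updated struct to submit the
         * remainder.
         */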
        a->size    -= uptr_to_u64(p) - a->packets;
        a->packets  = uptr_to_u64(p);
        a->data     = client->vm_start + payload;

        return count;
}

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_start_iso *a = &arg->start_iso;

        BUILD_BUG_ON(
            FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
            FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
            (a->tags == 0 || a->tags > 15 || a->sync > 15))
                return -EINVAL;

        return fw_iso_context_start(client->iso_context,
                                    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_stop_iso *a = &arg->stop_iso;

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_stop(client->iso_context);
}

static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_flush_iso *a = &arg->flush_iso;

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_flush_completions(client->iso_context);
}

static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
        struct fw_card *card = client->device->card;
        struct timespec64 ts = {0, 0};
        u32 cycle_time;
        int ret = 0;

        local_irq_disable();

        cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

        switch (a->clk_id) {
        case CLOCK_REALTIME:      ktime_get_real_ts64(&ts); break;
        case CLOCK_MONOTONIC:     ktime_get_ts64(&ts);      break;
        case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts);  break;
        default:
                ret = -EINVAL;
        }

        local_irq_enable();

        a->tv_sec      = ts.tv_sec;
        a->tv_nsec     = ts.tv_nsec;
        a->cycle_timer = cycle_time;

        return ret;
}

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
        struct fw_cdev_get_cycle_timer2 ct2;

        ct2.clk_id = CLOCK_REALTIME;
        ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

        a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
        a->cycle_timer = ct2.cycle_timer;

        return 0;
}

static void iso_resource_work(struct work_struct *work)
{
        struct iso_resource_event *e;
        struct iso_resource *r =
                        container_of(work, struct iso_resource, work.work);
        struct client *client = r->client;
        int generation, channel, bandwidth, todo;
        bool skip, free, success;

        spin_lock_irq(&client->lock);
        generation = client->device->generation;
        todo = r->todo;
        /* Allow 1000ms grace period for other reallocations. */
        if (todo == ISO_RES_ALLOC &&
            time_before64(get_jiffies_64(),
                          client->device->card->reset_jiffies + HZ)) {
                schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
                skip = true;
        } else {
                /* We could be called twice within the same generation. */
                skip = todo == ISO_RES_REALLOC &&
                       r->generation == generation;
        }
        free = todo == ISO_RES_DEALLOC ||
               todo == ISO_RES_ALLOC_ONCE ||
               todo == ISO_RES_DEALLOC_ONCE;
        r->generation = generation;
        spin_unlock_irq(&client->lock);

        if (skip)
                goto out;

        bandwidth = r->bandwidth;

        fw_iso_resource_manage(client->device->card, generation,
                        r->channels, &channel, &bandwidth,
                        todo == ISO_RES_ALLOC ||
                        todo == ISO_RES_REALLOC ||
                        todo == ISO_RES_ALLOC_ONCE);
        /*
         * Is this generation outdated already?  As long as this resource sticks
         * in the idr, it will be scheduled again for a newer generation or at
         * shutdown.
         */
        if (channel == -EAGAIN &&
            (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
                goto out;

        success = channel >= 0 || bandwidth > 0;

        spin_lock_irq(&client->lock);
        /*
         * Transit from allocation to reallocation, except if the client
         * requested deallocation in the meantime.
         */
        if (r->todo == ISO_RES_ALLOC)
                r->todo = ISO_RES_REALLOC;
        /*
         * Allocation or reallocation failure?  Pull this resource out of the
         * idr and prepare for deletion, unless the client is shutting down.
         */
        if (r->todo == ISO_RES_REALLOC && !success &&
            !client->in_shutdown &&
            idr_remove(&client->resource_idr, r->resource.handle)) {
                client_put(client);
                free = true;
        }
        spin_unlock_irq(&client->lock);

        if (todo == ISO_RES_ALLOC && channel >= 0)
                r->channels = 1ULL << channel;

        if (todo == ISO_RES_REALLOC && success)
                goto out;

        if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
                e = r->e_alloc;
                r->e_alloc = NULL;
        } else {
                e = r->e_dealloc;
                r->e_dealloc = NULL;
        }
        e->iso_resource.handle    = r->resource.handle;
        e->iso_resource.channel   = channel;
        e->iso_resource.bandwidth = bandwidth;

        queue_event(client, &e->event,
                    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

        if (free) {
                cancel_delayed_work(&r->work);
                kfree(r->e_alloc);
                kfree(r->e_dealloc);
                kfree(r);
        }
 out:
        client_put(client);
}

static void release_iso_resource(struct client *client,
                                 struct client_resource *resource)
{
        struct iso_resource *r =
                container_of(resource, struct iso_resource, resource);

        spin_lock_irq(&client->lock);
        r->todo = ISO_RES_DEALLOC;
        schedule_iso_resource(r, 0);
        spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
                struct fw_cdev_allocate_iso_resource *request, int todo)
{
        struct iso_resource_event *e1, *e2;
        struct iso_resource *r;
        int ret;

        if ((request->channels == 0 && request->bandwidth == 0) ||
            request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
                return -EINVAL;

        r  = kmalloc(sizeof(*r), GFP_KERNEL);
        e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
        e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
        if (r == NULL || e1 == NULL || e2 == NULL) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_DELAYED_WORK(&r->work, iso_resource_work);
        r->client     = client;
        r->todo       = todo;
        r->generation = -1;
        r->channels   = request->channels;
        r->bandwidth  = request->bandwidth;
        r->e_alloc    = e1;
        r->e_dealloc  = e2;

        e1->iso_resource.closure = request->closure;
        e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
        e2->iso_resource.closure = request->closure;
        e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

        if (todo == ISO_RES_ALLOC) {
                r->resource.release = release_iso_resource;
                ret = add_client_resource(client, &r->resource, GFP_KERNEL);
                if (ret < 0)
                        goto fail;
        } else {
                r->resource.release = NULL;
                r->resource.handle = -1;
                schedule_iso_resource(r, 0);
        }
        request->handle = r->resource.handle;

        return 0;
 fail:
        kfree(r);
        kfree(e1);
        kfree(e2);

        return ret;
}

static int ioctl_allocate_iso_resource(struct client *client,
                                       union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
                                         union ioctl_arg *arg)
{
        return release_client_resource(client,
                        arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
                                            union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
                                              union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
        return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client,
                                        union ioctl_arg *arg)
{
        struct fw_cdev_send_request *a = &arg->send_request;

        switch (a->tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
                break;
        default:
                return -EINVAL;
        }

        /* Security policy: Only allow accesses to Units Space. */
        if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
                return -EACCES;

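        /* Node ID 0x3f addresses all nodes on the local bus; broadcasts go out at S100. */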
        return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
        struct fw_cdev_send_request request;
        int dest;

        if (a->speed > client->device->card->link_speed ||
            a->length > 1024 << a->speed)
                return -EIO;

        if (a->tag > 3 || a->channel > 63 || a->sy > 15)
                return -EINVAL;

        dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
        request.tcode      = TCODE_STREAM_DATA;
        request.length     = a->length;
        request.closure    = a->closure;
        request.data       = a->data;
        request.generation = a->generation;

        return init_request(client, &request, dest, a->speed);
}

static void outbound_phy_packet_callback(struct fw_packet *packet,
                                         struct fw_card *card, int status)
{
        struct outbound_phy_packet_event *e =
                container_of(packet, struct outbound_phy_packet_event, p);

        switch (status) {
        /* expected: */
        case ACK_COMPLETE:   e->phy_packet.rcode = RCODE_COMPLETE;   break;
        /* should never happen with PHY packets: */
        case ACK_PENDING:    e->phy_packet.rcode = RCODE_COMPLETE;   break;
        case ACK_BUSY_X:
        case ACK_BUSY_A:
        case ACK_BUSY_B:     e->phy_packet.rcode = RCODE_BUSY;       break;
        case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break;
        case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break;
        /* stale generation; cancelled; on certain controllers: no ack */
        default:             e->phy_packet.rcode = status;           break;
        }
        e->phy_packet.data[0] = packet->timestamp;

        queue_event(e->client, &e->event, &e->phy_packet,
                    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
        client_put(e->client);
}

static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
        struct fw_card *card = client->device->card;
        struct outbound_phy_packet_event *e;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        client_get(client);
        e->client             = client;
        e->p.speed            = SCODE_100;
        e->p.generation       = a->generation;
        e->p.header[0]        = TCODE_LINK_INTERNAL << 4;
        e->p.header[1]        = a->data[0];
        e->p.header[2]        = a->data[1];
        e->p.header_length    = 12;
        e->p.callback         = outbound_phy_packet_callback;
        e->phy_packet.closure = a->closure;
        e->phy_packet.type    = FW_CDEV_EVENT_PHY_PACKET_SENT;
        if (is_ping_packet(a->data))
                e->phy_packet.length = 4;

        card->driver->send_request(card, &e->p);

        return 0;
}

static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
        struct fw_card *card = client->device->card;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        spin_lock_irq(&card->lock);

        list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
        client->phy_receiver_closure = a->closure;

        spin_unlock_irq(&card->lock);

        return 0;
}

void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
        struct client *client;
        struct inbound_phy_packet_event *e;
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);

        list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
                e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
                if (e == NULL)
                        break;

                e->phy_packet.closure = client->phy_receiver_closure;
                e->phy_packet.type    = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
                e->phy_packet.rcode   = RCODE_COMPLETE;
                e->phy_packet.length  = 8;
                e->phy_packet.data[0] = p->header[1];
                e->phy_packet.data[1] = p->header[2];
                queue_event(client, &e->event,
                            &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
        }

        spin_unlock_irqrestore(&card->lock, flags);
}

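/*
 * The array index is the _IOC_NR() of the ioctl numbers defined in
 * <linux/firewire-cdev.h>.  A minimal userspace sketch (not part of the
 * driver; the device node name varies per card):
 *
 *	struct fw_cdev_get_info info = {};
 *	int fd = open("/dev/fw0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) == 0)
 *		printf("cdev ABI version %u\n", info.version);
 */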
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
        [0x00] = ioctl_get_info,
        [0x01] = ioctl_send_request,
        [0x02] = ioctl_allocate,
        [0x03] = ioctl_deallocate,
        [0x04] = ioctl_send_response,
        [0x05] = ioctl_initiate_bus_reset,
        [0x06] = ioctl_add_descriptor,
        [0x07] = ioctl_remove_descriptor,
        [0x08] = ioctl_create_iso_context,
        [0x09] = ioctl_queue_iso,
        [0x0a] = ioctl_start_iso,
        [0x0b] = ioctl_stop_iso,
        [0x0c] = ioctl_get_cycle_timer,
        [0x0d] = ioctl_allocate_iso_resource,
        [0x0e] = ioctl_deallocate_iso_resource,
        [0x0f] = ioctl_allocate_iso_resource_once,
        [0x10] = ioctl_deallocate_iso_resource_once,
        [0x11] = ioctl_get_speed,
        [0x12] = ioctl_send_broadcast_request,
        [0x13] = ioctl_send_stream_packet,
        [0x14] = ioctl_get_cycle_timer2,
        [0x15] = ioctl_send_phy_packet,
        [0x16] = ioctl_receive_phy_packets,
        [0x17] = ioctl_set_iso_channels,
        [0x18] = ioctl_flush_iso,
};

static int dispatch_ioctl(struct client *client,
                          unsigned int cmd, void __user *arg)
{
        union ioctl_arg buffer;
        int ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        if (_IOC_TYPE(cmd) != '#' ||
            _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
            _IOC_SIZE(cmd) > sizeof(buffer))
                return -ENOTTY;

        memset(&buffer, 0, sizeof(buffer));

        if (_IOC_DIR(cmd) & _IOC_WRITE)
                if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;

        ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
        if (ret < 0)
                return ret;

        if (_IOC_DIR(cmd) & _IOC_READ)
                if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;

        return ret;
}

static long fw_device_op_ioctl(struct file *file,
                               unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
                                      unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif

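/*
 * The buffer is DMA-mapped only once both pieces exist: mmap() supplies
 * the pages, FW_CDEV_IOC_CREATE_ISO_CONTEXT supplies the DMA direction.
 * Whichever of the two happens second performs fw_iso_buffer_map_dma().
 */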
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct client *client = file->private_data;
        unsigned long size;
        int page_count, ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        /* FIXME: We could support multiple buffers, but we don't. */
        if (client->buffer.pages != NULL)
                return -EBUSY;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;

        client->vm_start = vma->vm_start;
        size = vma->vm_end - vma->vm_start;
        page_count = size >> PAGE_SHIFT;
        if (size & ~PAGE_MASK)
                return -EINVAL;

        ret = fw_iso_buffer_alloc(&client->buffer, page_count);
        if (ret < 0)
                return ret;

        spin_lock_irq(&client->lock);
        if (client->iso_context) {
                ret = fw_iso_buffer_map_dma(&client->buffer,
                                client->device->card,
                                iso_dma_direction(client->iso_context));
                client->buffer_is_mapped = (ret == 0);
        }
        spin_unlock_irq(&client->lock);
        if (ret < 0)
                goto fail;

        ret = fw_iso_buffer_map_vma(&client->buffer, vma);
        if (ret < 0)
                goto fail;

        return 0;
 fail:
        fw_iso_buffer_destroy(&client->buffer, client->device->card);
        return ret;
}

static int is_outbound_transaction_resource(int id, void *p, void *data)
{
        struct client_resource *resource = p;

        return resource->release == release_transaction;
}

static int has_outbound_transactions(struct client *client)
{
        int ret;

        spin_lock_irq(&client->lock);
        ret = idr_for_each(&client->resource_idr,
                           is_outbound_transaction_resource, NULL);
        spin_unlock_irq(&client->lock);

        return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
        struct client_resource *resource = p;
        struct client *client = data;

        resource->release(client, resource);
        client_put(client);

        return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct event *event, *next_event;

        spin_lock_irq(&client->device->card->lock);
        list_del(&client->phy_receiver_link);
        spin_unlock_irq(&client->device->card->lock);

        mutex_lock(&client->device->client_list_mutex);
        list_del(&client->link);
        mutex_unlock(&client->device->client_list_mutex);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        /* Freeze client->resource_idr and client->event_list */
        spin_lock_irq(&client->lock);
        client->in_shutdown = true;
        spin_unlock_irq(&client->lock);

        wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

        idr_for_each(&client->resource_idr, shutdown_resource, client);
        idr_destroy(&client->resource_idr);

        list_for_each_entry_safe(event, next_event, &client->event_list, link)
                kfree(event);

        client_put(client);

        return 0;
}

static __poll_t fw_device_op_poll(struct file *file, poll_table * pt)
{
        struct client *client = file->private_data;
        __poll_t mask = 0;

        poll_wait(file, &client->wait, pt);

        if (fw_device_is_shutdown(client->device))
                mask |= EPOLLHUP | EPOLLERR;
        if (!list_empty(&client->event_list))
                mask |= EPOLLIN | EPOLLRDNORM;

        return mask;
}

const struct file_operations fw_device_ops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .open           = fw_device_op_open,
        .read           = fw_device_op_read,
        .unlocked_ioctl = fw_device_op_ioctl,
        .mmap           = fw_device_op_mmap,
        .release        = fw_device_op_release,
        .poll           = fw_device_op_poll,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = fw_device_op_compat_ioctl,
#endif
};