firewire: Future proof the iso ioctls by adding a handle for the iso context.
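
As an illustrative sketch of the extended ABI (hedged: the ioctl names,
struct layouts, and the location of the ABI header are assumptions based
on fw-device-cdev.h, not part of this file), userspace would carry the
handle from context creation through the other iso ioctls:

    #include <sys/ioctl.h>
    #include <err.h>
    #include "fw-device-cdev.h"     /* ABI header; path is an assumption */

    void start_receive(int fd)
    {
            /* Create a receive context; the kernel fills in .handle. */
            struct fw_cdev_create_iso_context create = {
                    .type        = FW_ISO_CONTEXT_RECEIVE,
                    .header_size = 4,
                    .channel     = 5,
                    .closure     = 0,
            };
            if (ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &create) < 0)
                    err(1, "create iso context");

            /* Later iso ioctls identify the context by that handle.
             * Today the kernel always returns 0, but callers should
             * treat the handle as opaque. */
            struct fw_cdev_start_iso start = {
                    .cycle  = -1,   /* start as soon as possible */
                    .sync   = 0,
                    .tags   = 0xf,  /* match all tags */
                    .handle = create.handle,
            };
            if (ioctl(fd, FW_CDEV_IOC_START_ISO, &start) < 0)
                    err(1, "start iso");
    }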
drivers/firewire/fw-device-cdev.c
/*                                              -*- c-basic-offset: 8 -*-
 *
 * fw-device-cdev.c - Char device for device raw access
 *
 * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"
#include "fw-device-cdev.h"

/* dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct. */
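/* For example, struct bus_reset below embeds its struct event as the
 * first member, so kfree(event) in dequeue_event() frees the whole
 * containing allocation. */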

struct client;
struct client_resource {
        struct list_head link;
        void (*release)(struct client *client, struct client_resource *r);
        u32 handle;
};

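/* An event is handed back to userspace as up to two segments: v[0]
 * normally points at the fixed-size fw_cdev_event_* struct and v[1]
 * at an optional variable-length payload; see queue_event() and
 * dequeue_event() below. */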
struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};

struct bus_reset {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};

struct response {
        struct event event;
        struct fw_transaction transaction;
        struct client *client;
        struct client_resource resource;
        struct fw_cdev_event_response response;
};

struct iso_interrupt {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
        u32 version;
        struct fw_device *device;
        spinlock_t lock;
        u32 resource_handle;
        struct list_head resource_list;
        struct list_head event_list;
        wait_queue_head_t wait;
        u64 bus_reset_closure;

        struct fw_iso_context *iso_context;
        u64 iso_closure;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;

        struct list_head link;
};

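/* The ABI passes user pointers through fixed-width __u64 fields so that
 * the ioctl structs have the same layout for 32-bit and 64-bit userland;
 * these helpers convert between the two representations (see the
 * compat_ioctl path near the end of this file). */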
static inline void __user *
u64_to_uptr(__u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
        return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;
        unsigned long flags;

        device = fw_device_from_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;

        client = kzalloc(sizeof *client, GFP_KERNEL);
        if (client == NULL)
                return -ENOMEM;

        client->device = fw_device_get(device);
        INIT_LIST_HEAD(&client->event_list);
        INIT_LIST_HEAD(&client->resource_list);
        spin_lock_init(&client->lock);
        init_waitqueue_head(&client->wait);

        file->private_data = client;

        spin_lock_irqsave(&device->card->lock, flags);
        list_add_tail(&client->link, &device->client_list);
        spin_unlock_irqrestore(&device->card->lock, flags);

        return 0;
}

static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);

        list_add_tail(&event->link, &client->event_list);
        wake_up_interruptible(&client->wait);

        spin_unlock_irqrestore(&client->lock, flags);
}

static int
dequeue_event(struct client *client, char __user *buffer, size_t count)
{
        unsigned long flags;
        struct event *event;
        size_t size, total;
        int i, retval;

        retval = wait_event_interruptible(client->wait,
                                          !list_empty(&client->event_list) ||
                                          fw_device_is_shutdown(client->device));
        if (retval < 0)
                return retval;

        if (list_empty(&client->event_list) &&
                       fw_device_is_shutdown(client->device))
                return -ENODEV;

        spin_lock_irqsave(&client->lock, flags);
        event = container_of(client->event_list.next, struct event, link);
        list_del(&event->link);
        spin_unlock_irqrestore(&client->lock, flags);

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
                        retval = -EFAULT;
                        goto out;
                }
                total += size;
        }
        retval = total;

 out:
        kfree(event);

        return retval;
}

static ssize_t
fw_device_op_read(struct file *file,
                  char __user *buffer, size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}

static void
fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                     struct client *client)
{
        struct fw_card *card = client->device->card;

        event->closure       = client->bus_reset_closure;
        event->type          = FW_CDEV_EVENT_BUS_RESET;
        event->node_id       = client->device->node_id;
        event->local_node_id = card->local_node->node_id;
        event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
        event->irm_node_id   = card->irm_node->node_id;
        event->root_node_id  = card->root_node->node_id;
        event->generation    = card->generation;
}

static void
for_each_client(struct fw_device *device,
                void (*callback)(struct client *client))
{
        struct fw_card *card = device->card;
        struct client *c;
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);

        list_for_each_entry(c, &device->client_list, link)
                callback(c);

        spin_unlock_irqrestore(&card->lock, flags);
}

static void
queue_bus_reset_event(struct client *client)
{
        struct bus_reset *bus_reset;

        bus_reset = kzalloc(sizeof *bus_reset, GFP_ATOMIC);
        if (bus_reset == NULL) {
                fw_notify("Out of memory when allocating bus reset event\n");
                return;
        }

        fill_bus_reset_event(&bus_reset->reset, client);

        queue_event(client, &bus_reset->event,
                    &bus_reset->reset, sizeof bus_reset->reset, NULL, 0);
}

void fw_device_cdev_update(struct fw_device *device)
{
        for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
        wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
        for_each_client(device, wake_up_client);
}

static int ioctl_get_info(struct client *client, void *buffer)
{
        struct fw_cdev_get_info *get_info = buffer;
        struct fw_cdev_event_bus_reset bus_reset;

        client->version = get_info->version;
        get_info->version = FW_CDEV_VERSION;

        if (get_info->rom != 0) {
                void __user *uptr = u64_to_uptr(get_info->rom);
                size_t want = get_info->rom_length;
                size_t have = client->device->config_rom_length * 4;

                if (copy_to_user(uptr, client->device->config_rom,
                                 min(want, have)))
                        return -EFAULT;
        }
        get_info->rom_length = client->device->config_rom_length * 4;

        client->bus_reset_closure = get_info->bus_reset_closure;
        if (get_info->bus_reset != 0) {
                void __user *uptr = u64_to_uptr(get_info->bus_reset);

                fill_bus_reset_event(&bus_reset, client);
                if (copy_to_user(uptr, &bus_reset, sizeof bus_reset))
                        return -EFAULT;
        }

        get_info->card = client->device->card->index;

        return 0;
}

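/* Every kernel-side object created on behalf of userspace is tracked as
 * a client_resource and identified by a per-client handle that only
 * ever increases; the handle is reported back through the ABI struct
 * so that later ioctls (deallocate, send_response, ...) can refer to
 * the object. */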
static void
add_client_resource(struct client *client, struct client_resource *resource)
{
        unsigned long flags;

        spin_lock_irqsave(&client->lock, flags);
        list_add_tail(&resource->link, &client->resource_list);
        resource->handle = client->resource_handle++;
        spin_unlock_irqrestore(&client->lock, flags);
}

static int
release_client_resource(struct client *client, u32 handle,
                        struct client_resource **resource)
{
        struct client_resource *r;
        unsigned long flags;

        spin_lock_irqsave(&client->lock, flags);
        list_for_each_entry(r, &client->resource_list, link) {
                if (r->handle == handle) {
                        list_del(&r->link);
                        break;
                }
        }
        spin_unlock_irqrestore(&client->lock, flags);

        if (&r->link == &client->resource_list)
                return -EINVAL;

        if (resource)
                *resource = r;
        else
                r->release(client, r);

        return 0;
}

static void
release_transaction(struct client *client, struct client_resource *resource)
{
        struct response *response =
                container_of(resource, struct response, resource);

        fw_cancel_transaction(client->device->card, &response->transaction);
}

static void
complete_transaction(struct fw_card *card, int rcode,
                     void *payload, size_t length, void *data)
{
        struct response *response = data;
        struct client *client = response->client;
        unsigned long flags;

        if (length < response->response.length)
                response->response.length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(response->response.data, payload,
                       response->response.length);

        spin_lock_irqsave(&client->lock, flags);
        list_del(&response->resource.link);
        spin_unlock_irqrestore(&client->lock, flags);

        response->response.type   = FW_CDEV_EVENT_RESPONSE;
        response->response.rcode  = rcode;
        queue_event(client, &response->event,
                    &response->response, sizeof response->response,
                    response->response.data, response->response.length);
}

static ssize_t ioctl_send_request(struct client *client, void *buffer)
{
        struct fw_device *device = client->device;
        struct fw_cdev_send_request *request = buffer;
        struct response *response;

        /* What is the biggest size we'll accept, really? */
        if (request->length > 4096)
                return -EINVAL;

        response = kmalloc(sizeof *response + request->length, GFP_KERNEL);
        if (response == NULL)
                return -ENOMEM;

        response->client = client;
        response->response.length = request->length;
        response->response.closure = request->closure;

        if (request->data &&
            copy_from_user(response->response.data,
                           u64_to_uptr(request->data), request->length)) {
                kfree(response);
                return -EFAULT;
        }

        response->resource.release = release_transaction;
        add_client_resource(client, &response->resource);

        fw_send_request(device->card, &response->transaction,
                        request->tcode & 0x1f,
                        device->node->node_id,
                        request->generation,
                        device->node->max_speed,
                        request->offset,
                        response->response.data, request->length,
                        complete_transaction, response);

        if (request->data)
                return sizeof *request + request->length;
        else
                return sizeof *request;
}

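/* Address-range handling: ioctl_allocate() below registers a handler
 * for a region of 1394 address space; incoming requests that hit it
 * are turned into FW_CDEV_EVENT_REQUEST events by handle_request(),
 * and the transaction is completed when userspace answers with the
 * send_response ioctl (or with RCODE_CONFLICT_ERROR when the client
 * goes away, via release_request()). */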
struct address_handler {
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
        struct client_resource resource;
};

struct request {
        struct fw_request *request;
        void *data;
        size_t length;
        struct client_resource resource;
};

struct request_event {
        struct event event;
        struct fw_cdev_event_request request;
};

static void
release_request(struct client *client, struct client_resource *resource)
{
        struct request *request =
                container_of(resource, struct request, resource);

        fw_send_response(client->device->card, request->request,
                         RCODE_CONFLICT_ERROR);
        kfree(request);
}

static void
handle_request(struct fw_card *card, struct fw_request *r,
               int tcode, int destination, int source,
               int generation, int speed,
               unsigned long long offset,
               void *payload, size_t length, void *callback_data)
{
        struct address_handler *handler = callback_data;
        struct request *request;
        struct request_event *e;
        struct client *client = handler->client;

        request = kmalloc(sizeof *request, GFP_ATOMIC);
        e = kmalloc(sizeof *e, GFP_ATOMIC);
        if (request == NULL || e == NULL) {
                kfree(request);
                kfree(e);
                fw_send_response(card, r, RCODE_CONFLICT_ERROR);
                return;
        }

        request->request = r;
        request->data    = payload;
        request->length  = length;

        request->resource.release = release_request;
        add_client_resource(client, &request->resource);

        e->request.type    = FW_CDEV_EVENT_REQUEST;
        e->request.tcode   = tcode;
        e->request.offset  = offset;
        e->request.length  = length;
        e->request.handle  = request->resource.handle;
        e->request.closure = handler->closure;

        queue_event(client, &e->event,
                    &e->request, sizeof e->request, payload, length);
}

static void
release_address_handler(struct client *client,
                        struct client_resource *resource)
{
        struct address_handler *handler =
                container_of(resource, struct address_handler, resource);

        fw_core_remove_address_handler(&handler->handler);
        kfree(handler);
}

static int ioctl_allocate(struct client *client, void *buffer)
{
        struct fw_cdev_allocate *request = buffer;
        struct address_handler *handler;
        struct fw_address_region region;

        handler = kmalloc(sizeof *handler, GFP_KERNEL);
        if (handler == NULL)
                return -ENOMEM;

        region.start = request->offset;
        region.end = request->offset + request->length;
        handler->handler.length = request->length;
        handler->handler.address_callback = handle_request;
        handler->handler.callback_data = handler;
        handler->closure = request->closure;
        handler->client = client;

        if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
                kfree(handler);
                return -EBUSY;
        }

        handler->resource.release = release_address_handler;
        add_client_resource(client, &handler->resource);
        request->handle = handler->resource.handle;

        return 0;
}

static int ioctl_deallocate(struct client *client, void *buffer)
{
        struct fw_cdev_deallocate *request = buffer;

        return release_client_resource(client, request->handle, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
        struct fw_cdev_send_response *request = buffer;
        struct client_resource *resource;
        struct request *r;

        if (release_client_resource(client, request->handle, &resource) < 0)
                return -EINVAL;
        r = container_of(resource, struct request, resource);
        if (request->length < r->length)
                r->length = request->length;
        if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
                return -EFAULT;

        fw_send_response(client->device->card, r->request, request->rcode);
        kfree(r);

        return 0;
}

static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
        struct fw_cdev_initiate_bus_reset *request = buffer;
        int short_reset;

        short_reset = (request->type == FW_CDEV_SHORT_RESET);

        return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

struct descriptor {
        struct fw_descriptor d;
        struct client_resource resource;
        u32 data[0];
};

static void release_descriptor(struct client *client,
                               struct client_resource *resource)
{
        struct descriptor *descriptor =
                container_of(resource, struct descriptor, resource);

        fw_core_remove_descriptor(&descriptor->d);
        kfree(descriptor);
}

static int ioctl_add_descriptor(struct client *client, void *buffer)
{
        struct fw_cdev_add_descriptor *request = buffer;
        struct descriptor *descriptor;
        int retval;

        if (request->length > 256)
                return -EINVAL;

        descriptor =
                kmalloc(sizeof *descriptor + request->length * 4, GFP_KERNEL);
        if (descriptor == NULL)
                return -ENOMEM;

        if (copy_from_user(descriptor->data,
                           u64_to_uptr(request->data), request->length * 4)) {
                kfree(descriptor);
                return -EFAULT;
        }

        descriptor->d.length = request->length;
        descriptor->d.immediate = request->immediate;
        descriptor->d.key = request->key;
        descriptor->d.data = descriptor->data;

        retval = fw_core_add_descriptor(&descriptor->d);
        if (retval < 0) {
                kfree(descriptor);
                return retval;
        }

        descriptor->resource.release = release_descriptor;
        add_client_resource(client, &descriptor->resource);
        request->handle = descriptor->resource.handle;

        return 0;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
        struct fw_cdev_remove_descriptor *request = buffer;

        return release_client_resource(client, request->handle, NULL);
}

static void
iso_callback(struct fw_iso_context *context, u32 cycle,
             size_t header_length, void *header, void *data)
{
        struct client *client = data;
        struct iso_interrupt *interrupt;

        interrupt = kzalloc(sizeof *interrupt + header_length, GFP_ATOMIC);
        if (interrupt == NULL)
                return;

        interrupt->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
        interrupt->interrupt.closure   = client->iso_closure;
        interrupt->interrupt.cycle     = cycle;
        interrupt->interrupt.header_length = header_length;
        memcpy(interrupt->interrupt.header, header, header_length);
        queue_event(client, &interrupt->event,
                    &interrupt->interrupt,
                    sizeof interrupt->interrupt + header_length, NULL, 0);
}

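/* The create/queue/start/stop iso ioctls all carry a context handle so
 * that more than one iso context per client can be supported later; for
 * now the kernel hands back handle 0 here and checks for it in the
 * ioctls below. */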
static int ioctl_create_iso_context(struct client *client, void *buffer)
{
        struct fw_cdev_create_iso_context *request = buffer;

        if (request->channel > 63)
                return -EINVAL;

        switch (request->type) {
        case FW_ISO_CONTEXT_RECEIVE:
                if (request->header_size < 4 || (request->header_size & 3))
                        return -EINVAL;

                break;

        case FW_ISO_CONTEXT_TRANSMIT:
                if (request->speed > SCODE_3200)
                        return -EINVAL;

                break;

        default:
                return -EINVAL;
        }

        client->iso_closure = request->closure;
        client->iso_context = fw_iso_context_create(client->device->card,
                                                    request->type,
                                                    request->channel,
                                                    request->speed,
                                                    request->header_size,
                                                    iso_callback, client);
        if (IS_ERR(client->iso_context))
                return PTR_ERR(client->iso_context);

        /* We only support one context at this time. */
        request->handle = 0;

        return 0;
}

static int ioctl_queue_iso(struct client *client, void *buffer)
{
        struct fw_cdev_queue_iso *request = buffer;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        struct fw_iso_context *ctx = client->iso_context;
        unsigned long payload, buffer_end, header_length;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (ctx == NULL || request->handle != 0)
                return -EINVAL;

        /* If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we set up the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the request->data pointer is ignored. */

        payload = (unsigned long)request->data - client->vm_start;
        buffer_end = client->buffer.page_count << PAGE_SHIFT;
        if (request->data == 0 || client->buffer.pages == NULL ||
            payload >= buffer_end) {
                payload = 0;
                buffer_end = 0;
        }

        if (!access_ok(VERIFY_READ, request->packets, request->size))
                return -EFAULT;

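        /* Each struct fw_cdev_iso_packet in the user array is variable
         * length: header_length bytes of header data follow the fixed
         * part, so the next packet starts at &p->header[header_length / 4].
         * Copying sizeof *p bytes into u.packet relies on the fixed parts
         * of struct fw_cdev_iso_packet and struct fw_iso_packet having
         * the same layout. */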
        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
        end = (void __user *)p + request->size;
        count = 0;
        while (p < end) {
                if (__copy_from_user(&u.packet, p, sizeof *p))
                        return -EFAULT;

                if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
                        header_length = u.packet.header_length;
                } else {
                        /* We require that header_length is a multiple of
                         * the fixed header size, ctx->header_size */
                        if (ctx->header_size == 0) {
                                if (u.packet.header_length > 0)
                                        return -EINVAL;
                        } else if (u.packet.header_length % ctx->header_size != 0) {
                                return -EINVAL;
                        }
                        header_length = 0;
                }

                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[header_length / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user
                    (u.packet.header, p->header, header_length))
                        return -EFAULT;
                if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > buffer_end)
                        return -EINVAL;

                if (fw_iso_context_queue(ctx, &u.packet,
                                         &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }

        request->size    -= uptr_to_u64(p) - request->packets;
        request->packets  = uptr_to_u64(p);
        request->data     = client->vm_start + payload;

        return count;
}

static int ioctl_start_iso(struct client *client, void *buffer)
{
        struct fw_cdev_start_iso *request = buffer;

        if (request->handle != 0)
                return -EINVAL;
        if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
                if (request->tags == 0 || request->tags > 15)
                        return -EINVAL;

                if (request->sync > 15)
                        return -EINVAL;
        }

        return fw_iso_context_start(client->iso_context, request->cycle,
                                    request->sync, request->tags);
}

static int ioctl_stop_iso(struct client *client, void *buffer)
{
        struct fw_cdev_stop_iso *request = buffer;

        if (request->handle != 0)
                return -EINVAL;

        return fw_iso_context_stop(client->iso_context);
}

static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
        ioctl_get_info,
        ioctl_send_request,
        ioctl_allocate,
        ioctl_deallocate,
        ioctl_send_response,
        ioctl_initiate_bus_reset,
        ioctl_add_descriptor,
        ioctl_remove_descriptor,
        ioctl_create_iso_context,
        ioctl_queue_iso,
        ioctl_start_iso,
        ioctl_stop_iso,
};

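/* An ioctl command encodes the index into ioctl_handlers[] in _IOC_NR()
 * and the size of its ABI struct in _IOC_SIZE(); dispatch_ioctl() uses
 * the direction bits to decide what to copy in and out of the scratch
 * buffer.  A sketch of how the commands are presumably defined in
 * fw-device-cdev.h (assumption, not part of this file):
 *
 *     #define FW_CDEV_IOC_GET_INFO \
 *             _IOWR('#', 0x00, struct fw_cdev_get_info)
 */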
static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
        char buffer[256];
        int retval;

        if (_IOC_TYPE(cmd) != '#' ||
            _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
                return -EINVAL;

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (_IOC_SIZE(cmd) > sizeof buffer ||
                    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
        if (retval < 0)
                return retval;

        if (_IOC_DIR(cmd) & _IOC_READ) {
                if (_IOC_SIZE(cmd) > sizeof buffer ||
                    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        return 0;
}

static long
fw_device_op_ioctl(struct file *file,
                   unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;

        return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long
fw_device_op_compat_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;

        return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

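/* mmap() of the device fd provides the iso payload buffer.  The start
 * of the mapping is remembered in client->vm_start so that
 * ioctl_queue_iso() can translate the user-space pointer in
 * request->data into an offset within the DMA buffer. */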
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct client *client = file->private_data;
        enum dma_data_direction direction;
        unsigned long size;
        int page_count, retval;

        /* FIXME: We could support multiple buffers, but we don't. */
        if (client->buffer.pages != NULL)
                return -EBUSY;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;

        client->vm_start = vma->vm_start;
        size = vma->vm_end - vma->vm_start;
        page_count = size >> PAGE_SHIFT;
        if (size & ~PAGE_MASK)
                return -EINVAL;

        if (vma->vm_flags & VM_WRITE)
                direction = DMA_TO_DEVICE;
        else
                direction = DMA_FROM_DEVICE;

        retval = fw_iso_buffer_init(&client->buffer, client->device->card,
                                    page_count, direction);
        if (retval < 0)
                return retval;

        retval = fw_iso_buffer_map(&client->buffer, vma);
        if (retval < 0)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        return retval;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct event *e, *next_e;
        struct client_resource *r, *next_r;
        unsigned long flags;

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        list_for_each_entry_safe(r, next_r, &client->resource_list, link)
                r->release(client, r);

        /* FIXME: We should wait for the async tasklets to stop
         * running before freeing the memory. */

        list_for_each_entry_safe(e, next_e, &client->event_list, link)
                kfree(e);

        spin_lock_irqsave(&client->device->card->lock, flags);
        list_del(&client->link);
        spin_unlock_irqrestore(&client->device->card->lock, flags);

        fw_device_put(client->device);
        kfree(client);

        return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
        struct client *client = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &client->wait, pt);

        if (fw_device_is_shutdown(client->device))
                mask |= POLLHUP | POLLERR;
        if (!list_empty(&client->event_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}

const struct file_operations fw_device_ops = {
        .owner          = THIS_MODULE,
        .open           = fw_device_op_open,
        .read           = fw_device_op_read,
        .unlocked_ioctl = fw_device_op_ioctl,
        .poll           = fw_device_op_poll,
        .release        = fw_device_op_release,
        .mmap           = fw_device_op_mmap,

#ifdef CONFIG_COMPAT
        .compat_ioctl   = fw_device_op_compat_ioctl,
#endif
};