/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>

#include "core.h"

struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
	struct kref kref;
};

static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	__be32 transaction_data[2];
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void schedule_iso_resource(struct iso_resource *);
static void release_iso_resource(struct client *, struct client_resource *);

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
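	/* Up to two data chunks, which dequeue_event() concatenates on read(). */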
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	struct fw_cdev_event_request request;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource resource;
};

static inline void __user *u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	kref_init(&client->kref);

	file->private_data = client;

	mutex_lock(&device->client_list_mutex);
	list_add_tail(&client->link, &device->client_list);
	mutex_unlock(&device->client_list_mutex);

	return 0;
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

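/*
 * Userspace sketch of the consuming side: each read() dequeues at most
 * one event; fd is an open /dev/fw* descriptor and the buffer size is
 * an arbitrary choice, large enough for any event plus payload:
 *
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *
 *	if (len >= sizeof(struct fw_cdev_event_common))
 *		switch (((struct fw_cdev_event_common *)buf)->type) {
 *		case FW_CDEV_EVENT_BUS_RESET:
 *			...
 *		}
 */
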
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	struct client_resource *r = p;

	if (r->release == release_iso_resource)
		schedule_iso_resource(container_of(r,
					struct iso_resource, resource));
	return 0;
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

static int ioctl_get_info(struct client *client, void *buffer)
{
	struct fw_cdev_get_info *get_info = buffer;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;
	get_info->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(uptr, client->device->config_rom,
				   min(want, have));
	}
	get_info->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);

		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}

static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		if (resource->release == release_iso_resource)
			schedule_iso_resource(container_of(resource,
						struct iso_resource, resource));
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **resource)
{
	struct client_resource *r;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		r = NULL;
	else
		r = idr_find(&client->resource_idr, handle);
	if (r && r->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(r && r->release == release))
		return -EINVAL;

	if (resource)
		*resource = r;
	else
		r->release(client, r);

	client_put(client);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
	struct outbound_transaction_resource *r = container_of(resource,
			struct outbound_transaction_resource, resource);

	fw_cancel_transaction(client->device->card, &r->transaction);
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	/*
	 * 1. If called while in shutdown, the idr tree must be left untouched.
	 *    The idr handle will be removed and the client reference will be
	 *    dropped later.
	 * 2. If the call chain was release_client_resource ->
	 *    release_transaction -> complete_transaction (instead of a normal
	 *    conclusion of the transaction), i.e. if this resource was already
	 *    unregistered from the idr, the client reference will be dropped
	 *    by release_client_resource and we must not drop it here.
	 */
	if (!client->in_shutdown &&
	    idr_find(&client->resource_idr, e->r.resource.handle)) {
		idr_remove(&client->resource_idr, e->r.resource.handle);
		/* Drop the idr's reference */
		client_put(client);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the transaction callback's reference */
	client_put(client);
}

static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

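	/* Async payloads are bounded both by 4096 bytes and by 512 << speed. */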
	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	/* Get a reference for the transaction callback */
	client_get(client);

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, void *buffer)
{
	struct fw_cdev_send_request *request = buffer;

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, request, client->device->node_id,
			    client->device->max_speed);
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	fw_send_response(client->device->card, r->request,
			 RCODE_CONFLICT_ERROR);
	kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, int speed,
			   unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	int ret;

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL)
		goto failed;

	r->request = request;
	r->data    = payload;
	r->length  = length;

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
	e->request.handle  = r->resource.handle;
	e->request.closure = handler->closure;

	queue_event(handler->client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	fw_send_response(card, request, RCODE_CONFLICT_ERROR);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, void *buffer)
{
	struct fw_cdev_allocate *request = buffer;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = request->offset;
	region.end = request->offset + request->length;
	r->handler.length = request->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data = r;
	r->closure = request->closure;
	r->client = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	request->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
	struct fw_cdev_send_response *request = buffer;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;

	if (release_client_resource(client, request->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (request->length < r->length)
		r->length = request->length;
	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request->rcode);
	kfree(r);

	return 0;
}

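/*
 * Userspace sketch of answering an FW_CDEV_EVENT_REQUEST; the handle
 * comes from the event, the rcode and empty payload are hypothetical:
 *
 *	struct fw_cdev_send_response rsp = {
 *		.rcode  = RCODE_COMPLETE,
 *		.length = 0,
 *		.handle = event_handle,
 *	};
 *
 *	ioctl(fd, FW_CDEV_IOC_SEND_RESPONSE, &rsp);
 */
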
static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
	struct fw_cdev_initiate_bus_reset *request = buffer;
	int short_reset;

	short_reset = (request->type == FW_CDEV_SHORT_RESET);

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_add_descriptor *request = buffer;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (request->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data,
			   u64_to_uptr(request->data), request->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = request->length;
	r->descriptor.immediate = request->immediate;
	r->descriptor.key       = request->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	request->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_remove_descriptor *request = buffer;

	return release_client_resource(client, request->handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.cycle     = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void *buffer)
{
	struct fw_cdev_create_iso_context *request = buffer;
	struct fw_iso_context *context;

	/* We only support one context at this time. */
	if (client->iso_context != NULL)
		return -EBUSY;

	if (request->channel > 63)
		return -EINVAL;

	switch (request->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (request->header_size < 4 || (request->header_size & 3))
			return -EINVAL;

		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (request->speed > SCODE_3200)
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card,
					request->type,
					request->channel,
					request->speed,
					request->header_size,
					iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	client->iso_closure = request->closure;
	client->iso_context = context;

	/* We only support one context at this time. */
	request->handle = 0;

	return 0;
}

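/*
 * Userspace sketch of setting up a receive context; the channel and the
 * minimal header_size are hypothetical values:
 *
 *	struct fw_cdev_create_iso_context c = {
 *		.type        = FW_ISO_CONTEXT_RECEIVE,
 *		.channel     = 5,
 *		.header_size = 4,
 *	};
 *
 *	ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &c);
 */
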
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)

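/*
 * For illustration, userspace fills fw_cdev_iso_packet.control as the
 * inverse of the GET_* macros above; a minimal sketch, assuming each
 * value is already within its field's range:
 *
 *	control = payload_length | (interrupt << 16) | (skip << 17) |
 *		  (tag << 18) | (sy << 20) | (header_length << 24);
 */
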
static int ioctl_queue_iso(struct client *client, void *buffer)
{
	struct fw_cdev_queue_iso *request = buffer;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || request->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request->data pointer is ignored.
	 */

	payload = (unsigned long)request->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (request->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
		return -EFAULT;

	end = (void __user *)p + request->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	request->size    -= uptr_to_u64(p) - request->packets;
	request->packets  = uptr_to_u64(p);
	request->data     = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, void *buffer)
{
	struct fw_cdev_start_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
		if (request->tags == 0 || request->tags > 15)
			return -EINVAL;

		if (request->sync > 15)
			return -EINVAL;
	}

	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
}

static int ioctl_stop_iso(struct client *client, void *buffer)
{
	struct fw_cdev_stop_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;
	return 0;
}

static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
		if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
			client_get(client);
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE,
			r->transaction_data);
	/*
	 * Is this generation outdated already?  As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transition from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->resource.handle	= r->resource.handle;
	e->resource.channel	= channel;
	e->resource.bandwidth	= bandwidth;

	queue_event(client, &e->event,
		    &e->resource, sizeof(e->resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void schedule_iso_resource(struct iso_resource *r)
{
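	/*
	 * A pending work item holds a client reference; if the work was
	 * already queued, give the extra reference back immediately.
	 */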
	client_get(r->client);
	if (!schedule_delayed_work(&r->work, 0))
		client_put(r->client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r);
	spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client	= client;
	r->todo		= todo;
	r->generation	= -1;
	r->channels	= request->channels;
	r->bandwidth	= request->bandwidth;
	r->e_alloc	= e1;
	r->e_dealloc	= e2;

	e1->resource.closure	= request->closure;
	e1->resource.type	= FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->resource.closure	= request->closure;
	e2->resource.type	= FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

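/*
 * Userspace sketch of requesting channel 5 plus 64 bandwidth allocation
 * units (both values hypothetical); the outcome arrives later as an
 * FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event:
 *
 *	struct fw_cdev_allocate_iso_resource a = {
 *		.channels  = 1ULL << 5,
 *		.bandwidth = 64,
 *	};
 *
 *	ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &a);
 */
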
static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle,
				       release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, void *buffer)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client, void *buffer)
{
	struct fw_cdev_send_request *request = buffer;

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, void *buffer)
{
	struct fw_cdev_send_stream_packet *p = buffer;
	struct fw_cdev_send_request request;
	int dest;

	if (p->speed > client->device->card->link_speed ||
	    p->length > 1024 << p->speed)
		return -EIO;

	if (p->tag > 3 || p->channel > 63 || p->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
	request.tcode		= TCODE_STREAM_DATA;
	request.length		= p->length;
	request.closure		= p->closure;
	request.data		= p->data;
	request.generation	= p->generation;

	return init_request(client, &request, dest, p->speed);
}

static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
	ioctl_get_cycle_timer,
	ioctl_allocate_iso_resource,
	ioctl_deallocate_iso_resource,
	ioctl_allocate_iso_resource_once,
	ioctl_deallocate_iso_resource_once,
	ioctl_get_speed,
	ioctl_send_broadcast_request,
	ioctl_send_stream_packet,
};

static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	char buffer[256];
	int ret;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return ret;
}

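/*
 * Userspace sketch of driving this dispatcher; FW_CDEV_IOC_GET_INFO is
 * declared in <linux/firewire-cdev.h> with type '#', so the struct is
 * copied in before and back out after the handler runs (error handling
 * omitted):
 *
 *	struct fw_cdev_get_info info = { .version = FW_CDEV_VERSION };
 *	int fd = open("/dev/fw0", O_RDWR);
 *
 *	ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
 *	printf("card %u, kernel ABI version %u\n", info.card, info.version);
 */
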
static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}

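/*
 * Userspace counterpart, a sketch with a hypothetical 16-page buffer;
 * the mapping must be page-aligned and MAP_SHARED, and PROT_WRITE
 * selects the DMA direction (device-bound transmit payloads):
 *
 *	void *buf = mmap(NULL, 16 * 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 */
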
1391 {
1392 	struct client_resource *r = p;
1393 	struct client *client = data;
1394 
1395 	r->release(client, r);
1396 	client_put(client);
1397 
1398 	return 0;
1399 }
1400 
1401 static int fw_device_op_release(struct inode *inode, struct file *file)
1402 {
1403 	struct client *client = file->private_data;
1404 	struct event *e, *next_e;
1405 
1406 	mutex_lock(&client->device->client_list_mutex);
1407 	list_del(&client->link);
1408 	mutex_unlock(&client->device->client_list_mutex);
1409 
1410 	if (client->iso_context)
1411 		fw_iso_context_destroy(client->iso_context);
1412 
1413 	if (client->buffer.pages)
1414 		fw_iso_buffer_destroy(&client->buffer, client->device->card);
1415 
1416 	/* Freeze client->resource_idr and client->event_list */
1417 	spin_lock_irq(&client->lock);
1418 	client->in_shutdown = true;
1419 	spin_unlock_irq(&client->lock);
1420 
1421 	idr_for_each(&client->resource_idr, shutdown_resource, client);
1422 	idr_remove_all(&client->resource_idr);
1423 	idr_destroy(&client->resource_idr);
1424 
1425 	list_for_each_entry_safe(e, next_e, &client->event_list, link)
1426 		kfree(e);
1427 
1428 	client_put(client);
1429 
1430 	return 0;
1431 }
1432 
1433 static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
1434 {
1435 	struct client *client = file->private_data;
1436 	unsigned int mask = 0;
1437 
1438 	poll_wait(file, &client->wait, pt);
1439 
1440 	if (fw_device_is_shutdown(client->device))
1441 		mask |= POLLHUP | POLLERR;
1442 	if (!list_empty(&client->event_list))
1443 		mask |= POLLIN | POLLRDNORM;
1444 
1445 	return mask;
1446 }
1447 
1448 const struct file_operations fw_device_ops = {
1449 	.owner		= THIS_MODULE,
1450 	.open		= fw_device_op_open,
1451 	.read		= fw_device_op_read,
1452 	.unlocked_ioctl	= fw_device_op_ioctl,
1453 	.poll		= fw_device_op_poll,
1454 	.release	= fw_device_op_release,
1455 	.mmap		= fw_device_op_mmap,
1456 
1457 #ifdef CONFIG_COMPAT
1458 	.compat_ioctl	= fw_device_op_compat_ioctl,
1459 #endif
1460 };
1461