xref: /openbmc/linux/drivers/char/xillybus/xillyusb.c (revision 11976fe2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2020 Xillybus Ltd, http://xillybus.com
4  *
5  * Driver for the XillyUSB FPGA/host framework.
6  *
7  * This driver interfaces with a special IP core in an FPGA, setting up
8  * a pipe between a hardware FIFO in the programmable logic and a device
9  * file in the host. The number of such pipes and their attributes are
10  * set up on the logic. This driver detects these automatically and
11  * creates the device files accordingly.
12  */
13 
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/list.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <asm/byteorder.h>
20 #include <linux/io.h>
21 #include <linux/interrupt.h>
22 #include <linux/sched.h>
23 #include <linux/fs.h>
24 #include <linux/spinlock.h>
25 #include <linux/mutex.h>
26 #include <linux/workqueue.h>
27 #include <linux/crc32.h>
28 #include <linux/poll.h>
29 #include <linux/delay.h>
30 #include <linux/usb.h>
31 
32 #include "xillybus_class.h"
33 
MODULE_DESCRIPTION("Driver for XillyUSB FPGA IP Core");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
MODULE_ALIAS("xillyusb");
MODULE_LICENSE("GPL v2");

/* Timeouts in jiffies: ~10 ms and ~500 ms respectively */
#define XILLY_RX_TIMEOUT		(10 * HZ / 1000)
#define XILLY_RESPONSE_TIMEOUT		(500 * HZ / 1000)

/* Page order and count of the bulk buffers allocated per endpoint */
#define BUF_SIZE_ORDER			4
#define BUFNUM				8
/* log2 of FIFO sizes (in bytes) used during device setup */
#define LOG2_IDT_FIFO_SIZE		16
#define LOG2_INITIAL_FIFO_BUF_SIZE	16

/* Fixed USB endpoint numbers of the XillyUSB interface */
#define MSG_EP_NUM			1
#define IN_EP_NUM			1

static const char xillyname[] = "xillyusb";

/*
 * Page order used for FIFO buffer allocations. Decremented globally by
 * fifo_init() when a higher-order allocation fails, so later FIFOs
 * start off with the reduced order as well.
 */
static unsigned int fifo_buf_order;

#define USB_VENDOR_ID_XILINX		0x03fd
#define USB_VENDOR_ID_ALTERA		0x09fb

#define USB_PRODUCT_ID_XILLYUSB		0xebbe

/* The same product ID is matched under both FPGA vendors' USB IDs */
static const struct usb_device_id xillyusb_table[] = {
	{ USB_DEVICE(USB_VENDOR_ID_XILINX, USB_PRODUCT_ID_XILLYUSB) },
	{ USB_DEVICE(USB_VENDOR_ID_ALTERA, USB_PRODUCT_ID_XILLYUSB) },
	{ }
};

MODULE_DEVICE_TABLE(usb, xillyusb_table);
66 
struct xillyusb_dev;

/*
 * A FIFO implemented as a circular sequence of equally-sized buffers.
 * @fill is the only field shared between the reader and writer sides,
 * and is therefore protected by @lock; the read/write positions are
 * private to their respective (single) reader/writer threads. See the
 * reentrancy note above fifo_write().
 */
struct xillyfifo {
	unsigned int bufsize; /* In bytes, always a power of 2 */
	unsigned int bufnum;
	unsigned int size; /* Lazy: Equals bufsize * bufnum */
	unsigned int buf_order; /* Page order of each buffer in @mem */

	int fill; /* Number of bytes in the FIFO */
	spinlock_t lock;
	wait_queue_head_t waitq;

	unsigned int readpos;  /* Read offset within mem[readbuf] */
	unsigned int readbuf;  /* Index of buffer currently read from */
	unsigned int writepos; /* Write offset within mem[writebuf] */
	unsigned int writebuf; /* Index of buffer currently written to */
	char **mem; /* Array of @bufnum buffers, @bufsize bytes each */
};
85 
struct xillyusb_channel;

/*
 * Represents one bulk USB endpoint. Buffers cycle between @buffers
 * (idle) and @filled_buffers (received data awaiting processing by
 * @workitem); a buffer attached to an in-flight URB is on neither list
 * and is accounted for by @outstanding_urbs.
 */
struct xillyusb_endpoint {
	struct xillyusb_dev *xdev;

	struct mutex ep_mutex; /* serialize operations on endpoint */

	struct list_head buffers;
	struct list_head filled_buffers;
	spinlock_t buffers_lock; /* protect these two lists */

	unsigned int order; /* Page order of each buffer */
	unsigned int buffer_size; /* 1 << (PAGE_SHIFT + order) */

	unsigned int fill_mask;

	int outstanding_urbs;

	struct usb_anchor anchor; /* Anchors all submitted URBs */

	struct xillyfifo fifo; /* Data source for OUT endpoints */

	struct work_struct workitem;

	bool shutting_down; /* Set under @ep_mutex by endpoint_quiesce() */
	bool drained; /* FIFO empty with no outstanding URBs (OUT side) */
	bool wake_on_drain; /* Wake @fifo.waitq when @drained turns true */

	u8 ep_num;
};
116 
/*
 * Represents one device file pair: an FPGA-to-host stream (the "in"
 * direction) and/or a host-to-FPGA stream ("out").
 */
struct xillyusb_channel {
	struct xillyusb_dev *xdev;

	struct xillyfifo *in_fifo;
	struct xillyusb_endpoint *out_ep;
	struct mutex lock; /* protect @out_ep, @in_fifo, bit fields below */

	struct mutex in_mutex; /* serialize fops on FPGA to host stream */
	struct mutex out_mutex; /* serialize fops on host to FPGA stream */
	wait_queue_head_t flushq;

	int chan_idx; /* Index into xdev->channels[] */

	u32 in_consumed_bytes;
	u32 in_current_checkpoint;
	u32 out_bytes;

	unsigned int in_log2_element_size;
	unsigned int out_log2_element_size;
	unsigned int in_log2_fifo_size;
	unsigned int out_log2_fifo_size;

	unsigned int read_data_ok; /* EOF not arrived (yet) */
	unsigned int poll_used;
	unsigned int flushing; /* Awaiting OPCODE_REACHED_CHECKPOINT */
	unsigned int flushed; /* No new data since the last flush */
	unsigned int canceled; /* OPCODE_CANCELED_CHECKPOINT received */

	/* Bit fields protected by @lock except for initialization */
	unsigned readable:1;
	unsigned writable:1;
	unsigned open_for_read:1;
	unsigned open_for_write:1;
	unsigned in_synchronous:1;
	unsigned out_synchronous:1;
	unsigned in_seekable:1;
	unsigned out_seekable:1;
};
155 
/* One bulk transfer buffer, linked on an endpoint's buffer lists */
struct xillybuffer {
	struct list_head entry;
	struct xillyusb_endpoint *ep; /* Owning endpoint */
	void *buf; /* 2^(PAGE_SHIFT + ep->order) bytes */
	unsigned int len; /* Actual length of received data (IN side) */
};
162 
/* Per-device state, reference counted through @kref */
struct xillyusb_dev {
	struct xillyusb_channel *channels;

	struct usb_device	*udev;
	struct device		*dev; /* For dev_err() and such */
	struct kref		kref;
	struct workqueue_struct	*workq;

	int error; /* First error reported; 0 means all is well */
	spinlock_t error_lock; /* protect @error */
	struct work_struct wakeup_workitem; /* Runs wakeup_all() */

	int num_channels;

	struct xillyusb_endpoint *msg_ep; /* OUT endpoint for opcodes */
	struct xillyusb_endpoint *in_ep; /* IN endpoint for all channels */

	struct mutex msg_mutex; /* serialize opcode transmission */

	/* Partial data payload state carried between BULK IN buffers */
	int in_bytes_left;
	int leftover_chan_num;
	unsigned int in_counter; /* Expected counter of non-data opcodes */
	struct mutex process_in_mutex; /* synchronize wakeup_all() */
};
186 
/*
 * kref_mutex is used in xillyusb_open() to prevent the xillyusb_dev
 * struct from being freed during the gap between being found by
 * xillybus_find_inode() and having its reference count incremented.
 */

static DEFINE_MUTEX(kref_mutex);

/*
 * The two opcode enums below are independent namespaces: the numeric
 * values of FPGA-to-host and host-to-FPGA opcodes may coincide.
 */

/* FPGA to host opcodes */
enum {
	OPCODE_DATA = 0,
	OPCODE_QUIESCE_ACK = 1,
	OPCODE_EOF = 2,
	OPCODE_REACHED_CHECKPOINT = 3,
	OPCODE_CANCELED_CHECKPOINT = 4,
};

/* Host to FPGA opcodes */
enum {
	OPCODE_QUIESCE = 0,
	OPCODE_REQ_IDT = 1,
	OPCODE_SET_CHECKPOINT = 2,
	OPCODE_CLOSE = 3,
	OPCODE_SET_PUSH = 4,
	OPCODE_UPDATE_PUSH = 5,
	OPCODE_CANCEL_CHECKPOINT = 6,
	OPCODE_SET_ADDR = 7,
};
215 
/*
 * fifo_write() and fifo_read() are NOT reentrant (i.e. concurrent multiple
 * calls to each on the same FIFO is not allowed) however it's OK to have
 * threads calling each of the two functions once on the same FIFO, and
 * at the same time.
 */

/*
 * Write up to @len bytes from @data into @fifo, fetching the bytes with
 * @copier (one of the xilly_* wrappers below). Returns the number of
 * bytes actually written — possibly less than @len if the FIFO fills up
 * — or a negative error code if @copier failed.
 *
 * Only the (single) writer touches @writepos/@writebuf, so they are
 * accessed without locking; @fill is shared with the reader and is
 * updated under @fifo->lock.
 */
static int fifo_write(struct xillyfifo *fifo,
		      const void *data, unsigned int len,
		      int (*copier)(void *, const void *, int))
{
	unsigned int done = 0;
	unsigned int todo = len;
	unsigned int nmax;
	unsigned int writepos = fifo->writepos;
	unsigned int writebuf = fifo->writebuf;
	unsigned long flags;
	int rc;

	/* Free space as seen now; a concurrent reader only increases it */
	nmax = fifo->size - READ_ONCE(fifo->fill);

	while (1) {
		unsigned int nrail = fifo->bufsize - writepos;
		unsigned int n = min(todo, nmax);

		if (n == 0) {
			/* Done (or FIFO full): publish the new fill level */
			spin_lock_irqsave(&fifo->lock, flags);
			fifo->fill += done;
			spin_unlock_irqrestore(&fifo->lock, flags);

			fifo->writepos = writepos;
			fifo->writebuf = writebuf;

			return done;
		}

		/* Don't copy past the end of the current buffer */
		if (n > nrail)
			n = nrail;

		rc = (*copier)(fifo->mem[writebuf] + writepos, data + done, n);

		if (rc)
			return rc;

		done += n;
		todo -= n;

		writepos += n;
		nmax -= n;

		if (writepos == fifo->bufsize) {
			/* Advance to the next buffer, wrapping around */
			writepos = 0;
			writebuf++;

			if (writebuf == fifo->bufnum)
				writebuf = 0;
		}
	}
}
275 
/*
 * Read up to @len bytes from @fifo into @data, storing the bytes with
 * @copier. Returns the number of bytes actually read — possibly less
 * than @len if the FIFO runs empty — or a negative error code if
 * @copier failed. Counterpart of fifo_write(); see the reentrancy note
 * above it.
 */
static int fifo_read(struct xillyfifo *fifo,
		     void *data, unsigned int len,
		     int (*copier)(void *, const void *, int))
{
	unsigned int done = 0;
	unsigned int todo = len;
	unsigned int fill;
	unsigned int readpos = fifo->readpos;
	unsigned int readbuf = fifo->readbuf;
	unsigned long flags;
	int rc;

	/*
	 * The spinlock here is necessary, because otherwise fifo->fill
	 * could have been increased by fifo_write() after writing data
	 * to the buffer, but this data would potentially not have been
	 * visible on this thread at the time the updated fifo->fill was.
	 * That could lead to reading invalid data.
	 */

	spin_lock_irqsave(&fifo->lock, flags);
	fill = fifo->fill;
	spin_unlock_irqrestore(&fifo->lock, flags);

	while (1) {
		unsigned int nrail = fifo->bufsize - readpos;
		unsigned int n = min(todo, fill);

		if (n == 0) {
			/* Done (or FIFO empty): publish consumed bytes */
			spin_lock_irqsave(&fifo->lock, flags);
			fifo->fill -= done;
			spin_unlock_irqrestore(&fifo->lock, flags);

			fifo->readpos = readpos;
			fifo->readbuf = readbuf;

			return done;
		}

		/* Don't copy past the end of the current buffer */
		if (n > nrail)
			n = nrail;

		rc = (*copier)(data + done, fifo->mem[readbuf] + readpos, n);

		if (rc)
			return rc;

		done += n;
		todo -= n;

		readpos += n;
		fill -= n;

		if (readpos == fifo->bufsize) {
			/* Advance to the next buffer, wrapping around */
			readpos = 0;
			readbuf++;

			if (readbuf == fifo->bufnum)
				readbuf = 0;
		}
	}
}
338 
339 /*
340  * These three wrapper functions are used as the @copier argument to
341  * fifo_write() and fifo_read(), so that they can work directly with
342  * user memory as well.
343  */
344 
345 static int xilly_copy_from_user(void *dst, const void *src, int n)
346 {
347 	if (copy_from_user(dst, (const void __user *)src, n))
348 		return -EFAULT;
349 
350 	return 0;
351 }
352 
353 static int xilly_copy_to_user(void *dst, const void *src, int n)
354 {
355 	if (copy_to_user((void __user *)dst, src, n))
356 		return -EFAULT;
357 
358 	return 0;
359 }
360 
/* Copier callback for kernel-to-kernel copies; cannot fail */
static int xilly_memcpy(void *dst, const void *src, int n)
{
	memcpy(dst, src, (size_t)n);
	return 0;
}
367 
/*
 * Allocate the buffer memory of @fifo for a total capacity of
 * 2^@log2_size bytes, split into buffers of at most
 * 2^(fifo_buf_order + PAGE_SHIFT) bytes each. If a buffer allocation
 * fails, the global @fifo_buf_order is decremented and the whole
 * allocation is retried, so this and all subsequently created FIFOs
 * adapt to memory fragmentation.
 *
 * Returns 0 on success, -ENOMEM if allocation failed even with
 * single-page (order 0) buffers.
 */
static int fifo_init(struct xillyfifo *fifo,
		     unsigned int log2_size)
{
	unsigned int log2_bufnum;
	unsigned int buf_order;
	int i;

	unsigned int log2_fifo_buf_size;

retry:
	log2_fifo_buf_size = fifo_buf_order + PAGE_SHIFT;

	if (log2_size > log2_fifo_buf_size) {
		/* Multiple maximal-sized buffers are needed */
		log2_bufnum = log2_size - log2_fifo_buf_size;
		buf_order = fifo_buf_order;
		fifo->bufsize = 1 << log2_fifo_buf_size;
	} else {
		/* A single buffer holds the entire FIFO */
		log2_bufnum = 0;
		buf_order = (log2_size > PAGE_SHIFT) ?
			log2_size - PAGE_SHIFT : 0;
		fifo->bufsize = 1 << log2_size;
	}

	fifo->bufnum = 1 << log2_bufnum;
	fifo->size = fifo->bufnum * fifo->bufsize;
	fifo->buf_order = buf_order;

	fifo->mem = kmalloc_array(fifo->bufnum, sizeof(void *), GFP_KERNEL);

	if (!fifo->mem)
		return -ENOMEM;

	for (i = 0; i < fifo->bufnum; i++) {
		fifo->mem[i] = (void *)
			__get_free_pages(GFP_KERNEL, buf_order);

		if (!fifo->mem[i])
			goto memfail;
	}

	fifo->fill = 0;
	fifo->readpos = 0;
	fifo->readbuf = 0;
	fifo->writepos = 0;
	fifo->writebuf = 0;
	spin_lock_init(&fifo->lock);
	init_waitqueue_head(&fifo->waitq);
	return 0;

memfail:
	/* Free what was allocated, then retry with smaller buffers */
	for (i--; i >= 0; i--)
		free_pages((unsigned long)fifo->mem[i], buf_order);

	kfree(fifo->mem);
	fifo->mem = NULL;

	if (fifo_buf_order) {
		fifo_buf_order--;
		goto retry;
	} else {
		return -ENOMEM;
	}
}
431 
/*
 * Free @fifo's data buffers and pointer array. Safe to call on a FIFO
 * whose memory was never allocated (fifo->mem is then NULL).
 */
static void fifo_mem_release(struct xillyfifo *fifo)
{
	int i;

	if (!fifo->mem)
		return;

	for (i = 0; i < fifo->bufnum; i++)
		free_pages((unsigned long)fifo->mem[i], fifo->buf_order);

	kfree(fifo->mem);
}
444 
/*
 * When endpoint_quiesce() returns, the endpoint has no URBs submitted,
 * won't accept any new URB submissions, and its related work item doesn't
 * and won't run anymore.
 */

static void endpoint_quiesce(struct xillyusb_endpoint *ep)
{
	/*
	 * Taking @ep_mutex guarantees that any try_queue_bulk_*() in
	 * flight has finished before @shutting_down takes effect, so no
	 * URB is submitted after the kill below.
	 */
	mutex_lock(&ep->ep_mutex);
	ep->shutting_down = true;
	mutex_unlock(&ep->ep_mutex);

	usb_kill_anchored_urbs(&ep->anchor);
	cancel_work_sync(&ep->workitem);
}
460 
461 /*
462  * Note that endpoint_dealloc() also frees fifo memory (if allocated), even
463  * though endpoint_alloc doesn't allocate that memory.
464  */
465 
466 static void endpoint_dealloc(struct xillyusb_endpoint *ep)
467 {
468 	struct list_head *this, *next;
469 
470 	fifo_mem_release(&ep->fifo);
471 
472 	/* Join @filled_buffers with @buffers to free these entries too */
473 	list_splice(&ep->filled_buffers, &ep->buffers);
474 
475 	list_for_each_safe(this, next, &ep->buffers) {
476 		struct xillybuffer *xb =
477 			list_entry(this, struct xillybuffer, entry);
478 
479 		free_pages((unsigned long)xb->buf, ep->order);
480 		kfree(xb);
481 	}
482 
483 	kfree(ep);
484 }
485 
/*
 * Allocate and initialize an endpoint structure for USB endpoint number
 * @ep_num, with a work item running @work and @bufnum buffers of
 * 2^(PAGE_SHIFT + @order) bytes each. The embedded FIFO's memory is NOT
 * allocated here, but endpoint_dealloc() frees it if set up later.
 *
 * Returns the new endpoint, or NULL on allocation failure (partial
 * allocations are unwound via endpoint_dealloc()).
 */
static struct xillyusb_endpoint
*endpoint_alloc(struct xillyusb_dev *xdev,
		u8 ep_num,
		void (*work)(struct work_struct *),
		unsigned int order,
		int bufnum)
{
	int i;

	struct xillyusb_endpoint *ep;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);

	if (!ep)
		return NULL;

	INIT_LIST_HEAD(&ep->buffers);
	INIT_LIST_HEAD(&ep->filled_buffers);

	spin_lock_init(&ep->buffers_lock);
	mutex_init(&ep->ep_mutex);

	init_usb_anchor(&ep->anchor);
	INIT_WORK(&ep->workitem, work);

	ep->order = order;
	ep->buffer_size =  1 << (PAGE_SHIFT + order);
	ep->outstanding_urbs = 0;
	ep->drained = true;
	ep->wake_on_drain = false;
	ep->xdev = xdev;
	ep->ep_num = ep_num;
	ep->shutting_down = false;

	for (i = 0; i < bufnum; i++) {
		struct xillybuffer *xb;
		unsigned long addr;

		xb = kzalloc(sizeof(*xb), GFP_KERNEL);

		if (!xb) {
			endpoint_dealloc(ep);
			return NULL;
		}

		addr = __get_free_pages(GFP_KERNEL, order);

		if (!addr) {
			/* @xb not yet listed, so it's freed by hand */
			kfree(xb);
			endpoint_dealloc(ep);
			return NULL;
		}

		xb->buf = (void *)addr;
		xb->ep = ep;
		list_add_tail(&xb->entry, &ep->buffers);
	}
	return ep;
}
545 
/*
 * kref release callback: tears down the device structure once the last
 * reference is dropped. The endpoint and workqueue pointers may be NULL
 * if setup failed partway through, hence the checks.
 */
static void cleanup_dev(struct kref *kref)
{
	struct xillyusb_dev *xdev =
		container_of(kref, struct xillyusb_dev, kref);

	if (xdev->in_ep)
		endpoint_dealloc(xdev->in_ep);

	if (xdev->msg_ep)
		endpoint_dealloc(xdev->msg_ep);

	if (xdev->workq)
		destroy_workqueue(xdev->workq);

	usb_put_dev(xdev->udev);
	kfree(xdev->channels); /* Argument may be NULL, and that's fine */
	kfree(xdev);
}
564 
/*
 * @process_in_mutex is taken to ensure that bulk_in_work() won't call
 * process_bulk_in() after wakeup_all()'s execution: The latter zeroes all
 * @read_data_ok entries, which will make process_bulk_in() report false
 * errors if executed. The mechanism relies on that xdev->error is assigned
 * a non-zero value by report_io_error() prior to queueing wakeup_all(),
 * which prevents bulk_in_work() from calling process_bulk_in().
 *
 * The fact that wakeup_all() and bulk_in_work() are queued on the same
 * workqueue makes their concurrent execution very unlikely, however the
 * kernel's API doesn't seem to ensure this strictly.
 */

/*
 * Work item queued by report_io_error() (which also takes an extra kref
 * on @xdev for it): wakes up everything that may be sleeping on the
 * device, so blocked callers notice xdev->error. Drops the extra kref
 * at the end.
 */
static void wakeup_all(struct work_struct *work)
{
	int i;
	struct xillyusb_dev *xdev = container_of(work, struct xillyusb_dev,
						 wakeup_workitem);

	mutex_lock(&xdev->process_in_mutex);

	for (i = 0; i < xdev->num_channels; i++) {
		struct xillyusb_channel *chan = &xdev->channels[i];

		mutex_lock(&chan->lock);

		if (chan->in_fifo) {
			/*
			 * Fake an EOF: Even if such arrives, it won't be
			 * processed.
			 */
			chan->read_data_ok = 0;
			wake_up_interruptible(&chan->in_fifo->waitq);
		}

		if (chan->out_ep)
			wake_up_interruptible(&chan->out_ep->fifo.waitq);

		mutex_unlock(&chan->lock);

		wake_up_interruptible(&chan->flushq);
	}

	mutex_unlock(&xdev->process_in_mutex);

	wake_up_interruptible(&xdev->msg_ep->fifo.waitq);

	kref_put(&xdev->kref, cleanup_dev);
}
614 
615 static void report_io_error(struct xillyusb_dev *xdev,
616 			    int errcode)
617 {
618 	unsigned long flags;
619 	bool do_once = false;
620 
621 	spin_lock_irqsave(&xdev->error_lock, flags);
622 	if (!xdev->error) {
623 		xdev->error = errcode;
624 		do_once = true;
625 	}
626 	spin_unlock_irqrestore(&xdev->error_lock, flags);
627 
628 	if (do_once) {
629 		kref_get(&xdev->kref); /* xdev is used by work item */
630 		queue_work(xdev->workq, &xdev->wakeup_workitem);
631 	}
632 }
633 
/*
 * safely_assign_in_fifo() changes the value of chan->in_fifo and ensures
 * the previous pointer is never used after its return.
 */

static void safely_assign_in_fifo(struct xillyusb_channel *chan,
				  struct xillyfifo *fifo)
{
	mutex_lock(&chan->lock);
	chan->in_fifo = fifo;
	mutex_unlock(&chan->lock);

	/*
	 * The IN endpoint's work item may have fetched the old pointer
	 * before the assignment above; wait for such a run to complete.
	 */
	flush_work(&chan->xdev->in_ep->workitem);
}
648 
/*
 * Completion callback for BULK IN URBs (may run in interrupt context,
 * hence the irqsave locking). On success the buffer is queued on
 * @filled_buffers for bulk_in_work() to process; on failure it goes
 * back to the free list. -ENOENT, -ECONNRESET and -ESHUTDOWN indicate
 * deliberate cancellation and are not reported as device errors.
 */
static void bulk_in_completer(struct urb *urb)
{
	struct xillybuffer *xb = urb->context;
	struct xillyusb_endpoint *ep = xb->ep;
	unsigned long flags;

	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN))
			report_io_error(ep->xdev, -EIO);

		spin_lock_irqsave(&ep->buffers_lock, flags);
		list_add_tail(&xb->entry, &ep->buffers);
		ep->outstanding_urbs--;
		spin_unlock_irqrestore(&ep->buffers_lock, flags);

		return;
	}

	xb->len = urb->actual_length;

	spin_lock_irqsave(&ep->buffers_lock, flags);
	list_add_tail(&xb->entry, &ep->filled_buffers);
	spin_unlock_irqrestore(&ep->buffers_lock, flags);

	/* Note: @outstanding_urbs is decremented by bulk_in_work() */
	if (!ep->shutting_down)
		queue_work(ep->xdev->workq, &ep->workitem);
}
678 
/*
 * Completion callback for BULK OUT URBs (may run in interrupt context).
 * The buffer always returns to the free list; errors other than the
 * benign cancellation codes are reported device-wide. The work item is
 * kicked so more FIFO data can be sent with the freed buffer.
 */
static void bulk_out_completer(struct urb *urb)
{
	struct xillybuffer *xb = urb->context;
	struct xillyusb_endpoint *ep = xb->ep;
	unsigned long flags;

	if (urb->status &&
	    (!(urb->status == -ENOENT ||
	       urb->status == -ECONNRESET ||
	       urb->status == -ESHUTDOWN)))
		report_io_error(ep->xdev, -EIO);

	spin_lock_irqsave(&ep->buffers_lock, flags);
	list_add_tail(&xb->entry, &ep->buffers);
	ep->outstanding_urbs--;
	spin_unlock_irqrestore(&ep->buffers_lock, flags);

	if (!ep->shutting_down)
		queue_work(ep->xdev->workq, &ep->workitem);
}
699 
/*
 * Submit BULK IN URBs on @ep for as long as idle buffers are available.
 * Holding @ep_mutex prevents racing with endpoint_quiesce(): nothing is
 * submitted once @shutting_down is set. On a submission failure the
 * buffer is returned to the free list and the error is reported
 * device-wide.
 */
static void try_queue_bulk_in(struct xillyusb_endpoint *ep)
{
	struct xillyusb_dev *xdev = ep->xdev;
	struct xillybuffer *xb;
	struct urb *urb;

	int rc;
	unsigned long flags;
	unsigned int bufsize = ep->buffer_size;

	mutex_lock(&ep->ep_mutex);

	if (ep->shutting_down || xdev->error)
		goto done;

	while (1) {
		spin_lock_irqsave(&ep->buffers_lock, flags);

		if (list_empty(&ep->buffers)) {
			spin_unlock_irqrestore(&ep->buffers_lock, flags);
			goto done;
		}

		/* Claim a buffer; it's off both lists while in flight */
		xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
		list_del(&xb->entry);
		ep->outstanding_urbs++;

		spin_unlock_irqrestore(&ep->buffers_lock, flags);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			report_io_error(xdev, -ENOMEM);
			goto relist;
		}

		usb_fill_bulk_urb(urb, xdev->udev,
				  usb_rcvbulkpipe(xdev->udev, ep->ep_num),
				  xb->buf, bufsize, bulk_in_completer, xb);

		usb_anchor_urb(urb, &ep->anchor);

		rc = usb_submit_urb(urb, GFP_KERNEL);

		if (rc) {
			report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
					-EIO);
			goto unanchor;
		}

		usb_free_urb(urb); /* This just decrements reference count */
	}

unanchor:
	usb_unanchor_urb(urb);
	usb_free_urb(urb);

relist:
	/* Undo the claim made above */
	spin_lock_irqsave(&ep->buffers_lock, flags);
	list_add_tail(&xb->entry, &ep->buffers);
	ep->outstanding_urbs--;
	spin_unlock_irqrestore(&ep->buffers_lock, flags);

done:
	mutex_unlock(&ep->ep_mutex);
}
765 
/*
 * Drain @ep's FIFO into BULK OUT URBs. A partially filled buffer is
 * submitted only when there are no outstanding URBs, so small writes
 * aren't held back indefinitely waiting for a full buffer. Maintains
 * @drained, and wakes @fifo->waitq when FIFO data was consumed or the
 * endpoint drained with @wake_on_drain set. Holding @ep_mutex prevents
 * racing with endpoint_quiesce().
 */
static void try_queue_bulk_out(struct xillyusb_endpoint *ep)
{
	struct xillyfifo *fifo = &ep->fifo;
	struct xillyusb_dev *xdev = ep->xdev;
	struct xillybuffer *xb;
	struct urb *urb;

	int rc;
	unsigned int fill;
	unsigned long flags;
	bool do_wake = false;

	mutex_lock(&ep->ep_mutex);

	if (ep->shutting_down || xdev->error)
		goto done;

	fill = READ_ONCE(fifo->fill) & ep->fill_mask;

	while (1) {
		int count;
		unsigned int max_read;

		spin_lock_irqsave(&ep->buffers_lock, flags);

		/*
		 * Race conditions might have the FIFO filled while the
		 * endpoint is marked as drained here. That doesn't matter,
		 * because the sole purpose of @drained is to ensure that
		 * certain data has been sent on the USB channel before
		 * shutting it down. Hence knowing that the FIFO appears
		 * to be empty with no outstanding URBs at some moment
		 * is good enough.
		 */

		if (!fill) {
			ep->drained = !ep->outstanding_urbs;
			if (ep->drained && ep->wake_on_drain)
				do_wake = true;

			spin_unlock_irqrestore(&ep->buffers_lock, flags);
			goto done;
		}

		ep->drained = false;

		/* Hold back a partial buffer while URBs are in flight */
		if ((fill < ep->buffer_size && ep->outstanding_urbs) ||
		    list_empty(&ep->buffers)) {
			spin_unlock_irqrestore(&ep->buffers_lock, flags);
			goto done;
		}

		/* Claim a buffer; it's off both lists while in flight */
		xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
		list_del(&xb->entry);
		ep->outstanding_urbs++;

		spin_unlock_irqrestore(&ep->buffers_lock, flags);

		max_read = min(fill, ep->buffer_size);

		count = fifo_read(&ep->fifo, xb->buf, max_read, xilly_memcpy);

		/*
		 * xilly_memcpy always returns 0 => fifo_read can't fail =>
		 * count > 0
		 */

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			report_io_error(xdev, -ENOMEM);
			goto relist;
		}

		usb_fill_bulk_urb(urb, xdev->udev,
				  usb_sndbulkpipe(xdev->udev, ep->ep_num),
				  xb->buf, count, bulk_out_completer, xb);

		usb_anchor_urb(urb, &ep->anchor);

		rc = usb_submit_urb(urb, GFP_KERNEL);

		if (rc) {
			report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
					-EIO);
			goto unanchor;
		}

		usb_free_urb(urb); /* This just decrements reference count */

		fill -= count;
		do_wake = true;
	}

unanchor:
	usb_unanchor_urb(urb);
	usb_free_urb(urb);

relist:
	/* Undo the claim made above */
	spin_lock_irqsave(&ep->buffers_lock, flags);
	list_add_tail(&xb->entry, &ep->buffers);
	ep->outstanding_urbs--;
	spin_unlock_irqrestore(&ep->buffers_lock, flags);

done:
	mutex_unlock(&ep->ep_mutex);

	if (do_wake)
		wake_up_interruptible(&fifo->waitq);
}
875 
876 static void bulk_out_work(struct work_struct *work)
877 {
878 	struct xillyusb_endpoint *ep = container_of(work,
879 						    struct xillyusb_endpoint,
880 						    workitem);
881 	try_queue_bulk_out(ep);
882 }
883 
/*
 * Handle a non-data opcode that arrived on the BULK IN stream for
 * channel @chan_num. Returns 0, or -EIO on a protocol violation (bad
 * channel ID, unexpected EOF, unknown opcode).
 */
static int process_in_opcode(struct xillyusb_dev *xdev,
			     int opcode,
			     int chan_num)
{
	struct xillyusb_channel *chan;
	struct device *dev = xdev->dev;
	int chan_idx = chan_num >> 1; /* LSB of chan_num is the direction */

	if (chan_idx >= xdev->num_channels) {
		dev_err(dev, "Received illegal channel ID %d from FPGA\n",
			chan_num);
		return -EIO;
	}

	chan = &xdev->channels[chan_idx];

	switch (opcode) {
	case OPCODE_EOF:
		if (!chan->read_data_ok) {
			dev_err(dev, "Received unexpected EOF for channel %d\n",
				chan_num);
			return -EIO;
		}

		/*
		 * A write memory barrier ensures that the FIFO's fill level
		 * is visible before read_data_ok turns zero, so the data in
		 * the FIFO isn't missed by the consumer.
		 */
		smp_wmb();
		WRITE_ONCE(chan->read_data_ok, 0);
		wake_up_interruptible(&chan->in_fifo->waitq);
		break;

	case OPCODE_REACHED_CHECKPOINT:
		chan->flushing = 0;
		wake_up_interruptible(&chan->flushq);
		break;

	case OPCODE_CANCELED_CHECKPOINT:
		chan->canceled = 1;
		wake_up_interruptible(&chan->flushq);
		break;

	default:
		dev_err(dev, "Received illegal opcode %d from FPGA\n",
			opcode);
		return -EIO;
	}

	return 0;
}
936 
/*
 * Parse one received BULK IN buffer: a sequence of 32-bit little-endian
 * control words, each possibly followed by a data payload. A payload
 * may extend past the end of the buffer; the remaining byte count and
 * channel are saved in xdev->in_bytes_left / xdev->leftover_chan_num
 * and consumed first from the next buffer. Returns 0, or -EIO on any
 * protocol violation.
 */
static int process_bulk_in(struct xillybuffer *xb)
{
	struct xillyusb_endpoint *ep = xb->ep;
	struct xillyusb_dev *xdev = ep->xdev;
	struct device *dev = xdev->dev;
	int dws = xb->len >> 2; /* Payload length in 32-bit words */
	__le32 *p = xb->buf;
	u32 ctrlword;
	struct xillyusb_channel *chan;
	struct xillyfifo *fifo;
	int chan_num = 0, opcode;
	int chan_idx;
	int bytes, count, dwconsume;
	int in_bytes_left = 0;
	int rc;

	if ((dws << 2) != xb->len) {
		dev_err(dev, "Received BULK IN transfer with %d bytes, not a multiple of 4\n",
			xb->len);
		return -EIO;
	}

	/* First finish a data payload left over from the previous buffer */
	if (xdev->in_bytes_left) {
		bytes = min(xdev->in_bytes_left, dws << 2);
		in_bytes_left = xdev->in_bytes_left - bytes;
		chan_num = xdev->leftover_chan_num;
		goto resume_leftovers;
	}

	while (dws) {
		/*
		 * Control word layout: bits 11:0 channel number,
		 * bits 21:12 count, bits 27:24 opcode.
		 */
		ctrlword = le32_to_cpu(*p++);
		dws--;

		chan_num = ctrlword & 0xfff;
		count = (ctrlword >> 12) & 0x3ff;
		opcode = (ctrlword >> 24) & 0xf;

		if (opcode != OPCODE_DATA) {
			/* Non-data opcodes carry a running counter in @count */
			unsigned int in_counter = xdev->in_counter++ & 0x3ff;

			if (count != in_counter) {
				dev_err(dev, "Expected opcode counter %d, got %d\n",
					in_counter, count);
				return -EIO;
			}

			rc = process_in_opcode(xdev, opcode, chan_num);

			if (rc)
				return rc;

			continue;
		}

		/* For OPCODE_DATA, @count is the payload length minus one */
		bytes = min(count + 1, dws << 2);
		in_bytes_left = count + 1 - bytes;

resume_leftovers:
		chan_idx = chan_num >> 1;

		/* Only odd channel IDs may carry FPGA-to-host data */
		if (!(chan_num & 1) || chan_idx >= xdev->num_channels ||
		    !xdev->channels[chan_idx].read_data_ok) {
			dev_err(dev, "Received illegal channel ID %d from FPGA\n",
				chan_num);
			return -EIO;
		}
		chan = &xdev->channels[chan_idx];

		fifo = chan->in_fifo;

		if (unlikely(!fifo))
			return -EIO; /* We got really unexpected data */

		if (bytes != fifo_write(fifo, p, bytes, xilly_memcpy)) {
			dev_err(dev, "Misbehaving FPGA overflowed an upstream FIFO!\n");
			return -EIO;
		}

		wake_up_interruptible(&fifo->waitq);

		/* Payloads are padded to 32-bit word boundaries */
		dwconsume = (bytes + 3) >> 2;
		dws -= dwconsume;
		p += dwconsume;
	}

	xdev->in_bytes_left = in_bytes_left;
	xdev->leftover_chan_num = chan_num;
	return 0;
}
1026 
/*
 * Work item of the IN endpoint: process all filled buffers in arrival
 * order, return them to the free list, and resubmit URBs for the freed
 * buffers. @process_in_mutex keeps process_bulk_in() from running
 * concurrently with wakeup_all() (see the comment above wakeup_all()).
 */
static void bulk_in_work(struct work_struct *work)
{
	struct xillyusb_endpoint *ep =
		container_of(work, struct xillyusb_endpoint, workitem);
	struct xillyusb_dev *xdev = ep->xdev;
	unsigned long flags;
	struct xillybuffer *xb;
	bool consumed = false;
	int rc = 0;

	mutex_lock(&xdev->process_in_mutex);

	spin_lock_irqsave(&ep->buffers_lock, flags);

	while (1) {
		if (rc || list_empty(&ep->filled_buffers)) {
			spin_unlock_irqrestore(&ep->buffers_lock, flags);
			mutex_unlock(&xdev->process_in_mutex);

			if (rc)
				report_io_error(xdev, rc);
			else if (consumed)
				try_queue_bulk_in(ep);

			return;
		}

		xb = list_first_entry(&ep->filled_buffers, struct xillybuffer,
				      entry);
		list_del(&xb->entry);

		/* Drop the lock while parsing; the buffer is ours now */
		spin_unlock_irqrestore(&ep->buffers_lock, flags);

		consumed = true;

		/* Skip parsing after a device-wide error has been flagged */
		if (!xdev->error)
			rc = process_bulk_in(xb);

		spin_lock_irqsave(&ep->buffers_lock, flags);
		list_add_tail(&xb->entry, &ep->buffers);
		ep->outstanding_urbs--;
	}
}
1070 
/*
 * Send an 8-byte opcode message (control word + data word) to the FPGA
 * through the message endpoint, sleeping until the message FIFO has
 * room for it. Returns 0 on success or xdev->error; never -EINTR (see
 * comment inside).
 */
static int xillyusb_send_opcode(struct xillyusb_dev *xdev,
				int chan_num, char opcode, u32 data)
{
	struct xillyusb_endpoint *ep = xdev->msg_ep;
	struct xillyfifo *fifo = &ep->fifo;
	__le32 msg[2];

	int rc = 0;

	/* Bits 11:0 hold the channel number, bits 27:24 the opcode */
	msg[0] = cpu_to_le32((chan_num & 0xfff) |
			     ((opcode & 0xf) << 24));
	msg[1] = cpu_to_le32(data);

	mutex_lock(&xdev->msg_mutex);

	/*
	 * The wait queue is woken with the interruptible variant, so the
	 * wait function matches, however returning because of an interrupt
	 * will mess things up considerably, in particular when the caller is
	 * the release method. And the xdev->error part prevents being stuck
	 * forever in the event of a bizarre hardware bug: Pull the USB plug.
	 */

	while (wait_event_interruptible(fifo->waitq,
					fifo->fill <= (fifo->size - 8) ||
					xdev->error))
		; /* Empty loop */

	if (xdev->error) {
		rc = xdev->error;
		goto unlock_done;
	}

	fifo_write(fifo, (void *)msg, 8, xilly_memcpy);

	try_queue_bulk_out(ep);

unlock_done:
	mutex_unlock(&xdev->msg_mutex);

	return rc;
}
1113 
/*
 * Note that flush_downstream() merely waits for the data to arrive to
 * the application logic at the FPGA -- unlike PCIe Xillybus' counterpart,
 * it does nothing to make it happen (and neither is it necessary).
 *
 * This function is not reentrant for the same @chan, but this is covered
 * by the fact that for any given @chan, it's called either by the open,
 * write, llseek and flush fops methods, which can't run in parallel (and the
 * write + flush and llseek method handlers are protected with out_mutex).
 *
 * chan->flushed is there to avoid multiple flushes at the same position,
 * in particular as a result of programs that close the file descriptor
 * e.g. after a dup2() for redirection.
 */

/*
 * Returns 0 on success, -ETIMEDOUT if @timeout (jiffies, 0 = wait
 * forever) expired, -EINTR if @interruptible and a signal arrived, or
 * a device error code.
 */
static int flush_downstream(struct xillyusb_channel *chan,
			    long timeout,
			    bool interruptible)
{
	struct xillyusb_dev *xdev = chan->xdev;
	int chan_num = chan->chan_idx << 1;
	long deadline, left_to_sleep;
	int rc;

	if (chan->flushed)
		return 0;

	deadline = jiffies + 1 + timeout;

	/* A previous flush is still pending: cancel it first */
	if (chan->flushing) {
		long cancel_deadline = jiffies + 1 + XILLY_RESPONSE_TIMEOUT;

		chan->canceled = 0;
		rc = xillyusb_send_opcode(xdev, chan_num,
					  OPCODE_CANCEL_CHECKPOINT, 0);

		if (rc)
			return rc; /* Only real error, never -EINTR */

		/* Ignoring interrupts. Cancellation must be handled */
		while (!chan->canceled) {
			left_to_sleep = cancel_deadline - ((long)jiffies);

			if (left_to_sleep <= 0) {
				/* FPGA didn't respond: device-wide error */
				report_io_error(xdev, -EIO);
				return -EIO;
			}

			rc = wait_event_interruptible_timeout(chan->flushq,
							      chan->canceled ||
							      xdev->error,
							      left_to_sleep);

			if (xdev->error)
				return xdev->error;
		}
	}

	chan->flushing = 1;

	/*
	 * The checkpoint is given in terms of data elements, not bytes. As
	 * a result, if less than an element's worth of data is stored in the
	 * FIFO, it's not flushed, including the flush before closing, which
	 * means that such data is lost. This is consistent with PCIe Xillybus.
	 */

	rc = xillyusb_send_opcode(xdev, chan_num,
				  OPCODE_SET_CHECKPOINT,
				  chan->out_bytes >>
				  chan->out_log2_element_size);

	if (rc)
		return rc; /* Only real error, never -EINTR */

	/* @timeout == 0 means wait without a deadline */
	if (!timeout) {
		while (chan->flushing) {
			rc = wait_event_interruptible(chan->flushq,
						      !chan->flushing ||
						      xdev->error);
			if (xdev->error)
				return xdev->error;

			if (interruptible && rc)
				return -EINTR;
		}

		goto done;
	}

	while (chan->flushing) {
		left_to_sleep = deadline - ((long)jiffies);

		if (left_to_sleep <= 0)
			return -ETIMEDOUT;

		rc = wait_event_interruptible_timeout(chan->flushq,
						      !chan->flushing ||
						      xdev->error,
						      left_to_sleep);

		if (xdev->error)
			return xdev->error;

		if (interruptible && rc < 0)
			return -EINTR;
	}

done:
	chan->flushed = 1;
	return 0;
}
1226 
1227 /* request_read_anything(): Ask the FPGA for any little amount of data */
1228 static int request_read_anything(struct xillyusb_channel *chan,
1229 				 char opcode)
1230 {
1231 	struct xillyusb_dev *xdev = chan->xdev;
1232 	unsigned int sh = chan->in_log2_element_size;
1233 	int chan_num = (chan->chan_idx << 1) | 1;
1234 	u32 mercy = chan->in_consumed_bytes + (2 << sh) - 1;
1235 
1236 	return xillyusb_send_opcode(xdev, chan_num, opcode, mercy >> sh);
1237 }
1238 
1239 static int xillyusb_open(struct inode *inode, struct file *filp)
1240 {
1241 	struct xillyusb_dev *xdev;
1242 	struct xillyusb_channel *chan;
1243 	struct xillyfifo *in_fifo = NULL;
1244 	struct xillyusb_endpoint *out_ep = NULL;
1245 	int rc;
1246 	int index;
1247 
1248 	mutex_lock(&kref_mutex);
1249 
1250 	rc = xillybus_find_inode(inode, (void **)&xdev, &index);
1251 	if (rc) {
1252 		mutex_unlock(&kref_mutex);
1253 		return rc;
1254 	}
1255 
1256 	kref_get(&xdev->kref);
1257 	mutex_unlock(&kref_mutex);
1258 
1259 	chan = &xdev->channels[index];
1260 	filp->private_data = chan;
1261 
1262 	mutex_lock(&chan->lock);
1263 
1264 	rc = -ENODEV;
1265 
1266 	if (xdev->error)
1267 		goto unmutex_fail;
1268 
1269 	if (((filp->f_mode & FMODE_READ) && !chan->readable) ||
1270 	    ((filp->f_mode & FMODE_WRITE) && !chan->writable))
1271 		goto unmutex_fail;
1272 
1273 	if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_READ) &&
1274 	    chan->in_synchronous) {
1275 		dev_err(xdev->dev,
1276 			"open() failed: O_NONBLOCK not allowed for read on this device\n");
1277 		goto unmutex_fail;
1278 	}
1279 
1280 	if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_WRITE) &&
1281 	    chan->out_synchronous) {
1282 		dev_err(xdev->dev,
1283 			"open() failed: O_NONBLOCK not allowed for write on this device\n");
1284 		goto unmutex_fail;
1285 	}
1286 
1287 	rc = -EBUSY;
1288 
1289 	if (((filp->f_mode & FMODE_READ) && chan->open_for_read) ||
1290 	    ((filp->f_mode & FMODE_WRITE) && chan->open_for_write))
1291 		goto unmutex_fail;
1292 
1293 	if (filp->f_mode & FMODE_READ)
1294 		chan->open_for_read = 1;
1295 
1296 	if (filp->f_mode & FMODE_WRITE)
1297 		chan->open_for_write = 1;
1298 
1299 	mutex_unlock(&chan->lock);
1300 
1301 	if (filp->f_mode & FMODE_WRITE) {
1302 		out_ep = endpoint_alloc(xdev,
1303 					(chan->chan_idx + 2) | USB_DIR_OUT,
1304 					bulk_out_work, BUF_SIZE_ORDER, BUFNUM);
1305 
1306 		if (!out_ep) {
1307 			rc = -ENOMEM;
1308 			goto unopen;
1309 		}
1310 
1311 		rc = fifo_init(&out_ep->fifo, chan->out_log2_fifo_size);
1312 
1313 		if (rc)
1314 			goto late_unopen;
1315 
1316 		out_ep->fill_mask = -(1 << chan->out_log2_element_size);
1317 		chan->out_bytes = 0;
1318 		chan->flushed = 0;
1319 
1320 		/*
1321 		 * Sending a flush request to a previously closed stream
1322 		 * effectively opens it, and also waits until the command is
1323 		 * confirmed by the FPGA. The latter is necessary because the
1324 		 * data is sent through a separate BULK OUT endpoint, and the
1325 		 * xHCI controller is free to reorder transmissions.
1326 		 *
1327 		 * This can't go wrong unless there's a serious hardware error
1328 		 * (or the computer is stuck for 500 ms?)
1329 		 */
1330 		rc = flush_downstream(chan, XILLY_RESPONSE_TIMEOUT, false);
1331 
1332 		if (rc == -ETIMEDOUT) {
1333 			rc = -EIO;
1334 			report_io_error(xdev, rc);
1335 		}
1336 
1337 		if (rc)
1338 			goto late_unopen;
1339 	}
1340 
1341 	if (filp->f_mode & FMODE_READ) {
1342 		in_fifo = kzalloc(sizeof(*in_fifo), GFP_KERNEL);
1343 
1344 		if (!in_fifo) {
1345 			rc = -ENOMEM;
1346 			goto late_unopen;
1347 		}
1348 
1349 		rc = fifo_init(in_fifo, chan->in_log2_fifo_size);
1350 
1351 		if (rc) {
1352 			kfree(in_fifo);
1353 			goto late_unopen;
1354 		}
1355 	}
1356 
1357 	mutex_lock(&chan->lock);
1358 	if (in_fifo) {
1359 		chan->in_fifo = in_fifo;
1360 		chan->read_data_ok = 1;
1361 	}
1362 	if (out_ep)
1363 		chan->out_ep = out_ep;
1364 	mutex_unlock(&chan->lock);
1365 
1366 	if (in_fifo) {
1367 		u32 in_checkpoint = 0;
1368 
1369 		if (!chan->in_synchronous)
1370 			in_checkpoint = in_fifo->size >>
1371 				chan->in_log2_element_size;
1372 
1373 		chan->in_consumed_bytes = 0;
1374 		chan->poll_used = 0;
1375 		chan->in_current_checkpoint = in_checkpoint;
1376 		rc = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
1377 					  OPCODE_SET_CHECKPOINT,
1378 					  in_checkpoint);
1379 
1380 		if (rc) /* Failure guarantees that opcode wasn't sent */
1381 			goto unfifo;
1382 
1383 		/*
1384 		 * In non-blocking mode, request the FPGA to send any data it
1385 		 * has right away. Otherwise, the first read() will always
1386 		 * return -EAGAIN, which is OK strictly speaking, but ugly.
1387 		 * Checking and unrolling if this fails isn't worth the
1388 		 * effort -- the error is propagated to the first read()
1389 		 * anyhow.
1390 		 */
1391 		if (filp->f_flags & O_NONBLOCK)
1392 			request_read_anything(chan, OPCODE_SET_PUSH);
1393 	}
1394 
1395 	return 0;
1396 
1397 unfifo:
1398 	chan->read_data_ok = 0;
1399 	safely_assign_in_fifo(chan, NULL);
1400 	fifo_mem_release(in_fifo);
1401 	kfree(in_fifo);
1402 
1403 	if (out_ep) {
1404 		mutex_lock(&chan->lock);
1405 		chan->out_ep = NULL;
1406 		mutex_unlock(&chan->lock);
1407 	}
1408 
1409 late_unopen:
1410 	if (out_ep)
1411 		endpoint_dealloc(out_ep);
1412 
1413 unopen:
1414 	mutex_lock(&chan->lock);
1415 
1416 	if (filp->f_mode & FMODE_READ)
1417 		chan->open_for_read = 0;
1418 
1419 	if (filp->f_mode & FMODE_WRITE)
1420 		chan->open_for_write = 0;
1421 
1422 	mutex_unlock(&chan->lock);
1423 
1424 	kref_put(&xdev->kref, cleanup_dev);
1425 
1426 	return rc;
1427 
1428 unmutex_fail:
1429 	kref_put(&xdev->kref, cleanup_dev);
1430 	mutex_unlock(&chan->lock);
1431 	return rc;
1432 }
1433 
static ssize_t xillyusb_read(struct file *filp, char __user *userbuf,
			     size_t count, loff_t *f_pos)
{
	struct xillyusb_channel *chan = filp->private_data;
	struct xillyusb_dev *xdev = chan->xdev;
	struct xillyfifo *fifo = chan->in_fifo;
	int chan_num = (chan->chan_idx << 1) | 1; /* Odd number: upstream direction */

	long deadline, left_to_sleep;
	int bytes_done = 0;
	bool sent_set_push = false;
	int rc;

	deadline = jiffies + 1 + XILLY_RX_TIMEOUT;

	rc = mutex_lock_interruptible(&chan->in_mutex);

	if (rc)
		return rc;

	while (1) {
		u32 fifo_checkpoint_bytes, complete_checkpoint_bytes;
		u32 complete_checkpoint, fifo_checkpoint;
		u32 checkpoint;
		s32 diff, leap;
		unsigned int sh = chan->in_log2_element_size;
		bool checkpoint_for_complete;

		/* Drain whatever the in-FIFO already holds into user memory */
		rc = fifo_read(fifo, (__force void *)userbuf + bytes_done,
			       count - bytes_done, xilly_copy_to_user);

		if (rc < 0)
			break;

		bytes_done += rc;
		chan->in_consumed_bytes += rc;

		left_to_sleep = deadline - ((long)jiffies);

		/*
		 * Some 32-bit arithmetic that may wrap. Note that
		 * complete_checkpoint is rounded up to the closest element
		 * boundary, because the read() can't be completed otherwise.
		 * fifo_checkpoint_bytes is rounded down, because it protects
		 * in_fifo from overflowing.
		 */

		fifo_checkpoint_bytes = chan->in_consumed_bytes + fifo->size;
		complete_checkpoint_bytes =
			chan->in_consumed_bytes + count - bytes_done;

		fifo_checkpoint = fifo_checkpoint_bytes >> sh;
		complete_checkpoint =
			(complete_checkpoint_bytes + (1 << sh) - 1) >> sh;

		/* diff >= 0: completing this read() can't overflow in_fifo */
		diff = (fifo_checkpoint - complete_checkpoint) << sh;

		if (chan->in_synchronous && diff >= 0) {
			checkpoint = complete_checkpoint;
			checkpoint_for_complete = true;
		} else {
			checkpoint = fifo_checkpoint;
			checkpoint_for_complete = false;
		}

		/* How far the new checkpoint moves from the current one */
		leap = (checkpoint - chan->in_current_checkpoint) << sh;

		/*
		 * To prevent flooding of OPCODE_SET_CHECKPOINT commands as
		 * data is consumed, it's issued only if it moves the
		 * checkpoint by at least an 8th of the FIFO's size, or if
		 * it's necessary to complete the number of bytes requested by
		 * the read() call.
		 *
		 * chan->read_data_ok is checked to spare an unnecessary
		 * submission after receiving EOF, however it's harmless if
		 * such slips away.
		 */

		if (chan->read_data_ok &&
		    (leap > (fifo->size >> 3) ||
		     (checkpoint_for_complete && leap > 0))) {
			chan->in_current_checkpoint = checkpoint;
			rc = xillyusb_send_opcode(xdev, chan_num,
						  OPCODE_SET_CHECKPOINT,
						  checkpoint);

			if (rc)
				break;
		}

		/* Done when fulfilled, or when timed out with partial data */
		if (bytes_done == count ||
		    (left_to_sleep <= 0 && bytes_done))
			break;

		/*
		 * Reaching here means that the FIFO was empty when
		 * fifo_read() returned, but not necessarily right now. Error
		 * and EOF are checked and reported only now, so that no data
		 * that managed its way to the FIFO is lost.
		 */

		if (!READ_ONCE(chan->read_data_ok)) { /* FPGA has sent EOF */
			/* Has data slipped into the FIFO since fifo_read()? */
			smp_rmb();
			if (READ_ONCE(fifo->fill))
				continue;

			rc = 0;
			break;
		}

		if (xdev->error) {
			rc = xdev->error;
			break;
		}

		if (filp->f_flags & O_NONBLOCK) {
			rc = -EAGAIN;
			break;
		}

		if (!sent_set_push) {
			/*
			 * Ask the FPGA to push data once enough has
			 * accumulated to complete this read(). Sent at most
			 * once per read() call.
			 */
			rc = xillyusb_send_opcode(xdev, chan_num,
						  OPCODE_SET_PUSH,
						  complete_checkpoint);

			if (rc)
				break;

			sent_set_push = true;
		}

		if (left_to_sleep > 0) {
			/*
			 * Note that when xdev->error is set (e.g. when the
			 * device is unplugged), read_data_ok turns zero and
			 * fifo->waitq is awaken.
			 * Therefore no special attention to xdev->error.
			 */

			rc = wait_event_interruptible_timeout
				(fifo->waitq,
				 fifo->fill || !chan->read_data_ok,
				 left_to_sleep);
		} else { /* bytes_done == 0 */
			/* Tell FPGA to send anything it has */
			rc = request_read_anything(chan, OPCODE_UPDATE_PUSH);

			if (rc)
				break;

			rc = wait_event_interruptible
				(fifo->waitq,
				 fifo->fill || !chan->read_data_ok);
		}

		if (rc < 0) {
			rc = -EINTR;
			break;
		}
	}

	/*
	 * For non-blocking or poll()-driven usage with the FIFO drained,
	 * prod the FPGA to send whatever it has, so the next read()/poll()
	 * finds data waiting.
	 */
	if (((filp->f_flags & O_NONBLOCK) || chan->poll_used) &&
	    !READ_ONCE(fifo->fill))
		request_read_anything(chan, OPCODE_SET_PUSH);

	mutex_unlock(&chan->in_mutex);

	/* Partial data outranks an error / -EAGAIN indication */
	if (bytes_done)
		return bytes_done;

	return rc;
}
1608 
1609 static int xillyusb_flush(struct file *filp, fl_owner_t id)
1610 {
1611 	struct xillyusb_channel *chan = filp->private_data;
1612 	int rc;
1613 
1614 	if (!(filp->f_mode & FMODE_WRITE))
1615 		return 0;
1616 
1617 	rc = mutex_lock_interruptible(&chan->out_mutex);
1618 
1619 	if (rc)
1620 		return rc;
1621 
1622 	/*
1623 	 * One second's timeout on flushing. Interrupts are ignored, because if
1624 	 * the user pressed CTRL-C, that interrupt will still be in flight by
1625 	 * the time we reach here, and the opportunity to flush is lost.
1626 	 */
1627 	rc = flush_downstream(chan, HZ, false);
1628 
1629 	mutex_unlock(&chan->out_mutex);
1630 
1631 	if (rc == -ETIMEDOUT) {
1632 		/* The things you do to use dev_warn() and not pr_warn() */
1633 		struct xillyusb_dev *xdev = chan->xdev;
1634 
1635 		mutex_lock(&chan->lock);
1636 		if (!xdev->error)
1637 			dev_warn(xdev->dev,
1638 				 "Timed out while flushing. Output data may be lost.\n");
1639 		mutex_unlock(&chan->lock);
1640 	}
1641 
1642 	return rc;
1643 }
1644 
static ssize_t xillyusb_write(struct file *filp, const char __user *userbuf,
			      size_t count, loff_t *f_pos)
{
	struct xillyusb_channel *chan = filp->private_data;
	struct xillyusb_dev *xdev = chan->xdev;
	struct xillyfifo *fifo = &chan->out_ep->fifo;
	int rc;

	rc = mutex_lock_interruptible(&chan->out_mutex);

	if (rc)
		return rc;

	while (1) {
		if (xdev->error) {
			rc = xdev->error;
			break;
		}

		if (count == 0)
			break;

		/* Nonzero rc: bytes accepted into the FIFO, or negative error */
		rc = fifo_write(fifo, (__force void *)userbuf, count,
				xilly_copy_from_user);

		if (rc != 0)
			break;

		/* FIFO full, and we can't wait for room in it */
		if (filp->f_flags & O_NONBLOCK) {
			rc = -EAGAIN;
			break;
		}

		/* Wait until there's room in the FIFO, or a device error */
		if (wait_event_interruptible
		    (fifo->waitq,
		     fifo->fill != fifo->size || xdev->error)) {
			rc = -EINTR;
			break;
		}
	}

	if (rc < 0)
		goto done;

	chan->out_bytes += rc;

	if (rc) {
		/* Data was queued: kick the BULK OUT endpoint to send it */
		try_queue_bulk_out(chan->out_ep);
		chan->flushed = 0;
	}

	if (chan->out_synchronous) {
		/* Synchronous stream: wait (interruptibly) for the flush */
		int flush_rc = flush_downstream(chan, 0, true);

		if (flush_rc && !rc)
			rc = flush_rc;
	}

done:
	mutex_unlock(&chan->out_mutex);

	return rc;
}
1708 
/*
 * Close a device node: tell the FPGA to close the relevant stream(s), tear
 * down the per-direction resources, and drop the device reference taken by
 * xillyusb_open().
 */
static int xillyusb_release(struct inode *inode, struct file *filp)
{
	struct xillyusb_channel *chan = filp->private_data;
	struct xillyusb_dev *xdev = chan->xdev;
	int rc_read = 0, rc_write = 0;

	if (filp->f_mode & FMODE_READ) {
		struct xillyfifo *in_fifo = chan->in_fifo;

		rc_read = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
					       OPCODE_CLOSE, 0);
		/*
		 * If rc_read is nonzero, xdev->error indicates a global
		 * device error. The error is reported later, so that
		 * resources are freed.
		 *
		 * Looping on wait_event_interruptible() kinda breaks the idea
		 * of being interruptible, and this should have been
		 * wait_event(). Only it's being waken with
		 * wake_up_interruptible() for the sake of other uses. If
		 * there's a global device error, chan->read_data_ok is
		 * deasserted and the wait queue is awaken, so this is covered.
		 */

		while (wait_event_interruptible(in_fifo->waitq,
						!chan->read_data_ok))
			; /* Empty loop */

		safely_assign_in_fifo(chan, NULL);
		fifo_mem_release(in_fifo);
		kfree(in_fifo);

		mutex_lock(&chan->lock);
		chan->open_for_read = 0;
		mutex_unlock(&chan->lock);
	}

	if (filp->f_mode & FMODE_WRITE) {
		struct xillyusb_endpoint *ep = chan->out_ep;
		/*
		 * chan->flushing isn't zeroed. If the pre-release flush timed
		 * out, a cancel request will be sent before the next
		 * OPCODE_SET_CHECKPOINT (i.e. when the file is opened again).
		 * This is despite that the FPGA forgets about the checkpoint
		 * request as the file closes. Still, in an exceptional race
		 * condition, the FPGA could send an OPCODE_REACHED_CHECKPOINT
		 * just before closing that would reach the host after the
		 * file has re-opened.
		 */

		/* Detach the endpoint from the channel before tearing it down */
		mutex_lock(&chan->lock);
		chan->out_ep = NULL;
		mutex_unlock(&chan->lock);

		endpoint_quiesce(ep);
		endpoint_dealloc(ep);

		/* See comments on rc_read above */
		rc_write = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
						OPCODE_CLOSE, 0);

		mutex_lock(&chan->lock);
		chan->open_for_write = 0;
		mutex_unlock(&chan->lock);
	}

	kref_put(&xdev->kref, cleanup_dev);

	return rc_read ? rc_read : rc_write;
}
1779 
1780 /*
1781  * Xillybus' API allows device nodes to be seekable, giving the user
1782  * application access to a RAM array on the FPGA (or logic emulating it).
1783  */
1784 
1785 static loff_t xillyusb_llseek(struct file *filp, loff_t offset, int whence)
1786 {
1787 	struct xillyusb_channel *chan = filp->private_data;
1788 	struct xillyusb_dev *xdev = chan->xdev;
1789 	loff_t pos = filp->f_pos;
1790 	int rc = 0;
1791 	unsigned int log2_element_size = chan->readable ?
1792 		chan->in_log2_element_size : chan->out_log2_element_size;
1793 
1794 	/*
1795 	 * Take both mutexes not allowing interrupts, since it seems like
1796 	 * common applications don't expect an -EINTR here. Besides, multiple
1797 	 * access to a single file descriptor on seekable devices is a mess
1798 	 * anyhow.
1799 	 */
1800 
1801 	mutex_lock(&chan->out_mutex);
1802 	mutex_lock(&chan->in_mutex);
1803 
1804 	switch (whence) {
1805 	case SEEK_SET:
1806 		pos = offset;
1807 		break;
1808 	case SEEK_CUR:
1809 		pos += offset;
1810 		break;
1811 	case SEEK_END:
1812 		pos = offset; /* Going to the end => to the beginning */
1813 		break;
1814 	default:
1815 		rc = -EINVAL;
1816 		goto end;
1817 	}
1818 
1819 	/* In any case, we must finish on an element boundary */
1820 	if (pos & ((1 << log2_element_size) - 1)) {
1821 		rc = -EINVAL;
1822 		goto end;
1823 	}
1824 
1825 	rc = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
1826 				  OPCODE_SET_ADDR,
1827 				  pos >> log2_element_size);
1828 
1829 	if (rc)
1830 		goto end;
1831 
1832 	if (chan->writable) {
1833 		chan->flushed = 0;
1834 		rc = flush_downstream(chan, HZ, false);
1835 	}
1836 
1837 end:
1838 	mutex_unlock(&chan->out_mutex);
1839 	mutex_unlock(&chan->in_mutex);
1840 
1841 	if (rc) /* Return error after releasing mutexes */
1842 		return rc;
1843 
1844 	filp->f_pos = pos;
1845 
1846 	return pos;
1847 }
1848 
/* poll() support. Note the restriction on synchronous read streams below. */
static __poll_t xillyusb_poll(struct file *filp, poll_table *wait)
{
	struct xillyusb_channel *chan = filp->private_data;
	__poll_t mask = 0;

	if (chan->in_fifo)
		poll_wait(filp, &chan->in_fifo->waitq, wait);

	if (chan->out_ep)
		poll_wait(filp, &chan->out_ep->fifo.waitq, wait);

	/*
	 * If this is the first time poll() is called, and the file is
	 * readable, set the relevant flag. Also tell the FPGA to send all it
	 * has, to kickstart the mechanism that ensures there's always some
	 * data in in_fifo unless the stream is dry end-to-end. Note that the
	 * first poll() may not return a EPOLLIN, even if there's data on the
	 * FPGA. Rather, the data will arrive soon, and trigger the relevant
	 * wait queue.
	 */

	if (!chan->poll_used && chan->in_fifo) {
		chan->poll_used = 1;
		request_read_anything(chan, OPCODE_SET_PUSH);
	}

	/*
	 * poll() won't play ball regarding read() channels which
	 * are synchronous. Allowing that will create situations where data has
	 * been delivered at the FPGA, and users expecting select() to wake up,
	 * which it may not. So make it never work.
	 */

	/* Readable also on EOF (!read_data_ok), so read() returns 0 promptly */
	if (chan->in_fifo && !chan->in_synchronous &&
	    (READ_ONCE(chan->in_fifo->fill) || !chan->read_data_ok))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Writable whenever the outbound FIFO has room */
	if (chan->out_ep &&
	    (READ_ONCE(chan->out_ep->fifo.fill) != chan->out_ep->fifo.size))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (chan->xdev->error)
		mask |= EPOLLERR;

	return mask;
}
1895 
/* File operations for the per-stream device nodes */
static const struct file_operations xillyusb_fops = {
	.owner      = THIS_MODULE,
	.read       = xillyusb_read,
	.write      = xillyusb_write,
	.open       = xillyusb_open,
	.flush      = xillyusb_flush,
	.release    = xillyusb_release,
	.llseek     = xillyusb_llseek,
	.poll       = xillyusb_poll,
};
1906 
/*
 * Set up the two endpoints used by the device as a whole: the message
 * (BULK OUT) endpoint for opcodes, and the single BULK IN endpoint
 * through which all upstream data arrives.
 */
static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
{
	xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
				      bulk_out_work, 1, 2);
	if (!xdev->msg_ep)
		return -ENOMEM;

	if (fifo_init(&xdev->msg_ep->fifo, 13)) /* 8 kiB */
		goto dealloc;

	xdev->msg_ep->fill_mask = -8; /* 8 bytes granularity */

	xdev->in_ep = endpoint_alloc(xdev, IN_EP_NUM | USB_DIR_IN,
				     bulk_in_work, BUF_SIZE_ORDER, BUFNUM);
	if (!xdev->in_ep)
		goto dealloc;

	/* Start listening on the BULK IN endpoint right away */
	try_queue_bulk_in(xdev->in_ep);

	return 0;

dealloc:
	endpoint_dealloc(xdev->msg_ep); /* Also frees FIFO mem if allocated */
	xdev->msg_ep = NULL;
	return -ENOMEM;
}
1933 
1934 static int setup_channels(struct xillyusb_dev *xdev,
1935 			  __le16 *chandesc,
1936 			  int num_channels)
1937 {
1938 	struct xillyusb_channel *chan;
1939 	int i;
1940 
1941 	chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
1942 	if (!chan)
1943 		return -ENOMEM;
1944 
1945 	xdev->channels = chan;
1946 
1947 	for (i = 0; i < num_channels; i++, chan++) {
1948 		unsigned int in_desc = le16_to_cpu(*chandesc++);
1949 		unsigned int out_desc = le16_to_cpu(*chandesc++);
1950 
1951 		chan->xdev = xdev;
1952 		mutex_init(&chan->in_mutex);
1953 		mutex_init(&chan->out_mutex);
1954 		mutex_init(&chan->lock);
1955 		init_waitqueue_head(&chan->flushq);
1956 
1957 		chan->chan_idx = i;
1958 
1959 		if (in_desc & 0x80) { /* Entry is valid */
1960 			chan->readable = 1;
1961 			chan->in_synchronous = !!(in_desc & 0x40);
1962 			chan->in_seekable = !!(in_desc & 0x20);
1963 			chan->in_log2_element_size = in_desc & 0x0f;
1964 			chan->in_log2_fifo_size = ((in_desc >> 8) & 0x1f) + 16;
1965 		}
1966 
1967 		/*
1968 		 * A downstream channel should never exist above index 13,
1969 		 * as it would request a nonexistent BULK endpoint > 15.
1970 		 * In the peculiar case that it does, it's ignored silently.
1971 		 */
1972 
1973 		if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
1974 			chan->writable = 1;
1975 			chan->out_synchronous = !!(out_desc & 0x40);
1976 			chan->out_seekable = !!(out_desc & 0x20);
1977 			chan->out_log2_element_size = out_desc & 0x0f;
1978 			chan->out_log2_fifo_size =
1979 				((out_desc >> 8) & 0x1f) + 16;
1980 		}
1981 	}
1982 
1983 	return 0;
1984 }
1985 
/*
 * Obtain the IDT (the device's self-description data: channel descriptors,
 * device file names and a CRC) and set up the channels accordingly.
 * Phase I fetches the IDT through one fake upstream channel; Phase II
 * allocates the real channels based on its contents.
 */
static int xillyusb_discovery(struct usb_interface *interface)
{
	int rc;
	struct xillyusb_dev *xdev = usb_get_intfdata(interface);
	__le16 bogus_chandesc[2];
	struct xillyfifo idt_fifo;
	struct xillyusb_channel *chan;
	unsigned int idt_len, names_offset;
	unsigned char *idt;
	int num_channels;

	rc = xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);

	if (rc) {
		dev_err(&interface->dev, "Failed to send quiesce request. Aborting.\n");
		return rc;
	}

	/* Phase I: Set up one fake upstream channel and obtain IDT */

	/* Set up a fake IDT with one async IN stream */
	bogus_chandesc[0] = cpu_to_le16(0x80);
	bogus_chandesc[1] = cpu_to_le16(0);

	rc = setup_channels(xdev, bogus_chandesc, 1);

	if (rc)
		return rc;

	rc = fifo_init(&idt_fifo, LOG2_IDT_FIFO_SIZE);

	if (rc)
		return rc;

	chan = xdev->channels;

	chan->in_fifo = &idt_fifo;
	chan->read_data_ok = 1;

	xdev->num_channels = 1;

	rc = xillyusb_send_opcode(xdev, ~0, OPCODE_REQ_IDT, 0);

	if (rc) {
		dev_err(&interface->dev, "Failed to send IDT request. Aborting.\n");
		goto unfifo;
	}

	/* read_data_ok is deasserted when the IDT has fully arrived (EOF) */
	rc = wait_event_interruptible_timeout(idt_fifo.waitq,
					      !chan->read_data_ok,
					      XILLY_RESPONSE_TIMEOUT);

	if (xdev->error) {
		rc = xdev->error;
		goto unfifo;
	}

	if (rc < 0) {
		rc = -EINTR; /* Interrupt on probe method? Interesting. */
		goto unfifo;
	}

	if (chan->read_data_ok) {
		rc = -ETIMEDOUT;
		dev_err(&interface->dev, "No response from FPGA. Aborting.\n");
		goto unfifo;
	}

	idt_len = READ_ONCE(idt_fifo.fill);
	idt = kmalloc(idt_len, GFP_KERNEL);

	if (!idt) {
		rc = -ENOMEM;
		goto unfifo;
	}

	fifo_read(&idt_fifo, idt, idt_len, xilly_memcpy);

	/* CRC over the whole IDT (including its trailing CRC) must be zero */
	if (crc32_le(~0, idt, idt_len) != 0) {
		dev_err(&interface->dev, "IDT failed CRC check. Aborting.\n");
		rc = -ENODEV;
		goto unidt;
	}

	if (*idt > 0x90) {
		dev_err(&interface->dev, "No support for IDT version 0x%02x. Maybe the xillyusb driver needs an upgrade. Aborting.\n",
			(int)*idt);
		rc = -ENODEV;
		goto unidt;
	}

	/* Phase II: Set up the streams as defined in IDT */

	num_channels = le16_to_cpu(*((__le16 *)(idt + 1)));
	names_offset = 3 + num_channels * 4;
	idt_len -= 4; /* Exclude CRC */

	if (idt_len < names_offset) {
		dev_err(&interface->dev, "IDT too short. This is exceptionally weird, because its CRC is OK\n");
		rc = -ENODEV;
		goto unidt;
	}

	rc = setup_channels(xdev, (void *)idt + 3, num_channels);

	if (rc)
		goto unidt;

	/*
	 * Except for wildly misbehaving hardware, or if it was disconnected
	 * just after responding with the IDT, there is no reason for any
	 * work item to be running now. To be sure that xdev->channels
	 * is updated on anything that might run in parallel, flush the
	 * workqueue, which rarely does anything.
	 */
	flush_workqueue(xdev->workq);

	xdev->num_channels = num_channels;

	/*
	 * Free the Phase I single-channel array; setup_channels() has made
	 * xdev->channels point at the Phase II allocation.
	 */
	fifo_mem_release(&idt_fifo);
	kfree(chan);

	rc = xillybus_init_chrdev(&interface->dev, &xillyusb_fops,
				  THIS_MODULE, xdev,
				  idt + names_offset,
				  idt_len - names_offset,
				  num_channels,
				  xillyname, true);

	kfree(idt);

	return rc;

unidt:
	kfree(idt);

unfifo:
	safely_assign_in_fifo(chan, NULL);
	fifo_mem_release(&idt_fifo);

	return rc;
}
2128 
/*
 * Probe: allocate and initialize the per-device structure, set up the base
 * endpoints and run the discovery sequence that creates the device files.
 */
static int xillyusb_probe(struct usb_interface *interface,
			  const struct usb_device_id *id)
{
	struct xillyusb_dev *xdev;
	int rc;

	xdev = kzalloc(sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	kref_init(&xdev->kref);
	mutex_init(&xdev->process_in_mutex);
	mutex_init(&xdev->msg_mutex);

	xdev->udev = usb_get_dev(interface_to_usbdev(interface));
	xdev->dev = &interface->dev;
	xdev->error = 0;
	spin_lock_init(&xdev->error_lock);
	xdev->in_counter = 0;
	xdev->in_bytes_left = 0;
	xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI, 0);

	if (!xdev->workq) {
		dev_err(&interface->dev, "Failed to allocate work queue\n");
		rc = -ENOMEM;
		goto fail;
	}

	INIT_WORK(&xdev->wakeup_workitem, wakeup_all);

	usb_set_intfdata(interface, xdev);

	rc = xillyusb_setup_base_eps(xdev);
	if (rc)
		goto fail;

	rc = xillyusb_discovery(interface);
	if (rc)
		goto latefail;

	return 0;

latefail:
	/* Endpoints exist at this point; stop their activity before teardown */
	endpoint_quiesce(xdev->in_ep);
	endpoint_quiesce(xdev->msg_ep);

fail:
	usb_set_intfdata(interface, NULL);
	kref_put(&xdev->kref, cleanup_dev);
	return rc;
}
2180 
/*
 * Disconnect: remove the device files, politely quiesce the FPGA (relevant
 * on module unload, when the hardware is still there), then stop all
 * endpoint activity and drop the probe-time reference.
 */
static void xillyusb_disconnect(struct usb_interface *interface)
{
	struct xillyusb_dev *xdev = usb_get_intfdata(interface);
	struct xillyusb_endpoint *msg_ep = xdev->msg_ep;
	struct xillyfifo *fifo = &msg_ep->fifo;
	int rc;
	int i;

	xillybus_cleanup_chrdev(xdev, &interface->dev);

	/*
	 * Try to send OPCODE_QUIESCE, which will fail silently if the device
	 * was disconnected, but makes sense on module unload.
	 */

	msg_ep->wake_on_drain = true;
	xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);

	/*
	 * If the device has been disconnected, sending the opcode causes
	 * a global device error with xdev->error, if such error didn't
	 * occur earlier. Hence timing out means that the USB link is fine,
	 * but somehow the message wasn't sent. Should never happen.
	 */

	rc = wait_event_interruptible_timeout(fifo->waitq,
					      msg_ep->drained || xdev->error,
					      XILLY_RESPONSE_TIMEOUT);

	if (!rc)
		dev_err(&interface->dev,
			"Weird timeout condition on sending quiesce request.\n");

	report_io_error(xdev, -ENODEV); /* Discourage further activity */

	/*
	 * This device driver is declared with soft_unbind set, or else
	 * sending OPCODE_QUIESCE above would always fail. The price is
	 * that the USB framework didn't kill outstanding URBs, so it has
	 * to be done explicitly before returning from this call.
	 */

	for (i = 0; i < xdev->num_channels; i++) {
		struct xillyusb_channel *chan = &xdev->channels[i];

		/*
		 * Lock taken to prevent chan->out_ep from changing. It also
		 * ensures xillyusb_open() and xillyusb_flush() don't access
		 * xdev->dev after being nullified below.
		 */
		mutex_lock(&chan->lock);
		if (chan->out_ep)
			endpoint_quiesce(chan->out_ep);
		mutex_unlock(&chan->lock);
	}

	endpoint_quiesce(xdev->in_ep);
	endpoint_quiesce(xdev->msg_ep);

	usb_set_intfdata(interface, NULL);

	xdev->dev = NULL;

	/* Drop the reference taken at probe; kref_mutex guards against open() */
	mutex_lock(&kref_mutex);
	kref_put(&xdev->kref, cleanup_dev);
	mutex_unlock(&kref_mutex);
}
2248 
/*
 * soft_unbind is set so that disconnect() can send OPCODE_QUIESCE before
 * outstanding URBs are killed (see comments in xillyusb_disconnect()).
 */
static struct usb_driver xillyusb_driver = {
	.name = xillyname,
	.id_table = xillyusb_table,
	.probe = xillyusb_probe,
	.disconnect = xillyusb_disconnect,
	.soft_unbind = 1,
};
2256 
2257 static int __init xillyusb_init(void)
2258 {
2259 	int rc = 0;
2260 
2261 	if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
2262 		fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
2263 	else
2264 		fifo_buf_order = 0;
2265 
2266 	rc = usb_register(&xillyusb_driver);
2267 
2268 	return rc;
2269 }
2270 
/* Module unload: unregister; disconnect() handles per-device teardown */
static void __exit xillyusb_exit(void)
{
	usb_deregister(&xillyusb_driver);
}

module_init(xillyusb_init);
module_exit(xillyusb_exit);
2278