// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/misc/xillybus_core.c
 *
 * Copyright 2011 Xillybus Ltd, http://xillybus.com
 *
 * Driver for the Xillybus FPGA/host framework.
 *
 * This driver interfaces with a special IP core in an FPGA, setting up
 * a pipe between a hardware FIFO in the programmable logic and a device
 * file in the host. The number of such pipes and their attributes are
 * set up on the logic. This driver detects these automatically and
 * creates the device files accordingly.
 */

#include <linux/list.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/crc32.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "xillybus.h"
#include "xillybus_class.h"

MODULE_DESCRIPTION("Xillybus core functions");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
MODULE_ALIAS("xillybus_core");
MODULE_LICENSE("GPL v2");

/* General timeout is 100 ms, rx timeout is 10 ms */
#define XILLY_RX_TIMEOUT (10*HZ/1000)
#define XILLY_TIMEOUT (100*HZ/1000)

#define fpga_msg_ctrl_reg              0x0008
#define fpga_dma_control_reg           0x0020
#define fpga_dma_bufno_reg             0x0024
#define fpga_dma_bufaddr_lowaddr_reg   0x0028
#define fpga_dma_bufaddr_highaddr_reg  0x002c
#define fpga_buf_ctrl_reg              0x0030
#define fpga_buf_offset_reg            0x0034
#define fpga_endian_reg                0x0040

#define XILLYMSG_OPCODE_RELEASEBUF 1
#define XILLYMSG_OPCODE_QUIESCEACK 2
#define XILLYMSG_OPCODE_FIFOEOF 3
#define XILLYMSG_OPCODE_FATAL_ERROR 4
#define XILLYMSG_OPCODE_NONEMPTY 5

static const char xillyname[] = "xillybus";

static struct workqueue_struct *xillybus_wq;

/*
 * Locking scheme: Mutexes protect invocations of character device methods.
 * If both locks are taken, wr_mutex is taken first, rd_mutex second.
 *
 * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the
 * buffers' end_offset fields against changes made by IRQ handler (and in
 * theory, other file request handlers, but the mutex handles that). Nothing
 * else.
 * They are held for short direct memory manipulations. Needless to say,
 * no mutex locking is allowed when a spinlock is held.
 *
 * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset.
 *
 * register_mutex is endpoint-specific, and is held when non-atomic
 * register operations are performed. wr_mutex and rd_mutex may be
 * held when register_mutex is taken, but none of the spinlocks. Note that
 * register_mutex doesn't protect against sporadic buf_ctrl_reg writes
 * which are unrelated to buf_offset_reg, since they are harmless.
 *
 * Blocking on the wait queues is allowed with mutexes held, but not with
 * spinlocks.
 *
 * Only interruptible blocking is allowed on mutexes and wait queues.
 *
 * All in all, the locking order goes (with skips allowed, of course):
 * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock
 */

static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf)
{
	int opcode;
	int msg_channel, msg_bufno, msg_data, msg_dir;

	opcode = (buf[0] >> 24) & 0xff;
	msg_dir = buf[0] & 1;
	msg_channel = (buf[0] >> 1) & 0x7ff;
	msg_bufno = (buf[0] >> 12) & 0x3ff;
	msg_data = buf[1] & 0xfffffff;

	dev_warn(endpoint->dev,
		 "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n",
		 opcode, msg_channel, msg_dir, msg_bufno, msg_data);
}

/*
 * xillybus_isr assumes the interrupt is allocated exclusively to it,
 * which is the natural case with MSI and several other hardware-oriented
 * interrupts. Sharing is not allowed.
 */

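/*
 * Each message from the FPGA occupies two 32-bit words in the message
 * buffer. Summary of the fields, as decoded below and in
 * malformed_message():
 *
 *  First word:  bit 0       - direction (1 = write channel)
 *               bits 11:1   - channel number
 *               bits 21:12  - buffer number
 *               bit 22      - "last message" flag
 *               bits 31:24  - opcode
 *  Second word: bits 27:0   - message data
 *               bits 31:28  - message counter, checked against msg_counter
 */
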
irqreturn_t xillybus_isr(int irq, void *data)
{
	struct xilly_endpoint *ep = data;
	u32 *buf;
	unsigned int buf_size;
	int i;
	int opcode;
	unsigned int msg_channel, msg_bufno, msg_data, msg_dir;
	struct xilly_channel *channel;

	buf = ep->msgbuf_addr;
	buf_size = ep->msg_buf_size/sizeof(u32);

	dma_sync_single_for_cpu(ep->dev, ep->msgbuf_dma_addr,
				ep->msg_buf_size, DMA_FROM_DEVICE);

	for (i = 0; i < buf_size; i += 2) {
		if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
			malformed_message(ep, &buf[i]);
			dev_warn(ep->dev,
				 "Sending a NACK on counter %x (instead of %x) on entry %d\n",
				 ((buf[i+1] >> 28) & 0xf),
				 ep->msg_counter,
				 i/2);

			if (++ep->failed_messages > 10) {
				dev_err(ep->dev,
					"Lost sync with interrupt messages. Stopping.\n");
			} else {
				dma_sync_single_for_device(ep->dev,
							   ep->msgbuf_dma_addr,
							   ep->msg_buf_size,
							   DMA_FROM_DEVICE);

				iowrite32(0x01,  /* Message NACK */
					  ep->registers + fpga_msg_ctrl_reg);
			}
			return IRQ_HANDLED;
		} else if (buf[i] & (1 << 22)) /* Last message */
			break;
	}

	if (i >= buf_size) {
		dev_err(ep->dev, "Bad interrupt message. Stopping.\n");
		return IRQ_HANDLED;
	}

	buf_size = i + 2;

	for (i = 0; i < buf_size; i += 2) { /* Scan through messages */
		opcode = (buf[i] >> 24) & 0xff;

		msg_dir = buf[i] & 1;
		msg_channel = (buf[i] >> 1) & 0x7ff;
		msg_bufno = (buf[i] >> 12) & 0x3ff;
		msg_data = buf[i+1] & 0xfffffff;

		switch (opcode) {
		case XILLYMSG_OPCODE_RELEASEBUF:
			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0)) {
				malformed_message(ep, &buf[i]);
				break;
			}

			channel = ep->channels[msg_channel];

			if (msg_dir) { /* Write channel */
				if (msg_bufno >= channel->num_wr_buffers) {
					malformed_message(ep, &buf[i]);
					break;
				}
				spin_lock(&channel->wr_spinlock);
				channel->wr_buffers[msg_bufno]->end_offset =
					msg_data;
				channel->wr_fpga_buf_idx = msg_bufno;
				channel->wr_empty = 0;
				channel->wr_sleepy = 0;
				spin_unlock(&channel->wr_spinlock);

				wake_up_interruptible(&channel->wr_wait);

			} else {
				/* Read channel */

				if (msg_bufno >= channel->num_rd_buffers) {
					malformed_message(ep, &buf[i]);
					break;
				}

				spin_lock(&channel->rd_spinlock);
				channel->rd_fpga_buf_idx = msg_bufno;
				channel->rd_full = 0;
				spin_unlock(&channel->rd_spinlock);

				wake_up_interruptible(&channel->rd_wait);
				if (!channel->rd_synchronous)
					queue_delayed_work(
						xillybus_wq,
						&channel->rd_workitem,
						XILLY_RX_TIMEOUT);
			}

			break;
		case XILLYMSG_OPCODE_NONEMPTY:
			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0) || (!msg_dir) ||
			    !ep->channels[msg_channel]->wr_supports_nonempty) {
				malformed_message(ep, &buf[i]);
				break;
			}

			channel = ep->channels[msg_channel];

			if (msg_bufno >= channel->num_wr_buffers) {
				malformed_message(ep, &buf[i]);
				break;
			}
			spin_lock(&channel->wr_spinlock);
			if (msg_bufno == channel->wr_host_buf_idx)
				channel->wr_ready = 1;
			spin_unlock(&channel->wr_spinlock);

			wake_up_interruptible(&channel->wr_ready_wait);

			break;
		case XILLYMSG_OPCODE_QUIESCEACK:
			ep->idtlen = msg_data;
			wake_up_interruptible(&ep->ep_wait);

			break;
		case XILLYMSG_OPCODE_FIFOEOF:
			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0) || (!msg_dir) ||
			    !ep->channels[msg_channel]->num_wr_buffers) {
				malformed_message(ep, &buf[i]);
				break;
			}
			channel = ep->channels[msg_channel];
			spin_lock(&channel->wr_spinlock);
			channel->wr_eof = msg_bufno;
			channel->wr_sleepy = 0;

			channel->wr_hangup = channel->wr_empty &&
				(channel->wr_host_buf_idx == msg_bufno);

			spin_unlock(&channel->wr_spinlock);

			wake_up_interruptible(&channel->wr_wait);

			break;
		case XILLYMSG_OPCODE_FATAL_ERROR:
			ep->fatal_error = 1;
			wake_up_interruptible(&ep->ep_wait); /* For select() */
			dev_err(ep->dev,
				"FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n");
			break;
		default:
			malformed_message(ep, &buf[i]);
			break;
		}
	}

	dma_sync_single_for_device(ep->dev, ep->msgbuf_dma_addr,
				   ep->msg_buf_size, DMA_FROM_DEVICE);

	ep->msg_counter = (ep->msg_counter + 1) & 0xf;
	ep->failed_messages = 0;
	iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(xillybus_isr);

/*
 * A few trivial memory management functions.
 * NOTE: These functions are used only on probe and remove, and therefore
 * no locks are applied!
 */

static void xillybus_autoflush(struct work_struct *work);

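/*
 * xilly_alloc_state tracks how DMA buffers are carved out of larger page
 * allocations ("salami slicing") in xilly_get_dma_buffers() below: @salami
 * points at the unused remainder of the current allocation, @left_of_salami
 * counts the bytes left in it, @nbuffer is the next buffer index written to
 * fpga_dma_bufno_reg, and @direction/@regdirection hold the DMA direction
 * and the matching direction bit for that register, as initialized in
 * xilly_setupchannels().
 */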
struct xilly_alloc_state {
	void *salami;
	int left_of_salami;
	int nbuffer;
	enum dma_data_direction direction;
	u32 regdirection;
};

static void xilly_unmap(void *ptr)
{
	struct xilly_mapping *data = ptr;

	dma_unmap_single(data->device, data->dma_addr,
			 data->size, data->direction);

	kfree(ptr);
}

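/*
 * xilly_map_single() streaming-maps @ptr and registers xilly_unmap() as a
 * devm action, so the mapping is torn down automatically when the device
 * is released; callers in this file never unmap explicitly.
 */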
static int xilly_map_single(struct xilly_endpoint *ep,
			    void *ptr,
			    size_t size,
			    int direction,
			    dma_addr_t *ret_dma_handle
	)
{
	dma_addr_t addr;
	struct xilly_mapping *this;

	this = kzalloc(sizeof(*this), GFP_KERNEL);
	if (!this)
		return -ENOMEM;

	addr = dma_map_single(ep->dev, ptr, size, direction);

	if (dma_mapping_error(ep->dev, addr)) {
		kfree(this);
		return -ENODEV;
	}

	this->device = ep->dev;
	this->dma_addr = addr;
	this->size = size;
	this->direction = direction;

	*ret_dma_handle = addr;

	return devm_add_action_or_reset(ep->dev, xilly_unmap, this);
}

static int xilly_get_dma_buffers(struct xilly_endpoint *ep,
				 struct xilly_alloc_state *s,
				 struct xilly_buffer **buffers,
				 int bufnum, int bytebufsize)
{
	int i, rc;
	dma_addr_t dma_addr;
	struct device *dev = ep->dev;
	struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */

	if (buffers) { /* Not the message buffer */
		this_buffer = devm_kcalloc(dev, bufnum,
					   sizeof(struct xilly_buffer),
					   GFP_KERNEL);
		if (!this_buffer)
			return -ENOMEM;
	}

	for (i = 0; i < bufnum; i++) {
		/*
		 * Buffers are expected in descending size order, so there
		 * is either enough space for this buffer or none at all.
		 */

		if ((s->left_of_salami < bytebufsize) &&
		    (s->left_of_salami > 0)) {
			dev_err(ep->dev,
				"Corrupt buffer allocation in IDT. Aborting.\n");
			return -ENODEV;
		}

		if (s->left_of_salami == 0) {
			int allocorder, allocsize;

			allocsize = PAGE_SIZE;
			allocorder = 0;
			while (bytebufsize > allocsize) {
				allocsize *= 2;
				allocorder++;
			}

			s->salami = (void *) devm_get_free_pages(
				dev,
				GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO,
				allocorder);
			if (!s->salami)
				return -ENOMEM;

			s->left_of_salami = allocsize;
		}

		rc = xilly_map_single(ep, s->salami,
				      bytebufsize, s->direction,
				      &dma_addr);
		if (rc)
			return rc;

		iowrite32((u32) (dma_addr & 0xffffffff),
			  ep->registers + fpga_dma_bufaddr_lowaddr_reg);
		iowrite32(((u32) ((((u64) dma_addr) >> 32) & 0xffffffff)),
			  ep->registers + fpga_dma_bufaddr_highaddr_reg);

		if (buffers) { /* Not the message buffer */
			this_buffer->addr = s->salami;
			this_buffer->dma_addr = dma_addr;
			buffers[i] = this_buffer++;

			iowrite32(s->regdirection | s->nbuffer++,
				  ep->registers + fpga_dma_bufno_reg);
		} else {
			ep->msgbuf_addr = s->salami;
			ep->msgbuf_dma_addr = dma_addr;
			ep->msg_buf_size = bytebufsize;

			iowrite32(s->regdirection,
				  ep->registers + fpga_dma_bufno_reg);
		}

		s->left_of_salami -= bytebufsize;
		s->salami += bytebufsize;
	}
	return 0;
}

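/*
 * xilly_setupchannels() sets up the xilly_channel structures according to
 * the channel descriptor table in the IDT, and allocates their DMA buffers.
 * The entry with is_writebuf set and channel number 0 describes the message
 * buffer used by the ISR rather than a data channel.
 */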
static int xilly_setupchannels(struct xilly_endpoint *ep,
			       unsigned char *chandesc,
			       int entries)
{
	struct device *dev = ep->dev;
	int i, entry, rc;
	struct xilly_channel *channel;
	int channelnum, bufnum, bufsize, format, is_writebuf;
	int bytebufsize;
	int synchronous, allowpartial, exclusive_open, seekable;
	int supports_nonempty;
	int msg_buf_done = 0;

	struct xilly_alloc_state rd_alloc = {
		.salami = NULL,
		.left_of_salami = 0,
		.nbuffer = 1,
		.direction = DMA_TO_DEVICE,
		.regdirection = 0,
	};

	struct xilly_alloc_state wr_alloc = {
		.salami = NULL,
		.left_of_salami = 0,
		.nbuffer = 1,
		.direction = DMA_FROM_DEVICE,
		.regdirection = 0x80000000,
	};

	channel = devm_kcalloc(dev, ep->num_channels,
			       sizeof(struct xilly_channel), GFP_KERNEL);
	if (!channel)
		return -ENOMEM;

	ep->channels = devm_kcalloc(dev, ep->num_channels + 1,
				    sizeof(struct xilly_channel *),
				    GFP_KERNEL);
	if (!ep->channels)
		return -ENOMEM;

	ep->channels[0] = NULL; /* Channel 0 is message buf. */

	/* Initialize all channels with defaults */

	for (i = 1; i <= ep->num_channels; i++) {
		channel->wr_buffers = NULL;
		channel->rd_buffers = NULL;
		channel->num_wr_buffers = 0;
		channel->num_rd_buffers = 0;
		channel->wr_fpga_buf_idx = -1;
		channel->wr_host_buf_idx = 0;
		channel->wr_host_buf_pos = 0;
		channel->wr_empty = 1;
		channel->wr_ready = 0;
		channel->wr_sleepy = 1;
		channel->rd_fpga_buf_idx = 0;
		channel->rd_host_buf_idx = 0;
		channel->rd_host_buf_pos = 0;
		channel->rd_full = 0;
		channel->wr_ref_count = 0;
		channel->rd_ref_count = 0;

		spin_lock_init(&channel->wr_spinlock);
		spin_lock_init(&channel->rd_spinlock);
		mutex_init(&channel->wr_mutex);
		mutex_init(&channel->rd_mutex);
		init_waitqueue_head(&channel->rd_wait);
		init_waitqueue_head(&channel->wr_wait);
		init_waitqueue_head(&channel->wr_ready_wait);

		INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush);

		channel->endpoint = ep;
		channel->chan_num = i;

		channel->log2_element_size = 0;

		ep->channels[i] = channel++;
	}

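	/*
	 * Each 4-byte channel descriptor is decoded below as follows:
	 * chandesc[0] bit 0: is_writebuf; chandesc[0] bits 7:1 together with
	 * chandesc[1] bits 3:0: channel number; chandesc[1] bits 5:4: format
	 * (log2 element size, clipped to 2), bit 6: allowpartial, bit 7:
	 * synchronous; chandesc[2] bits 4:0: log2 of the buffer size in
	 * elements, bit 5: supports_nonempty, bit 6: seekable, bit 7:
	 * exclusive_open; chandesc[3] bits 3:0: log2 of the number of
	 * buffers.
	 */
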
	for (entry = 0; entry < entries; entry++, chandesc += 4) {
		struct xilly_buffer **buffers = NULL;

		is_writebuf = chandesc[0] & 0x01;
		channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7);
		format = (chandesc[1] >> 4) & 0x03;
		allowpartial = (chandesc[1] >> 6) & 0x01;
		synchronous = (chandesc[1] >> 7) & 0x01;
		bufsize = 1 << (chandesc[2] & 0x1f);
		bufnum = 1 << (chandesc[3] & 0x0f);
		exclusive_open = (chandesc[2] >> 7) & 0x01;
		seekable = (chandesc[2] >> 6) & 0x01;
		supports_nonempty = (chandesc[2] >> 5) & 0x01;

		if ((channelnum > ep->num_channels) ||
		    ((channelnum == 0) && !is_writebuf)) {
			dev_err(ep->dev,
				"IDT requests channel out of range. Aborting.\n");
			return -ENODEV;
		}

		channel = ep->channels[channelnum]; /* NULL for msg channel */

		if (!is_writebuf || channelnum > 0) {
			channel->log2_element_size = ((format > 2) ?
						      2 : format);

			bytebufsize = bufsize *
				(1 << channel->log2_element_size);

			buffers = devm_kcalloc(dev, bufnum,
					       sizeof(struct xilly_buffer *),
					       GFP_KERNEL);
			if (!buffers)
				return -ENOMEM;
		} else {
			bytebufsize = bufsize << 2;
		}

		if (!is_writebuf) {
			channel->num_rd_buffers = bufnum;
			channel->rd_buf_size = bytebufsize;
			channel->rd_allow_partial = allowpartial;
			channel->rd_synchronous = synchronous;
			channel->rd_exclusive_open = exclusive_open;
			channel->seekable = seekable;

			channel->rd_buffers = buffers;
			rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers,
						   bufnum, bytebufsize);
		} else if (channelnum > 0) {
			channel->num_wr_buffers = bufnum;
			channel->wr_buf_size = bytebufsize;

			channel->seekable = seekable;
			channel->wr_supports_nonempty = supports_nonempty;

			channel->wr_allow_partial = allowpartial;
			channel->wr_synchronous = synchronous;
			channel->wr_exclusive_open = exclusive_open;

			channel->wr_buffers = buffers;
			rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers,
						   bufnum, bytebufsize);
		} else {
			rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL,
						   bufnum, bytebufsize);
			msg_buf_done++;
		}

		if (rc)
			return -ENOMEM;
	}

	if (!msg_buf_done) {
		dev_err(ep->dev,
			"Corrupt IDT: No message buffer. Aborting.\n");
		return -ENODEV;
	}
	return 0;
}

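/*
 * The IDT, as parsed by xilly_scan_idt() and xilly_obtain_idt() below,
 * starts with a version byte, followed by a list of NUL-terminated device
 * name strings (ended by an empty string) and then the 4-byte channel
 * descriptor entries; a CRC over the whole blob is verified in
 * xilly_obtain_idt().
 */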
static int xilly_scan_idt(struct xilly_endpoint *endpoint,
			  struct xilly_idt_handle *idt_handle)
{
	int count = 0;
	unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr;
	unsigned char *end_of_idt = idt + endpoint->idtlen - 4;
	unsigned char *scan;
	int len;

	scan = idt + 1;
	idt_handle->names = scan;

	while ((scan <= end_of_idt) && *scan) {
		while ((scan <= end_of_idt) && *scan++)
			/* Do nothing, just scan thru string */;
		count++;
	}

	idt_handle->names_len = scan - idt_handle->names;

	scan++;

	if (scan > end_of_idt) {
		dev_err(endpoint->dev,
			"IDT device name list overflow. Aborting.\n");
		return -ENODEV;
	}
	idt_handle->chandesc = scan;

	len = endpoint->idtlen - (3 + ((int) (scan - idt)));

	if (len & 0x03) {
		dev_err(endpoint->dev,
			"Corrupt IDT device name list. Aborting.\n");
		return -ENODEV;
	}

	idt_handle->entries = len >> 2;
	endpoint->num_channels = count;

	return 0;
}

static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
{
	struct xilly_channel *channel;
	unsigned char *version;
	long t;

	channel = endpoint->channels[1]; /* This should be generated ad-hoc */

	channel->wr_sleepy = 1;

	iowrite32(1 |
		  (3 << 24), /* Opcode 3 for channel 0 = Send IDT */
		  endpoint->registers + fpga_buf_ctrl_reg);

	t = wait_event_interruptible_timeout(channel->wr_wait,
					     (!channel->wr_sleepy),
					     XILLY_TIMEOUT);

	if (t <= 0) {
		dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n");

		if (endpoint->fatal_error)
			return -EIO;

		return -ENODEV;
	}

	dma_sync_single_for_cpu(channel->endpoint->dev,
				channel->wr_buffers[0]->dma_addr,
				channel->wr_buf_size,
				DMA_FROM_DEVICE);

	if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
		dev_err(endpoint->dev,
			"IDT length mismatch (%d != %d). Aborting.\n",
			channel->wr_buffers[0]->end_offset, endpoint->idtlen);
		return -ENODEV;
	}

	if (crc32_le(~0, channel->wr_buffers[0]->addr,
		     endpoint->idtlen+1) != 0) {
		dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n");
		return -ENODEV;
	}

	version = channel->wr_buffers[0]->addr;

	/* Check version number. Reject anything above 0x82. */
	if (*version > 0x82) {
		dev_err(endpoint->dev,
			"No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgrade. Aborting.\n",
			*version);
		return -ENODEV;
	}

	return 0;
}

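/*
 * Note that read() is served from the wr_* fields and buffers: the wr_/rd_
 * naming throughout this driver follows the FPGA's side of the stream, so
 * a "write channel" carries data from the FPGA to the host (and is read by
 * userspace), while a "read channel" carries data from the host to the FPGA.
 */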
static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
			     size_t count, loff_t *f_pos)
{
	ssize_t rc;
	unsigned long flags;
	int bytes_done = 0;
	int no_time_left = 0;
	long deadline, left_to_sleep;
	struct xilly_channel *channel = filp->private_data;

	int empty, reached_eof, exhausted, ready;
	/* Initializations are there only to silence warnings */

	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
	int waiting_bufidx;

	if (channel->endpoint->fatal_error)
		return -EIO;

	deadline = jiffies + 1 + XILLY_RX_TIMEOUT;

	rc = mutex_lock_interruptible(&channel->wr_mutex);
	if (rc)
		return rc;

	while (1) { /* Note that we may drop mutex within this loop */
		int bytes_to_do = count - bytes_done;

		spin_lock_irqsave(&channel->wr_spinlock, flags);

		empty = channel->wr_empty;
		ready = !empty || channel->wr_ready;

		if (!empty) {
			bufidx = channel->wr_host_buf_idx;
			bufpos = channel->wr_host_buf_pos;
			howmany = ((channel->wr_buffers[bufidx]->end_offset
				    + 1) << channel->log2_element_size)
				- bufpos;

			/* Update wr_host_* to its post-operation state */
			if (howmany > bytes_to_do) {
				bufferdone = 0;

				howmany = bytes_to_do;
				channel->wr_host_buf_pos += howmany;
			} else {
				bufferdone = 1;

				channel->wr_host_buf_pos = 0;

				if (bufidx == channel->wr_fpga_buf_idx) {
					channel->wr_empty = 1;
					channel->wr_sleepy = 1;
					channel->wr_ready = 0;
				}

				if (bufidx >= (channel->num_wr_buffers - 1))
					channel->wr_host_buf_idx = 0;
				else
					channel->wr_host_buf_idx++;
			}
		}

		/*
		 * Marking our situation after the possible changes above,
		 * for use after releasing the spinlock.
		 *
		 * empty = empty before change
		 * exhausted = empty after possible change
		 */

		reached_eof = channel->wr_empty &&
			(channel->wr_host_buf_idx == channel->wr_eof);
		channel->wr_hangup = reached_eof;
		exhausted = channel->wr_empty;
		waiting_bufidx = channel->wr_host_buf_idx;

		spin_unlock_irqrestore(&channel->wr_spinlock, flags);

		if (!empty) { /* Go on, now without the spinlock */

			if (bufpos == 0) /* Position zero means it's virgin */
				dma_sync_single_for_cpu(channel->endpoint->dev,
							channel->wr_buffers[bufidx]->dma_addr,
							channel->wr_buf_size,
							DMA_FROM_DEVICE);

			if (copy_to_user(
				    userbuf,
				    channel->wr_buffers[bufidx]->addr
				    + bufpos, howmany))
				rc = -EFAULT;

			userbuf += howmany;
			bytes_done += howmany;

			if (bufferdone) {
				dma_sync_single_for_device(channel->endpoint->dev,
							   channel->wr_buffers[bufidx]->dma_addr,
							   channel->wr_buf_size,
							   DMA_FROM_DEVICE);

				/*
				 * Tell FPGA the buffer is done with. It's an
				 * atomic operation to the FPGA, so what
				 * happens with other channels doesn't matter,
				 * and this particular channel is protected by
				 * the channel-specific mutex.
				 */

				iowrite32(1 | (channel->chan_num << 1) |
					  (bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);
			}

			if (rc) {
				mutex_unlock(&channel->wr_mutex);
				return rc;
			}
		}

		/* This includes a zero-count return = EOF */
		if ((bytes_done >= count) || reached_eof)
			break;

		if (!exhausted)
			continue; /* More in RAM buffer(s)? Just go on. */

		if ((bytes_done > 0) &&
		    (no_time_left ||
		     (channel->wr_synchronous && channel->wr_allow_partial)))
			break;

		/*
		 * Nonblocking read: The "ready" flag tells us that the FPGA
		 * has data to send. In non-blocking mode, if it isn't on,
		 * just return. But if there is, we jump directly to the point
		 * where we ask for the FPGA to send all it has, and wait
		 * until that data arrives. So in a sense, we *do* block in
		 * nonblocking mode, but only for a very short time.
		 */

		if (!no_time_left && (filp->f_flags & O_NONBLOCK)) {
			if (bytes_done > 0)
				break;

			if (ready)
				goto desperate;

			rc = -EAGAIN;
			break;
		}

		if (!no_time_left || (bytes_done > 0)) {
			/*
			 * Note that in case of an element-misaligned read
			 * request, offsetlimit will include the last element,
			 * which will be partially read from.
			 */
			int offsetlimit = ((count - bytes_done) - 1) >>
				channel->log2_element_size;
			int buf_elements = channel->wr_buf_size >>
				channel->log2_element_size;

			/*
			 * In synchronous mode, always send an offset limit.
			 * Just don't send a value too big.
			 */

			if (channel->wr_synchronous) {
				/* Don't request more than one buffer */
				if (channel->wr_allow_partial &&
				    (offsetlimit >= buf_elements))
					offsetlimit = buf_elements - 1;

				/* Don't request more than all buffers */
				if (!channel->wr_allow_partial &&
				    (offsetlimit >=
				     (buf_elements * channel->num_wr_buffers)))
					offsetlimit = buf_elements *
						channel->num_wr_buffers - 1;
			}

			/*
			 * In asynchronous mode, force early flush of a buffer
			 * only if that will allow returning a full count. The
			 * "offsetlimit < ( ... )" rather than "<=" excludes
			 * requesting a full buffer, which would obviously
			 * cause a buffer transmission anyhow
			 */

			if (channel->wr_synchronous ||
			    (offsetlimit < (buf_elements - 1))) {
				mutex_lock(&channel->endpoint->register_mutex);

				iowrite32(offsetlimit,
					  channel->endpoint->registers +
					  fpga_buf_offset_reg);

				iowrite32(1 | (channel->chan_num << 1) |
					  (2 << 24) |  /* 2 = offset limit */
					  (waiting_bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);

				mutex_unlock(&channel->endpoint->
					     register_mutex);
			}
		}

		/*
		 * If partial completion is disallowed, there is no point in
		 * timeout sleeping. Neither if no_time_left is set and
		 * there's no data.
		 */

		if (!channel->wr_allow_partial ||
		    (no_time_left && (bytes_done == 0))) {
			/*
			 * This do-loop will run more than once if another
			 * thread reasserted wr_sleepy before we got the mutex
			 * back, so we try again.
			 */

			do {
				mutex_unlock(&channel->wr_mutex);

				if (wait_event_interruptible(
					    channel->wr_wait,
					    (!channel->wr_sleepy)))
					goto interrupted;

				if (mutex_lock_interruptible(
					    &channel->wr_mutex))
					goto interrupted;
			} while (channel->wr_sleepy);

			continue;

interrupted: /* Mutex is not held if got here */
			if (channel->endpoint->fatal_error)
				return -EIO;
			if (bytes_done)
				return bytes_done;
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN; /* Don't admit snoozing */
			return -EINTR;
		}

		left_to_sleep = deadline - ((long) jiffies);

		/*
		 * If our time is out, skip the waiting. We may miss wr_sleepy
		 * being deasserted but hey, almost missing the train is like
		 * missing it.
		 */

		if (left_to_sleep > 0) {
			left_to_sleep =
				wait_event_interruptible_timeout(
					channel->wr_wait,
					(!channel->wr_sleepy),
					left_to_sleep);

			if (left_to_sleep > 0) /* wr_sleepy deasserted */
				continue;

			if (left_to_sleep < 0) { /* Interrupt */
				mutex_unlock(&channel->wr_mutex);
				if (channel->endpoint->fatal_error)
					return -EIO;
				if (bytes_done)
					return bytes_done;
				return -EINTR;
			}
		}

desperate:
		no_time_left = 1; /* We're out of sleeping time. Desperate! */

		if (bytes_done == 0) {
			/*
			 * Reaching here means that we allow partial return,
			 * that we've run out of time, and that we have
			 * nothing to return.
			 * So tell the FPGA to send anything it has or gets.
			 */

			iowrite32(1 | (channel->chan_num << 1) |
				  (3 << 24) |  /* Opcode 3, flush it all! */
				  (waiting_bufidx << 12),
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}

		/*
		 * Reaching here means that we *do* have data in the buffer,
		 * but the "partial" flag disallows returning less than
		 * required. And we don't have as much. So loop again,
		 * which is likely to end up blocking indefinitely until
		 * enough data has arrived.
		 */
	}

	mutex_unlock(&channel->wr_mutex);

	if (channel->endpoint->fatal_error)
		return -EIO;

	if (rc)
		return rc;

	return bytes_done;
}

/*
 * The timeout argument takes values as follows:
 *  >0 : Flush with timeout
 * ==0 : Flush, and wait indefinitely for the flush to complete
 *  <0 : Autoflush: Flush only if there's a single buffer occupied
 */

static int xillybus_myflush(struct xilly_channel *channel, long timeout)
{
	int rc;
	unsigned long flags;

	int end_offset_plus1;
	int bufidx, bufidx_minus1;
	int i;
	int empty;
	int new_rd_host_buf_pos;

	if (channel->endpoint->fatal_error)
		return -EIO;
	rc = mutex_lock_interruptible(&channel->rd_mutex);
	if (rc)
		return rc;

	/*
	 * Don't flush a closed channel. This can happen when the work queued
	 * autoflush thread fires off after the file has closed. This is not
	 * an error, just something to dismiss.
	 */

	if (!channel->rd_ref_count)
		goto done;

	bufidx = channel->rd_host_buf_idx;

	bufidx_minus1 = (bufidx == 0) ?
		channel->num_rd_buffers - 1 :
		bufidx - 1;

	end_offset_plus1 = channel->rd_host_buf_pos >>
		channel->log2_element_size;

	new_rd_host_buf_pos = channel->rd_host_buf_pos -
		(end_offset_plus1 << channel->log2_element_size);

	/* Submit the current buffer if it's nonempty */
	if (end_offset_plus1) {
		unsigned char *tail = channel->rd_buffers[bufidx]->addr +
			(end_offset_plus1 << channel->log2_element_size);

		/* Copy unflushed data, so we can put it in next buffer */
		for (i = 0; i < new_rd_host_buf_pos; i++)
			channel->rd_leftovers[i] = *tail++;

10617051924fSEli Billauer 		spin_lock_irqsave(&channel->rd_spinlock, flags);
10627051924fSEli Billauer 
10637051924fSEli Billauer 		/* Autoflush only if a single buffer is occupied */
10647051924fSEli Billauer 
10657051924fSEli Billauer 		if ((timeout < 0) &&
10667051924fSEli Billauer 		    (channel->rd_full ||
10677051924fSEli Billauer 		     (bufidx_minus1 != channel->rd_fpga_buf_idx))) {
10687051924fSEli Billauer 			spin_unlock_irqrestore(&channel->rd_spinlock, flags);
10697051924fSEli Billauer 			/*
10707051924fSEli Billauer 			 * A new work item may be queued by the ISR exactly
10717051924fSEli Billauer 			 * now, since the execution of a work item allows the
10727051924fSEli Billauer 			 * queuing of a new one while it's running.
10737051924fSEli Billauer 			 */
10747051924fSEli Billauer 			goto done;
10757051924fSEli Billauer 		}
10767051924fSEli Billauer 
10777051924fSEli Billauer 		/* The 4th element is never needed for data, so it's a flag */
10787051924fSEli Billauer 		channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0);
10797051924fSEli Billauer 
10807051924fSEli Billauer 		/* Set up rd_full to reflect a certain moment's state */
10817051924fSEli Billauer 
10827051924fSEli Billauer 		if (bufidx == channel->rd_fpga_buf_idx)
10837051924fSEli Billauer 			channel->rd_full = 1;
10847051924fSEli Billauer 		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
10857051924fSEli Billauer 
10867051924fSEli Billauer 		if (bufidx >= (channel->num_rd_buffers - 1))
10877051924fSEli Billauer 			channel->rd_host_buf_idx = 0;
10887051924fSEli Billauer 		else
10897051924fSEli Billauer 			channel->rd_host_buf_idx++;
10907051924fSEli Billauer 
1091*c31bbc14SEli Billauer 		dma_sync_single_for_device(channel->endpoint->dev,
10927051924fSEli Billauer 					   channel->rd_buffers[bufidx]->dma_addr,
10937051924fSEli Billauer 					   channel->rd_buf_size,
10947051924fSEli Billauer 					   DMA_TO_DEVICE);
10957051924fSEli Billauer 
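		/*
		 * Submitting a buffer takes two register writes: the index of
		 * the last element to fpga_buf_offset_reg, then the command
		 * word to fpga_buf_ctrl_reg. register_mutex apparently keeps
		 * this pair atomic with respect to other channels.
		 */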
10967051924fSEli Billauer 		mutex_lock(&channel->endpoint->register_mutex);
10977051924fSEli Billauer 
10987051924fSEli Billauer 		iowrite32(end_offset_plus1 - 1,
10997051924fSEli Billauer 			  channel->endpoint->registers + fpga_buf_offset_reg);
11007051924fSEli Billauer 
11017051924fSEli Billauer 		iowrite32((channel->chan_num << 1) | /* Channel ID */
11027051924fSEli Billauer 			  (2 << 24) |  /* Opcode 2, submit buffer */
11037051924fSEli Billauer 			  (bufidx << 12),
11047051924fSEli Billauer 			  channel->endpoint->registers + fpga_buf_ctrl_reg);
11057051924fSEli Billauer 
11067051924fSEli Billauer 		mutex_unlock(&channel->endpoint->register_mutex);
11077051924fSEli Billauer 	} else if (bufidx == 0) {
11087051924fSEli Billauer 		bufidx = channel->num_rd_buffers - 1;
11097051924fSEli Billauer 	} else {
11107051924fSEli Billauer 		bufidx--;
11117051924fSEli Billauer 	}
11127051924fSEli Billauer 
11137051924fSEli Billauer 	channel->rd_host_buf_pos = new_rd_host_buf_pos;
11147051924fSEli Billauer 
11157051924fSEli Billauer 	if (timeout < 0)
11167051924fSEli Billauer 		goto done; /* Autoflush */
11177051924fSEli Billauer 
11187051924fSEli Billauer 	/*
11197051924fSEli Billauer 	 * bufidx is now the last buffer written to (or equal to
11207051924fSEli Billauer 	 * rd_fpga_buf_idx if the buffer was never written to), and
11217051924fSEli Billauer 	 * channel->rd_host_buf_idx the one after it.
11227051924fSEli Billauer 	 *
11237051924fSEli Billauer 	 * If bufidx == channel->rd_fpga_buf_idx we're either empty or full.
11247051924fSEli Billauer 	 */
11257051924fSEli Billauer 
11267051924fSEli Billauer 	while (1) { /* Loop waiting for draining of buffers */
11277051924fSEli Billauer 		spin_lock_irqsave(&channel->rd_spinlock, flags);
11287051924fSEli Billauer 
11297051924fSEli Billauer 		if (bufidx != channel->rd_fpga_buf_idx)
11307051924fSEli Billauer 			channel->rd_full = 1; /*
11317051924fSEli Billauer 					       * Not really full,
11327051924fSEli Billauer 					       * but needs waiting.
11337051924fSEli Billauer 					       */
11347051924fSEli Billauer 
11357051924fSEli Billauer 		empty = !channel->rd_full;
11367051924fSEli Billauer 
11377051924fSEli Billauer 		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
11387051924fSEli Billauer 
11397051924fSEli Billauer 		if (empty)
11407051924fSEli Billauer 			break;
11417051924fSEli Billauer 
11427051924fSEli Billauer 		/*
11437051924fSEli Billauer 		 * Indefinite sleep with mutex taken. With data waiting for
11447051924fSEli Billauer 		 * flushing, user should not be surprised if open() for write
11457051924fSEli Billauer 		 * sleeps.
11467051924fSEli Billauer 		 */
11477051924fSEli Billauer 		if (timeout == 0)
11487051924fSEli Billauer 			wait_event_interruptible(channel->rd_wait,
11497051924fSEli Billauer 						 (!channel->rd_full));
11507051924fSEli Billauer 
11517051924fSEli Billauer 		else if (wait_event_interruptible_timeout(
11527051924fSEli Billauer 				 channel->rd_wait,
11537051924fSEli Billauer 				 (!channel->rd_full),
11547051924fSEli Billauer 				 timeout) == 0) {
11557051924fSEli Billauer 			dev_warn(channel->endpoint->dev,
11567051924fSEli Billauer 				 "Timed out while flushing. Output data may be lost.\n");
11577051924fSEli Billauer 
11587051924fSEli Billauer 			rc = -ETIMEDOUT;
11597051924fSEli Billauer 			break;
11607051924fSEli Billauer 		}
11617051924fSEli Billauer 
11627051924fSEli Billauer 		if (channel->rd_full) {
11637051924fSEli Billauer 			rc = -EINTR;
11647051924fSEli Billauer 			break;
11657051924fSEli Billauer 		}
11667051924fSEli Billauer 	}
11677051924fSEli Billauer 
11687051924fSEli Billauer done:
11697051924fSEli Billauer 	mutex_unlock(&channel->rd_mutex);
11707051924fSEli Billauer 
11717051924fSEli Billauer 	if (channel->endpoint->fatal_error)
11727051924fSEli Billauer 		return -EIO;
11737051924fSEli Billauer 
11747051924fSEli Billauer 	return rc;
11757051924fSEli Billauer }
11767051924fSEli Billauer 
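/*
 * The kernel calls flush() when a file descriptor is closed. Only the
 * write direction (the "rd" side, which the FPGA reads from) has host
 * buffers to push out, hence the FMODE_WRITE check; the one-second
 * timeout bounds the wait for the FPGA to drain.
 */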
11777051924fSEli Billauer static int xillybus_flush(struct file *filp, fl_owner_t id)
11787051924fSEli Billauer {
11797051924fSEli Billauer 	if (!(filp->f_mode & FMODE_WRITE))
11807051924fSEli Billauer 		return 0;
11817051924fSEli Billauer 
11827051924fSEli Billauer 	return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */
11837051924fSEli Billauer }
11847051924fSEli Billauer 
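/*
 * Delayed work handler for asynchronous write channels: it calls
 * xillybus_myflush() with a negative timeout, which flushes only when a
 * single buffer is occupied and never waits for the FPGA to drain.
 */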
11857051924fSEli Billauer static void xillybus_autoflush(struct work_struct *work)
11867051924fSEli Billauer {
11877051924fSEli Billauer 	struct delayed_work *workitem = container_of(
11887051924fSEli Billauer 		work, struct delayed_work, work);
11897051924fSEli Billauer 	struct xilly_channel *channel = container_of(
11907051924fSEli Billauer 		workitem, struct xilly_channel, rd_workitem);
11917051924fSEli Billauer 	int rc;
11927051924fSEli Billauer 
11937051924fSEli Billauer 	rc = xillybus_myflush(channel, -1);
11947051924fSEli Billauer 	if (rc == -EINTR)
11957051924fSEli Billauer 		dev_warn(channel->endpoint->dev,
11967051924fSEli Billauer 			 "Autoflush failed because work queue thread got a signal.\n");
11977051924fSEli Billauer 	else if (rc)
11987051924fSEli Billauer 		dev_err(channel->endpoint->dev,
11997051924fSEli Billauer 			"Autoflush failed under weird circumstances.\n");
12007051924fSEli Billauer }
12017051924fSEli Billauer 
12027051924fSEli Billauer static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
12037051924fSEli Billauer 			      size_t count, loff_t *f_pos)
12047051924fSEli Billauer {
12057051924fSEli Billauer 	ssize_t rc;
12067051924fSEli Billauer 	unsigned long flags;
12077051924fSEli Billauer 	int bytes_done = 0;
12087051924fSEli Billauer 	struct xilly_channel *channel = filp->private_data;
12097051924fSEli Billauer 
12107051924fSEli Billauer 	int full, exhausted;
12117051924fSEli Billauer 	/* Initializations are there only to silence warnings */
12127051924fSEli Billauer 
12137051924fSEli Billauer 	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
12147051924fSEli Billauer 	int end_offset_plus1 = 0;
12157051924fSEli Billauer 
12167051924fSEli Billauer 	if (channel->endpoint->fatal_error)
12177051924fSEli Billauer 		return -EIO;
12187051924fSEli Billauer 
12197051924fSEli Billauer 	rc = mutex_lock_interruptible(&channel->rd_mutex);
12207051924fSEli Billauer 	if (rc)
12217051924fSEli Billauer 		return rc;
12227051924fSEli Billauer 
12237051924fSEli Billauer 	while (1) {
12247051924fSEli Billauer 		int bytes_to_do = count - bytes_done;
12257051924fSEli Billauer 
12267051924fSEli Billauer 		spin_lock_irqsave(&channel->rd_spinlock, flags);
12277051924fSEli Billauer 
12287051924fSEli Billauer 		full = channel->rd_full;
12297051924fSEli Billauer 
12307051924fSEli Billauer 		if (!full) {
12317051924fSEli Billauer 			bufidx = channel->rd_host_buf_idx;
12327051924fSEli Billauer 			bufpos = channel->rd_host_buf_pos;
12337051924fSEli Billauer 			howmany = channel->rd_buf_size - bufpos;
12347051924fSEli Billauer 
12357051924fSEli Billauer 			/*
12367051924fSEli Billauer 			 * Update rd_host_* to its state after this operation.
12377051924fSEli Billauer 			 * count=0 means committing the buffer immediately,
12387051924fSEli Billauer 			 * which is like flushing, but doesn't necessarily block.
12397051924fSEli Billauer 			 */
12407051924fSEli Billauer 
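			/*
			 * If this write won't fill the buffer (and this isn't
			 * a count=0 commit of a buffer already holding at
			 * least one complete element), just stash the data;
			 * otherwise mark the buffer done so it's submitted to
			 * the FPGA further down.
			 */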
12417051924fSEli Billauer 			if ((howmany > bytes_to_do) &&
12427051924fSEli Billauer 			    (count ||
12437051924fSEli Billauer 			     ((bufpos >> channel->log2_element_size) == 0))) {
12447051924fSEli Billauer 				bufferdone = 0;
12457051924fSEli Billauer 
12467051924fSEli Billauer 				howmany = bytes_to_do;
12477051924fSEli Billauer 				channel->rd_host_buf_pos += howmany;
12487051924fSEli Billauer 			} else {
12497051924fSEli Billauer 				bufferdone = 1;
12507051924fSEli Billauer 
12517051924fSEli Billauer 				if (count) {
12527051924fSEli Billauer 					end_offset_plus1 =
12537051924fSEli Billauer 						channel->rd_buf_size >>
12547051924fSEli Billauer 						channel->log2_element_size;
12557051924fSEli Billauer 					channel->rd_host_buf_pos = 0;
12567051924fSEli Billauer 				} else {
12577051924fSEli Billauer 					unsigned char *tail;
12587051924fSEli Billauer 					int i;
12597051924fSEli Billauer 
1260127af882SEli Billauer 					howmany = 0;
1261127af882SEli Billauer 
12627051924fSEli Billauer 					end_offset_plus1 = bufpos >>
12637051924fSEli Billauer 						channel->log2_element_size;
12647051924fSEli Billauer 
12657051924fSEli Billauer 					channel->rd_host_buf_pos -=
12667051924fSEli Billauer 						end_offset_plus1 <<
12677051924fSEli Billauer 						channel->log2_element_size;
12687051924fSEli Billauer 
12697051924fSEli Billauer 					tail = channel->
12707051924fSEli Billauer 						rd_buffers[bufidx]->addr +
12717051924fSEli Billauer 						(end_offset_plus1 <<
12727051924fSEli Billauer 						 channel->log2_element_size);
12737051924fSEli Billauer 
12747051924fSEli Billauer 					for (i = 0;
12757051924fSEli Billauer 					     i < channel->rd_host_buf_pos;
12767051924fSEli Billauer 					     i++)
12777051924fSEli Billauer 						channel->rd_leftovers[i] =
12787051924fSEli Billauer 							*tail++;
12797051924fSEli Billauer 				}
12807051924fSEli Billauer 
12817051924fSEli Billauer 				if (bufidx == channel->rd_fpga_buf_idx)
12827051924fSEli Billauer 					channel->rd_full = 1;
12837051924fSEli Billauer 
12847051924fSEli Billauer 				if (bufidx >= (channel->num_rd_buffers - 1))
12857051924fSEli Billauer 					channel->rd_host_buf_idx = 0;
12867051924fSEli Billauer 				else
12877051924fSEli Billauer 					channel->rd_host_buf_idx++;
12887051924fSEli Billauer 			}
12897051924fSEli Billauer 		}
12907051924fSEli Billauer 
12917051924fSEli Billauer 		/*
12927051924fSEli Billauer 		 * Marking our situation after the possible changes above,
12937051924fSEli Billauer 		 * for use after releasing the spinlock.
12947051924fSEli Billauer 		 *
12957051924fSEli Billauer 		 * full = full before change
12967051924fSEli Billauer 		 * exhausted = full after possible change
12977051924fSEli Billauer 		 */
12987051924fSEli Billauer 
12997051924fSEli Billauer 		exhausted = channel->rd_full;
13007051924fSEli Billauer 
13017051924fSEli Billauer 		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
13027051924fSEli Billauer 
13037051924fSEli Billauer 		if (!full) { /* Go on, now without the spinlock */
13047051924fSEli Billauer 			unsigned char *head =
13057051924fSEli Billauer 				channel->rd_buffers[bufidx]->addr;
13067051924fSEli Billauer 			int i;
13077051924fSEli Billauer 
13087051924fSEli Billauer 			if ((bufpos == 0) || /* Zero means it's virgin */
13097051924fSEli Billauer 			    (channel->rd_leftovers[3] != 0)) {
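				/*
				 * Take CPU ownership before writing the
				 * leftover bytes into the buffer's head; the
				 * buffer is handed back to the device when
				 * it's eventually submitted.
				 */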
1310*c31bbc14SEli Billauer 				dma_sync_single_for_cpu(channel->endpoint->dev,
13117051924fSEli Billauer 							channel->rd_buffers[bufidx]->dma_addr,
13127051924fSEli Billauer 							channel->rd_buf_size,
13137051924fSEli Billauer 							DMA_TO_DEVICE);
13147051924fSEli Billauer 
13157051924fSEli Billauer 				/* Virgin, but leftovers are due */
13167051924fSEli Billauer 				for (i = 0; i < bufpos; i++)
13177051924fSEli Billauer 					*head++ = channel->rd_leftovers[i];
13187051924fSEli Billauer 
13197051924fSEli Billauer 				channel->rd_leftovers[3] = 0; /* Clear flag */
13207051924fSEli Billauer 			}
13217051924fSEli Billauer 
13227051924fSEli Billauer 			if (copy_from_user(
13237051924fSEli Billauer 				    channel->rd_buffers[bufidx]->addr + bufpos,
13247051924fSEli Billauer 				    userbuf, howmany))
13257051924fSEli Billauer 				rc = -EFAULT;
13267051924fSEli Billauer 
13277051924fSEli Billauer 			userbuf += howmany;
13287051924fSEli Billauer 			bytes_done += howmany;
13297051924fSEli Billauer 
13307051924fSEli Billauer 			if (bufferdone) {
1331*c31bbc14SEli Billauer 				dma_sync_single_for_device(channel->endpoint->dev,
13327051924fSEli Billauer 							   channel->rd_buffers[bufidx]->dma_addr,
13337051924fSEli Billauer 							   channel->rd_buf_size,
13347051924fSEli Billauer 							   DMA_TO_DEVICE);
13357051924fSEli Billauer 
13367051924fSEli Billauer 				mutex_lock(&channel->endpoint->register_mutex);
13377051924fSEli Billauer 
13387051924fSEli Billauer 				iowrite32(end_offset_plus1 - 1,
13397051924fSEli Billauer 					  channel->endpoint->registers +
13407051924fSEli Billauer 					  fpga_buf_offset_reg);
13417051924fSEli Billauer 
13427051924fSEli Billauer 				iowrite32((channel->chan_num << 1) |
13437051924fSEli Billauer 					  (2 << 24) |  /* 2 = submit buffer */
13447051924fSEli Billauer 					  (bufidx << 12),
13457051924fSEli Billauer 					  channel->endpoint->registers +
13467051924fSEli Billauer 					  fpga_buf_ctrl_reg);
13477051924fSEli Billauer 
13487051924fSEli Billauer 				mutex_unlock(&channel->endpoint->
13497051924fSEli Billauer 					     register_mutex);
13507051924fSEli Billauer 
13517051924fSEli Billauer 				channel->rd_leftovers[3] =
13527051924fSEli Billauer 					(channel->rd_host_buf_pos != 0);
13537051924fSEli Billauer 			}
13547051924fSEli Billauer 
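			/*
			 * A copy_from_user() failure ends this write(), but
			 * data already placed in the buffers is still
			 * autoflushed later on asynchronous channels.
			 */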
13557051924fSEli Billauer 			if (rc) {
13567051924fSEli Billauer 				mutex_unlock(&channel->rd_mutex);
13577051924fSEli Billauer 
13587051924fSEli Billauer 				if (channel->endpoint->fatal_error)
13597051924fSEli Billauer 					return -EIO;
13607051924fSEli Billauer 
13617051924fSEli Billauer 				if (!channel->rd_synchronous)
13627051924fSEli Billauer 					queue_delayed_work(
13637051924fSEli Billauer 						xillybus_wq,
13647051924fSEli Billauer 						&channel->rd_workitem,
13657051924fSEli Billauer 						XILLY_RX_TIMEOUT);
13667051924fSEli Billauer 
13677051924fSEli Billauer 				return rc;
13687051924fSEli Billauer 			}
13697051924fSEli Billauer 		}
13707051924fSEli Billauer 
13717051924fSEli Billauer 		if (bytes_done >= count)
13727051924fSEli Billauer 			break;
13737051924fSEli Billauer 
13747051924fSEli Billauer 		if (!exhausted)
13757051924fSEli Billauer 			continue; /* If there's more space, just go on */
13767051924fSEli Billauer 
13777051924fSEli Billauer 		if ((bytes_done > 0) && channel->rd_allow_partial)
13787051924fSEli Billauer 			break;
13797051924fSEli Billauer 
13807051924fSEli Billauer 		/*
13817051924fSEli Billauer 		 * Indefinite sleep with mutex taken. With data waiting for
13827051924fSEli Billauer 		 * flushing, user should not be surprised if open() for write
13837051924fSEli Billauer 		 * sleeps.
13847051924fSEli Billauer 		 */
13857051924fSEli Billauer 
13867051924fSEli Billauer 		if (filp->f_flags & O_NONBLOCK) {
13877051924fSEli Billauer 			rc = -EAGAIN;
13887051924fSEli Billauer 			break;
13897051924fSEli Billauer 		}
13907051924fSEli Billauer 
13917051924fSEli Billauer 		if (wait_event_interruptible(channel->rd_wait,
13927051924fSEli Billauer 					     (!channel->rd_full))) {
13937051924fSEli Billauer 			mutex_unlock(&channel->rd_mutex);
13947051924fSEli Billauer 
13957051924fSEli Billauer 			if (channel->endpoint->fatal_error)
13967051924fSEli Billauer 				return -EIO;
13977051924fSEli Billauer 
13987051924fSEli Billauer 			if (bytes_done)
13997051924fSEli Billauer 				return bytes_done;
14007051924fSEli Billauer 			return -EINTR;
14017051924fSEli Billauer 		}
14027051924fSEli Billauer 	}
14037051924fSEli Billauer 
14047051924fSEli Billauer 	mutex_unlock(&channel->rd_mutex);
14057051924fSEli Billauer 
14067051924fSEli Billauer 	if (!channel->rd_synchronous)
14077051924fSEli Billauer 		queue_delayed_work(xillybus_wq,
14087051924fSEli Billauer 				   &channel->rd_workitem,
14097051924fSEli Billauer 				   XILLY_RX_TIMEOUT);
14107051924fSEli Billauer 
14117051924fSEli Billauer 	if (channel->endpoint->fatal_error)
14127051924fSEli Billauer 		return -EIO;
14137051924fSEli Billauer 
14147051924fSEli Billauer 	if (rc)
14157051924fSEli Billauer 		return rc;
14167051924fSEli Billauer 
14177051924fSEli Billauer 	if ((channel->rd_synchronous) && (bytes_done > 0)) {
14187051924fSEli Billauer 		rc = xillybus_myflush(filp->private_data, 0); /* No timeout */
14197051924fSEli Billauer 
14207051924fSEli Billauer 		if (rc && (rc != -EINTR))
14217051924fSEli Billauer 			return rc;
14227051924fSEli Billauer 	}
14237051924fSEli Billauer 
14247051924fSEli Billauer 	return bytes_done;
14257051924fSEli Billauer }
14267051924fSEli Billauer 
14277051924fSEli Billauer static int xillybus_open(struct inode *inode, struct file *filp)
14287051924fSEli Billauer {
14298cb5d216SEli Billauer 	int rc;
14307051924fSEli Billauer 	unsigned long flags;
14318cb5d216SEli Billauer 	struct xilly_endpoint *endpoint;
14327051924fSEli Billauer 	struct xilly_channel *channel;
14338cb5d216SEli Billauer 	int index;
14347051924fSEli Billauer 
14358cb5d216SEli Billauer 	rc = xillybus_find_inode(inode, (void **)&endpoint, &index);
14368cb5d216SEli Billauer 	if (rc)
14378cb5d216SEli Billauer 		return rc;
14387051924fSEli Billauer 
14397051924fSEli Billauer 	if (endpoint->fatal_error)
14407051924fSEli Billauer 		return -EIO;
14417051924fSEli Billauer 
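	/*
	 * channels[0] is apparently reserved for the driver's internal
	 * message stream, so device file number index maps to
	 * channels[1 + index].
	 */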
14428cb5d216SEli Billauer 	channel = endpoint->channels[1 + index];
14437051924fSEli Billauer 	filp->private_data = channel;
14447051924fSEli Billauer 
14457051924fSEli Billauer 	/*
14467051924fSEli Billauer 	 * It gets complicated because:
14477051924fSEli Billauer 	 * 1. We don't want to take a mutex we don't have to
14487051924fSEli Billauer 	 * 2. We don't want to open one direction if the other will fail.
14497051924fSEli Billauer 	 */
14507051924fSEli Billauer 
14517051924fSEli Billauer 	if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers))
14527051924fSEli Billauer 		return -ENODEV;
14537051924fSEli Billauer 
14547051924fSEli Billauer 	if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers))
14557051924fSEli Billauer 		return -ENODEV;
14567051924fSEli Billauer 
14577051924fSEli Billauer 	if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) &&
14587051924fSEli Billauer 	    (channel->wr_synchronous || !channel->wr_allow_partial ||
14597051924fSEli Billauer 	     !channel->wr_supports_nonempty)) {
14607051924fSEli Billauer 		dev_err(endpoint->dev,
14617051924fSEli Billauer 			"open() failed: O_NONBLOCK not allowed for read on this device\n");
14627051924fSEli Billauer 		return -ENODEV;
14637051924fSEli Billauer 	}
14647051924fSEli Billauer 
14657051924fSEli Billauer 	if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) &&
14667051924fSEli Billauer 	    (channel->rd_synchronous || !channel->rd_allow_partial)) {
14677051924fSEli Billauer 		dev_err(endpoint->dev,
14687051924fSEli Billauer 			"open() failed: O_NONBLOCK not allowed for write on this device\n");
14697051924fSEli Billauer 		return -ENODEV;
14707051924fSEli Billauer 	}
14717051924fSEli Billauer 
14727051924fSEli Billauer 	/*
14737051924fSEli Billauer 	 * Note: open() may block on getting mutexes despite O_NONBLOCK.
14747051924fSEli Billauer 	 * This shouldn't occur normally, since opening the same device file
14757051924fSEli Billauer 	 * more than once is almost always prohibited anyhow
14767051924fSEli Billauer 	 * (*_exclusive_open is normally set in real-life systems).
14777051924fSEli Billauer 	 */
14787051924fSEli Billauer 
14797051924fSEli Billauer 	if (filp->f_mode & FMODE_READ) {
14807051924fSEli Billauer 		rc = mutex_lock_interruptible(&channel->wr_mutex);
14817051924fSEli Billauer 		if (rc)
14827051924fSEli Billauer 			return rc;
14837051924fSEli Billauer 	}
14847051924fSEli Billauer 
14857051924fSEli Billauer 	if (filp->f_mode & FMODE_WRITE) {
14867051924fSEli Billauer 		rc = mutex_lock_interruptible(&channel->rd_mutex);
14877051924fSEli Billauer 		if (rc)
14887051924fSEli Billauer 			goto unlock_wr;
14897051924fSEli Billauer 	}
14907051924fSEli Billauer 
14917051924fSEli Billauer 	if ((filp->f_mode & FMODE_READ) &&
14927051924fSEli Billauer 	    (channel->wr_ref_count != 0) &&
14937051924fSEli Billauer 	    (channel->wr_exclusive_open)) {
14947051924fSEli Billauer 		rc = -EBUSY;
14957051924fSEli Billauer 		goto unlock;
14967051924fSEli Billauer 	}
14977051924fSEli Billauer 
14987051924fSEli Billauer 	if ((filp->f_mode & FMODE_WRITE) &&
14997051924fSEli Billauer 	    (channel->rd_ref_count != 0) &&
15007051924fSEli Billauer 	    (channel->rd_exclusive_open)) {
15017051924fSEli Billauer 		rc = -EBUSY;
15027051924fSEli Billauer 		goto unlock;
15037051924fSEli Billauer 	}
15047051924fSEli Billauer 
15057051924fSEli Billauer 	if (filp->f_mode & FMODE_READ) {
15067051924fSEli Billauer 		if (channel->wr_ref_count == 0) { /* First open of file */
15077051924fSEli Billauer 			/* Move the host to first buffer */
15087051924fSEli Billauer 			spin_lock_irqsave(&channel->wr_spinlock, flags);
15097051924fSEli Billauer 			channel->wr_host_buf_idx = 0;
15107051924fSEli Billauer 			channel->wr_host_buf_pos = 0;
15117051924fSEli Billauer 			channel->wr_fpga_buf_idx = -1;
15127051924fSEli Billauer 			channel->wr_empty = 1;
15137051924fSEli Billauer 			channel->wr_ready = 0;
15147051924fSEli Billauer 			channel->wr_sleepy = 1;
15157051924fSEli Billauer 			channel->wr_eof = -1;
15167051924fSEli Billauer 			channel->wr_hangup = 0;
15177051924fSEli Billauer 
15187051924fSEli Billauer 			spin_unlock_irqrestore(&channel->wr_spinlock, flags);
15197051924fSEli Billauer 
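			/*
			 * Command word, as used throughout this file: bit 0
			 * apparently selects the FPGA-to-host direction, the
			 * channel number starts at bit 1, the opcode sits in
			 * bits 31:24, and (for opcode 4, open) bit 23
			 * carries the synchronous flag.
			 */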
15207051924fSEli Billauer 			iowrite32(1 | (channel->chan_num << 1) |
15217051924fSEli Billauer 				  (4 << 24) |  /* Opcode 4, open channel */
15227051924fSEli Billauer 				  ((channel->wr_synchronous & 1) << 23),
15237051924fSEli Billauer 				  channel->endpoint->registers +
15247051924fSEli Billauer 				  fpga_buf_ctrl_reg);
15257051924fSEli Billauer 		}
15267051924fSEli Billauer 
15277051924fSEli Billauer 		channel->wr_ref_count++;
15287051924fSEli Billauer 	}
15297051924fSEli Billauer 
15307051924fSEli Billauer 	if (filp->f_mode & FMODE_WRITE) {
15317051924fSEli Billauer 		if (channel->rd_ref_count == 0) { /* First open of file */
15327051924fSEli Billauer 			/* Move the host to first buffer */
15337051924fSEli Billauer 			spin_lock_irqsave(&channel->rd_spinlock, flags);
15347051924fSEli Billauer 			channel->rd_host_buf_idx = 0;
15357051924fSEli Billauer 			channel->rd_host_buf_pos = 0;
15367051924fSEli Billauer 			channel->rd_leftovers[3] = 0; /* No leftovers. */
15377051924fSEli Billauer 			channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1;
15387051924fSEli Billauer 			channel->rd_full = 0;
15397051924fSEli Billauer 
15407051924fSEli Billauer 			spin_unlock_irqrestore(&channel->rd_spinlock, flags);
15417051924fSEli Billauer 
15427051924fSEli Billauer 			iowrite32((channel->chan_num << 1) |
15437051924fSEli Billauer 				  (4 << 24),   /* Opcode 4, open channel */
15447051924fSEli Billauer 				  channel->endpoint->registers +
15457051924fSEli Billauer 				  fpga_buf_ctrl_reg);
15467051924fSEli Billauer 		}
15477051924fSEli Billauer 
15487051924fSEli Billauer 		channel->rd_ref_count++;
15497051924fSEli Billauer 	}
15507051924fSEli Billauer 
15517051924fSEli Billauer unlock:
15527051924fSEli Billauer 	if (filp->f_mode & FMODE_WRITE)
15537051924fSEli Billauer 		mutex_unlock(&channel->rd_mutex);
15547051924fSEli Billauer unlock_wr:
15557051924fSEli Billauer 	if (filp->f_mode & FMODE_READ)
15567051924fSEli Billauer 		mutex_unlock(&channel->wr_mutex);
15577051924fSEli Billauer 
15587051924fSEli Billauer 	if (!rc && (!channel->seekable))
15597051924fSEli Billauer 		return nonseekable_open(inode, filp);
15607051924fSEli Billauer 
15617051924fSEli Billauer 	return rc;
15627051924fSEli Billauer }
15637051924fSEli Billauer 
15647051924fSEli Billauer static int xillybus_release(struct inode *inode, struct file *filp)
15657051924fSEli Billauer {
15667051924fSEli Billauer 	unsigned long flags;
15677051924fSEli Billauer 	struct xilly_channel *channel = filp->private_data;
15687051924fSEli Billauer 
15697051924fSEli Billauer 	int buf_idx;
15707051924fSEli Billauer 	int eof;
15717051924fSEli Billauer 
15727051924fSEli Billauer 	if (channel->endpoint->fatal_error)
15737051924fSEli Billauer 		return -EIO;
15747051924fSEli Billauer 
15757051924fSEli Billauer 	if (filp->f_mode & FMODE_WRITE) {
15767051924fSEli Billauer 		mutex_lock(&channel->rd_mutex);
15777051924fSEli Billauer 
15787051924fSEli Billauer 		channel->rd_ref_count--;
15797051924fSEli Billauer 
15807051924fSEli Billauer 		if (channel->rd_ref_count == 0) {
15817051924fSEli Billauer 			/*
15827051924fSEli Billauer 			 * We rely on the kernel calling flush()
15837051924fSEli Billauer 			 * before we get here.
15847051924fSEli Billauer 			 */
15857051924fSEli Billauer 
15867051924fSEli Billauer 			iowrite32((channel->chan_num << 1) | /* Channel ID */
15877051924fSEli Billauer 				  (5 << 24),  /* Opcode 5, close channel */
15887051924fSEli Billauer 				  channel->endpoint->registers +
15897051924fSEli Billauer 				  fpga_buf_ctrl_reg);
15907051924fSEli Billauer 		}
15917051924fSEli Billauer 		mutex_unlock(&channel->rd_mutex);
15927051924fSEli Billauer 	}
15937051924fSEli Billauer 
15947051924fSEli Billauer 	if (filp->f_mode & FMODE_READ) {
15957051924fSEli Billauer 		mutex_lock(&channel->wr_mutex);
15967051924fSEli Billauer 
15977051924fSEli Billauer 		channel->wr_ref_count--;
15987051924fSEli Billauer 
15997051924fSEli Billauer 		if (channel->wr_ref_count == 0) {
16007051924fSEli Billauer 			iowrite32(1 | (channel->chan_num << 1) |
16017051924fSEli Billauer 				  (5 << 24),  /* Opcode 5, close channel */
16027051924fSEli Billauer 				  channel->endpoint->registers +
16037051924fSEli Billauer 				  fpga_buf_ctrl_reg);
16047051924fSEli Billauer 
16057051924fSEli Billauer 			/*
16067051924fSEli Billauer 			 * This is crazily cautious: We make sure not only
16077051924fSEli Billauer 			 * that we got an EOF (be it because we closed the
16087051924fSEli Billauer 			 * channel or because of a user's EOF), but also that
16097051924fSEli Billauer 			 * it's one beyond the last buffer that arrived, so
16107051924fSEli Billauer 			 * we have no leftover buffers pending before wrapping
16117051924fSEli Billauer 			 * up (which can only happen in asynchronous channels,
16127051924fSEli Billauer 			 * BTW).
16137051924fSEli Billauer 			 */
16147051924fSEli Billauer 
16157051924fSEli Billauer 			while (1) {
16167051924fSEli Billauer 				spin_lock_irqsave(&channel->wr_spinlock,
16177051924fSEli Billauer 						  flags);
16187051924fSEli Billauer 				buf_idx = channel->wr_fpga_buf_idx;
16197051924fSEli Billauer 				eof = channel->wr_eof;
16207051924fSEli Billauer 				channel->wr_sleepy = 1;
16217051924fSEli Billauer 				spin_unlock_irqrestore(&channel->wr_spinlock,
16227051924fSEli Billauer 						       flags);
16237051924fSEli Billauer 
16247051924fSEli Billauer 				/*
16257051924fSEli Billauer 				 * Check if eof points at the buffer after
16267051924fSEli Billauer 				 * the last one the FPGA submitted. Note that
16277051924fSEli Billauer 				 * no EOF is marked by negative eof.
16287051924fSEli Billauer 				 */
16297051924fSEli Billauer 
16307051924fSEli Billauer 				buf_idx++;
16317051924fSEli Billauer 				if (buf_idx == channel->num_wr_buffers)
16327051924fSEli Billauer 					buf_idx = 0;
16337051924fSEli Billauer 
16347051924fSEli Billauer 				if (buf_idx == eof)
16357051924fSEli Billauer 					break;
16367051924fSEli Billauer 
16377051924fSEli Billauer 				/*
16387051924fSEli Billauer 				 * Steal an extra 100 ms if awakened by an interrupt.
16397051924fSEli Billauer 				 * This is a simple workaround for an
16407051924fSEli Billauer 				 * interrupt pending when entering, which would
16417051924fSEli Billauer 				 * otherwise result in declaring the hardware
16427051924fSEli Billauer 				 * non-responsive.
16437051924fSEli Billauer 				 */
16447051924fSEli Billauer 
16457051924fSEli Billauer 				if (wait_event_interruptible(
16467051924fSEli Billauer 					    channel->wr_wait,
16477051924fSEli Billauer 					    (!channel->wr_sleepy)))
16487051924fSEli Billauer 					msleep(100);
16497051924fSEli Billauer 
16507051924fSEli Billauer 				if (channel->wr_sleepy) {
16517051924fSEli Billauer 					mutex_unlock(&channel->wr_mutex);
16527051924fSEli Billauer 					dev_warn(channel->endpoint->dev,
16537051924fSEli Billauer 						 "Hardware failed to respond to close command, therefore left in messy state.\n");
16547051924fSEli Billauer 					return -EINTR;
16557051924fSEli Billauer 				}
16567051924fSEli Billauer 			}
16577051924fSEli Billauer 		}
16587051924fSEli Billauer 
16597051924fSEli Billauer 		mutex_unlock(&channel->wr_mutex);
16607051924fSEli Billauer 	}
16617051924fSEli Billauer 
16627051924fSEli Billauer 	return 0;
16637051924fSEli Billauer }
16647051924fSEli Billauer 
16657051924fSEli Billauer static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
16667051924fSEli Billauer {
16677051924fSEli Billauer 	struct xilly_channel *channel = filp->private_data;
16687051924fSEli Billauer 	loff_t pos = filp->f_pos;
16697051924fSEli Billauer 	int rc = 0;
16707051924fSEli Billauer 
16717051924fSEli Billauer 	/*
16727051924fSEli Billauer 	 * Take both mutexes without allowing interruption by signals, since
16737051924fSEli Billauer 	 * it seems like common applications don't expect an -EINTR here.
16747051924fSEli Billauer 	 * Besides, multiple accesses to a single file descriptor on seekable
16757051924fSEli Billauer 	 * devices are a mess anyhow.
16767051924fSEli Billauer 	 */
16777051924fSEli Billauer 
16787051924fSEli Billauer 	if (channel->endpoint->fatal_error)
16797051924fSEli Billauer 		return -EIO;
16807051924fSEli Billauer 
16817051924fSEli Billauer 	mutex_lock(&channel->wr_mutex);
16827051924fSEli Billauer 	mutex_lock(&channel->rd_mutex);
16837051924fSEli Billauer 
16847051924fSEli Billauer 	switch (whence) {
16857051924fSEli Billauer 	case SEEK_SET:
16867051924fSEli Billauer 		pos = offset;
16877051924fSEli Billauer 		break;
16887051924fSEli Billauer 	case SEEK_CUR:
16897051924fSEli Billauer 		pos += offset;
16907051924fSEli Billauer 		break;
16917051924fSEli Billauer 	case SEEK_END:
16927051924fSEli Billauer 		pos = offset; /* Going to the end => to the beginning */
16937051924fSEli Billauer 		break;
16947051924fSEli Billauer 	default:
16957051924fSEli Billauer 		rc = -EINVAL;
16967051924fSEli Billauer 		goto end;
16977051924fSEli Billauer 	}
16987051924fSEli Billauer 
16997051924fSEli Billauer 	/* In any case, we must finish on an element boundary */
17007051924fSEli Billauer 	if (pos & ((1 << channel->log2_element_size) - 1)) {
17017051924fSEli Billauer 		rc = -EINVAL;
17027051924fSEli Billauer 		goto end;
17037051924fSEli Billauer 	}
17047051924fSEli Billauer 
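	/*
	 * Program the new position: the offset in elements goes to
	 * fpga_buf_offset_reg, followed by a "set address" command (opcode
	 * 6) for this channel, with register_mutex keeping the pair atomic.
	 */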
17057051924fSEli Billauer 	mutex_lock(&channel->endpoint->register_mutex);
17067051924fSEli Billauer 
17077051924fSEli Billauer 	iowrite32(pos >> channel->log2_element_size,
17087051924fSEli Billauer 		  channel->endpoint->registers + fpga_buf_offset_reg);
17097051924fSEli Billauer 
17107051924fSEli Billauer 	iowrite32((channel->chan_num << 1) |
17117051924fSEli Billauer 		  (6 << 24),  /* Opcode 6, set address */
17127051924fSEli Billauer 		  channel->endpoint->registers + fpga_buf_ctrl_reg);
17137051924fSEli Billauer 
17147051924fSEli Billauer 	mutex_unlock(&channel->endpoint->register_mutex);
17157051924fSEli Billauer 
17167051924fSEli Billauer end:
17177051924fSEli Billauer 	mutex_unlock(&channel->rd_mutex);
17187051924fSEli Billauer 	mutex_unlock(&channel->wr_mutex);
17197051924fSEli Billauer 
17207051924fSEli Billauer 	if (rc) /* Return error after releasing mutexes */
17217051924fSEli Billauer 		return rc;
17227051924fSEli Billauer 
17237051924fSEli Billauer 	filp->f_pos = pos;
17247051924fSEli Billauer 
17257051924fSEli Billauer 	/*
17267051924fSEli Billauer 	 * Since seekable devices are allowed only when the channel is
17277051924fSEli Billauer 	 * synchronous, we assume that there is no data pending in either
17287051924fSEli Billauer 	 * direction (which holds true as long as no concurrent access on the
17297051924fSEli Billauer 	 * file descriptor takes place).
17307051924fSEli Billauer 	 * The only thing we may need to throw away is leftovers from partial
17317051924fSEli Billauer 	 * write() flush.
17327051924fSEli Billauer 	 */
17337051924fSEli Billauer 
17347051924fSEli Billauer 	channel->rd_leftovers[3] = 0;
17357051924fSEli Billauer 
17367051924fSEli Billauer 	return pos;
17377051924fSEli Billauer }
17387051924fSEli Billauer 
1739afc9a42bSAl Viro static __poll_t xillybus_poll(struct file *filp, poll_table *wait)
17407051924fSEli Billauer {
17417051924fSEli Billauer 	struct xilly_channel *channel = filp->private_data;
1742afc9a42bSAl Viro 	__poll_t mask = 0;
17437051924fSEli Billauer 	unsigned long flags;
17447051924fSEli Billauer 
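	/*
	 * The endpoint-wide wait queue is polled too, presumably so that
	 * endpoint-level events (such as a fatal error, reported as
	 * EPOLLERR below) can wake pollers.
	 */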
17457051924fSEli Billauer 	poll_wait(filp, &channel->endpoint->ep_wait, wait);
17467051924fSEli Billauer 
17477051924fSEli Billauer 	/*
17487051924fSEli Billauer 	 * poll() won't play ball regarding read() channels which aren't
17497051924fSEli Billauer 	 * asynchronous or don't support the nonempty message. Allowing
17507051924fSEli Billauer 	 * that would create situations where data has been delivered at
17517051924fSEli Billauer 	 * the FPGA while users expect select() to wake up, which it may
17527051924fSEli Billauer 	 * not.
17537051924fSEli Billauer 	 */
17547051924fSEli Billauer 
17557051924fSEli Billauer 	if (!channel->wr_synchronous && channel->wr_supports_nonempty) {
17567051924fSEli Billauer 		poll_wait(filp, &channel->wr_wait, wait);
17577051924fSEli Billauer 		poll_wait(filp, &channel->wr_ready_wait, wait);
17587051924fSEli Billauer 
17597051924fSEli Billauer 		spin_lock_irqsave(&channel->wr_spinlock, flags);
17607051924fSEli Billauer 		if (!channel->wr_empty || channel->wr_ready)
1761a9a08845SLinus Torvalds 			mask |= EPOLLIN | EPOLLRDNORM;
17627051924fSEli Billauer 
17637051924fSEli Billauer 		if (channel->wr_hangup)
17647051924fSEli Billauer 			/*
1765a9a08845SLinus Torvalds 			 * Not EPOLLHUP, because its behavior is in the
1766a9a08845SLinus Torvalds 			 * mist, and EPOLLIN does what we want: Wake up
17677051924fSEli Billauer 			 * the read file descriptor so it sees EOF.
17687051924fSEli Billauer 			 */
1769a9a08845SLinus Torvalds 			mask |=  EPOLLIN | EPOLLRDNORM;
17707051924fSEli Billauer 		spin_unlock_irqrestore(&channel->wr_spinlock, flags);
17717051924fSEli Billauer 	}
17727051924fSEli Billauer 
17737051924fSEli Billauer 	/*
17747051924fSEli Billauer 	 * If partial data write is disallowed on a write() channel,
17757051924fSEli Billauer 	 * it's pointless to ever signal OK to write, because is could
17767051924fSEli Billauer 	 * it's pointless to ever signal OK to write, because it could
17777051924fSEli Billauer 	 */
17787051924fSEli Billauer 
17797051924fSEli Billauer 	if (channel->rd_allow_partial) {
17807051924fSEli Billauer 		poll_wait(filp, &channel->rd_wait, wait);
17817051924fSEli Billauer 
17827051924fSEli Billauer 		spin_lock_irqsave(&channel->rd_spinlock, flags);
17837051924fSEli Billauer 		if (!channel->rd_full)
1784a9a08845SLinus Torvalds 			mask |= EPOLLOUT | EPOLLWRNORM;
17857051924fSEli Billauer 		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
17867051924fSEli Billauer 	}
17877051924fSEli Billauer 
17887051924fSEli Billauer 	if (channel->endpoint->fatal_error)
1789a9a08845SLinus Torvalds 		mask |= EPOLLERR;
17907051924fSEli Billauer 
17917051924fSEli Billauer 	return mask;
17927051924fSEli Billauer }
17937051924fSEli Billauer 
17947051924fSEli Billauer static const struct file_operations xillybus_fops = {
17957051924fSEli Billauer 	.owner      = THIS_MODULE,
17967051924fSEli Billauer 	.read       = xillybus_read,
17977051924fSEli Billauer 	.write      = xillybus_write,
17987051924fSEli Billauer 	.open       = xillybus_open,
17997051924fSEli Billauer 	.flush      = xillybus_flush,
18007051924fSEli Billauer 	.release    = xillybus_release,
18017051924fSEli Billauer 	.llseek     = xillybus_llseek,
18027051924fSEli Billauer 	.poll       = xillybus_poll,
18037051924fSEli Billauer };
18047051924fSEli Billauer 
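/*
 * Allocate and minimally initialize an endpoint structure. The transport
 * layer (presumably the PCIe or device-tree front end) is expected to fill
 * in endpoint->registers and set up interrupts before calling
 * xillybus_endpoint_discovery().
 */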
1805*c31bbc14SEli Billauer struct xilly_endpoint *xillybus_init_endpoint(struct device *dev)
18067051924fSEli Billauer {
18077051924fSEli Billauer 	struct xilly_endpoint *endpoint;
18087051924fSEli Billauer 
18097051924fSEli Billauer 	endpoint = devm_kzalloc(dev, sizeof(*endpoint), GFP_KERNEL);
18107051924fSEli Billauer 	if (!endpoint)
18117051924fSEli Billauer 		return NULL;
18127051924fSEli Billauer 
18137051924fSEli Billauer 	endpoint->dev = dev;
18147051924fSEli Billauer 	endpoint->msg_counter = 0x0b;
18157051924fSEli Billauer 	endpoint->failed_messages = 0;
18167051924fSEli Billauer 	endpoint->fatal_error = 0;
18177051924fSEli Billauer 
18187051924fSEli Billauer 	init_waitqueue_head(&endpoint->ep_wait);
18197051924fSEli Billauer 	mutex_init(&endpoint->register_mutex);
18207051924fSEli Billauer 
18217051924fSEli Billauer 	return endpoint;
18227051924fSEli Billauer }
18237051924fSEli Billauer EXPORT_SYMBOL(xillybus_init_endpoint);
18247051924fSEli Billauer 
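/*
 * Quiesce the FPGA's DMA by writing the DMA control register with the
 * enable bit (0x0002) clear, then wait for idtlen to turn non-negative,
 * which apparently serves as the acknowledgement delivered through the
 * message channel.
 */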
18257051924fSEli Billauer static int xilly_quiesce(struct xilly_endpoint *endpoint)
18267051924fSEli Billauer {
18277051924fSEli Billauer 	long t;
18287051924fSEli Billauer 
18297051924fSEli Billauer 	endpoint->idtlen = -1;
18307051924fSEli Billauer 
18317051924fSEli Billauer 	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
18327051924fSEli Billauer 		  endpoint->registers + fpga_dma_control_reg);
18337051924fSEli Billauer 
18347051924fSEli Billauer 	t = wait_event_interruptible_timeout(endpoint->ep_wait,
18357051924fSEli Billauer 					     (endpoint->idtlen >= 0),
18367051924fSEli Billauer 					     XILLY_TIMEOUT);
18377051924fSEli Billauer 	if (t <= 0) {
18387051924fSEli Billauer 		dev_err(endpoint->dev,
18397051924fSEli Billauer 			"Failed to quiesce the device on exit.\n");
18407051924fSEli Billauer 		return -ENODEV;
18417051924fSEli Billauer 	}
18427051924fSEli Billauer 	return 0;
18437051924fSEli Billauer }
18447051924fSEli Billauer 
18457051924fSEli Billauer int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
18467051924fSEli Billauer {
18477051924fSEli Billauer 	int rc;
18487051924fSEli Billauer 	long t;
18497051924fSEli Billauer 
18507051924fSEli Billauer 	void *bootstrap_resources;
18517051924fSEli Billauer 	int idtbuffersize = (1 << PAGE_SHIFT);
18527051924fSEli Billauer 	struct device *dev = endpoint->dev;
18537051924fSEli Billauer 
18547051924fSEli Billauer 	/*
18557051924fSEli Billauer 	 * The bogus IDT is used during bootstrap for allocating the initial
18567051924fSEli Billauer 	 * message buffer, and then the message buffer and space for the IDT
18577051924fSEli Billauer 	 * itself. The initial message buffer is of a single page's size, but
18587051924fSEli Billauer 	 * it's soon replaced with a more modest one (and memory is freed).
18597051924fSEli Billauer 	 */
18607051924fSEli Billauer 
18617051924fSEli Billauer 	unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0,
18627051924fSEli Billauer 				       3, 192, PAGE_SHIFT, 0 };
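	/*
	 * Each 4-byte entry in the bogus IDT appears to describe one channel,
	 * with its third byte holding the log2 of the buffer size; that's why
	 * bogus_idt[6] is incremented below whenever idtbuffersize doubles.
	 */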
18637051924fSEli Billauer 	struct xilly_idt_handle idt_handle;
18647051924fSEli Billauer 
18657051924fSEli Billauer 	/*
18667051924fSEli Billauer 	 * Writing the value 0x00000001 to Endianness register signals which
18677051924fSEli Billauer 	 * endianness this processor is using, so the FPGA can swap words as
18687051924fSEli Billauer 	 * necessary.
18697051924fSEli Billauer 	 */
18707051924fSEli Billauer 
18717051924fSEli Billauer 	iowrite32(1, endpoint->registers + fpga_endian_reg);
18727051924fSEli Billauer 
18737051924fSEli Billauer 	/* Bootstrap phase I: Allocate temporary message buffer */
18747051924fSEli Billauer 
18757051924fSEli Billauer 	bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL);
18767051924fSEli Billauer 	if (!bootstrap_resources)
18777051924fSEli Billauer 		return -ENOMEM;
18787051924fSEli Billauer 
18797051924fSEli Billauer 	endpoint->num_channels = 0;
18807051924fSEli Billauer 
18817051924fSEli Billauer 	rc = xilly_setupchannels(endpoint, bogus_idt, 1);
18827051924fSEli Billauer 	if (rc)
18837051924fSEli Billauer 		return rc;
18847051924fSEli Billauer 
18857051924fSEli Billauer 	/* Clear the message subsystem (and counter in particular) */
18867051924fSEli Billauer 	iowrite32(0x04, endpoint->registers + fpga_msg_ctrl_reg);
18877051924fSEli Billauer 
18887051924fSEli Billauer 	endpoint->idtlen = -1;
18897051924fSEli Billauer 
18907051924fSEli Billauer 	/*
18917051924fSEli Billauer 	 * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT
18927051924fSEli Billauer 	 * buffer size.
18937051924fSEli Billauer 	 */
18947051924fSEli Billauer 	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
18957051924fSEli Billauer 		  endpoint->registers + fpga_dma_control_reg);
18967051924fSEli Billauer 
18977051924fSEli Billauer 	t = wait_event_interruptible_timeout(endpoint->ep_wait,
18987051924fSEli Billauer 					     (endpoint->idtlen >= 0),
18997051924fSEli Billauer 					     XILLY_TIMEOUT);
19007051924fSEli Billauer 	if (t <= 0) {
19017051924fSEli Billauer 		dev_err(endpoint->dev, "No response from FPGA. Aborting.\n");
19027051924fSEli Billauer 		return -ENODEV;
19037051924fSEli Billauer 	}
19047051924fSEli Billauer 
19057051924fSEli Billauer 	/* Enable DMA */
19067051924fSEli Billauer 	iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)),
19077051924fSEli Billauer 		  endpoint->registers + fpga_dma_control_reg);
19087051924fSEli Billauer 
19097051924fSEli Billauer 	/* Bootstrap phase II: Allocate buffer for IDT and obtain it */
19107051924fSEli Billauer 	while (endpoint->idtlen >= idtbuffersize) {
19117051924fSEli Billauer 		idtbuffersize *= 2;
19127051924fSEli Billauer 		bogus_idt[6]++;
19137051924fSEli Billauer 	}
19147051924fSEli Billauer 
19157051924fSEli Billauer 	endpoint->num_channels = 1;
19167051924fSEli Billauer 
19177051924fSEli Billauer 	rc = xilly_setupchannels(endpoint, bogus_idt, 2);
19187051924fSEli Billauer 	if (rc)
19197051924fSEli Billauer 		goto failed_idt;
19207051924fSEli Billauer 
19217051924fSEli Billauer 	rc = xilly_obtain_idt(endpoint);
19227051924fSEli Billauer 	if (rc)
19237051924fSEli Billauer 		goto failed_idt;
19247051924fSEli Billauer 
19257051924fSEli Billauer 	rc = xilly_scan_idt(endpoint, &idt_handle);
19267051924fSEli Billauer 	if (rc)
19277051924fSEli Billauer 		goto failed_idt;
19287051924fSEli Billauer 
19297051924fSEli Billauer 	devres_close_group(dev, bootstrap_resources);
19307051924fSEli Billauer 
19317051924fSEli Billauer 	/* Bootstrap phase III: Allocate buffers according to IDT */
19327051924fSEli Billauer 
19337051924fSEli Billauer 	rc = xilly_setupchannels(endpoint,
19347051924fSEli Billauer 				 idt_handle.chandesc,
19357051924fSEli Billauer 				 idt_handle.entries);
19367051924fSEli Billauer 	if (rc)
19377051924fSEli Billauer 		goto failed_idt;
19387051924fSEli Billauer 
19398cb5d216SEli Billauer 	rc = xillybus_init_chrdev(dev, &xillybus_fops,
1940*c31bbc14SEli Billauer 				  endpoint->owner, endpoint,
19418cb5d216SEli Billauer 				  idt_handle.names,
19428cb5d216SEli Billauer 				  idt_handle.names_len,
19438cb5d216SEli Billauer 				  endpoint->num_channels,
19448cb5d216SEli Billauer 				  xillyname, false);
19457051924fSEli Billauer 
19467051924fSEli Billauer 	if (rc)
19478cb5d216SEli Billauer 		goto failed_idt;
19487051924fSEli Billauer 
19497051924fSEli Billauer 	devres_release_group(dev, bootstrap_resources);
19507051924fSEli Billauer 
19517051924fSEli Billauer 	return 0;
19527051924fSEli Billauer 
19537051924fSEli Billauer failed_idt:
19547051924fSEli Billauer 	xilly_quiesce(endpoint);
19557051924fSEli Billauer 	flush_workqueue(xillybus_wq);
19567051924fSEli Billauer 
19577051924fSEli Billauer 	return rc;
19587051924fSEli Billauer }
19597051924fSEli Billauer EXPORT_SYMBOL(xillybus_endpoint_discovery);
19607051924fSEli Billauer 
19617051924fSEli Billauer void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
19627051924fSEli Billauer {
19638cb5d216SEli Billauer 	xillybus_cleanup_chrdev(endpoint, endpoint->dev);
19647051924fSEli Billauer 
19657051924fSEli Billauer 	xilly_quiesce(endpoint);
19667051924fSEli Billauer 
19677051924fSEli Billauer 	/*
19687051924fSEli Billauer 	 * Flushing is done upon endpoint release to prevent access to memory
19697051924fSEli Billauer 	 * just about to be released. This makes the quiesce complete.
19707051924fSEli Billauer 	 */
19717051924fSEli Billauer 	flush_workqueue(xillybus_wq);
19727051924fSEli Billauer }
19737051924fSEli Billauer EXPORT_SYMBOL(xillybus_endpoint_remove);
19747051924fSEli Billauer 
19757051924fSEli Billauer static int __init xillybus_init(void)
19767051924fSEli Billauer {
19777051924fSEli Billauer 	xillybus_wq = alloc_workqueue(xillyname, 0, 0);
19788cb5d216SEli Billauer 	if (!xillybus_wq)
19797051924fSEli Billauer 		return -ENOMEM;
19807051924fSEli Billauer 
19817051924fSEli Billauer 	return 0;
19827051924fSEli Billauer }
19837051924fSEli Billauer 
19847051924fSEli Billauer static void __exit xillybus_exit(void)
19857051924fSEli Billauer {
19867051924fSEli Billauer 	/* flush_workqueue() was called for each endpoint released */
19877051924fSEli Billauer 	destroy_workqueue(xillybus_wq);
19887051924fSEli Billauer }
19897051924fSEli Billauer 
19907051924fSEli Billauer module_init(xillybus_init);
19917051924fSEli Billauer module_exit(xillybus_exit);
1992