xref: /openbmc/linux/drivers/mfd/dln2.c (revision e1e38ea1)
/*
 * Driver for the Diolan DLN-2 USB adapter
 *
 * Copyright (c) 2014 Intel Corporation
 *
 * Derived from:
 *  i2c-diolan-u2c.c
 *  Copyright (c) 2010-2011 Ericsson AB
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/dln2.h>
#include <linux/rculist.h>

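/*
 * Every message exchanged with the device starts with this header; responses
 * additionally carry a 16-bit result code (struct dln2_response). All fields
 * are little-endian on the wire: size is the total message length including
 * the header, id is the command, echo carries the RX slot index used to match
 * a response to its request, and handle identifies the module (see enum
 * dln2_handle).
 */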
struct dln2_header {
	__le16 size;
	__le16 id;
	__le16 echo;
	__le16 handle;
};

struct dln2_response {
	struct dln2_header hdr;
	__le16 result;
};

#define DLN2_GENERIC_MODULE_ID		0x00
#define DLN2_GENERIC_CMD(cmd)		DLN2_CMD(cmd, DLN2_GENERIC_MODULE_ID)
#define CMD_GET_DEVICE_VER		DLN2_GENERIC_CMD(0x30)
#define CMD_GET_DEVICE_SN		DLN2_GENERIC_CMD(0x31)

#define DLN2_HW_ID			0x200
#define DLN2_USB_TIMEOUT		200	/* in ms */
#define DLN2_MAX_RX_SLOTS		16
#define DLN2_MAX_URBS			16
#define DLN2_RX_BUF_SIZE		512

enum dln2_handle {
	DLN2_HANDLE_EVENT = 0,		/* don't change, hardware defined */
	DLN2_HANDLE_CTRL,
	DLN2_HANDLE_GPIO,
	DLN2_HANDLE_I2C,
	DLN2_HANDLE_SPI,
	DLN2_HANDLES
};

/*
 * Receive context used between the receive demultiplexer and the transfer
 * routine. While sending a request the transfer routine will look for a free
 * receive context and use it to wait for a response and to receive the URB and
 * thus the response data.
 */
struct dln2_rx_context {
	/* completion used to wait for a response */
	struct completion done;

	/* if non-NULL the URB contains the response */
	struct urb *urb;

	/* if true then this context is used to wait for a response */
	bool in_use;
};

/*
 * Receive contexts for a particular DLN2 module (i2c, gpio, etc.). We use the
 * handle header field to identify the module in dln2_dev.mod_rx_slots and then
 * the echo header field to index the slots field and find the receive context
 * for a particular request.
 */
struct dln2_mod_rx_slots {
	/* RX slots bitmap */
	DECLARE_BITMAP(bmap, DLN2_MAX_RX_SLOTS);

	/* used to wait for a free RX slot */
	wait_queue_head_t wq;

	/* used to wait for an RX operation to complete */
	struct dln2_rx_context slots[DLN2_MAX_RX_SLOTS];

	/* avoid races between alloc/free_rx_slot and dln2_rx_transfer */
	spinlock_t lock;
};

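/* Per-adapter state shared by all sub-drivers (GPIO, I2C, SPI). */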
struct dln2_dev {
	struct usb_device *usb_dev;
	struct usb_interface *interface;
	u8 ep_in;
	u8 ep_out;

	struct urb *rx_urb[DLN2_MAX_URBS];
	void *rx_buf[DLN2_MAX_URBS];

	struct dln2_mod_rx_slots mod_rx_slots[DLN2_HANDLES];

	struct list_head event_cb_list;
	spinlock_t event_cb_lock;

	bool disconnect;
	int active_transfers;
	wait_queue_head_t disconnect_wq;
	spinlock_t disconnect_lock;
};

struct dln2_event_cb_entry {
	struct list_head list;
	u16 id;
	struct platform_device *pdev;
	dln2_event_cb_t callback;
};

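/*
 * dln2_register_event_cb - register a handler for unsolicited DLN2 events
 * @pdev: the sub-driver's platform device (dln2-gpio, dln2-i2c or dln2-spi)
 * @id: the event ID to listen for
 * @event_cb: called from URB completion context when a matching event arrives
 *
 * Only one callback may be registered per event ID. Returns 0 on success,
 * -EBUSY if the ID is already taken or -ENOMEM on allocation failure.
 *
 * Sketch of a call from a sub-driver's probe (the event ID below is a
 * hypothetical placeholder, not something defined in this file):
 *
 *	ret = dln2_register_event_cb(pdev, MY_MODULE_EV, my_event_cb);
 *	if (ret)
 *		return ret;
 */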
int dln2_register_event_cb(struct platform_device *pdev, u16 id,
			   dln2_event_cb_t event_cb)
{
	struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
	struct dln2_event_cb_entry *i, *entry;
	unsigned long flags;
	int ret = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id = id;
	entry->callback = event_cb;
	entry->pdev = pdev;

	spin_lock_irqsave(&dln2->event_cb_lock, flags);

	list_for_each_entry(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			ret = -EBUSY;
			break;
		}
	}

	if (!ret)
		list_add_rcu(&entry->list, &dln2->event_cb_list);

	spin_unlock_irqrestore(&dln2->event_cb_lock, flags);

	if (ret)
		kfree(entry);

	return ret;
}
EXPORT_SYMBOL(dln2_register_event_cb);

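/*
 * Remove the callback registered for @id, if any. synchronize_rcu() makes
 * sure dln2_run_event_callbacks() is no longer using the entry before it is
 * freed.
 */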
void dln2_unregister_event_cb(struct platform_device *pdev, u16 id)
{
	struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
	struct dln2_event_cb_entry *i;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&dln2->event_cb_lock, flags);

	list_for_each_entry(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			list_del_rcu(&i->list);
			found = true;
			break;
		}
	}

	spin_unlock_irqrestore(&dln2->event_cb_lock, flags);

	if (found) {
		synchronize_rcu();
		kfree(i);
	}
}
EXPORT_SYMBOL(dln2_unregister_event_cb);

/*
 * Returns true if a valid transfer slot is found. In this case the URB must
 * not be resubmitted immediately in dln2_rx as we need the data when
 * dln2_transfer is woken up. It will be resubmitted there.
 */
static bool dln2_transfer_complete(struct dln2_dev *dln2, struct urb *urb,
				   u16 handle, u16 rx_slot)
{
	struct device *dev = &dln2->interface->dev;
	struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
	struct dln2_rx_context *rxc;
	unsigned long flags;
	bool valid_slot = false;

	if (rx_slot >= DLN2_MAX_RX_SLOTS)
		goto out;

	rxc = &rxs->slots[rx_slot];

	spin_lock_irqsave(&rxs->lock, flags);
	if (rxc->in_use && !rxc->urb) {
		rxc->urb = urb;
		complete(&rxc->done);
		valid_slot = true;
	}
	spin_unlock_irqrestore(&rxs->lock, flags);

out:
	if (!valid_slot)
		dev_warn(dev, "bad/late response %d/%d\n", handle, rx_slot);

	return valid_slot;
}

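/*
 * Dispatch an event received on DLN2_HANDLE_EVENT to the callback registered
 * for its event ID, if any. Called from URB completion context, so the
 * callback list is walked under rcu_read_lock() only.
 */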
static void dln2_run_event_callbacks(struct dln2_dev *dln2, u16 id, u16 echo,
				     void *data, int len)
{
	struct dln2_event_cb_entry *i;

	rcu_read_lock();

	list_for_each_entry_rcu(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			i->callback(i->pdev, echo, data, len);
			break;
		}
	}

	rcu_read_unlock();
}

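/*
 * Completion handler for the RX URBs: validate the header, then demultiplex.
 * Events are dispatched to the registered callbacks; responses are handed to
 * the transfer waiting on the RX slot named by the echo field. The URB is
 * resubmitted here unless it was handed off to a waiter, in which case it is
 * resubmitted in free_rx_slot() once the response data has been consumed.
 */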
static void dln2_rx(struct urb *urb)
{
	struct dln2_dev *dln2 = urb->context;
	struct dln2_header *hdr = urb->transfer_buffer;
	struct device *dev = &dln2->interface->dev;
	u16 id, echo, handle, size;
	u8 *data;
	int len;
	int err;

	switch (urb->status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EPIPE:
		/* this urb is terminated, clean up */
		dev_dbg(dev, "urb shutting down with status %d\n", urb->status);
		return;
	default:
		dev_dbg(dev, "nonzero urb status received %d\n", urb->status);
		goto out;
	}

	if (urb->actual_length < sizeof(struct dln2_header)) {
		dev_err(dev, "short response: %d\n", urb->actual_length);
		goto out;
	}

	handle = le16_to_cpu(hdr->handle);
	id = le16_to_cpu(hdr->id);
	echo = le16_to_cpu(hdr->echo);
	size = le16_to_cpu(hdr->size);

	if (size != urb->actual_length) {
		dev_err(dev, "size mismatch: handle %x cmd %x echo %x size %d actual %d\n",
			handle, id, echo, size, urb->actual_length);
		goto out;
	}

	if (handle >= DLN2_HANDLES) {
		dev_warn(dev, "invalid handle %d\n", handle);
		goto out;
	}

	data = urb->transfer_buffer + sizeof(struct dln2_header);
	len = urb->actual_length - sizeof(struct dln2_header);

	if (handle == DLN2_HANDLE_EVENT) {
		dln2_run_event_callbacks(dln2, id, echo, data, len);
	} else {
		/* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
		if (dln2_transfer_complete(dln2, urb, handle, echo))
			return;
	}

out:
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0)
		dev_err(dev, "failed to resubmit RX URB: %d\n", err);
}

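/*
 * Allocate a transfer buffer consisting of a dln2_header followed by the
 * request payload. On return *obuf_len is updated to the total length,
 * including the header.
 */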
static void *dln2_prep_buf(u16 handle, u16 cmd, u16 echo, const void *obuf,
			   int *obuf_len, gfp_t gfp)
{
	int len;
	void *buf;
	struct dln2_header *hdr;

	len = *obuf_len + sizeof(*hdr);
	buf = kmalloc(len, gfp);
	if (!buf)
		return NULL;

	hdr = (struct dln2_header *)buf;
	hdr->id = cpu_to_le16(cmd);
	hdr->size = cpu_to_le16(len);
	hdr->echo = cpu_to_le16(echo);
	hdr->handle = cpu_to_le16(handle);

	memcpy(buf + sizeof(*hdr), obuf, *obuf_len);

	*obuf_len = len;

	return buf;
}

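/*
 * Build a request for @handle/@cmd and send it synchronously on the bulk OUT
 * endpoint. The echo value becomes part of the header so the response can be
 * routed back to the caller's RX slot.
 */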
static int dln2_send_wait(struct dln2_dev *dln2, u16 handle, u16 cmd, u16 echo,
			  const void *obuf, int obuf_len)
{
	int ret = 0;
	int len = obuf_len;
	void *buf;
	int actual;

	buf = dln2_prep_buf(handle, cmd, echo, obuf, &len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_bulk_msg(dln2->usb_dev,
			   usb_sndbulkpipe(dln2->usb_dev, dln2->ep_out),
			   buf, len, &actual, DLN2_USB_TIMEOUT);

	kfree(buf);

	return ret;
}

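/*
 * Try to claim a free RX slot for @handle. Returns true when the wait in
 * alloc_rx_slot() can stop: either *slot holds a valid slot index or, on
 * disconnect, the -ENODEV error to propagate.
 */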
static bool find_free_slot(struct dln2_dev *dln2, u16 handle, int *slot)
{
	struct dln2_mod_rx_slots *rxs;
	unsigned long flags;

	if (dln2->disconnect) {
		*slot = -ENODEV;
		return true;
	}

	rxs = &dln2->mod_rx_slots[handle];

	spin_lock_irqsave(&rxs->lock, flags);

	*slot = find_first_zero_bit(rxs->bmap, DLN2_MAX_RX_SLOTS);

	if (*slot < DLN2_MAX_RX_SLOTS) {
		struct dln2_rx_context *rxc = &rxs->slots[*slot];

		set_bit(*slot, rxs->bmap);
		rxc->in_use = true;
	}

	spin_unlock_irqrestore(&rxs->lock, flags);

	return *slot < DLN2_MAX_RX_SLOTS;
}

static int alloc_rx_slot(struct dln2_dev *dln2, u16 handle)
{
	int ret;
	int slot;

	/*
	 * No need to timeout here, the wait is bounded by the timeout in
	 * _dln2_transfer.
	 */
	ret = wait_event_interruptible(dln2->mod_rx_slots[handle].wq,
				       find_free_slot(dln2, handle, &slot));
	if (ret < 0)
		return ret;

	return slot;
}

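/*
 * Release an RX slot. If dln2_rx() handed us the response URB, resubmit it
 * now that its data has been consumed, then wake up anyone waiting for a
 * free slot.
 */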
static void free_rx_slot(struct dln2_dev *dln2, u16 handle, int slot)
{
	struct dln2_mod_rx_slots *rxs;
	struct urb *urb = NULL;
	unsigned long flags;
	struct dln2_rx_context *rxc;

	rxs = &dln2->mod_rx_slots[handle];

	spin_lock_irqsave(&rxs->lock, flags);

	clear_bit(slot, rxs->bmap);

	rxc = &rxs->slots[slot];
	rxc->in_use = false;
	urb = rxc->urb;
	rxc->urb = NULL;
	reinit_completion(&rxc->done);

	spin_unlock_irqrestore(&rxs->lock, flags);

	if (urb) {
		int err;
		struct device *dev = &dln2->interface->dev;

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err < 0)
			dev_err(dev, "failed to resubmit RX URB: %d\n", err);
	}

	wake_up_interruptible(&rxs->wq);
}

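/*
 * Send a request on @handle and wait for the matching response. An RX slot is
 * claimed and its index is carried in the echo header field so dln2_rx() can
 * route the response back to us. On success the response payload (everything
 * after the result word) is copied to @ibuf and *ibuf_len is updated to the
 * number of bytes copied.
 */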
static int _dln2_transfer(struct dln2_dev *dln2, u16 handle, u16 cmd,
			  const void *obuf, unsigned obuf_len,
			  void *ibuf, unsigned *ibuf_len)
{
	int ret = 0;
	int rx_slot;
	struct dln2_response *rsp;
	struct dln2_rx_context *rxc;
	struct device *dev = &dln2->interface->dev;
	const unsigned long timeout = msecs_to_jiffies(DLN2_USB_TIMEOUT);
	struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
	int size;

	spin_lock(&dln2->disconnect_lock);
	if (!dln2->disconnect)
		dln2->active_transfers++;
	else
		ret = -ENODEV;
	spin_unlock(&dln2->disconnect_lock);

	if (ret)
		return ret;

	rx_slot = alloc_rx_slot(dln2, handle);
	if (rx_slot < 0) {
		ret = rx_slot;
		goto out_decr;
	}

	ret = dln2_send_wait(dln2, handle, cmd, rx_slot, obuf, obuf_len);
	if (ret < 0) {
		dev_err(dev, "USB write failed: %d\n", ret);
		goto out_free_rx_slot;
	}

	rxc = &rxs->slots[rx_slot];

	ret = wait_for_completion_interruptible_timeout(&rxc->done, timeout);
	if (ret <= 0) {
		if (!ret)
			ret = -ETIMEDOUT;
		goto out_free_rx_slot;
	} else {
		ret = 0;
	}

	if (dln2->disconnect) {
		ret = -ENODEV;
		goto out_free_rx_slot;
	}

	/* if we got here we know that the response header has been checked */
	rsp = rxc->urb->transfer_buffer;
	size = le16_to_cpu(rsp->hdr.size);

	if (size < sizeof(*rsp)) {
		ret = -EPROTO;
		goto out_free_rx_slot;
	}

	if (le16_to_cpu(rsp->result) > 0x80) {
		dev_dbg(dev, "%d received response with error %d\n",
			handle, le16_to_cpu(rsp->result));
		ret = -EREMOTEIO;
		goto out_free_rx_slot;
	}

	if (!ibuf)
		goto out_free_rx_slot;

	if (*ibuf_len > size - sizeof(*rsp))
		*ibuf_len = size - sizeof(*rsp);

	memcpy(ibuf, rsp + 1, *ibuf_len);

out_free_rx_slot:
	free_rx_slot(dln2, handle, rx_slot);
out_decr:
	spin_lock(&dln2->disconnect_lock);
	dln2->active_transfers--;
	spin_unlock(&dln2->disconnect_lock);
	if (dln2->disconnect)
		wake_up(&dln2->disconnect_wq);

	return ret;
}

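/*
 * dln2_transfer - issue a command on behalf of a sub-driver and wait for the
 * response. The module handle is taken from the cell's platform data, so
 * callers only supply their platform device, the command ID and the request
 * and response buffers; on return *ibuf_len holds the number of bytes copied.
 *
 * A minimal usage sketch (the command ID and payload layout below are purely
 * illustrative; real values are defined by the individual sub-drivers):
 *
 *	__le16 count;
 *	unsigned int len = sizeof(count);
 *	int ret = dln2_transfer(pdev, MY_MODULE_CMD, NULL, 0, &count, &len);
 *	if (ret < 0 || len < sizeof(count))
 *		return ret < 0 ? ret : -EREMOTEIO;
 */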
int dln2_transfer(struct platform_device *pdev, u16 cmd,
		  const void *obuf, unsigned obuf_len,
		  void *ibuf, unsigned *ibuf_len)
{
	struct dln2_platform_data *dln2_pdata;
	struct dln2_dev *dln2;
	u16 handle;

	dln2 = dev_get_drvdata(pdev->dev.parent);
	dln2_pdata = dev_get_platdata(&pdev->dev);
	handle = dln2_pdata->handle;

	return _dln2_transfer(dln2, handle, cmd, obuf, obuf_len, ibuf,
			      ibuf_len);
}
EXPORT_SYMBOL(dln2_transfer);

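/* Make sure the device reports the hardware ID this driver was written for. */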
static int dln2_check_hw(struct dln2_dev *dln2)
{
	int ret;
	__le32 hw_type;
	int len = sizeof(hw_type);

	ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_VER,
			     NULL, 0, &hw_type, &len);
	if (ret < 0)
		return ret;
	if (len < sizeof(hw_type))
		return -EREMOTEIO;

	if (le32_to_cpu(hw_type) != DLN2_HW_ID) {
		dev_err(&dln2->interface->dev, "Device ID 0x%x not supported\n",
			le32_to_cpu(hw_type));
		return -ENODEV;
	}

	return 0;
}

static int dln2_print_serialno(struct dln2_dev *dln2)
{
	int ret;
	__le32 serial_no;
	int len = sizeof(serial_no);
	struct device *dev = &dln2->interface->dev;

	ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_SN, NULL, 0,
			     &serial_no, &len);
	if (ret < 0)
		return ret;
	if (len < sizeof(serial_no))
		return -EREMOTEIO;

	dev_info(dev, "Diolan DLN2 serial %u\n", le32_to_cpu(serial_no));

	return 0;
}

static int dln2_hw_init(struct dln2_dev *dln2)
{
	int ret;

	ret = dln2_check_hw(dln2);
	if (ret < 0)
		return ret;

	return dln2_print_serialno(dln2);
}

static void dln2_free_rx_urbs(struct dln2_dev *dln2)
{
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		usb_free_urb(dln2->rx_urb[i]);
		kfree(dln2->rx_buf[i]);
	}
}

static void dln2_stop_rx_urbs(struct dln2_dev *dln2)
{
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++)
		usb_kill_urb(dln2->rx_urb[i]);
}

static void dln2_free(struct dln2_dev *dln2)
{
	dln2_free_rx_urbs(dln2);
	usb_put_dev(dln2->usb_dev);
	kfree(dln2);
}

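/*
 * Allocate the RX URBs and their buffers and point them at the bulk IN
 * endpoint; they are submitted later by dln2_start_rx_urbs(). On error the
 * caller cleans up through dln2_free().
 */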
static int dln2_setup_rx_urbs(struct dln2_dev *dln2,
			      struct usb_host_interface *hostif)
{
	int i;
	const int rx_max_size = DLN2_RX_BUF_SIZE;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		dln2->rx_buf[i] = kmalloc(rx_max_size, GFP_KERNEL);
		if (!dln2->rx_buf[i])
			return -ENOMEM;

		dln2->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (!dln2->rx_urb[i])
			return -ENOMEM;

		usb_fill_bulk_urb(dln2->rx_urb[i], dln2->usb_dev,
				  usb_rcvbulkpipe(dln2->usb_dev, dln2->ep_in),
				  dln2->rx_buf[i], rx_max_size, dln2_rx, dln2);
	}

	return 0;
}

static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp)
{
	struct device *dev = &dln2->interface->dev;
	int ret;
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		ret = usb_submit_urb(dln2->rx_urb[i], gfp);
		if (ret < 0) {
			dev_err(dev, "failed to submit RX URB: %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static struct dln2_platform_data dln2_pdata_gpio = {
	.handle = DLN2_HANDLE_GPIO,
};

/* Only one I2C port seems to be supported on current hardware */
static struct dln2_platform_data dln2_pdata_i2c = {
	.handle = DLN2_HANDLE_I2C,
	.port = 0,
};

/* Only one SPI port supported */
static struct dln2_platform_data dln2_pdata_spi = {
	.handle = DLN2_HANDLE_SPI,
	.port = 0,
};

static const struct mfd_cell dln2_devs[] = {
	{
		.name = "dln2-gpio",
		.platform_data = &dln2_pdata_gpio,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-i2c",
		.platform_data = &dln2_pdata_i2c,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-spi",
		.platform_data = &dln2_pdata_spi,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
};

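/*
 * Quiesce the device: refuse new transfers, complete all RX slots currently
 * being waited on so in-flight transfers bail out, wait for them to drain and
 * then kill the RX URBs. Used on disconnect and on suspend.
 */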
static void dln2_stop(struct dln2_dev *dln2)
{
	int i, j;

	/* don't allow starting new transfers */
	spin_lock(&dln2->disconnect_lock);
	dln2->disconnect = true;
	spin_unlock(&dln2->disconnect_lock);

	/* cancel in progress transfers */
	for (i = 0; i < DLN2_HANDLES; i++) {
		struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[i];
		unsigned long flags;

		spin_lock_irqsave(&rxs->lock, flags);

		/* cancel all response waiters */
		for (j = 0; j < DLN2_MAX_RX_SLOTS; j++) {
			struct dln2_rx_context *rxc = &rxs->slots[j];

			if (rxc->in_use)
				complete(&rxc->done);
		}

		spin_unlock_irqrestore(&rxs->lock, flags);
	}

	/* wait for transfers to end */
	wait_event(dln2->disconnect_wq, !dln2->active_transfers);

	dln2_stop_rx_urbs(dln2);
}

static void dln2_disconnect(struct usb_interface *interface)
{
	struct dln2_dev *dln2 = usb_get_intfdata(interface);

	dln2_stop(dln2);

	mfd_remove_devices(&interface->dev);

	dln2_free(dln2);
}

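/*
 * The DLN2 command interface is interface 0 with at least two bulk endpoints:
 * endpoint 0 is used here for sending requests, endpoint 1 for receiving
 * responses and events.
 */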
static int dln2_probe(struct usb_interface *interface,
		      const struct usb_device_id *usb_id)
{
	struct usb_host_interface *hostif = interface->cur_altsetting;
	struct device *dev = &interface->dev;
	struct dln2_dev *dln2;
	int ret;
	int i, j;

	if (hostif->desc.bInterfaceNumber != 0 ||
	    hostif->desc.bNumEndpoints < 2)
		return -ENODEV;

	dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
	if (!dln2)
		return -ENOMEM;

	dln2->ep_out = hostif->endpoint[0].desc.bEndpointAddress;
	dln2->ep_in = hostif->endpoint[1].desc.bEndpointAddress;
	dln2->usb_dev = usb_get_dev(interface_to_usbdev(interface));
	dln2->interface = interface;
	usb_set_intfdata(interface, dln2);
	init_waitqueue_head(&dln2->disconnect_wq);

	for (i = 0; i < DLN2_HANDLES; i++) {
		init_waitqueue_head(&dln2->mod_rx_slots[i].wq);
		spin_lock_init(&dln2->mod_rx_slots[i].lock);
		for (j = 0; j < DLN2_MAX_RX_SLOTS; j++)
			init_completion(&dln2->mod_rx_slots[i].slots[j].done);
	}

	spin_lock_init(&dln2->event_cb_lock);
	spin_lock_init(&dln2->disconnect_lock);
	INIT_LIST_HEAD(&dln2->event_cb_list);

	ret = dln2_setup_rx_urbs(dln2, hostif);
	if (ret)
		goto out_free;

	ret = dln2_start_rx_urbs(dln2, GFP_KERNEL);
	if (ret)
		goto out_stop_rx;

	ret = dln2_hw_init(dln2);
	if (ret < 0) {
		dev_err(dev, "failed to initialize hardware\n");
		goto out_stop_rx;
	}

	ret = mfd_add_hotplug_devices(dev, dln2_devs, ARRAY_SIZE(dln2_devs));
	if (ret != 0) {
		dev_err(dev, "failed to add mfd devices to core\n");
		goto out_stop_rx;
	}

	return 0;

out_stop_rx:
	dln2_stop_rx_urbs(dln2);

out_free:
	dln2_free(dln2);

	return ret;
}

static int dln2_suspend(struct usb_interface *iface, pm_message_t message)
{
	struct dln2_dev *dln2 = usb_get_intfdata(iface);

	dln2_stop(dln2);

	return 0;
}

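/*
 * Undo dln2_stop(): clear the disconnect flag set during suspend and restart
 * the RX URBs. GFP_NOIO is used because we are on the resume path.
 */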
static int dln2_resume(struct usb_interface *iface)
{
	struct dln2_dev *dln2 = usb_get_intfdata(iface);

	dln2->disconnect = false;

	return dln2_start_rx_urbs(dln2, GFP_NOIO);
}

static const struct usb_device_id dln2_table[] = {
	{ USB_DEVICE(0xa257, 0x2013) },
	{ }
};

MODULE_DEVICE_TABLE(usb, dln2_table);

static struct usb_driver dln2_driver = {
	.name = "dln2",
	.probe = dln2_probe,
	.disconnect = dln2_disconnect,
	.id_table = dln2_table,
	.suspend = dln2_suspend,
	.resume = dln2_resume,
};

module_usb_driver(dln2_driver);

MODULE_AUTHOR("Octavian Purdila <octavian.purdila@intel.com>");
MODULE_DESCRIPTION("Core driver for the Diolan DLN2 interface adapter");
MODULE_LICENSE("GPL v2");