/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/usb/cdc.h>

#include "gdm_mux.h"

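/* MUX packet type carried in the frame header for each TTY index */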
static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};

#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = \
		USB_DEVICE_ID_MATCH_DEVICE |\
		USB_DEVICE_ID_MATCH_INT_CLASS |\
		USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
	{ USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
	{ USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */
	{}
};

MODULE_DEVICE_TABLE(usb, id_table);

static int packet_type_to_index(u16 packetType)
{
	int i;

	for (i = 0; i < TTY_MAX_COUNT; i++) {
		if (packet_type[i] == packetType)
			return i;
	}

	return -1;
}

static struct mux_tx *alloc_mux_tx(int len)
{
	struct mux_tx *t;

	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
		return NULL;
	}

	return t;
}

static void free_mux_tx(struct mux_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

static struct mux_rx *alloc_mux_rx(void)
{
	struct mux_rx *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->urb = usb_alloc_urb(0, GFP_KERNEL);
	r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_KERNEL);
	if (!r->urb || !r->buf) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
		return NULL;
	}

	return r;
}

static void free_mux_rx(struct mux_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}

static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
{
	struct mux_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->free_list_lock, flags);

	if (list_empty(&rx->rx_free_list)) {
		spin_unlock_irqrestore(&rx->free_list_lock, flags);
		return NULL;
	}

	r = list_entry(rx->rx_free_list.prev, struct mux_rx, free_list);
	list_del(&r->free_list);

	spin_unlock_irqrestore(&rx->free_list_lock, flags);

	return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_add_tail(&r->free_list, &rx->rx_free_list);
	spin_unlock_irqrestore(&rx->free_list_lock, flags);
}

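/*
 * Walk the MUX packets in a completed receive buffer and hand each
 * payload to the registered callback.  Stops early and records the
 * current offset if the callback cannot accept more data.
 */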
static int up_to_host(struct mux_rx *r)
{
	struct mux_dev *mux_dev = r->mux_dev;
	struct mux_pkt_header *mux_header;
	unsigned int start_flag;
	unsigned int payload_size;
	unsigned short packet_type;
	int total_len;
	u32 packet_size_sum = r->offset;
	int index;
	int ret = TO_HOST_INVALID_PACKET;
	int len = r->len;

	while (1) {
		mux_header = (struct mux_pkt_header *)(r->buf +
						       packet_size_sum);
		start_flag = __le32_to_cpu(mux_header->start_flag);
		payload_size = __le32_to_cpu(mux_header->payload_size);
		packet_type = __le16_to_cpu(mux_header->packet_type);

		if (start_flag != START_FLAG) {
			pr_err("invalid START_FLAG %x\n", start_flag);
			break;
		}

		total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);

		if (len - packet_size_sum < total_len) {
			pr_err("invalid payload : %d %d %04x\n",
			       payload_size, len, packet_type);
			break;
		}

		index = packet_type_to_index(packet_type);
		if (index < 0) {
			pr_err("invalid index %d\n", index);
			break;
		}

		ret = r->callback(mux_header->data,
				  payload_size,
				  index,
				  mux_dev->tty_dev,
				  RECV_PACKET_PROCESS_CONTINUE);
		if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
			r->offset += packet_size_sum;
			break;
		}

		packet_size_sum += total_len;
		if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
			ret = r->callback(NULL,
					  0,
					  index,
					  mux_dev->tty_dev,
					  RECV_PACKET_PROCESS_COMPLETE);
			break;
		}
	}

	return ret;
}

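/*
 * Work handler: drain rx->to_host_list and push each completed
 * receive buffer up to the TTY layer via up_to_host().
 */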
static void do_rx(struct work_struct *work)
{
	struct mux_dev *mux_dev =
		container_of(work, struct mux_dev, work_rx.work);
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;
	int ret = 0;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next, struct mux_rx,
			       to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		ret = up_to_host(r);
		if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
			pr_err("failed to send mux data to host\n");
		else
			put_rx_struct(rx, r);
	}
}

static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct mux_rx	*r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list,
				 rx_submit_list) {
		if (r == r_remove)
			list_del(&r->rx_submit_list);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);
}

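/*
 * Bulk-in URB completion: on success queue the buffer for do_rx(),
 * otherwise return it to the free list.
 */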
static void gdm_mux_rcv_complete(struct urb *urb)
{
	struct mux_rx *r = urb->context;
	struct mux_dev *mux_dev = r->mux_dev;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;

	remove_rx_submit_list(r, rx);

	if (urb->status) {
		if (mux_dev->usb_state == PM_NORMAL)
			dev_err(&urb->dev->dev, "%s: urb status error %d\n",
				__func__, urb->status);
		put_rx_struct(rx, r);
	} else {
		r->len = r->urb->actual_length;
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		schedule_work(&mux_dev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	}
}

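/*
 * Queue one bulk-in URB on endpoint 0x86 (bulk IN, endpoint 6) for the
 * next MUX frame; received payloads are delivered through @cb.
 */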
static int gdm_mux_recv(void *priv_dev,
			int (*cb)(void *data, int len, int tty_index,
				  struct tty_dev *tty_dev, int complete))
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;
	int ret;

	if (!usbdev) {
		pr_err("device is disconnected\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx);
	if (!r) {
		pr_err("get_rx_struct fail\n");
		return -ENOMEM;
	}

	r->offset = 0;
	r->mux_dev = (void *)mux_dev;
	r->callback = cb;
	mux_dev->rx_cb = cb;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x86),
			  r->buf,
			  MUX_RX_MAX_SIZE,
			  gdm_mux_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	ret = usb_submit_urb(r->urb, GFP_KERNEL);

	if (ret) {
		spin_lock_irqsave(&rx->submit_list_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);

		put_rx_struct(rx, r);

		pr_err("usb_submit_urb ret=%d\n", ret);
	}

	usb_mark_last_busy(usbdev);

	return ret;
}

static void gdm_mux_send_complete(struct urb *urb)
{
	struct mux_tx *t = urb->context;

	if (urb->status == -ECONNRESET) {
		dev_info(&urb->dev->dev, "CONNRESET\n");
		free_mux_tx(t);
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_mux_tx(t);
}

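/*
 * Prepend a MUX header to @data, pad the frame to a 4-byte boundary
 * and submit it as a bulk-out URB on endpoint 5.
 */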
static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
			void (*cb)(void *data), void *cb_data)
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	struct mux_pkt_header *mux_header;
	struct mux_tx *t;
	static u32 seq_num = 1;
	int total_len;
	int ret;
	unsigned long flags;

	if (mux_dev->usb_state == PM_SUSPEND) {
		ret = usb_autopm_get_interface(mux_dev->intf);
		if (!ret)
			usb_autopm_put_interface(mux_dev->intf);
	}

	spin_lock_irqsave(&mux_dev->write_lock, flags);

	total_len = ALIGN(MUX_HEADER_SIZE + len, 4);

	t = alloc_mux_tx(total_len);
	if (!t) {
		pr_err("alloc_mux_tx fail\n");
		spin_unlock_irqrestore(&mux_dev->write_lock, flags);
		return -ENOMEM;
	}

	mux_header = (struct mux_pkt_header *)t->buf;
	mux_header->start_flag = __cpu_to_le32(START_FLAG);
	mux_header->seq_num = __cpu_to_le32(seq_num++);
	mux_header->payload_size = __cpu_to_le32((u32)len);
	mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);

	memcpy(t->buf + MUX_HEADER_SIZE, data, len);
	memset(t->buf + MUX_HEADER_SIZE + len, 0,
	       total_len - MUX_HEADER_SIZE - len);

	t->len = total_len;
	t->callback = cb;
	t->cb_data = cb_data;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 5),
			  t->buf,
			  total_len,
			  gdm_mux_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	spin_unlock_irqrestore(&mux_dev->write_lock, flags);

	if (ret)
		pr_err("usb_submit_urb Error: %d\n", ret);

	usb_mark_last_busy(usbdev);

	return ret;
}

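/*
 * Send a class control request (USB_RT_ACM) to the device.  wIndex is
 * hard-coded to 2, the interface number gdm_mux_probe() binds to.
 */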
static int gdm_mux_send_control(void *priv_dev, int request, int value,
				void *buf, int len)
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	int ret;

	ret = usb_control_msg(usbdev,
			      usb_sndctrlpipe(usbdev, 0),
			      request,
			      USB_RT_ACM,
			      value,
			      2,
			      buf,
			      len,
			      5000);

	if (ret < 0)
		pr_err("usb_control_msg error: %d\n", ret);

	return min(ret, 0);
}

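/*
 * Cancel pending receive work, kill outstanding receive URBs and free
 * every buffer on the free and to-host lists.
 */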
static void release_usb(struct mux_dev *mux_dev)
{
	struct rx_cxt		*rx = &mux_dev->rx;
	struct mux_rx		*r, *r_next;
	unsigned long		flags;

	cancel_delayed_work(&mux_dev->work_rx);

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
		list_del(&r->free_list);
		free_mux_rx(r);
	}
	spin_unlock_irqrestore(&rx->free_list_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->mux_dev == (void *)mux_dev) {
			list_del(&r->to_host_list);
			free_mux_rx(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

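/*
 * Initialize the receive-context locks and lists and pre-allocate
 * MAX_ISSUE_NUM * 2 receive buffers.
 */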
static int init_usb(struct mux_dev *mux_dev)
{
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	int ret = 0;
	int i;

	spin_lock_init(&mux_dev->write_lock);
	INIT_LIST_HEAD(&rx->to_host_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->rx_free_list);
	spin_lock_init(&rx->to_host_lock);
	spin_lock_init(&rx->submit_list_lock);
	spin_lock_init(&rx->free_list_lock);

	for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
		r = alloc_mux_rx();
		if (!r) {
			ret = -ENOMEM;
			break;
		}

		list_add(&r->free_list, &rx->rx_free_list);
	}

	INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);

	return ret;
}

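/*
 * Bind to interface 2 of a matching device, set up the MUX receive
 * context and register the LTE TTY devices.
 */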
static int gdm_mux_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct mux_dev *mux_dev;
	struct tty_dev *tty_dev;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	int ret;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;

	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber != 2)
		return -ENODEV;

	mux_dev = kzalloc(sizeof(*mux_dev), GFP_KERNEL);
	if (!mux_dev)
		return -ENOMEM;

	tty_dev = kzalloc(sizeof(*tty_dev), GFP_KERNEL);
	if (!tty_dev) {
		ret = -ENOMEM;
		goto err_free_mux;
	}

	mux_dev->usbdev = usbdev;
	mux_dev->control_intf = intf;

	ret = init_usb(mux_dev);
	if (ret)
		goto err_free_usb;

	tty_dev->priv_dev = (void *)mux_dev;
	tty_dev->send_func = gdm_mux_send;
	tty_dev->recv_func = gdm_mux_recv;
	tty_dev->send_control = gdm_mux_send_control;

	ret = register_lte_tty_device(tty_dev, &intf->dev);
	if (ret)
		goto err_unregister_tty;

	mux_dev->tty_dev = tty_dev;

	mux_dev->intf = intf;
	mux_dev->usb_state = PM_NORMAL;

	usb_get_dev(usbdev);
	usb_set_intfdata(intf, tty_dev);

	return 0;

err_unregister_tty:
	unregister_lte_tty_device(tty_dev);
err_free_usb:
	release_usb(mux_dev);
	kfree(tty_dev);
err_free_mux:
	kfree(mux_dev);

	return ret;
}

static void gdm_mux_disconnect(struct usb_interface *intf)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	tty_dev = usb_get_intfdata(intf);

	mux_dev = tty_dev->priv_dev;

	release_usb(mux_dev);
	unregister_lte_tty_device(tty_dev);

	kfree(mux_dev);
	kfree(tty_dev);

	usb_put_dev(usbdev);
}

static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	struct rx_cxt *rx;
	struct mux_rx *r, *r_next;
	unsigned long flags;

	tty_dev = usb_get_intfdata(intf);
	mux_dev = tty_dev->priv_dev;
	rx = &mux_dev->rx;

	cancel_work_sync(&mux_dev->work_rx.work);

	if (mux_dev->usb_state != PM_NORMAL) {
		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
		return -EINVAL;
	}

	mux_dev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	return 0;
}

static int gdm_mux_resume(struct usb_interface *intf)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	u8 i;

	tty_dev = usb_get_intfdata(intf);
	mux_dev = tty_dev->priv_dev;

	if (mux_dev->usb_state != PM_SUSPEND) {
		dev_err(intf->usb_dev, "usb resume - invalid state\n");
		return -EINVAL;
	}

	mux_dev->usb_state = PM_NORMAL;

	for (i = 0; i < MAX_ISSUE_NUM; i++)
		gdm_mux_recv(mux_dev, mux_dev->rx_cb);

	return 0;
}

static struct usb_driver gdm_mux_driver = {
	.name = "gdm_mux",
	.probe = gdm_mux_probe,
	.disconnect = gdm_mux_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_mux_suspend,
	.resume = gdm_mux_resume,
	.reset_resume = gdm_mux_resume,
};

static int __init gdm_usb_mux_init(void)
{
	int ret;

	ret = register_lte_tty_driver();
	if (ret)
		return ret;

	return usb_register(&gdm_mux_driver);
}

static void __exit gdm_usb_mux_exit(void)
{
	usb_deregister(&gdm_mux_driver);

	unregister_lte_tty_driver();
}

module_init(gdm_usb_mux_init);
module_exit(gdm_usb_mux_exit);

MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
MODULE_LICENSE("GPL");