// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/usb/cdc.h>
#include <linux/wait.h>
#include <linux/if_ether.h>
#include <linux/pm_runtime.h>

#include "gdm_usb.h"
#include "gdm_lte.h"
#include "hci.h"
#include "hci_packet.h"
#include "gdm_endian.h"

#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		USB_DEVICE_ID_MATCH_INT_CLASS | \
		USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET

#define USB_DEVICE_MASS_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		USB_DEVICE_ID_MATCH_INT_INFO,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceSubClass = USB_SC_SCSI, \
	.bInterfaceClass = USB_CLASS_MASS_STORAGE,\
	.bInterfaceProtocol = USB_PR_BULK

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table);

static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);

static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context);

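/*
 * Ask the device for its MAC address: send an HCI LTE_GET_INFORMATION
 * request with the MAC_ADDRESS type over the bulk-out endpoint.  The
 * reply arrives asynchronously and is handled by set_mac_address().
 */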
static int request_mac_address(struct lte_udev *udev)
{
	struct hci_packet *hci;
	struct usb_device *usbdev = udev->usbdev;
	int actual;
	int ret = -1;

	hci = kmalloc(struct_size(hci, data, 1), GFP_KERNEL);
	if (!hci)
		return -ENOMEM;

	hci->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_GET_INFORMATION);
	hci->len = gdm_cpu_to_dev16(udev->gdm_ed, 1);
	hci->data[0] = MAC_ADDRESS;

	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), hci, 5,
			   &actual, 1000);

	udev->request_mac_addr = 1;
	kfree(hci);

	return ret;
}

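/*
 * Allocate a TX descriptor plus its URB and buffer.  The buffer is grown
 * by one byte when the length is a multiple of 512, presumably so a bulk
 * transfer never ends exactly on the max-packet boundary (which would
 * otherwise need a zero-length packet to terminate it).
 */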
static struct usb_tx *alloc_tx_struct(int len)
{
	struct usb_tx *t = NULL;
	int ret = 0;

	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!(len % 512))
		len++;

	t->buf = kmalloc(len, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		ret = -ENOMEM;
		goto out;
	}

out:
	if (ret < 0) {
		if (t) {
			usb_free_urb(t->urb);
			kfree(t->buf);
			kfree(t);
		}
		return NULL;
	}

	return t;
}

static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
{
	struct usb_tx_sdu *t_sdu;

	t_sdu = kzalloc(sizeof(*t_sdu), GFP_KERNEL);
	if (!t_sdu)
		return NULL;

	t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_KERNEL);
	if (!t_sdu->buf) {
		kfree(t_sdu);
		return NULL;
	}

	return t_sdu;
}

static void free_tx_struct(struct usb_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
{
	if (t_sdu) {
		kfree(t_sdu->buf);
		kfree(t_sdu);
	}
}

static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
{
	struct usb_tx_sdu *t_sdu;

	if (list_empty(&tx->free_list))
		return NULL;

	t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
	list_del(&t_sdu->list);

	tx->avail_count--;

	*no_spc = list_empty(&tx->free_list) ? 1 : 0;

	return t_sdu;
}

static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
{
	list_add_tail(&t_sdu->list, &tx->free_list);
	tx->avail_count++;
}

static struct usb_rx *alloc_rx_struct(void)
{
	struct usb_rx *r = NULL;
	int ret = 0;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto out;
	}

	r->urb = usb_alloc_urb(0, GFP_KERNEL);
	r->buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
	if (!r->urb || !r->buf) {
		ret = -ENOMEM;
		goto out;
	}
out:

	if (ret < 0) {
		if (r) {
			usb_free_urb(r->urb);
			kfree(r->buf);
			kfree(r);
		}
		return NULL;
	}

	return r;
}

static void free_rx_struct(struct usb_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}

static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
{
	struct usb_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	if (list_empty(&rx->free_list)) {
		spin_unlock_irqrestore(&rx->rx_lock, flags);
		return NULL;
	}

	r = list_entry(rx->free_list.next, struct usb_rx, free_list);
	list_del(&r->free_list);

	rx->avail_count--;

	*no_spc = list_empty(&rx->free_list) ? 1 : 0;

	spin_unlock_irqrestore(&rx->rx_lock, flags);

	return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	list_add_tail(&r->free_list, &rx->free_list);
	rx->avail_count++;

	spin_unlock_irqrestore(&rx->rx_lock, flags);
}

static void release_usb(struct lte_udev *udev)
{
	struct rx_cxt	*rx = &udev->rx;
	struct tx_cxt	*tx = &udev->tx;
	struct usb_tx	*t, *t_next;
	struct usb_rx	*r, *r_next;
	struct usb_tx_sdu	*t_sdu, *t_sdu_next;
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}

	list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
		list_del(&t->list);
		free_tx_struct(t);
	}

	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	spin_lock_irqsave(&rx->rx_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) {
		list_del(&r->free_list);
		free_rx_struct(r);
	}
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->index == (void *)udev) {
			list_del(&r->to_host_list);
			free_rx_struct(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

static int init_usb(struct lte_udev *udev)
{
	int ret = 0;
	int i;
	struct tx_cxt *tx = &udev->tx;
	struct rx_cxt *rx = &udev->rx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct usb_rx *r = NULL;

	udev->send_complete = 1;
	udev->tx_stop = 0;
	udev->request_mac_addr = 0;
	udev->usb_state = PM_NORMAL;

	INIT_LIST_HEAD(&tx->sdu_list);
	INIT_LIST_HEAD(&tx->hci_list);
	INIT_LIST_HEAD(&tx->free_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->free_list);
	INIT_LIST_HEAD(&rx->to_host_list);
	spin_lock_init(&tx->lock);
	spin_lock_init(&rx->rx_lock);
	spin_lock_init(&rx->submit_lock);
	spin_lock_init(&rx->to_host_lock);

	tx->avail_count = 0;
	rx->avail_count = 0;

	udev->rx_cb = NULL;

	for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
		t_sdu = alloc_tx_sdu_struct();
		if (!t_sdu) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&t_sdu->list, &tx->free_list);
		tx->avail_count++;
	}

	for (i = 0; i < MAX_RX_SUBMIT_COUNT * 2; i++) {
		r = alloc_rx_struct();
		if (!r) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&r->free_list, &rx->free_list);
		rx->avail_count++;
	}
	INIT_DELAYED_WORK(&udev->work_tx, do_tx);
	INIT_DELAYED_WORK(&udev->work_rx, do_rx);
	return 0;
fail:
	release_usb(udev);
	return ret;
}

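/*
 * Handle the LTE_GET_INFORMATION reply carrying the MAC address TLV and
 * register the LTE network device with it.  Returns 1 when the packet
 * was consumed here, 0 to let the normal rx callback handle it.
 */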
static int set_mac_address(u8 *data, void *arg)
{
	struct phy_dev *phy_dev = arg;
	struct lte_udev *udev = phy_dev->priv_dev;
	struct tlv *tlv = (struct tlv *)data;
	u8 mac_address[ETH_ALEN] = {0, };

	if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
		memcpy(mac_address, tlv->data, tlv->len);

		if (register_lte_device(phy_dev,
					&udev->intf->dev, mac_address) < 0)
			pr_err("register lte device failed\n");

		udev->request_mac_addr = 0;

		return 1;
	}

	return 0;
}

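/*
 * RX work: drain the to_host_list, dispatch each completed buffer to the
 * registered callback (or to set_mac_address() for the MAC information
 * reply), then resubmit a receive URB for the slot that was freed.
 */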
static void do_rx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_rx.work);
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	struct hci_packet *hci;
	struct phy_dev *phy_dev;
	u16 cmd_evt;
	int ret;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next,
			       struct usb_rx, to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		phy_dev = r->cb_data;
		udev = phy_dev->priv_dev;
		hci = (struct hci_packet *)r->buf;
		cmd_evt = gdm_dev16_to_cpu(udev->gdm_ed, hci->cmd_evt);

		switch (cmd_evt) {
		case LTE_GET_INFORMATION_RESULT:
			if (set_mac_address(hci->data, r->cb_data) == 0) {
				r->callback(r->cb_data,
					    r->buf,
					    r->urb->actual_length,
					    KERNEL_THREAD);
			}
			break;

		default:
			if (r->callback) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);

				if (ret == -EAGAIN)
					pr_err("failed to send received data\n");
			}
			break;
		}

		put_rx_struct(rx, r);

		gdm_usb_recv(udev,
			     r->callback,
			     r->cb_data,
			     USB_COMPLETE);
	}
}

static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct usb_rx	*r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next,
				 &rx->rx_submit_list, rx_submit_list) {
		if (r == r_remove) {
			list_del(&r->rx_submit_list);
			break;
		}
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);
}

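/*
 * Bulk-in URB completion: move the buffer from the submit list to the
 * to_host_list and kick the RX work item; on error (outside of suspend)
 * log the status and return the buffer to the free list.
 */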
static void gdm_usb_rcv_complete(struct urb *urb)
{
	struct usb_rx *r = urb->context;
	struct rx_cxt *rx = r->rx;
	unsigned long flags;
	struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
	struct usb_device *usbdev = udev->usbdev;

	remove_rx_submit_list(r, rx);

	if (!urb->status && r->callback) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		schedule_work(&udev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	} else {
		if (urb->status && udev->usb_state == PM_NORMAL)
			dev_err(&urb->dev->dev, "%s: urb status error %d\n",
				__func__, urb->status);

		put_rx_struct(rx, r);
	}

	usb_mark_last_busy(usbdev);
}

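/*
 * Queue one receive buffer: take an rx descriptor from the free list,
 * fill a bulk-in URB for it and submit it (GFP_KERNEL from thread
 * context, GFP_ATOMIC from completion context).
 */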
static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context)
{
	struct lte_udev *udev = priv_dev;
	struct usb_device *usbdev = udev->usbdev;
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	int no_spc;
	int ret;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("invalid device\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx, &no_spc);
	if (!r) {
		pr_err("Out of Memory\n");
		return -ENOMEM;
	}

	udev->rx_cb = cb;
	r->callback = cb;
	r->cb_data = cb_data;
	r->index = (void *)udev;
	r->rx = rx;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x83),
			  r->buf,
			  RX_BUF_SIZE,
			  gdm_usb_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	if (context == KERNEL_THREAD)
		ret = usb_submit_urb(r->urb, GFP_KERNEL);
	else
		ret = usb_submit_urb(r->urb, GFP_ATOMIC);

	if (ret) {
		spin_lock_irqsave(&rx->submit_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_lock, flags);

		pr_err("usb_submit_urb failed (%p)\n", r);
		put_rx_struct(rx, r);
	}

	return ret;
}

static void gdm_usb_send_complete(struct urb *urb)
{
	struct usb_tx *t = urb->context;
	struct tx_cxt *tx = t->tx;
	struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
	unsigned long flags;

	if (urb->status == -ECONNRESET) {
		dev_info(&urb->dev->dev, "CONNRESET\n");
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_tx_struct(t);

	spin_lock_irqsave(&tx->lock, flags);
	udev->send_complete = 1;
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);
}

static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
{
	int ret = 0;

	if (!(len % 512))
		len++;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 2),
			  t->buf,
			  len,
			  gdm_usb_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	if (ret)
		dev_err(&usbdev->dev, "usb_submit_urb failed: %d\n",
			ret);

	usb_mark_last_busy(usbdev);

	return ret;
}

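/*
 * Aggregate queued SDUs into a single LTE_TX_MULTI_SDU frame in send_buf.
 * Each SDU is copied in and padded to a 4-byte boundary; aggregation stops
 * at MAX_PACKET_IN_MULTI_SDU packets or when MAX_SDU_SIZE would be
 * exceeded.  Returns the total frame length to transmit.
 */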
static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
{
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
	u16 send_len = 0;
	u16 num_packet = 0;
	unsigned long flags;

	multi_sdu->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_TX_MULTI_SDU);

	while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
		spin_lock_irqsave(&tx->lock, flags);
		if (list_empty(&tx->sdu_list)) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
		if (send_len + t_sdu->len > MAX_SDU_SIZE) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		list_del(&t_sdu->list);
		spin_unlock_irqrestore(&tx->lock, flags);

		memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);

		send_len += (t_sdu->len + 3) & 0xfffc;
		num_packet++;

		if (tx->avail_count > 10)
			t_sdu->callback(t_sdu->cb_data);

		spin_lock_irqsave(&tx->lock, flags);
		put_tx_struct(tx, t_sdu);
		spin_unlock_irqrestore(&tx->lock, flags);
	}

	multi_sdu->len = gdm_cpu_to_dev16(udev->gdm_ed, send_len);
	multi_sdu->num_packet = gdm_cpu_to_dev16(udev->gdm_ed, num_packet);

	return send_len + offsetof(struct multi_sdu, data);
}

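/*
 * TX work: send at most one URB per pass.  Pending HCI packets take
 * priority; otherwise queued SDUs are aggregated into a fresh buffer via
 * packet_aggregation().  The next pass is triggered from the send
 * completion handler once send_complete is set again.
 */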
static void do_tx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_tx.work);
	struct usb_device *usbdev = udev->usbdev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t = NULL;
	int is_send = 0;
	u32 len = 0;
	unsigned long flags;

	if (!usb_autopm_get_interface(udev->intf))
		usb_autopm_put_interface(udev->intf);

	if (udev->usb_state == PM_SUSPEND)
		return;

	spin_lock_irqsave(&tx->lock, flags);
	if (!udev->send_complete) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	udev->send_complete = 0;

	if (!list_empty(&tx->hci_list)) {
		t = list_entry(tx->hci_list.next, struct usb_tx, list);
		list_del(&t->list);
		len = t->len;
		t->is_sdu = 0;
		is_send = 1;
	} else if (!list_empty(&tx->sdu_list)) {
		if (udev->tx_stop) {
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t = alloc_tx_struct(TX_BUF_SIZE);
		if (!t) {
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}
		t->callback = NULL;
		t->tx = tx;
		t->is_sdu = 1;
		is_send = 1;
	}

	if (!is_send) {
		udev->send_complete = 1;
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t->is_sdu)
		len = packet_aggregation(udev, t->buf);

	if (send_tx_packet(usbdev, t, len)) {
		pr_err("send_tx_packet failed\n");
		t->callback = NULL;
		gdm_usb_send_complete(t->urb);
	}
}

#define SDU_PARAM_LEN 12
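/*
 * Queue one SDU for transmission: wrap the payload in an LTE_TX_SDU
 * header (stripping the Ethernet header except for ARP frames), put it
 * on the sdu_list and kick the TX work.  Returns TX_NO_BUFFER when this
 * used the last free SDU buffer, so callers can throttle.
 */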
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
			    unsigned int dft_eps_ID, unsigned int eps_ID,
			    void (*cb)(void *data), void *cb_data,
			    int dev_idx, int nic_type)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu;
	struct sdu *sdu = NULL;
	unsigned long flags;
	int no_spc = 0;
	u16 send_len;

	if (!udev->usbdev) {
		pr_err("sdu send - invalid device\n");
		return TX_NO_DEV;
	}

	spin_lock_irqsave(&tx->lock, flags);
	t_sdu = get_tx_sdu_struct(tx, &no_spc);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (!t_sdu) {
		pr_err("sdu send - free list empty\n");
		return TX_NO_SPC;
	}

	sdu = (struct sdu *)t_sdu->buf;
	sdu->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_TX_SDU);
	if (nic_type == NIC_TYPE_ARP) {
		send_len = len + SDU_PARAM_LEN;
		memcpy(sdu->data, data, len);
	} else {
		send_len = len - ETH_HLEN;
		send_len += SDU_PARAM_LEN;
		memcpy(sdu->data, data + ETH_HLEN, len - ETH_HLEN);
	}

	sdu->len = gdm_cpu_to_dev16(udev->gdm_ed, send_len);
	sdu->dft_eps_ID = gdm_cpu_to_dev32(udev->gdm_ed, dft_eps_ID);
	sdu->bearer_ID = gdm_cpu_to_dev32(udev->gdm_ed, eps_ID);
	sdu->nic_type = gdm_cpu_to_dev32(udev->gdm_ed, nic_type);

	t_sdu->len = send_len + HCI_HEADER_SIZE;
	t_sdu->callback = cb;
	t_sdu->cb_data = cb_data;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t_sdu->list, &tx->sdu_list);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (no_spc)
		return TX_NO_BUFFER;

	return 0;
}

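/*
 * Queue an HCI control packet for transmission: copy it into a freshly
 * allocated usb_tx, put it on the hci_list and kick the TX work.
 */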
static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
			    void (*cb)(void *data), void *cb_data)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("hci send - invalid device\n");
		return -ENODEV;
	}

	t = alloc_tx_struct(len);
	if (!t) {
		pr_err("hci_send - out of memory\n");
		return -ENOMEM;
	}

	memcpy(t->buf, data, len);
	t->callback = cb;
	t->cb_data = cb_data;
	t->len = len;
	t->tx = tx;
	t->is_sdu = 0;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t->list, &tx->hci_list);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static u8 gdm_usb_get_endian(void *priv_dev)
{
	struct lte_udev *udev = priv_dev;

	return udev->gdm_ed;
}

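/*
 * Probe: set up the TX/RX contexts, enable autosuspend, select the HCI
 * endianness from the product ID and request the device MAC address.
 * Registration of the network device is deferred until the MAC address
 * reply arrives (see set_mac_address()).
 */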
static int gdm_usb_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int ret = 0;
	struct phy_dev *phy_dev = NULL;
	struct lte_udev *udev = NULL;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber > NETWORK_INTERFACE) {
		pr_info("not a network device\n");
		return -ENODEV;
	}

	phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL);
	if (!phy_dev)
		return -ENOMEM;

	udev = kzalloc(sizeof(*udev), GFP_KERNEL);
	if (!udev) {
		ret = -ENOMEM;
		goto err_udev;
	}

	phy_dev->priv_dev = (void *)udev;
	phy_dev->send_hci_func = gdm_usb_hci_send;
	phy_dev->send_sdu_func = gdm_usb_sdu_send;
	phy_dev->rcv_func = gdm_usb_recv;
	phy_dev->get_endian = gdm_usb_get_endian;

	udev->usbdev = usbdev;
	ret = init_usb(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "init_usb func failed\n");
		goto err_init_usb;
	}
	udev->intf = intf;

	intf->needs_remote_wakeup = 1;
	usb_enable_autosuspend(usbdev);
	pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);

	/* Devices listed here use big-endian HCI data;
	 * everything else defaults to little endian.
	 */
	if (idProduct == PID_GDM7243)
		udev->gdm_ed = ENDIANNESS_BIG;
	else
		udev->gdm_ed = ENDIANNESS_LITTLE;

	ret = request_mac_address(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "request MAC address failed\n");
		goto err_mac_address;
	}

	start_rx_proc(phy_dev);
	usb_get_dev(usbdev);
	usb_set_intfdata(intf, phy_dev);

	return 0;

err_mac_address:
	release_usb(udev);
err_init_usb:
	kfree(udev);
err_udev:
	kfree(phy_dev);

	return ret;
}

static void gdm_usb_disconnect(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct usb_device *usbdev;

	usbdev = interface_to_usbdev(intf);
	phy_dev = usb_get_intfdata(intf);

	udev = phy_dev->priv_dev;
	unregister_lte_device(phy_dev);

	release_usb(udev);

	kfree(udev);
	udev = NULL;

	kfree(phy_dev);
	phy_dev = NULL;

	usb_put_dev(usbdev);
}

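/*
 * Suspend: mark the device suspended, kill all in-flight receive URBs and
 * make sure the TX/RX work items are not running.
 */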
static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct rx_cxt *rx;
	struct usb_rx *r;
	struct usb_rx *r_next;
	unsigned long flags;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;
	if (udev->usb_state != PM_NORMAL) {
		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
		return -1;
	}

	udev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	cancel_work_sync(&udev->work_tx.work);
	cancel_work_sync(&udev->work_rx.work);

	return 0;
}

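/*
 * Resume: re-submit receive URBs for every free buffer above the normal
 * MAX_RX_SUBMIT_COUNT watermark and restart the TX work.
 */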
static int gdm_usb_resume(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct tx_cxt *tx;
	struct rx_cxt *rx;
	unsigned long flags;
	int issue_count;
	int i;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;

	if (udev->usb_state != PM_SUSPEND) {
		dev_err(intf->usb_dev, "usb resume - invalid state\n");
		return -1;
	}
	udev->usb_state = PM_NORMAL;

	spin_lock_irqsave(&rx->rx_lock, flags);
	issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	if (issue_count >= 0) {
		for (i = 0; i < issue_count; i++)
			gdm_usb_recv(phy_dev->priv_dev,
				     udev->rx_cb,
				     phy_dev,
				     USB_COMPLETE);
	}

	tx = &udev->tx;
	spin_lock_irqsave(&tx->lock, flags);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct usb_driver gdm_usb_lte_driver = {
	.name = "gdm_lte",
	.probe = gdm_usb_probe,
	.disconnect = gdm_usb_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_usb_suspend,
	.resume = gdm_usb_resume,
	.reset_resume = gdm_usb_resume,
};

static int __init gdm_usb_lte_init(void)
{
	if (gdm_lte_event_init() < 0) {
		pr_err("error creating event\n");
		return -1;
	}

	return usb_register(&gdm_usb_lte_driver);
}

static void __exit gdm_usb_lte_exit(void)
{
	gdm_lte_event_exit();

	usb_deregister(&gdm_usb_lte_driver);
}

module_init(gdm_usb_lte_init);
module_exit(gdm_usb_lte_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("GCT LTE USB Device Driver");
MODULE_LICENSE("GPL");