// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
 * Copyright (C) 2015-2016 Samsung Electronics
 *               Igor Kotrasinski <i.kotrasinsk@samsung.com>
 *               Krzysztof Opasiak <k.opasiak@samsung.com>
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/gadget.h>
#include <linux/usb/hcd.h>
#include <linux/kthread.h>
#include <linux/file.h>
#include <linux/byteorder/generic.h>

#include "usbip_common.h"
#include "vudc.h"

#define VIRTUAL_ENDPOINTS (1 /* ep0 */ + 15 /* in eps */ + 15 /* out eps */)

/* urb-related structures alloc / free */

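/*
 * Release an urb together with the setup packet and transfer buffer
 * that were allocated for it, then drop the urb reference.
 */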
static void free_urb(struct urb *urb)
{
	if (!urb)
		return;

	kfree(urb->setup_packet);
	urb->setup_packet = NULL;

	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;

	usb_free_urb(urb);
}

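/*
 * Allocate an empty urbp wrapper; the urb and target endpoint are
 * filled in later by the caller (typically the rx path).
 */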
struct urbp *alloc_urbp(void)
{
	struct urbp *urb_p;

	urb_p = kzalloc(sizeof(*urb_p), GFP_KERNEL);
	if (!urb_p)
		return urb_p;

	urb_p->urb = NULL;
	urb_p->ep = NULL;
	INIT_LIST_HEAD(&urb_p->urb_entry);
	return urb_p;
}

static void free_urbp(struct urbp *urb_p)
{
	kfree(urb_p);
}

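/* Free a urbp wrapper together with the urb it carries, if any. */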
void free_urbp_and_urb(struct urbp *urb_p)
{
	if (!urb_p)
		return;
	free_urb(urb_p->urb);
	free_urbp(urb_p);
}

/* utilities; almost verbatim from dummy_hcd.c */

/* called with udc->lock held */
static void nuke(struct vudc *udc, struct vep *ep)
{
	struct vrequest *req;

	while (!list_empty(&ep->req_queue)) {
		req = list_first_entry(&ep->req_queue, struct vrequest,
				       req_entry);
		list_del_init(&req->req_entry);
		req->req.status = -ESHUTDOWN;

		spin_unlock(&udc->lock);
		usb_gadget_giveback_request(&ep->ep, &req->req);
		spin_lock(&udc->lock);
	}
}

/* caller must hold lock */
static void stop_activity(struct vudc *udc)
{
	int i;
	struct urbp *urb_p, *tmp;

	udc->address = 0;

	for (i = 0; i < VIRTUAL_ENDPOINTS; i++)
		nuke(udc, &udc->ep[i]);

	list_for_each_entry_safe(urb_p, tmp, &udc->urb_queue, urb_entry) {
		list_del(&urb_p->urb_entry);
		free_urbp_and_urb(urb_p);
	}
}

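/*
 * Map an endpoint address (number plus direction bit) to its vep.
 * Address 0 always resolves to ep0; returns NULL when no enabled
 * endpoint matches.
 */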
struct vep *vudc_find_endpoint(struct vudc *udc, u8 address)
{
	int i;

	if ((address & ~USB_DIR_IN) == 0)
		return &udc->ep[0];

	for (i = 1; i < VIRTUAL_ENDPOINTS; i++) {
		struct vep *ep = &udc->ep[i];

		if (!ep->desc)
			continue;
		if (ep->desc->bEndpointAddress == address)
			return ep;
	}
	return NULL;
}

/* gadget ops */

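/*
 * Fake a frame number from the time elapsed since the device was
 * started, wrapped to the 11-bit frame counter range.
 */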
static int vgadget_get_frame(struct usb_gadget *_gadget)
{
	struct timespec64 now;
	struct vudc *udc = usb_gadget_to_vudc(_gadget);

	ktime_get_ts64(&now);
	return ((now.tv_sec - udc->start_time.tv_sec) * 1000 +
		(now.tv_nsec - udc->start_time.tv_nsec) / NSEC_PER_MSEC)
		& 0x7FF;
}

static int vgadget_set_selfpowered(struct usb_gadget *_gadget, int value)
{
	struct vudc *udc = usb_gadget_to_vudc(_gadget);

	if (value)
		udc->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
	else
		udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
	return 0;
}

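/*
 * Soft-connect / soft-disconnect. On pull-up the gadget descriptors
 * are fetched and the usbip event handler is started; on pull-down
 * the cached descriptors are invalidated and the event handler is
 * stopped again.
 */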
static int vgadget_pullup(struct usb_gadget *_gadget, int value)
{
	struct vudc *udc = usb_gadget_to_vudc(_gadget);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&udc->lock, flags);
	value = !!value;
	if (value == udc->pullup)
		goto unlock;

	udc->pullup = value;
	if (value) {
		udc->gadget.speed = min_t(u8, USB_SPEED_HIGH,
					  udc->driver->max_speed);
		udc->ep[0].ep.maxpacket = 64;
		/*
		 * This is the first place where we can ask our
		 * gadget driver for descriptors.
		 */
		ret = get_gadget_descs(udc);
		if (ret) {
			dev_err(&udc->gadget.dev, "Unable to get desc: %d", ret);
			goto unlock;
		}

		spin_unlock_irqrestore(&udc->lock, flags);
		usbip_start_eh(&udc->ud);
	} else {
		/* Invalidate descriptors */
		udc->desc_cached = 0;

		spin_unlock_irqrestore(&udc->lock, flags);
		usbip_event_add(&udc->ud, VUDC_EVENT_REMOVED);
		usbip_stop_eh(&udc->ud); /* wait for event handler completion */
	}

	return 0;

unlock:
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

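/*
 * Bind a gadget driver to the UDC; binding alone does not connect,
 * that only happens on pull-up.
 */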
static int vgadget_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct vudc *udc = usb_gadget_to_vudc(g);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->driver = driver;
	udc->pullup = udc->connected = udc->desc_cached = 0;
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static int vgadget_udc_stop(struct usb_gadget *g)
{
	struct vudc *udc = usb_gadget_to_vudc(g);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->driver = NULL;
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

static const struct usb_gadget_ops vgadget_ops = {
	.get_frame = vgadget_get_frame,
	.set_selfpowered = vgadget_set_selfpowered,
	.pullup = vgadget_pullup,
	.udc_start = vgadget_udc_start,
	.udc_stop = vgadget_udc_stop,
};

/* endpoint ops */

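/*
 * Enable an endpoint: record its descriptor, transfer type and
 * maxpacket size. ep0 cannot be (re)enabled this way.
 */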
static int vep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct vep *ep;
	struct vudc *udc;
	unsigned int maxp;
	unsigned long flags;

	ep = to_vep(_ep);
	udc = ep_to_vudc(ep);

	if (!_ep || !desc || ep->desc || _ep->caps.type_control
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);

	maxp = usb_endpoint_maxp(desc);
	_ep->maxpacket = maxp;
	ep->desc = desc;
	ep->type = usb_endpoint_type(desc);
	ep->halted = ep->wedged = 0;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static int vep_disable(struct usb_ep *_ep)
{
	struct vep *ep;
	struct vudc *udc;
	unsigned long flags;

	ep = to_vep(_ep);
	udc = ep_to_vudc(ep);
	if (!_ep || !ep->desc || _ep->caps.type_control)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	ep->desc = NULL;
	nuke(udc, ep);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static struct usb_request *vep_alloc_request(struct usb_ep *_ep,
		gfp_t mem_flags)
{
	struct vrequest *req;

	if (!_ep)
		return NULL;

	req = kzalloc(sizeof(*req), mem_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->req_entry);

	return &req->req;
}

static void vep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct vrequest *req;

	/* ep is always valid here - see usb_ep_free_request() */
	if (!_req)
		return;

	req = to_vrequest(_req);
	kfree(req);
}

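/*
 * Queue a request on an endpoint. The request is only appended to the
 * endpoint's req_queue here; data is transferred later, when it is
 * matched against an urb received from the usbip client.
 */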
static int vep_queue(struct usb_ep *_ep, struct usb_request *_req,
		gfp_t mem_flags)
{
	struct vep *ep;
	struct vrequest *req;
	struct vudc *udc;
	unsigned long flags;

	if (!_ep || !_req)
		return -EINVAL;

	ep = to_vep(_ep);
	req = to_vrequest(_req);
	udc = ep_to_vudc(ep);

	spin_lock_irqsave(&udc->lock, flags);
	_req->actual = 0;
	_req->status = -EINPROGRESS;

	list_add_tail(&req->req_entry, &ep->req_queue);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

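/*
 * Remove a still-queued request from its endpoint and complete it
 * with -ECONNRESET; returns -EINVAL if the request is no longer
 * queued.
 */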
static int vep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct vep *ep;
	struct vrequest *req;
	struct vudc *udc;
	struct vrequest *lst;
	unsigned long flags;
	int ret = -EINVAL;

	if (!_ep || !_req)
		return ret;

	ep = to_vep(_ep);
	req = to_vrequest(_req);
	udc = req->udc;

	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);
	list_for_each_entry(lst, &ep->req_queue, req_entry) {
		if (&lst->req == _req) {
			list_del_init(&lst->req_entry);
			_req->status = -ECONNRESET;
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	if (ret == 0)
		usb_gadget_giveback_request(_ep, _req);

	return ret;
}

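/*
 * Common helper for set_halt and set_wedge. Clearing a halt also
 * clears the wedged state; halting an IN endpoint with requests
 * still queued is refused with -EAGAIN.
 */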
static int
vep_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct vep *ep;
	struct vudc *udc;
	unsigned long flags;
	int ret = 0;

	ep = to_vep(_ep);
	if (!_ep)
		return -EINVAL;

	udc = ep_to_vudc(ep);
	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);
	if (!value)
		ep->halted = ep->wedged = 0;
	else if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
			!list_empty(&ep->req_queue))
		ret = -EAGAIN;
	else {
		ep->halted = 1;
		if (wedged)
			ep->wedged = 1;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return ret;
}

static int
vep_set_halt(struct usb_ep *_ep, int value)
{
	return vep_set_halt_and_wedge(_ep, value, 0);
}

static int vep_set_wedge(struct usb_ep *_ep)
{
	return vep_set_halt_and_wedge(_ep, 1, 1);
}

static const struct usb_ep_ops vep_ops = {
	.enable = vep_enable,
	.disable = vep_disable,

	.alloc_request = vep_alloc_request,
	.free_request = vep_free_request,

	.queue = vep_queue,
	.dequeue = vep_dequeue,

	.set_halt = vep_set_halt,
	.set_wedge = vep_set_wedge,
};

/* shutdown / reset / error handlers */

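/*
 * Tear the connection down: shut down the socket, stop the rx/tx
 * threads, complete all outstanding requests and, if necessary, call
 * the gadget driver's disconnect handler.
 */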
static void vudc_shutdown(struct usbip_device *ud)
{
	struct vudc *udc = container_of(ud, struct vudc, ud);
	int call_disconnect = 0;
	unsigned long flags;

	dev_dbg(&udc->pdev->dev, "device shutdown");
	if (ud->tcp_socket)
		kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);

	if (ud->tcp_rx) {
		kthread_stop_put(ud->tcp_rx);
		ud->tcp_rx = NULL;
	}
	if (ud->tcp_tx) {
		kthread_stop_put(ud->tcp_tx);
		ud->tcp_tx = NULL;
	}

	if (ud->tcp_socket) {
		sockfd_put(ud->tcp_socket);
		ud->tcp_socket = NULL;
	}

	spin_lock_irqsave(&udc->lock, flags);
	stop_activity(udc);
	if (udc->connected && udc->driver->disconnect)
		call_disconnect = 1;
	udc->connected = 0;
	spin_unlock_irqrestore(&udc->lock, flags);
	if (call_disconnect)
		udc->driver->disconnect(&udc->gadget);
}

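/*
 * Reset handler: drop all pending activity, report the reset to the
 * gadget driver and mark the usbip device as available again.
 */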
static void vudc_device_reset(struct usbip_device *ud)
{
	struct vudc *udc = container_of(ud, struct vudc, ud);
	unsigned long flags;

	dev_dbg(&udc->pdev->dev, "device reset");
	spin_lock_irqsave(&udc->lock, flags);
	stop_activity(udc);
	spin_unlock_irqrestore(&udc->lock, flags);
	if (udc->driver)
		usb_gadget_udc_reset(&udc->gadget, udc->driver);
	spin_lock_irqsave(&ud->lock, flags);
	ud->status = SDEV_ST_AVAILABLE;
	spin_unlock_irqrestore(&ud->lock, flags);
}

static void vudc_device_unusable(struct usbip_device *ud)
{
	unsigned long flags;

	spin_lock_irqsave(&ud->lock, flags);
	ud->status = SDEV_ST_ERROR;
	spin_unlock_irqrestore(&ud->lock, flags);
}

/* device setup / cleanup */

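/*
 * Allocate a vudc_device and its backing platform device. The
 * platform device is only allocated here; registering it is left to
 * the caller.
 */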
struct vudc_device *alloc_vudc_device(int devid)
{
	struct vudc_device *udc_dev;

	udc_dev = kzalloc(sizeof(*udc_dev), GFP_KERNEL);
	if (!udc_dev)
		return NULL;

	INIT_LIST_HEAD(&udc_dev->dev_entry);

	udc_dev->pdev = platform_device_alloc(GADGET_NAME, devid);
	if (!udc_dev->pdev) {
		kfree(udc_dev);
		udc_dev = NULL;
	}

	return udc_dev;
}

void put_vudc_device(struct vudc_device *udc_dev)
{
	platform_device_put(udc_dev->pdev);
	kfree(udc_dev);
}

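/*
 * Set up the emulated hardware: ep0 plus 15 IN and 15 OUT general
 * purpose endpoints, the usbip device state and its event handler
 * ops.
 */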
static int init_vudc_hw(struct vudc *udc)
{
	int i;
	struct usbip_device *ud = &udc->ud;
	struct vep *ep;

	udc->ep = kcalloc(VIRTUAL_ENDPOINTS, sizeof(*udc->ep), GFP_KERNEL);
	if (!udc->ep)
		goto nomem_ep;

	INIT_LIST_HEAD(&udc->gadget.ep_list);

	/* create ep0 and 15 in, 15 out general purpose eps */
	for (i = 0; i < VIRTUAL_ENDPOINTS; ++i) {
		int is_out = i % 2;
		int num = (i + 1) / 2;

		ep = &udc->ep[i];

		sprintf(ep->name, "ep%d%s", num,
			i ? (is_out ? "out" : "in") : "");
		ep->ep.name = ep->name;

		ep->ep.ops = &vep_ops;

		usb_ep_set_maxpacket_limit(&ep->ep, ~0);
		ep->ep.max_streams = 16;
		ep->gadget = &udc->gadget;
		INIT_LIST_HEAD(&ep->req_queue);

		if (i == 0) {
			/* ep0 */
			ep->ep.caps.type_control = true;
			ep->ep.caps.dir_out = true;
			ep->ep.caps.dir_in = true;

			udc->gadget.ep0 = &ep->ep;
		} else {
			/* all other eps */
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_int = true;
			ep->ep.caps.type_bulk = true;

			if (is_out)
				ep->ep.caps.dir_out = true;
			else
				ep->ep.caps.dir_in = true;

			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
		}
	}

	spin_lock_init(&udc->lock);
	spin_lock_init(&udc->lock_tx);
	INIT_LIST_HEAD(&udc->urb_queue);
	INIT_LIST_HEAD(&udc->tx_queue);
	init_waitqueue_head(&udc->tx_waitq);

	spin_lock_init(&ud->lock);
	mutex_init(&ud->sysfs_lock);
	ud->status = SDEV_ST_AVAILABLE;
	ud->side = USBIP_VUDC;

	ud->eh_ops.shutdown = vudc_shutdown;
	ud->eh_ops.reset = vudc_device_reset;
	ud->eh_ops.unusable = vudc_device_unusable;

	v_init_timer(udc);
	return 0;

nomem_ep:
	return -ENOMEM;
}

static void cleanup_vudc_hw(struct vudc *udc)
{
	kfree(udc->ep);
}

/* platform driver ops */

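/*
 * Platform driver probe: allocate the vudc, initialize the emulated
 * endpoints and register the gadget with the UDC core.
 */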
int vudc_probe(struct platform_device *pdev)
{
	struct vudc *udc;
	int ret = -ENOMEM;

	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
	if (!udc)
		goto out;

	udc->gadget.name = GADGET_NAME;
	udc->gadget.ops = &vgadget_ops;
	udc->gadget.max_speed = USB_SPEED_HIGH;
	udc->gadget.dev.parent = &pdev->dev;
	udc->pdev = pdev;

	ret = init_vudc_hw(udc);
	if (ret)
		goto err_init_vudc_hw;

	ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
	if (ret < 0)
		goto err_add_udc;

	platform_set_drvdata(pdev, udc);

	return ret;

err_add_udc:
	cleanup_vudc_hw(udc);
err_init_vudc_hw:
	kfree(udc);
out:
	return ret;
}

int vudc_remove(struct platform_device *pdev)
{
	struct vudc *udc = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&udc->gadget);
	cleanup_vudc_hw(udc);
	kfree(udc);
	return 0;
}