1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * USB Skeleton driver - 2.2
4 *
5 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
6 *
7 * This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
8 * but has been rewritten to be easier to read and use.
9 */
10
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/kref.h>
16 #include <linux/uaccess.h>
17 #include <linux/usb.h>
18 #include <linux/mutex.h>
19
20
/* Define these values to match your devices */
#define USB_SKEL_VENDOR_ID	0xfff0
#define USB_SKEL_PRODUCT_ID	0xfff0

/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
	{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
	{ }					/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_table);


/* Get a minor range for your devices from the usb maintainer */
#define USB_SKEL_MINOR_BASE	192

/* our private defines. if this grows any larger, use your own .h file */
/*
 * MAX_TRANSFER is chosen so that the VM is not stressed by
 * allocations > PAGE_SIZE and the number of packets in a page
 * is an integer; 512 is the largest possible packet on EHCI
 */
#define MAX_TRANSFER		(PAGE_SIZE - 512)
/* cap on concurrent write URBs; arbitrarily chosen */
#define WRITES_IN_FLIGHT	8
45
46 /* Structure to hold all of our device specific stuff */
/*
 * Structure to hold all of our device specific stuff.
 *
 * Lifetime is reference counted via @kref: allocated in skel_probe(),
 * freed by skel_delete() when the last reference (open file or the
 * interface binding) is dropped.
 */
struct usb_skel {
	struct usb_device	*udev;			/* the usb device for this device */
	struct usb_interface	*interface;		/* the interface for this device */
	struct semaphore	limit_sem;		/* limiting the number of writes in progress */
	struct usb_anchor	submitted;		/* in case we need to retract our submissions */
	struct urb		*bulk_in_urb;		/* the urb to read data with (single, reused) */
	unsigned char		*bulk_in_buffer;	/* the buffer to receive data */
	size_t			bulk_in_size;		/* the size of the receive buffer */
	size_t			bulk_in_filled;		/* number of bytes in the buffer */
	size_t			bulk_in_copied;		/* already copied to user space */
	__u8			bulk_in_endpointAddr;	/* the address of the bulk in endpoint */
	__u8			bulk_out_endpointAddr;	/* the address of the bulk out endpoint */
	int			errors;			/* the last request tanked; reported once then cleared */
	bool			ongoing_read;		/* a read is going on */
	spinlock_t		err_lock;		/* protects errors, ongoing_read, bulk_in_filled */
	struct kref		kref;			/* refcount; release handler is skel_delete() */
	struct mutex		io_mutex;		/* synchronize I/O with disconnect */
	unsigned long		disconnected:1;		/* set by disconnect(), checked under io_mutex */
	wait_queue_head_t	bulk_in_wait;		/* to wait for an ongoing read */
};
/* map an embedded kref back to its containing usb_skel */
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)
68
/* forward declarations: file ops need the driver, flush needs draw_down */
static struct usb_driver skel_driver;
static void skel_draw_down(struct usb_skel *dev);
71
skel_delete(struct kref * kref)72 static void skel_delete(struct kref *kref)
73 {
74 struct usb_skel *dev = to_skel_dev(kref);
75
76 usb_free_urb(dev->bulk_in_urb);
77 usb_put_intf(dev->interface);
78 usb_put_dev(dev->udev);
79 kfree(dev->bulk_in_buffer);
80 kfree(dev);
81 }
82
skel_open(struct inode * inode,struct file * file)83 static int skel_open(struct inode *inode, struct file *file)
84 {
85 struct usb_skel *dev;
86 struct usb_interface *interface;
87 int subminor;
88 int retval = 0;
89
90 subminor = iminor(inode);
91
92 interface = usb_find_interface(&skel_driver, subminor);
93 if (!interface) {
94 pr_err("%s - error, can't find device for minor %d\n",
95 __func__, subminor);
96 retval = -ENODEV;
97 goto exit;
98 }
99
100 dev = usb_get_intfdata(interface);
101 if (!dev) {
102 retval = -ENODEV;
103 goto exit;
104 }
105
106 retval = usb_autopm_get_interface(interface);
107 if (retval)
108 goto exit;
109
110 /* increment our usage count for the device */
111 kref_get(&dev->kref);
112
113 /* save our object in the file's private structure */
114 file->private_data = dev;
115
116 exit:
117 return retval;
118 }
119
skel_release(struct inode * inode,struct file * file)120 static int skel_release(struct inode *inode, struct file *file)
121 {
122 struct usb_skel *dev;
123
124 dev = file->private_data;
125 if (dev == NULL)
126 return -ENODEV;
127
128 /* allow the device to be autosuspended */
129 usb_autopm_put_interface(dev->interface);
130
131 /* decrement the count on our device */
132 kref_put(&dev->kref, skel_delete);
133 return 0;
134 }
135
/*
 * flush() implementation: drain all outstanding I/O, then report any
 * recorded asynchronous error exactly once so subsequent opens start
 * with a clean slate.  -EPIPE is passed through (reset notification);
 * every other error is collapsed to -EIO.
 */
static int skel_flush(struct file *file, fl_owner_t id)
{
	struct usb_skel *dev = file->private_data;
	int res = 0;

	if (!dev)
		return -ENODEV;

	/* wait for io to stop */
	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	/* read out errors, leave subsequent opens a clean slate */
	spin_lock_irq(&dev->err_lock);
	if (dev->errors) {
		res = (dev->errors == -EPIPE) ? -EPIPE : -EIO;
		dev->errors = 0;
	}
	spin_unlock_irq(&dev->err_lock);

	mutex_unlock(&dev->io_mutex);

	return res;
}
159
skel_read_bulk_callback(struct urb * urb)160 static void skel_read_bulk_callback(struct urb *urb)
161 {
162 struct usb_skel *dev;
163 unsigned long flags;
164
165 dev = urb->context;
166
167 spin_lock_irqsave(&dev->err_lock, flags);
168 /* sync/async unlink faults aren't errors */
169 if (urb->status) {
170 if (!(urb->status == -ENOENT ||
171 urb->status == -ECONNRESET ||
172 urb->status == -ESHUTDOWN))
173 dev_err(&dev->interface->dev,
174 "%s - nonzero write bulk status received: %d\n",
175 __func__, urb->status);
176
177 dev->errors = urb->status;
178 } else {
179 dev->bulk_in_filled = urb->actual_length;
180 }
181 dev->ongoing_read = 0;
182 spin_unlock_irqrestore(&dev->err_lock, flags);
183
184 wake_up_interruptible(&dev->bulk_in_wait);
185 }
186
/*
 * Prepare and submit the single shared bulk-in URB for up to @count
 * bytes (clamped to the buffer size).
 *
 * Called from skel_read() with dev->io_mutex held.  ongoing_read is
 * raised under err_lock before submission and lowered again here only
 * if submission fails; on success the completion handler clears it.
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */
static int skel_do_read_io(struct usb_skel *dev, size_t count)
{
	int rv;

	/* prepare a read */
	usb_fill_bulk_urb(dev->bulk_in_urb,
			dev->udev,
			usb_rcvbulkpipe(dev->udev,
				dev->bulk_in_endpointAddr),
			dev->bulk_in_buffer,
			min(dev->bulk_in_size, count),	/* never overrun bulk_in_buffer */
			skel_read_bulk_callback,
			dev);
	/* tell everybody to leave the URB alone */
	spin_lock_irq(&dev->err_lock);
	dev->ongoing_read = 1;
	spin_unlock_irq(&dev->err_lock);

	/* submit bulk in urb, which means no data to deliver */
	dev->bulk_in_filled = 0;
	dev->bulk_in_copied = 0;

	/* do it */
	rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
	if (rv < 0) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting read urb, error %d\n",
			__func__, rv);
		/* keep -ENOMEM distinct so callers can back off; rest becomes -EIO */
		rv = (rv == -ENOMEM) ? rv : -EIO;
		spin_lock_irq(&dev->err_lock);
		dev->ongoing_read = 0;
		spin_unlock_irq(&dev->err_lock);
	}

	return rv;
}
223
/*
 * read() implementation.
 *
 * Data arrives through the single reusable bulk-in URB into
 * bulk_in_buffer; bulk_in_filled/bulk_in_copied track how much of the
 * buffer is valid and how much has already been handed to user space.
 * io_mutex serializes readers against each other, against writers'
 * submission path, and against disconnect().
 */
static ssize_t skel_read(struct file *file, char *buffer, size_t count,
			 loff_t *ppos)
{
	struct usb_skel *dev;
	int rv;
	bool ongoing_io;

	dev = file->private_data;

	if (!count)
		return 0;

	/* no concurrent readers */
	rv = mutex_lock_interruptible(&dev->io_mutex);
	if (rv < 0)
		return rv;

	if (dev->disconnected) {	/* disconnect() was called */
		rv = -ENODEV;
		goto exit;
	}

	/* if IO is under way, we must not touch things */
retry:
	spin_lock_irq(&dev->err_lock);
	ongoing_io = dev->ongoing_read;
	spin_unlock_irq(&dev->err_lock);

	if (ongoing_io) {
		/* nonblocking IO shall not wait */
		if (file->f_flags & O_NONBLOCK) {
			rv = -EAGAIN;
			goto exit;
		}
		/*
		 * IO may take forever
		 * hence wait in an interruptible state
		 * NOTE(review): the wait condition reads ongoing_read without
		 * err_lock; presumably safe since the completion handler clears
		 * it under the lock before waking us — confirm.
		 */
		rv = wait_event_interruptible(dev->bulk_in_wait, (!dev->ongoing_read));
		if (rv < 0)
			goto exit;
	}

	/* errors must be reported */
	rv = dev->errors;
	if (rv < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		rv = (rv == -EPIPE) ? rv : -EIO;
		/* report it */
		goto exit;
	}

	/*
	 * if the buffer is filled we may satisfy the read
	 * else we need to start IO
	 */

	if (dev->bulk_in_filled) {
		/* we had read data */
		size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
		size_t chunk = min(available, count);

		if (!available) {
			/*
			 * all data has been used
			 * actual IO needs to be done
			 */
			rv = skel_do_read_io(dev, count);
			if (rv < 0)
				goto exit;
			else
				goto retry;	/* loop until data or error */
		}
		/*
		 * data is available
		 * chunk tells us how much shall be copied
		 */

		if (copy_to_user(buffer,
				 dev->bulk_in_buffer + dev->bulk_in_copied,
				 chunk))
			rv = -EFAULT;
		else
			rv = chunk;	/* short read is fine: return what we had */

		dev->bulk_in_copied += chunk;

		/*
		 * if we are asked for more than we have,
		 * we start IO but don't wait
		 */
		if (available < count)
			skel_do_read_io(dev, count - chunk);	/* fire and forget */
	} else {
		/* no data in the buffer */
		rv = skel_do_read_io(dev, count);
		if (rv < 0)
			goto exit;
		else
			goto retry;
	}
exit:
	mutex_unlock(&dev->io_mutex);
	return rv;
}
331
/*
 * Completion handler for write URBs (may run in atomic context, hence
 * the irqsave locking).
 *
 * Records any error (unlink faults excepted) for later reporting, frees
 * the coherent transfer buffer allocated in skel_write(), and releases
 * one limit_sem slot so a blocked writer may proceed.
 */
static void skel_write_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;
	unsigned long flags;

	dev = urb->context;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);

		spin_lock_irqsave(&dev->err_lock, flags);
		dev->errors = urb->status;
		spin_unlock_irqrestore(&dev->err_lock, flags);
	}

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);
	up(&dev->limit_sem);
}
358
/*
 * write() implementation: copy user data into a freshly allocated
 * coherent buffer and submit it as an asynchronous bulk-out URB.
 *
 * Writes are clamped to MAX_TRANSFER bytes, so callers may see a short
 * write.  At most WRITES_IN_FLIGHT URBs are pending at once, enforced
 * by limit_sem (blocking unless O_NONBLOCK).  On successful submission
 * ownership of buf passes to skel_write_bulk_callback(), which frees
 * it and releases the semaphore slot; on any error this function
 * cleans up itself.
 */
static ssize_t skel_write(struct file *file, const char *user_buffer,
			  size_t count, loff_t *ppos)
{
	struct usb_skel *dev;
	int retval = 0;
	struct urb *urb = NULL;
	char *buf = NULL;
	size_t writesize = min_t(size_t, count, MAX_TRANSFER);

	dev = file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto exit;

	/*
	 * limit the number of URBs in flight to stop a user from using up all
	 * RAM
	 */
	if (!(file->f_flags & O_NONBLOCK)) {
		if (down_interruptible(&dev->limit_sem)) {
			retval = -ERESTARTSYS;
			goto exit;
		}
	} else {
		if (down_trylock(&dev->limit_sem)) {
			retval = -EAGAIN;
			goto exit;
		}
	}

	/* report a previously recorded asynchronous error exactly once */
	spin_lock_irq(&dev->err_lock);
	retval = dev->errors;
	if (retval < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		retval = (retval == -EPIPE) ? retval : -EIO;
	}
	spin_unlock_irq(&dev->err_lock);
	if (retval < 0)
		goto error;

	/* create a urb, and a buffer for it, and copy the data to the urb */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		retval = -ENOMEM;
		goto error;
	}

	buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
				 &urb->transfer_dma);
	if (!buf) {
		retval = -ENOMEM;
		goto error;
	}

	if (copy_from_user(buf, user_buffer, writesize)) {
		retval = -EFAULT;
		goto error;
	}

	/* this lock makes sure we don't submit URBs to gone devices */
	mutex_lock(&dev->io_mutex);
	if (dev->disconnected) {	/* disconnect() was called */
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}

	/* initialize the urb properly */
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
			  buf, writesize, skel_write_bulk_callback, dev);
	/* buf came from usb_alloc_coherent(): tell the core not to map it */
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	/* anchor it so disconnect()/flush can retract it */
	usb_anchor_urb(urb, &dev->submitted);

	/* send the data out the bulk port */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	mutex_unlock(&dev->io_mutex);
	if (retval) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting write urb, error %d\n",
			__func__, retval);
		goto error_unanchor;
	}

	/*
	 * release our reference to this urb, the USB core will eventually free
	 * it entirely
	 */
	usb_free_urb(urb);


	return writesize;

error_unanchor:
	usb_unanchor_urb(urb);
error:
	/* callback will not run on these paths: free buffer/urb and the slot here */
	if (urb) {
		usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
		usb_free_urb(urb);
	}
	up(&dev->limit_sem);

exit:
	return retval;
}
467
/*
 * File operations for the character device node created per interface.
 * noop_llseek succeeds without moving the file position (the device
 * stream is not seekable).
 */
static const struct file_operations skel_fops = {
	.owner =	THIS_MODULE,
	.read =		skel_read,
	.write =	skel_write,
	.open =		skel_open,
	.release =	skel_release,
	.flush =	skel_flush,
	.llseek =	noop_llseek,
};
477
/*
 * usb class driver info in order to get a minor number from the usb core,
 * and to have the device registered with the driver core.
 * The %d in the name is filled in with the allocated minor.
 */
static struct usb_class_driver skel_class = {
	.name =		"skel%d",
	.fops =		&skel_fops,
	.minor_base =	USB_SKEL_MINOR_BASE,
};
487
/*
 * Called when an interface matching skel_table is bound to this driver.
 *
 * Allocates and initializes the per-device state, locates the first
 * bulk-in and bulk-out endpoints, and registers a character device
 * minor.  On any failure the single kref_put() in the error path frees
 * everything acquired so far via skel_delete().
 */
static int skel_probe(struct usb_interface *interface,
		      const struct usb_device_id *id)
{
	struct usb_skel *dev;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	int retval;

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kref_init(&dev->kref);
	sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
	mutex_init(&dev->io_mutex);
	spin_lock_init(&dev->err_lock);
	init_usb_anchor(&dev->submitted);
	init_waitqueue_head(&dev->bulk_in_wait);

	/* take our own references; dropped in skel_delete() */
	dev->udev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = usb_get_intf(interface);

	/* set up the endpoint information */
	/* use only the first bulk-in and bulk-out endpoints */
	retval = usb_find_common_endpoints(interface->cur_altsetting,
			&bulk_in, &bulk_out, NULL, NULL);
	if (retval) {
		dev_err(&interface->dev,
			"Could not find both bulk-in and bulk-out endpoints\n");
		goto error;
	}

	/* one maxpacket-sized buffer serves the single, reused read URB */
	dev->bulk_in_size = usb_endpoint_maxp(bulk_in);
	dev->bulk_in_endpointAddr = bulk_in->bEndpointAddress;
	dev->bulk_in_buffer = kmalloc(dev->bulk_in_size, GFP_KERNEL);
	if (!dev->bulk_in_buffer) {
		retval = -ENOMEM;
		goto error;
	}
	dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->bulk_in_urb) {
		retval = -ENOMEM;
		goto error;
	}

	dev->bulk_out_endpointAddr = bulk_out->bEndpointAddress;

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* we can register the device now, as it is ready */
	retval = usb_register_dev(interface, &skel_class);
	if (retval) {
		/* something prevented us from registering this driver */
		dev_err(&interface->dev,
			"Not able to get a minor for this device.\n");
		usb_set_intfdata(interface, NULL);
		goto error;
	}

	/* let the user know what node this device is now attached to */
	dev_info(&interface->dev,
		 "USB Skeleton device now attached to USBSkel-%d",
		 interface->minor);
	return 0;

error:
	/* this frees allocated memory */
	kref_put(&dev->kref, skel_delete);

	return retval;
}
560
/*
 * Called when the interface is unbound (unplug or driver unload).
 *
 * Ordering matters: give back the minor first so no new open() can find
 * the interface, then flag disconnection under io_mutex so in-flight
 * readers/writers bail out with -ENODEV, then kill outstanding URBs.
 * The device state itself survives until the last open file drops its
 * kref.
 */
static void skel_disconnect(struct usb_interface *interface)
{
	struct usb_skel *dev;
	int minor = interface->minor;

	dev = usb_get_intfdata(interface);

	/* give back our minor */
	usb_deregister_dev(interface, &skel_class);

	/* prevent more I/O from starting */
	mutex_lock(&dev->io_mutex);
	dev->disconnected = 1;
	mutex_unlock(&dev->io_mutex);

	usb_kill_urb(dev->bulk_in_urb);
	usb_kill_anchored_urbs(&dev->submitted);

	/* decrement our usage count */
	kref_put(&dev->kref, skel_delete);

	dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
}
584
skel_draw_down(struct usb_skel * dev)585 static void skel_draw_down(struct usb_skel *dev)
586 {
587 int time;
588
589 time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
590 if (!time)
591 usb_kill_anchored_urbs(&dev->submitted);
592 usb_kill_urb(dev->bulk_in_urb);
593 }
594
/*
 * Suspend hook: quiesce all outstanding I/O before the device sleeps.
 * Never vetoes suspend.
 */
static int skel_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	if (dev)
		skel_draw_down(dev);
	return 0;
}
604
/* Resume hook: nothing to restore, always succeeds. */
static int skel_resume(struct usb_interface *intf)
{
	return 0;
}
609
/*
 * Called before the device is reset: block new I/O and drain what is
 * outstanding.
 *
 * NOTE: io_mutex is intentionally left held here — it is released in
 * skel_post_reset() so no I/O can start while the reset is in progress.
 */
static int skel_pre_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	return 0;
}
619
/*
 * Called after the device has been reset.
 *
 * Records -EPIPE so the next read/write reports the reset (both paths
 * pass -EPIPE through unchanged), then releases the io_mutex taken in
 * skel_pre_reset().
 */
static int skel_post_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	/* we are sure no URBs are active - no locking needed */
	dev->errors = -EPIPE;
	mutex_unlock(&dev->io_mutex);

	return 0;
}
630
/* driver glue: entry points and device ids registered with the USB core */
static struct usb_driver skel_driver = {
	.name =		"skeleton",
	.probe =	skel_probe,
	.disconnect =	skel_disconnect,
	.suspend =	skel_suspend,
	.resume =	skel_resume,
	.pre_reset =	skel_pre_reset,
	.post_reset =	skel_post_reset,
	.id_table =	skel_table,
	.supports_autosuspend = 1,
};
642
/* generates module init/exit that (de)register skel_driver */
module_usb_driver(skel_driver);

MODULE_LICENSE("GPL v2");
646