// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * f_mass_storage.c -- Mass Storage USB Composite Function
 *
 * Copyright (C) 2003-2008 Alan Stern
 * Copyright (C) 2009 Samsung Electronics
 * Author: Michal Nazarewicz <mina86@mina86.com>
 * All rights reserved.
 */

/*
 * The Mass Storage Function acts as a USB Mass Storage device,
 * appearing to the host as a disk drive or as a CD-ROM drive.  In
 * addition to providing an example of a genuinely useful composite
 * function for a USB device, it also illustrates a technique of
 * double-buffering for increased throughput.
 *
 * For more information about MSF, and in particular its module
 * parameters and sysfs interface, read the
 * <Documentation/usb/mass-storage.rst> file.
 */

/*
 * MSF is configured by specifying an fsg_config structure.  It has the
 * following fields:
 *
 *	nluns		Number of LUNs the function has (anywhere from 1
 *				to FSG_MAX_LUNS).
 *	luns		An array of LUN configuration values.  This
 *				should be filled for each LUN that the
 *				function will include (i.e. for "nluns"
 *				LUNs).  Each element of the array has
 *				the following fields:
 *	->filename	The path to the backing file for the LUN.
 *				Required if the LUN is not marked as
 *				removable.
 *	->ro		Flag specifying that access to the LUN shall be
 *				read-only.  This is implied if CD-ROM
 *				emulation is enabled, as well as when
 *				it was impossible to open "filename"
 *				in R/W mode.
 *	->removable	Flag specifying that the LUN shall be indicated as
 *				being removable.
 *	->cdrom		Flag specifying that the LUN shall be reported as
 *				being a CD-ROM.
 *	->nofua		Flag specifying that the FUA flag in SCSI WRITE(10,12)
 *				commands for this LUN shall be ignored.
 *
 *	vendor_name
 *	product_name
 *	release		Information used as a reply to the INQUIRY
 *				request.  To use the defaults, set these to
 *				NULL, NULL and 0xffff respectively.  The first
 *				field should be 8 and the second 16
 *				characters or less.
 *
 *	can_stall	Set to permit the function to halt bulk endpoints.
 *				Disabled on some USB devices known not
 *				to work correctly.  You should set it
 *				to true.
 *
 * If "removable" is not set for a LUN then a backing file must be
 * specified.  If it is set, then a NULL filename means the LUN's medium
 * is not loaded (an empty string as "filename" in the fsg_config
 * structure causes an error).  The CD-ROM emulation includes a single
 * data track and no audio tracks; hence there need be only one
 * backing file per LUN.
 *
 * This function is heavily based on "File-backed Storage Gadget" by
 * Alan Stern, which in turn is heavily based on "Gadget Zero" by David
 * Brownell.  The driver's SCSI command interface was based on the
 * "Information technology - Small Computer System Interface - 2"
 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
 * was based on the "Universal Serial Bus Mass Storage Class UFI
 * Command Specification" document, Revision 1.0, December 14, 1998,
 * available at
 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
 */
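/*
 * Purely illustrative sketch (not used anywhere in this driver): under
 * the field layout described above -- and assuming the struct fsg_config
 * and struct fsg_lun_config declarations from f_mass_storage.h -- a
 * gadget might describe a single removable, writable LUN roughly like
 * this.  The backing-file path is a made-up example.
 *
 *	static struct fsg_config example_cfg = {
 *		.nluns = 1,
 *		.luns[0] = {
 *			.filename	= "/root/backing.img",
 *			.ro		= 0,
 *			.removable	= 1,
 *			.cdrom		= 0,
 *			.nofua		= 0,
 *		},
 *		.vendor_name	= NULL,		// use the default
 *		.product_name	= NULL,		// use the default
 *		.release	= 0xffff,	// use the default
 *		.can_stall	= 1,
 *	};
 */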

/*
 * Driver Design
 *
 * The MSF is fairly straightforward.  There is a main kernel
 * thread that handles most of the work.  Interrupt routines field
 * callbacks from the controller driver: bulk- and interrupt-request
 * completion notifications, endpoint-0 events, and disconnect events.
 * Completion events are passed to the main thread by wakeup calls.  Many
 * ep0 requests are handled at interrupt time, but SetInterface,
 * SetConfiguration, and device reset requests are forwarded to the
 * thread in the form of "exceptions" using SIGUSR1 signals (since they
 * should interrupt any ongoing file I/O operations).
 *
 * The thread's main routine implements the standard command/data/status
 * parts of a SCSI interaction.  It and its subroutines are full of tests
 * for pending signals/exceptions -- all this polling is necessary since
 * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
 * indication that the driver really wants to be running in userspace.)
 * An important point is that so long as the thread is alive it keeps an
 * open reference to the backing file.  This will prevent unmounting
 * the backing file's underlying filesystem and could cause problems
 * during system shutdown, for example.  To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
 *
 * In normal operation the main thread is started during the gadget's
 * fsg_bind() callback and stopped during fsg_unbind().  But it can
 * also exit when it receives a signal, and there's no point leaving
 * the gadget running when the thread is dead.  As of this moment, MSF
 * provides no way to deregister the gadget when the thread dies -- maybe
 * a callback function is needed.
 *
 * To provide maximum throughput, the driver uses a circular pipeline of
 * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
 * arbitrarily long; in practice the benefits don't justify having more
 * than 2 stages (i.e., double buffering).  But it helps to think of the
 * pipeline as being a long one.  Each buffer head contains a bulk-in and
 * a bulk-out request pointer (since the buffer can be used for both
 * output and input -- directions are always given from the host's
 * point of view) as well as a pointer to the buffer and various state
 * variables.
 *
 * Use of the pipeline follows a simple protocol (sketched in the
 * comment after this one).  There is a variable
 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
 * At any time that buffer head may still be in use from an earlier
 * request, so each buffer head has a state variable indicating whether
 * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
 * buffer head to be EMPTY, filling the buffer either by file I/O or by
 * USB I/O (during which the buffer head is BUSY), and marking the buffer
 * head FULL when the I/O is complete.  Then the buffer will be emptied
 * (again possibly by USB I/O, during which it is marked BUSY) and
 * finally marked EMPTY again (possibly by a completion routine).
 *
 * A module parameter tells the driver to avoid stalling the bulk
 * endpoints wherever the transport specification allows.  This is
 * necessary for some UDCs like the SuperH, which cannot reliably clear a
 * halt on a bulk endpoint.  However, under certain circumstances the
 * Bulk-only specification requires a stall.  In such cases the driver
 * will halt the endpoint and set a flag indicating that it should clear
 * the halt in software during the next device reset.  Hopefully this
 * will permit everything to work correctly.  Furthermore, although the
 * specification allows the bulk-out endpoint to halt when the host sends
 * too much data, implementing this would cause an unavoidable race.
 * The driver will always use the "no-stall" approach for OUT transfers.
 *
 * One subtle point concerns sending status-stage responses for ep0
 * requests.  Some of these requests, such as device reset, can involve
 * interrupting an ongoing file I/O operation, which might take an
 * arbitrarily long time.  During that delay the host might give up on
 * the original ep0 request and issue a new one.  When that happens the
 * driver should not notify the host about completion of the original
 * request, as the host will no longer be waiting for it.  So the driver
 * assigns to each ep0 request a unique tag, and it keeps track of the
 * tag value of the request associated with a long-running exception
 * (device-reset, interface-change, or configuration-change).  When the
 * exception handler is finished, the status-stage response is submitted
 * only if the current ep0 request tag is equal to the exception request
 * tag.  Thus only the most recently received ep0 request will get a
 * status-stage response.
 *
 * Warning: This driver source file is too long.  It ought to be split up
 * into a header file plus about 3 separate .c files, to handle the details
 * of the Gadget, USB Mass Storage, and SCSI protocols.
 */
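/*
 * Illustrative sketch only, not driver code: the "fill" side of the
 * pipeline protocol described above, roughly as do_read() below uses it.
 * All of the names are real helpers and fields from this file; error
 * handling and size computations are trimmed.
 *
 *	bh = common->next_buffhd_to_fill;
 *	rc = sleep_thread(common, false, bh);	// wait until bh is no longer busy
 *	if (rc)
 *		return rc;
 *	nread = kernel_read(curlun->filp, bh->buf, amount, &pos);
 *	bh->inreq->length = nread;
 *	bh->state = BUF_STATE_FULL;		// hand the buffer to the consumer
 *	if (!start_in_transfer(common, bh))	// queue the bulk-in USB I/O
 *		return -EIO;
 *	common->next_buffhd_to_fill = bh->next;	// advance around the ring
 */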

/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */

#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/dcache.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/limits.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include <linux/nospec.h>

#include "configfs.h"


/*------------------------------------------------------------------------*/

#define FSG_DRIVER_DESC		"Mass Storage Function"
#define FSG_DRIVER_VERSION	"2009/09/11"

static const char fsg_string_interface[] = "Mass Storage";

#include "storage_common.h"
#include "f_mass_storage.h"

/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
static struct usb_string		fsg_strings[] = {
	{FSG_STRING_INTERFACE,		fsg_string_interface},
	{}
};

static struct usb_gadget_strings	fsg_stringtab = {
	.language	= 0x0409,		/* en-us */
	.strings	= fsg_strings,
};

static struct usb_gadget_strings *fsg_strings_array[] = {
	&fsg_stringtab,
	NULL,
};

/*-------------------------------------------------------------------------*/

struct fsg_dev;
struct fsg_common;

/* Data shared by all the FSG instances.
*/ 232 struct fsg_common { 233 struct usb_gadget *gadget; 234 struct usb_composite_dev *cdev; 235 struct fsg_dev *fsg; 236 wait_queue_head_t io_wait; 237 wait_queue_head_t fsg_wait; 238 239 /* filesem protects: backing files in use */ 240 struct rw_semaphore filesem; 241 242 /* lock protects: state and thread_task */ 243 spinlock_t lock; 244 245 struct usb_ep *ep0; /* Copy of gadget->ep0 */ 246 struct usb_request *ep0req; /* Copy of cdev->req */ 247 unsigned int ep0_req_tag; 248 249 struct fsg_buffhd *next_buffhd_to_fill; 250 struct fsg_buffhd *next_buffhd_to_drain; 251 struct fsg_buffhd *buffhds; 252 unsigned int fsg_num_buffers; 253 254 int cmnd_size; 255 u8 cmnd[MAX_COMMAND_SIZE]; 256 257 unsigned int lun; 258 struct fsg_lun *luns[FSG_MAX_LUNS]; 259 struct fsg_lun *curlun; 260 261 unsigned int bulk_out_maxpacket; 262 enum fsg_state state; /* For exception handling */ 263 unsigned int exception_req_tag; 264 void *exception_arg; 265 266 enum data_direction data_dir; 267 u32 data_size; 268 u32 data_size_from_cmnd; 269 u32 tag; 270 u32 residue; 271 u32 usb_amount_left; 272 273 unsigned int can_stall:1; 274 unsigned int free_storage_on_release:1; 275 unsigned int phase_error:1; 276 unsigned int short_packet_received:1; 277 unsigned int bad_lun_okay:1; 278 unsigned int running:1; 279 unsigned int sysfs:1; 280 281 struct completion thread_notifier; 282 struct task_struct *thread_task; 283 284 /* Gadget's private data. */ 285 void *private_data; 286 287 char inquiry_string[INQUIRY_STRING_LEN]; 288 }; 289 290 struct fsg_dev { 291 struct usb_function function; 292 struct usb_gadget *gadget; /* Copy of cdev->gadget */ 293 struct fsg_common *common; 294 295 u16 interface_number; 296 297 unsigned int bulk_in_enabled:1; 298 unsigned int bulk_out_enabled:1; 299 300 unsigned long atomic_bitflags; 301 #define IGNORE_BULK_OUT 0 302 303 struct usb_ep *bulk_in; 304 struct usb_ep *bulk_out; 305 }; 306 307 static inline int __fsg_is_set(struct fsg_common *common, 308 const char *func, unsigned line) 309 { 310 if (common->fsg) 311 return 1; 312 ERROR(common, "common->fsg is NULL in %s at %u\n", func, line); 313 WARN_ON(1); 314 return 0; 315 } 316 317 #define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__)) 318 319 static inline struct fsg_dev *fsg_from_func(struct usb_function *f) 320 { 321 return container_of(f, struct fsg_dev, function); 322 } 323 324 static int exception_in_progress(struct fsg_common *common) 325 { 326 return common->state > FSG_STATE_NORMAL; 327 } 328 329 /* Make bulk-out requests be divisible by the maxpacket size */ 330 static void set_bulk_out_req_length(struct fsg_common *common, 331 struct fsg_buffhd *bh, unsigned int length) 332 { 333 unsigned int rem; 334 335 bh->bulk_out_intended_length = length; 336 rem = length % common->bulk_out_maxpacket; 337 if (rem > 0) 338 length += common->bulk_out_maxpacket - rem; 339 bh->outreq->length = length; 340 } 341 342 343 /*-------------------------------------------------------------------------*/ 344 345 static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) 346 { 347 const char *name; 348 349 if (ep == fsg->bulk_in) 350 name = "bulk-in"; 351 else if (ep == fsg->bulk_out) 352 name = "bulk-out"; 353 else 354 name = ep->name; 355 DBG(fsg, "%s set halt\n", name); 356 return usb_ep_set_halt(ep); 357 } 358 359 360 /*-------------------------------------------------------------------------*/ 361 362 /* These routines may be called in process context or in_irq */ 363 364 static void __raise_exception(struct fsg_common 
*common, enum fsg_state new_state, 365 void *arg) 366 { 367 unsigned long flags; 368 369 /* 370 * Do nothing if a higher-priority exception is already in progress. 371 * If a lower-or-equal priority exception is in progress, preempt it 372 * and notify the main thread by sending it a signal. 373 */ 374 spin_lock_irqsave(&common->lock, flags); 375 if (common->state <= new_state) { 376 common->exception_req_tag = common->ep0_req_tag; 377 common->state = new_state; 378 common->exception_arg = arg; 379 if (common->thread_task) 380 send_sig_info(SIGUSR1, SEND_SIG_PRIV, 381 common->thread_task); 382 } 383 spin_unlock_irqrestore(&common->lock, flags); 384 } 385 386 static void raise_exception(struct fsg_common *common, enum fsg_state new_state) 387 { 388 __raise_exception(common, new_state, NULL); 389 } 390 391 /*-------------------------------------------------------------------------*/ 392 393 static int ep0_queue(struct fsg_common *common) 394 { 395 int rc; 396 397 rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC); 398 common->ep0->driver_data = common; 399 if (rc != 0 && rc != -ESHUTDOWN) { 400 /* We can't do much more than wait for a reset */ 401 WARNING(common, "error in submission: %s --> %d\n", 402 common->ep0->name, rc); 403 } 404 return rc; 405 } 406 407 408 /*-------------------------------------------------------------------------*/ 409 410 /* Completion handlers. These always run in_irq. */ 411 412 static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req) 413 { 414 struct fsg_common *common = ep->driver_data; 415 struct fsg_buffhd *bh = req->context; 416 417 if (req->status || req->actual != req->length) 418 DBG(common, "%s --> %d, %u/%u\n", __func__, 419 req->status, req->actual, req->length); 420 if (req->status == -ECONNRESET) /* Request was cancelled */ 421 usb_ep_fifo_flush(ep); 422 423 /* Synchronize with the smp_load_acquire() in sleep_thread() */ 424 smp_store_release(&bh->state, BUF_STATE_EMPTY); 425 wake_up(&common->io_wait); 426 } 427 428 static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req) 429 { 430 struct fsg_common *common = ep->driver_data; 431 struct fsg_buffhd *bh = req->context; 432 433 dump_msg(common, "bulk-out", req->buf, req->actual); 434 if (req->status || req->actual != bh->bulk_out_intended_length) 435 DBG(common, "%s --> %d, %u/%u\n", __func__, 436 req->status, req->actual, bh->bulk_out_intended_length); 437 if (req->status == -ECONNRESET) /* Request was cancelled */ 438 usb_ep_fifo_flush(ep); 439 440 /* Synchronize with the smp_load_acquire() in sleep_thread() */ 441 smp_store_release(&bh->state, BUF_STATE_FULL); 442 wake_up(&common->io_wait); 443 } 444 445 static int _fsg_common_get_max_lun(struct fsg_common *common) 446 { 447 int i = ARRAY_SIZE(common->luns) - 1; 448 449 while (i >= 0 && !common->luns[i]) 450 --i; 451 452 return i; 453 } 454 455 static int fsg_setup(struct usb_function *f, 456 const struct usb_ctrlrequest *ctrl) 457 { 458 struct fsg_dev *fsg = fsg_from_func(f); 459 struct usb_request *req = fsg->common->ep0req; 460 u16 w_index = le16_to_cpu(ctrl->wIndex); 461 u16 w_value = le16_to_cpu(ctrl->wValue); 462 u16 w_length = le16_to_cpu(ctrl->wLength); 463 464 if (!fsg_is_set(fsg->common)) 465 return -EOPNOTSUPP; 466 467 ++fsg->common->ep0_req_tag; /* Record arrival of a new request */ 468 req->context = NULL; 469 req->length = 0; 470 dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl)); 471 472 switch (ctrl->bRequest) { 473 474 case US_BULK_RESET_REQUEST: 475 if (ctrl->bRequestType != 476 
(USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 477 break; 478 if (w_index != fsg->interface_number || w_value != 0 || 479 w_length != 0) 480 return -EDOM; 481 482 /* 483 * Raise an exception to stop the current operation 484 * and reinitialize our state. 485 */ 486 DBG(fsg, "bulk reset request\n"); 487 raise_exception(fsg->common, FSG_STATE_PROTOCOL_RESET); 488 return USB_GADGET_DELAYED_STATUS; 489 490 case US_BULK_GET_MAX_LUN: 491 if (ctrl->bRequestType != 492 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 493 break; 494 if (w_index != fsg->interface_number || w_value != 0 || 495 w_length != 1) 496 return -EDOM; 497 VDBG(fsg, "get max LUN\n"); 498 *(u8 *)req->buf = _fsg_common_get_max_lun(fsg->common); 499 500 /* Respond with data/status */ 501 req->length = min((u16)1, w_length); 502 return ep0_queue(fsg->common); 503 } 504 505 VDBG(fsg, 506 "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n", 507 ctrl->bRequestType, ctrl->bRequest, 508 le16_to_cpu(ctrl->wValue), w_index, w_length); 509 return -EOPNOTSUPP; 510 } 511 512 513 /*-------------------------------------------------------------------------*/ 514 515 /* All the following routines run in process context */ 516 517 /* Use this for bulk or interrupt transfers, not ep0 */ 518 static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep, 519 struct usb_request *req) 520 { 521 int rc; 522 523 if (ep == fsg->bulk_in) 524 dump_msg(fsg, "bulk-in", req->buf, req->length); 525 526 rc = usb_ep_queue(ep, req, GFP_KERNEL); 527 if (rc) { 528 529 /* We can't do much more than wait for a reset */ 530 req->status = rc; 531 532 /* 533 * Note: currently the net2280 driver fails zero-length 534 * submissions if DMA is enabled. 535 */ 536 if (rc != -ESHUTDOWN && 537 !(rc == -EOPNOTSUPP && req->length == 0)) 538 WARNING(fsg, "error in submission: %s --> %d\n", 539 ep->name, rc); 540 } 541 return rc; 542 } 543 544 static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh) 545 { 546 if (!fsg_is_set(common)) 547 return false; 548 bh->state = BUF_STATE_SENDING; 549 if (start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq)) 550 bh->state = BUF_STATE_EMPTY; 551 return true; 552 } 553 554 static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh) 555 { 556 if (!fsg_is_set(common)) 557 return false; 558 bh->state = BUF_STATE_RECEIVING; 559 if (start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq)) 560 bh->state = BUF_STATE_FULL; 561 return true; 562 } 563 564 static int sleep_thread(struct fsg_common *common, bool can_freeze, 565 struct fsg_buffhd *bh) 566 { 567 int rc; 568 569 /* Wait until a signal arrives or bh is no longer busy */ 570 if (can_freeze) 571 /* 572 * synchronize with the smp_store_release(&bh->state) in 573 * bulk_in_complete() or bulk_out_complete() 574 */ 575 rc = wait_event_freezable(common->io_wait, 576 bh && smp_load_acquire(&bh->state) >= 577 BUF_STATE_EMPTY); 578 else 579 rc = wait_event_interruptible(common->io_wait, 580 bh && smp_load_acquire(&bh->state) >= 581 BUF_STATE_EMPTY); 582 return rc ? 
-EINTR : 0; 583 } 584 585 586 /*-------------------------------------------------------------------------*/ 587 588 static int do_read(struct fsg_common *common) 589 { 590 struct fsg_lun *curlun = common->curlun; 591 u32 lba; 592 struct fsg_buffhd *bh; 593 int rc; 594 u32 amount_left; 595 loff_t file_offset, file_offset_tmp; 596 unsigned int amount; 597 ssize_t nread; 598 599 /* 600 * Get the starting Logical Block Address and check that it's 601 * not too big. 602 */ 603 if (common->cmnd[0] == READ_6) 604 lba = get_unaligned_be24(&common->cmnd[1]); 605 else { 606 lba = get_unaligned_be32(&common->cmnd[2]); 607 608 /* 609 * We allow DPO (Disable Page Out = don't save data in the 610 * cache) and FUA (Force Unit Access = don't read from the 611 * cache), but we don't implement them. 612 */ 613 if ((common->cmnd[1] & ~0x18) != 0) { 614 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 615 return -EINVAL; 616 } 617 } 618 if (lba >= curlun->num_sectors) { 619 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 620 return -EINVAL; 621 } 622 file_offset = ((loff_t) lba) << curlun->blkbits; 623 624 /* Carry out the file reads */ 625 amount_left = common->data_size_from_cmnd; 626 if (unlikely(amount_left == 0)) 627 return -EIO; /* No default reply */ 628 629 for (;;) { 630 /* 631 * Figure out how much we need to read: 632 * Try to read the remaining amount. 633 * But don't read more than the buffer size. 634 * And don't try to read past the end of the file. 635 */ 636 amount = min(amount_left, FSG_BUFLEN); 637 amount = min((loff_t)amount, 638 curlun->file_length - file_offset); 639 640 /* Wait for the next buffer to become available */ 641 bh = common->next_buffhd_to_fill; 642 rc = sleep_thread(common, false, bh); 643 if (rc) 644 return rc; 645 646 /* 647 * If we were asked to read past the end of file, 648 * end with an empty buffer. 649 */ 650 if (amount == 0) { 651 curlun->sense_data = 652 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 653 curlun->sense_data_info = 654 file_offset >> curlun->blkbits; 655 curlun->info_valid = 1; 656 bh->inreq->length = 0; 657 bh->state = BUF_STATE_FULL; 658 break; 659 } 660 661 /* Perform the read */ 662 file_offset_tmp = file_offset; 663 nread = kernel_read(curlun->filp, bh->buf, amount, 664 &file_offset_tmp); 665 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, 666 (unsigned long long)file_offset, (int)nread); 667 if (signal_pending(current)) 668 return -EINTR; 669 670 if (nread < 0) { 671 LDBG(curlun, "error in file read: %d\n", (int)nread); 672 nread = 0; 673 } else if (nread < amount) { 674 LDBG(curlun, "partial file read: %d/%u\n", 675 (int)nread, amount); 676 nread = round_down(nread, curlun->blksize); 677 } 678 file_offset += nread; 679 amount_left -= nread; 680 common->residue -= nread; 681 682 /* 683 * Except at the end of the transfer, nread will be 684 * equal to the buffer size, which is divisible by the 685 * bulk-in maxpacket size. 
686 */ 687 bh->inreq->length = nread; 688 bh->state = BUF_STATE_FULL; 689 690 /* If an error occurred, report it and its position */ 691 if (nread < amount) { 692 curlun->sense_data = SS_UNRECOVERED_READ_ERROR; 693 curlun->sense_data_info = 694 file_offset >> curlun->blkbits; 695 curlun->info_valid = 1; 696 break; 697 } 698 699 if (amount_left == 0) 700 break; /* No more left to read */ 701 702 /* Send this buffer and go read some more */ 703 bh->inreq->zero = 0; 704 if (!start_in_transfer(common, bh)) 705 /* Don't know what to do if common->fsg is NULL */ 706 return -EIO; 707 common->next_buffhd_to_fill = bh->next; 708 } 709 710 return -EIO; /* No default reply */ 711 } 712 713 714 /*-------------------------------------------------------------------------*/ 715 716 static int do_write(struct fsg_common *common) 717 { 718 struct fsg_lun *curlun = common->curlun; 719 u32 lba; 720 struct fsg_buffhd *bh; 721 int get_some_more; 722 u32 amount_left_to_req, amount_left_to_write; 723 loff_t usb_offset, file_offset, file_offset_tmp; 724 unsigned int amount; 725 ssize_t nwritten; 726 int rc; 727 728 if (curlun->ro) { 729 curlun->sense_data = SS_WRITE_PROTECTED; 730 return -EINVAL; 731 } 732 spin_lock(&curlun->filp->f_lock); 733 curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */ 734 spin_unlock(&curlun->filp->f_lock); 735 736 /* 737 * Get the starting Logical Block Address and check that it's 738 * not too big 739 */ 740 if (common->cmnd[0] == WRITE_6) 741 lba = get_unaligned_be24(&common->cmnd[1]); 742 else { 743 lba = get_unaligned_be32(&common->cmnd[2]); 744 745 /* 746 * We allow DPO (Disable Page Out = don't save data in the 747 * cache) and FUA (Force Unit Access = write directly to the 748 * medium). We don't implement DPO; we implement FUA by 749 * performing synchronous output. 750 */ 751 if (common->cmnd[1] & ~0x18) { 752 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 753 return -EINVAL; 754 } 755 if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */ 756 spin_lock(&curlun->filp->f_lock); 757 curlun->filp->f_flags |= O_SYNC; 758 spin_unlock(&curlun->filp->f_lock); 759 } 760 } 761 if (lba >= curlun->num_sectors) { 762 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 763 return -EINVAL; 764 } 765 766 /* Carry out the file writes */ 767 get_some_more = 1; 768 file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits; 769 amount_left_to_req = common->data_size_from_cmnd; 770 amount_left_to_write = common->data_size_from_cmnd; 771 772 while (amount_left_to_write > 0) { 773 774 /* Queue a request for more data from the host */ 775 bh = common->next_buffhd_to_fill; 776 if (bh->state == BUF_STATE_EMPTY && get_some_more) { 777 778 /* 779 * Figure out how much we want to get: 780 * Try to get the remaining amount, 781 * but not more than the buffer size. 782 */ 783 amount = min(amount_left_to_req, FSG_BUFLEN); 784 785 /* Beyond the end of the backing file? */ 786 if (usb_offset >= curlun->file_length) { 787 get_some_more = 0; 788 curlun->sense_data = 789 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 790 curlun->sense_data_info = 791 usb_offset >> curlun->blkbits; 792 curlun->info_valid = 1; 793 continue; 794 } 795 796 /* Get the next buffer */ 797 usb_offset += amount; 798 common->usb_amount_left -= amount; 799 amount_left_to_req -= amount; 800 if (amount_left_to_req == 0) 801 get_some_more = 0; 802 803 /* 804 * Except at the end of the transfer, amount will be 805 * equal to the buffer size, which is divisible by 806 * the bulk-out maxpacket size. 
807 */ 808 set_bulk_out_req_length(common, bh, amount); 809 if (!start_out_transfer(common, bh)) 810 /* Dunno what to do if common->fsg is NULL */ 811 return -EIO; 812 common->next_buffhd_to_fill = bh->next; 813 continue; 814 } 815 816 /* Write the received data to the backing file */ 817 bh = common->next_buffhd_to_drain; 818 if (bh->state == BUF_STATE_EMPTY && !get_some_more) 819 break; /* We stopped early */ 820 821 /* Wait for the data to be received */ 822 rc = sleep_thread(common, false, bh); 823 if (rc) 824 return rc; 825 826 common->next_buffhd_to_drain = bh->next; 827 bh->state = BUF_STATE_EMPTY; 828 829 /* Did something go wrong with the transfer? */ 830 if (bh->outreq->status != 0) { 831 curlun->sense_data = SS_COMMUNICATION_FAILURE; 832 curlun->sense_data_info = 833 file_offset >> curlun->blkbits; 834 curlun->info_valid = 1; 835 break; 836 } 837 838 amount = bh->outreq->actual; 839 if (curlun->file_length - file_offset < amount) { 840 LERROR(curlun, "write %u @ %llu beyond end %llu\n", 841 amount, (unsigned long long)file_offset, 842 (unsigned long long)curlun->file_length); 843 amount = curlun->file_length - file_offset; 844 } 845 846 /* 847 * Don't accept excess data. The spec doesn't say 848 * what to do in this case. We'll ignore the error. 849 */ 850 amount = min(amount, bh->bulk_out_intended_length); 851 852 /* Don't write a partial block */ 853 amount = round_down(amount, curlun->blksize); 854 if (amount == 0) 855 goto empty_write; 856 857 /* Perform the write */ 858 file_offset_tmp = file_offset; 859 nwritten = kernel_write(curlun->filp, bh->buf, amount, 860 &file_offset_tmp); 861 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount, 862 (unsigned long long)file_offset, (int)nwritten); 863 if (signal_pending(current)) 864 return -EINTR; /* Interrupted! */ 865 866 if (nwritten < 0) { 867 LDBG(curlun, "error in file write: %d\n", 868 (int) nwritten); 869 nwritten = 0; 870 } else if (nwritten < amount) { 871 LDBG(curlun, "partial file write: %d/%u\n", 872 (int) nwritten, amount); 873 nwritten = round_down(nwritten, curlun->blksize); 874 } 875 file_offset += nwritten; 876 amount_left_to_write -= nwritten; 877 common->residue -= nwritten; 878 879 /* If an error occurred, report it and its position */ 880 if (nwritten < amount) { 881 curlun->sense_data = SS_WRITE_ERROR; 882 curlun->sense_data_info = 883 file_offset >> curlun->blkbits; 884 curlun->info_valid = 1; 885 break; 886 } 887 888 empty_write: 889 /* Did the host decide to stop early? */ 890 if (bh->outreq->actual < bh->bulk_out_intended_length) { 891 common->short_packet_received = 1; 892 break; 893 } 894 } 895 896 return -EIO; /* No default reply */ 897 } 898 899 900 /*-------------------------------------------------------------------------*/ 901 902 static int do_synchronize_cache(struct fsg_common *common) 903 { 904 struct fsg_lun *curlun = common->curlun; 905 int rc; 906 907 /* We ignore the requested LBA and write out all file's 908 * dirty data buffers. 
*/ 909 rc = fsg_lun_fsync_sub(curlun); 910 if (rc) 911 curlun->sense_data = SS_WRITE_ERROR; 912 return 0; 913 } 914 915 916 /*-------------------------------------------------------------------------*/ 917 918 static void invalidate_sub(struct fsg_lun *curlun) 919 { 920 struct file *filp = curlun->filp; 921 struct inode *inode = file_inode(filp); 922 unsigned long rc; 923 924 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); 925 VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc); 926 } 927 928 static int do_verify(struct fsg_common *common) 929 { 930 struct fsg_lun *curlun = common->curlun; 931 u32 lba; 932 u32 verification_length; 933 struct fsg_buffhd *bh = common->next_buffhd_to_fill; 934 loff_t file_offset, file_offset_tmp; 935 u32 amount_left; 936 unsigned int amount; 937 ssize_t nread; 938 939 /* 940 * Get the starting Logical Block Address and check that it's 941 * not too big. 942 */ 943 lba = get_unaligned_be32(&common->cmnd[2]); 944 if (lba >= curlun->num_sectors) { 945 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 946 return -EINVAL; 947 } 948 949 /* 950 * We allow DPO (Disable Page Out = don't save data in the 951 * cache) but we don't implement it. 952 */ 953 if (common->cmnd[1] & ~0x10) { 954 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 955 return -EINVAL; 956 } 957 958 verification_length = get_unaligned_be16(&common->cmnd[7]); 959 if (unlikely(verification_length == 0)) 960 return -EIO; /* No default reply */ 961 962 /* Prepare to carry out the file verify */ 963 amount_left = verification_length << curlun->blkbits; 964 file_offset = ((loff_t) lba) << curlun->blkbits; 965 966 /* Write out all the dirty buffers before invalidating them */ 967 fsg_lun_fsync_sub(curlun); 968 if (signal_pending(current)) 969 return -EINTR; 970 971 invalidate_sub(curlun); 972 if (signal_pending(current)) 973 return -EINTR; 974 975 /* Just try to read the requested blocks */ 976 while (amount_left > 0) { 977 /* 978 * Figure out how much we need to read: 979 * Try to read the remaining amount, but not more than 980 * the buffer size. 981 * And don't try to read past the end of the file. 
982 */ 983 amount = min(amount_left, FSG_BUFLEN); 984 amount = min((loff_t)amount, 985 curlun->file_length - file_offset); 986 if (amount == 0) { 987 curlun->sense_data = 988 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 989 curlun->sense_data_info = 990 file_offset >> curlun->blkbits; 991 curlun->info_valid = 1; 992 break; 993 } 994 995 /* Perform the read */ 996 file_offset_tmp = file_offset; 997 nread = kernel_read(curlun->filp, bh->buf, amount, 998 &file_offset_tmp); 999 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, 1000 (unsigned long long) file_offset, 1001 (int) nread); 1002 if (signal_pending(current)) 1003 return -EINTR; 1004 1005 if (nread < 0) { 1006 LDBG(curlun, "error in file verify: %d\n", (int)nread); 1007 nread = 0; 1008 } else if (nread < amount) { 1009 LDBG(curlun, "partial file verify: %d/%u\n", 1010 (int)nread, amount); 1011 nread = round_down(nread, curlun->blksize); 1012 } 1013 if (nread == 0) { 1014 curlun->sense_data = SS_UNRECOVERED_READ_ERROR; 1015 curlun->sense_data_info = 1016 file_offset >> curlun->blkbits; 1017 curlun->info_valid = 1; 1018 break; 1019 } 1020 file_offset += nread; 1021 amount_left -= nread; 1022 } 1023 return 0; 1024 } 1025 1026 1027 /*-------------------------------------------------------------------------*/ 1028 1029 static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh) 1030 { 1031 struct fsg_lun *curlun = common->curlun; 1032 u8 *buf = (u8 *) bh->buf; 1033 1034 if (!curlun) { /* Unsupported LUNs are okay */ 1035 common->bad_lun_okay = 1; 1036 memset(buf, 0, 36); 1037 buf[0] = TYPE_NO_LUN; /* Unsupported, no device-type */ 1038 buf[4] = 31; /* Additional length */ 1039 return 36; 1040 } 1041 1042 buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK; 1043 buf[1] = curlun->removable ? 0x80 : 0; 1044 buf[2] = 2; /* ANSI SCSI level 2 */ 1045 buf[3] = 2; /* SCSI-2 INQUIRY data format */ 1046 buf[4] = 31; /* Additional length */ 1047 buf[5] = 0; /* No special options */ 1048 buf[6] = 0; 1049 buf[7] = 0; 1050 if (curlun->inquiry_string[0]) 1051 memcpy(buf + 8, curlun->inquiry_string, 1052 sizeof(curlun->inquiry_string)); 1053 else 1054 memcpy(buf + 8, common->inquiry_string, 1055 sizeof(common->inquiry_string)); 1056 return 36; 1057 } 1058 1059 static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh) 1060 { 1061 struct fsg_lun *curlun = common->curlun; 1062 u8 *buf = (u8 *) bh->buf; 1063 u32 sd, sdinfo; 1064 int valid; 1065 1066 /* 1067 * From the SCSI-2 spec., section 7.9 (Unit attention condition): 1068 * 1069 * If a REQUEST SENSE command is received from an initiator 1070 * with a pending unit attention condition (before the target 1071 * generates the contingent allegiance condition), then the 1072 * target shall either: 1073 * a) report any pending sense data and preserve the unit 1074 * attention condition on the logical unit, or, 1075 * b) report the unit attention condition, may discard any 1076 * pending sense data, and clear the unit attention 1077 * condition on the logical unit for that initiator. 1078 * 1079 * FSG normally uses option a); enable this code to use option b). 
1080 */ 1081 #if 0 1082 if (curlun && curlun->unit_attention_data != SS_NO_SENSE) { 1083 curlun->sense_data = curlun->unit_attention_data; 1084 curlun->unit_attention_data = SS_NO_SENSE; 1085 } 1086 #endif 1087 1088 if (!curlun) { /* Unsupported LUNs are okay */ 1089 common->bad_lun_okay = 1; 1090 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED; 1091 sdinfo = 0; 1092 valid = 0; 1093 } else { 1094 sd = curlun->sense_data; 1095 sdinfo = curlun->sense_data_info; 1096 valid = curlun->info_valid << 7; 1097 curlun->sense_data = SS_NO_SENSE; 1098 curlun->sense_data_info = 0; 1099 curlun->info_valid = 0; 1100 } 1101 1102 memset(buf, 0, 18); 1103 buf[0] = valid | 0x70; /* Valid, current error */ 1104 buf[2] = SK(sd); 1105 put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */ 1106 buf[7] = 18 - 8; /* Additional sense length */ 1107 buf[12] = ASC(sd); 1108 buf[13] = ASCQ(sd); 1109 return 18; 1110 } 1111 1112 static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh) 1113 { 1114 struct fsg_lun *curlun = common->curlun; 1115 u32 lba = get_unaligned_be32(&common->cmnd[2]); 1116 int pmi = common->cmnd[8]; 1117 u8 *buf = (u8 *)bh->buf; 1118 1119 /* Check the PMI and LBA fields */ 1120 if (pmi > 1 || (pmi == 0 && lba != 0)) { 1121 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1122 return -EINVAL; 1123 } 1124 1125 put_unaligned_be32(curlun->num_sectors - 1, &buf[0]); 1126 /* Max logical block */ 1127 put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */ 1128 return 8; 1129 } 1130 1131 static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh) 1132 { 1133 struct fsg_lun *curlun = common->curlun; 1134 int msf = common->cmnd[1] & 0x02; 1135 u32 lba = get_unaligned_be32(&common->cmnd[2]); 1136 u8 *buf = (u8 *)bh->buf; 1137 1138 if (common->cmnd[1] & ~0x02) { /* Mask away MSF */ 1139 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1140 return -EINVAL; 1141 } 1142 if (lba >= curlun->num_sectors) { 1143 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1144 return -EINVAL; 1145 } 1146 1147 memset(buf, 0, 8); 1148 buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */ 1149 store_cdrom_address(&buf[4], msf, lba); 1150 return 8; 1151 } 1152 1153 static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) 1154 { 1155 struct fsg_lun *curlun = common->curlun; 1156 int msf = common->cmnd[1] & 0x02; 1157 int start_track = common->cmnd[6]; 1158 u8 *buf = (u8 *)bh->buf; 1159 1160 if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */ 1161 start_track > 1) { 1162 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1163 return -EINVAL; 1164 } 1165 1166 memset(buf, 0, 20); 1167 buf[1] = (20-2); /* TOC data length */ 1168 buf[2] = 1; /* First track number */ 1169 buf[3] = 1; /* Last track number */ 1170 buf[5] = 0x16; /* Data track, copying allowed */ 1171 buf[6] = 0x01; /* Only track is number 1 */ 1172 store_cdrom_address(&buf[8], msf, 0); 1173 1174 buf[13] = 0x16; /* Lead-out track is data */ 1175 buf[14] = 0xAA; /* Lead-out track number */ 1176 store_cdrom_address(&buf[16], msf, curlun->num_sectors); 1177 return 20; 1178 } 1179 1180 static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh) 1181 { 1182 struct fsg_lun *curlun = common->curlun; 1183 int mscmnd = common->cmnd[0]; 1184 u8 *buf = (u8 *) bh->buf; 1185 u8 *buf0 = buf; 1186 int pc, page_code; 1187 int changeable_values, all_pages; 1188 int valid_page = 0; 1189 int len, limit; 1190 1191 if ((common->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */ 1192 curlun->sense_data = 
SS_INVALID_FIELD_IN_CDB; 1193 return -EINVAL; 1194 } 1195 pc = common->cmnd[2] >> 6; 1196 page_code = common->cmnd[2] & 0x3f; 1197 if (pc == 3) { 1198 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED; 1199 return -EINVAL; 1200 } 1201 changeable_values = (pc == 1); 1202 all_pages = (page_code == 0x3f); 1203 1204 /* 1205 * Write the mode parameter header. Fixed values are: default 1206 * medium type, no cache control (DPOFUA), and no block descriptors. 1207 * The only variable value is the WriteProtect bit. We will fill in 1208 * the mode data length later. 1209 */ 1210 memset(buf, 0, 8); 1211 if (mscmnd == MODE_SENSE) { 1212 buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */ 1213 buf += 4; 1214 limit = 255; 1215 } else { /* MODE_SENSE_10 */ 1216 buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */ 1217 buf += 8; 1218 limit = 65535; /* Should really be FSG_BUFLEN */ 1219 } 1220 1221 /* No block descriptors */ 1222 1223 /* 1224 * The mode pages, in numerical order. The only page we support 1225 * is the Caching page. 1226 */ 1227 if (page_code == 0x08 || all_pages) { 1228 valid_page = 1; 1229 buf[0] = 0x08; /* Page code */ 1230 buf[1] = 10; /* Page length */ 1231 memset(buf+2, 0, 10); /* None of the fields are changeable */ 1232 1233 if (!changeable_values) { 1234 buf[2] = 0x04; /* Write cache enable, */ 1235 /* Read cache not disabled */ 1236 /* No cache retention priorities */ 1237 put_unaligned_be16(0xffff, &buf[4]); 1238 /* Don't disable prefetch */ 1239 /* Minimum prefetch = 0 */ 1240 put_unaligned_be16(0xffff, &buf[8]); 1241 /* Maximum prefetch */ 1242 put_unaligned_be16(0xffff, &buf[10]); 1243 /* Maximum prefetch ceiling */ 1244 } 1245 buf += 12; 1246 } 1247 1248 /* 1249 * Check that a valid page was requested and the mode data length 1250 * isn't too long. 1251 */ 1252 len = buf - buf0; 1253 if (!valid_page || len > limit) { 1254 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1255 return -EINVAL; 1256 } 1257 1258 /* Store the mode data length */ 1259 if (mscmnd == MODE_SENSE) 1260 buf0[0] = len - 1; 1261 else 1262 put_unaligned_be16(len - 2, buf0); 1263 return len; 1264 } 1265 1266 static int do_start_stop(struct fsg_common *common) 1267 { 1268 struct fsg_lun *curlun = common->curlun; 1269 int loej, start; 1270 1271 if (!curlun) { 1272 return -EINVAL; 1273 } else if (!curlun->removable) { 1274 curlun->sense_data = SS_INVALID_COMMAND; 1275 return -EINVAL; 1276 } else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */ 1277 (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */ 1278 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1279 return -EINVAL; 1280 } 1281 1282 loej = common->cmnd[4] & 0x02; 1283 start = common->cmnd[4] & 0x01; 1284 1285 /* 1286 * Our emulation doesn't support mounting; the medium is 1287 * available for use as soon as it is loaded. 1288 */ 1289 if (start) { 1290 if (!fsg_lun_is_open(curlun)) { 1291 curlun->sense_data = SS_MEDIUM_NOT_PRESENT; 1292 return -EINVAL; 1293 } 1294 return 0; 1295 } 1296 1297 /* Are we allowed to unload the media? 
*/ 1298 if (curlun->prevent_medium_removal) { 1299 LDBG(curlun, "unload attempt prevented\n"); 1300 curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED; 1301 return -EINVAL; 1302 } 1303 1304 if (!loej) 1305 return 0; 1306 1307 up_read(&common->filesem); 1308 down_write(&common->filesem); 1309 fsg_lun_close(curlun); 1310 up_write(&common->filesem); 1311 down_read(&common->filesem); 1312 1313 return 0; 1314 } 1315 1316 static int do_prevent_allow(struct fsg_common *common) 1317 { 1318 struct fsg_lun *curlun = common->curlun; 1319 int prevent; 1320 1321 if (!common->curlun) { 1322 return -EINVAL; 1323 } else if (!common->curlun->removable) { 1324 common->curlun->sense_data = SS_INVALID_COMMAND; 1325 return -EINVAL; 1326 } 1327 1328 prevent = common->cmnd[4] & 0x01; 1329 if ((common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */ 1330 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1331 return -EINVAL; 1332 } 1333 1334 if (curlun->prevent_medium_removal && !prevent) 1335 fsg_lun_fsync_sub(curlun); 1336 curlun->prevent_medium_removal = prevent; 1337 return 0; 1338 } 1339 1340 static int do_read_format_capacities(struct fsg_common *common, 1341 struct fsg_buffhd *bh) 1342 { 1343 struct fsg_lun *curlun = common->curlun; 1344 u8 *buf = (u8 *) bh->buf; 1345 1346 buf[0] = buf[1] = buf[2] = 0; 1347 buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */ 1348 buf += 4; 1349 1350 put_unaligned_be32(curlun->num_sectors, &buf[0]); 1351 /* Number of blocks */ 1352 put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */ 1353 buf[4] = 0x02; /* Current capacity */ 1354 return 12; 1355 } 1356 1357 static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh) 1358 { 1359 struct fsg_lun *curlun = common->curlun; 1360 1361 /* We don't support MODE SELECT */ 1362 if (curlun) 1363 curlun->sense_data = SS_INVALID_COMMAND; 1364 return -EINVAL; 1365 } 1366 1367 1368 /*-------------------------------------------------------------------------*/ 1369 1370 static int halt_bulk_in_endpoint(struct fsg_dev *fsg) 1371 { 1372 int rc; 1373 1374 rc = fsg_set_halt(fsg, fsg->bulk_in); 1375 if (rc == -EAGAIN) 1376 VDBG(fsg, "delayed bulk-in endpoint halt\n"); 1377 while (rc != 0) { 1378 if (rc != -EAGAIN) { 1379 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc); 1380 rc = 0; 1381 break; 1382 } 1383 1384 /* Wait for a short time and then try again */ 1385 if (msleep_interruptible(100) != 0) 1386 return -EINTR; 1387 rc = usb_ep_set_halt(fsg->bulk_in); 1388 } 1389 return rc; 1390 } 1391 1392 static int wedge_bulk_in_endpoint(struct fsg_dev *fsg) 1393 { 1394 int rc; 1395 1396 DBG(fsg, "bulk-in set wedge\n"); 1397 rc = usb_ep_set_wedge(fsg->bulk_in); 1398 if (rc == -EAGAIN) 1399 VDBG(fsg, "delayed bulk-in endpoint wedge\n"); 1400 while (rc != 0) { 1401 if (rc != -EAGAIN) { 1402 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc); 1403 rc = 0; 1404 break; 1405 } 1406 1407 /* Wait for a short time and then try again */ 1408 if (msleep_interruptible(100) != 0) 1409 return -EINTR; 1410 rc = usb_ep_set_wedge(fsg->bulk_in); 1411 } 1412 return rc; 1413 } 1414 1415 static int throw_away_data(struct fsg_common *common) 1416 { 1417 struct fsg_buffhd *bh, *bh2; 1418 u32 amount; 1419 int rc; 1420 1421 for (bh = common->next_buffhd_to_drain; 1422 bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0; 1423 bh = common->next_buffhd_to_drain) { 1424 1425 /* Try to submit another request if we need one */ 1426 bh2 = common->next_buffhd_to_fill; 1427 if (bh2->state == BUF_STATE_EMPTY && 1428 common->usb_amount_left > 0) { 1429 amount 
= min(common->usb_amount_left, FSG_BUFLEN); 1430 1431 /* 1432 * Except at the end of the transfer, amount will be 1433 * equal to the buffer size, which is divisible by 1434 * the bulk-out maxpacket size. 1435 */ 1436 set_bulk_out_req_length(common, bh2, amount); 1437 if (!start_out_transfer(common, bh2)) 1438 /* Dunno what to do if common->fsg is NULL */ 1439 return -EIO; 1440 common->next_buffhd_to_fill = bh2->next; 1441 common->usb_amount_left -= amount; 1442 continue; 1443 } 1444 1445 /* Wait for the data to be received */ 1446 rc = sleep_thread(common, false, bh); 1447 if (rc) 1448 return rc; 1449 1450 /* Throw away the data in a filled buffer */ 1451 bh->state = BUF_STATE_EMPTY; 1452 common->next_buffhd_to_drain = bh->next; 1453 1454 /* A short packet or an error ends everything */ 1455 if (bh->outreq->actual < bh->bulk_out_intended_length || 1456 bh->outreq->status != 0) { 1457 raise_exception(common, FSG_STATE_ABORT_BULK_OUT); 1458 return -EINTR; 1459 } 1460 } 1461 return 0; 1462 } 1463 1464 static int finish_reply(struct fsg_common *common) 1465 { 1466 struct fsg_buffhd *bh = common->next_buffhd_to_fill; 1467 int rc = 0; 1468 1469 switch (common->data_dir) { 1470 case DATA_DIR_NONE: 1471 break; /* Nothing to send */ 1472 1473 /* 1474 * If we don't know whether the host wants to read or write, 1475 * this must be CB or CBI with an unknown command. We mustn't 1476 * try to send or receive any data. So stall both bulk pipes 1477 * if we can and wait for a reset. 1478 */ 1479 case DATA_DIR_UNKNOWN: 1480 if (!common->can_stall) { 1481 /* Nothing */ 1482 } else if (fsg_is_set(common)) { 1483 fsg_set_halt(common->fsg, common->fsg->bulk_out); 1484 rc = halt_bulk_in_endpoint(common->fsg); 1485 } else { 1486 /* Don't know what to do if common->fsg is NULL */ 1487 rc = -EIO; 1488 } 1489 break; 1490 1491 /* All but the last buffer of data must have already been sent */ 1492 case DATA_DIR_TO_HOST: 1493 if (common->data_size == 0) { 1494 /* Nothing to send */ 1495 1496 /* Don't know what to do if common->fsg is NULL */ 1497 } else if (!fsg_is_set(common)) { 1498 rc = -EIO; 1499 1500 /* If there's no residue, simply send the last buffer */ 1501 } else if (common->residue == 0) { 1502 bh->inreq->zero = 0; 1503 if (!start_in_transfer(common, bh)) 1504 return -EIO; 1505 common->next_buffhd_to_fill = bh->next; 1506 1507 /* 1508 * For Bulk-only, mark the end of the data with a short 1509 * packet. If we are allowed to stall, halt the bulk-in 1510 * endpoint. (Note: This violates the Bulk-Only Transport 1511 * specification, which requires us to pad the data if we 1512 * don't halt the endpoint. Presumably nobody will mind.) 1513 */ 1514 } else { 1515 bh->inreq->zero = 1; 1516 if (!start_in_transfer(common, bh)) 1517 rc = -EIO; 1518 common->next_buffhd_to_fill = bh->next; 1519 if (common->can_stall) 1520 rc = halt_bulk_in_endpoint(common->fsg); 1521 } 1522 break; 1523 1524 /* 1525 * We have processed all we want from the data the host has sent. 1526 * There may still be outstanding bulk-out requests. 1527 */ 1528 case DATA_DIR_FROM_HOST: 1529 if (common->residue == 0) { 1530 /* Nothing to receive */ 1531 1532 /* Did the host stop sending unexpectedly early? */ 1533 } else if (common->short_packet_received) { 1534 raise_exception(common, FSG_STATE_ABORT_BULK_OUT); 1535 rc = -EINTR; 1536 1537 /* 1538 * We haven't processed all the incoming data. Even though 1539 * we may be allowed to stall, doing so would cause a race. 
1540 * The controller may already have ACK'ed all the remaining 1541 * bulk-out packets, in which case the host wouldn't see a 1542 * STALL. Not realizing the endpoint was halted, it wouldn't 1543 * clear the halt -- leading to problems later on. 1544 */ 1545 #if 0 1546 } else if (common->can_stall) { 1547 if (fsg_is_set(common)) 1548 fsg_set_halt(common->fsg, 1549 common->fsg->bulk_out); 1550 raise_exception(common, FSG_STATE_ABORT_BULK_OUT); 1551 rc = -EINTR; 1552 #endif 1553 1554 /* 1555 * We can't stall. Read in the excess data and throw it 1556 * all away. 1557 */ 1558 } else { 1559 rc = throw_away_data(common); 1560 } 1561 break; 1562 } 1563 return rc; 1564 } 1565 1566 static void send_status(struct fsg_common *common) 1567 { 1568 struct fsg_lun *curlun = common->curlun; 1569 struct fsg_buffhd *bh; 1570 struct bulk_cs_wrap *csw; 1571 int rc; 1572 u8 status = US_BULK_STAT_OK; 1573 u32 sd, sdinfo = 0; 1574 1575 /* Wait for the next buffer to become available */ 1576 bh = common->next_buffhd_to_fill; 1577 rc = sleep_thread(common, false, bh); 1578 if (rc) 1579 return; 1580 1581 if (curlun) { 1582 sd = curlun->sense_data; 1583 sdinfo = curlun->sense_data_info; 1584 } else if (common->bad_lun_okay) 1585 sd = SS_NO_SENSE; 1586 else 1587 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED; 1588 1589 if (common->phase_error) { 1590 DBG(common, "sending phase-error status\n"); 1591 status = US_BULK_STAT_PHASE; 1592 sd = SS_INVALID_COMMAND; 1593 } else if (sd != SS_NO_SENSE) { 1594 DBG(common, "sending command-failure status\n"); 1595 status = US_BULK_STAT_FAIL; 1596 VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;" 1597 " info x%x\n", 1598 SK(sd), ASC(sd), ASCQ(sd), sdinfo); 1599 } 1600 1601 /* Store and send the Bulk-only CSW */ 1602 csw = (void *)bh->buf; 1603 1604 csw->Signature = cpu_to_le32(US_BULK_CS_SIGN); 1605 csw->Tag = common->tag; 1606 csw->Residue = cpu_to_le32(common->residue); 1607 csw->Status = status; 1608 1609 bh->inreq->length = US_BULK_CS_WRAP_LEN; 1610 bh->inreq->zero = 0; 1611 if (!start_in_transfer(common, bh)) 1612 /* Don't know what to do if common->fsg is NULL */ 1613 return; 1614 1615 common->next_buffhd_to_fill = bh->next; 1616 return; 1617 } 1618 1619 1620 /*-------------------------------------------------------------------------*/ 1621 1622 /* 1623 * Check whether the command is properly formed and whether its data size 1624 * and direction agree with the values we already have. 1625 */ 1626 static int check_command(struct fsg_common *common, int cmnd_size, 1627 enum data_direction data_dir, unsigned int mask, 1628 int needs_medium, const char *name) 1629 { 1630 int i; 1631 unsigned int lun = common->cmnd[1] >> 5; 1632 static const char dirletter[4] = {'u', 'o', 'i', 'n'}; 1633 char hdlen[20]; 1634 struct fsg_lun *curlun; 1635 1636 hdlen[0] = 0; 1637 if (common->data_dir != DATA_DIR_UNKNOWN) 1638 sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir], 1639 common->data_size); 1640 VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n", 1641 name, cmnd_size, dirletter[(int) data_dir], 1642 common->data_size_from_cmnd, common->cmnd_size, hdlen); 1643 1644 /* 1645 * We can't reply at all until we know the correct data direction 1646 * and size. 1647 */ 1648 if (common->data_size_from_cmnd == 0) 1649 data_dir = DATA_DIR_NONE; 1650 if (common->data_size < common->data_size_from_cmnd) { 1651 /* 1652 * Host data size < Device data size is a phase error. 1653 * Carry out the command, but only transfer as much as 1654 * we are allowed. 
1655 */ 1656 common->data_size_from_cmnd = common->data_size; 1657 common->phase_error = 1; 1658 } 1659 common->residue = common->data_size; 1660 common->usb_amount_left = common->data_size; 1661 1662 /* Conflicting data directions is a phase error */ 1663 if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) { 1664 common->phase_error = 1; 1665 return -EINVAL; 1666 } 1667 1668 /* Verify the length of the command itself */ 1669 if (cmnd_size != common->cmnd_size) { 1670 1671 /* 1672 * Special case workaround: There are plenty of buggy SCSI 1673 * implementations. Many have issues with cbw->Length 1674 * field passing a wrong command size. For those cases we 1675 * always try to work around the problem by using the length 1676 * sent by the host side provided it is at least as large 1677 * as the correct command length. 1678 * Examples of such cases would be MS-Windows, which issues 1679 * REQUEST SENSE with cbw->Length == 12 where it should 1680 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and 1681 * REQUEST SENSE with cbw->Length == 10 where it should 1682 * be 6 as well. 1683 */ 1684 if (cmnd_size <= common->cmnd_size) { 1685 DBG(common, "%s is buggy! Expected length %d " 1686 "but we got %d\n", name, 1687 cmnd_size, common->cmnd_size); 1688 cmnd_size = common->cmnd_size; 1689 } else { 1690 common->phase_error = 1; 1691 return -EINVAL; 1692 } 1693 } 1694 1695 /* Check that the LUN values are consistent */ 1696 if (common->lun != lun) 1697 DBG(common, "using LUN %u from CBW, not LUN %u from CDB\n", 1698 common->lun, lun); 1699 1700 /* Check the LUN */ 1701 curlun = common->curlun; 1702 if (curlun) { 1703 if (common->cmnd[0] != REQUEST_SENSE) { 1704 curlun->sense_data = SS_NO_SENSE; 1705 curlun->sense_data_info = 0; 1706 curlun->info_valid = 0; 1707 } 1708 } else { 1709 common->bad_lun_okay = 0; 1710 1711 /* 1712 * INQUIRY and REQUEST SENSE commands are explicitly allowed 1713 * to use unsupported LUNs; all others may not. 1714 */ 1715 if (common->cmnd[0] != INQUIRY && 1716 common->cmnd[0] != REQUEST_SENSE) { 1717 DBG(common, "unsupported LUN %u\n", common->lun); 1718 return -EINVAL; 1719 } 1720 } 1721 1722 /* 1723 * If a unit attention condition exists, only INQUIRY and 1724 * REQUEST SENSE commands are allowed; anything else must fail. 1725 */ 1726 if (curlun && curlun->unit_attention_data != SS_NO_SENSE && 1727 common->cmnd[0] != INQUIRY && 1728 common->cmnd[0] != REQUEST_SENSE) { 1729 curlun->sense_data = curlun->unit_attention_data; 1730 curlun->unit_attention_data = SS_NO_SENSE; 1731 return -EINVAL; 1732 } 1733 1734 /* Check that only command bytes listed in the mask are non-zero */ 1735 common->cmnd[1] &= 0x1f; /* Mask away the LUN */ 1736 for (i = 1; i < cmnd_size; ++i) { 1737 if (common->cmnd[i] && !(mask & (1 << i))) { 1738 if (curlun) 1739 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1740 return -EINVAL; 1741 } 1742 } 1743 1744 /* If the medium isn't mounted and the command needs to access 1745 * it, return an error. 
*/ 1746 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) { 1747 curlun->sense_data = SS_MEDIUM_NOT_PRESENT; 1748 return -EINVAL; 1749 } 1750 1751 return 0; 1752 } 1753 1754 /* wrapper of check_command for data size in blocks handling */ 1755 static int check_command_size_in_blocks(struct fsg_common *common, 1756 int cmnd_size, enum data_direction data_dir, 1757 unsigned int mask, int needs_medium, const char *name) 1758 { 1759 if (common->curlun) 1760 common->data_size_from_cmnd <<= common->curlun->blkbits; 1761 return check_command(common, cmnd_size, data_dir, 1762 mask, needs_medium, name); 1763 } 1764 1765 static int do_scsi_command(struct fsg_common *common) 1766 { 1767 struct fsg_buffhd *bh; 1768 int rc; 1769 int reply = -EINVAL; 1770 int i; 1771 static char unknown[16]; 1772 1773 dump_cdb(common); 1774 1775 /* Wait for the next buffer to become available for data or status */ 1776 bh = common->next_buffhd_to_fill; 1777 common->next_buffhd_to_drain = bh; 1778 rc = sleep_thread(common, false, bh); 1779 if (rc) 1780 return rc; 1781 1782 common->phase_error = 0; 1783 common->short_packet_received = 0; 1784 1785 down_read(&common->filesem); /* We're using the backing file */ 1786 switch (common->cmnd[0]) { 1787 1788 case INQUIRY: 1789 common->data_size_from_cmnd = common->cmnd[4]; 1790 reply = check_command(common, 6, DATA_DIR_TO_HOST, 1791 (1<<4), 0, 1792 "INQUIRY"); 1793 if (reply == 0) 1794 reply = do_inquiry(common, bh); 1795 break; 1796 1797 case MODE_SELECT: 1798 common->data_size_from_cmnd = common->cmnd[4]; 1799 reply = check_command(common, 6, DATA_DIR_FROM_HOST, 1800 (1<<1) | (1<<4), 0, 1801 "MODE SELECT(6)"); 1802 if (reply == 0) 1803 reply = do_mode_select(common, bh); 1804 break; 1805 1806 case MODE_SELECT_10: 1807 common->data_size_from_cmnd = 1808 get_unaligned_be16(&common->cmnd[7]); 1809 reply = check_command(common, 10, DATA_DIR_FROM_HOST, 1810 (1<<1) | (3<<7), 0, 1811 "MODE SELECT(10)"); 1812 if (reply == 0) 1813 reply = do_mode_select(common, bh); 1814 break; 1815 1816 case MODE_SENSE: 1817 common->data_size_from_cmnd = common->cmnd[4]; 1818 reply = check_command(common, 6, DATA_DIR_TO_HOST, 1819 (1<<1) | (1<<2) | (1<<4), 0, 1820 "MODE SENSE(6)"); 1821 if (reply == 0) 1822 reply = do_mode_sense(common, bh); 1823 break; 1824 1825 case MODE_SENSE_10: 1826 common->data_size_from_cmnd = 1827 get_unaligned_be16(&common->cmnd[7]); 1828 reply = check_command(common, 10, DATA_DIR_TO_HOST, 1829 (1<<1) | (1<<2) | (3<<7), 0, 1830 "MODE SENSE(10)"); 1831 if (reply == 0) 1832 reply = do_mode_sense(common, bh); 1833 break; 1834 1835 case ALLOW_MEDIUM_REMOVAL: 1836 common->data_size_from_cmnd = 0; 1837 reply = check_command(common, 6, DATA_DIR_NONE, 1838 (1<<4), 0, 1839 "PREVENT-ALLOW MEDIUM REMOVAL"); 1840 if (reply == 0) 1841 reply = do_prevent_allow(common); 1842 break; 1843 1844 case READ_6: 1845 i = common->cmnd[4]; 1846 common->data_size_from_cmnd = (i == 0) ? 
256 : i; 1847 reply = check_command_size_in_blocks(common, 6, 1848 DATA_DIR_TO_HOST, 1849 (7<<1) | (1<<4), 1, 1850 "READ(6)"); 1851 if (reply == 0) 1852 reply = do_read(common); 1853 break; 1854 1855 case READ_10: 1856 common->data_size_from_cmnd = 1857 get_unaligned_be16(&common->cmnd[7]); 1858 reply = check_command_size_in_blocks(common, 10, 1859 DATA_DIR_TO_HOST, 1860 (1<<1) | (0xf<<2) | (3<<7), 1, 1861 "READ(10)"); 1862 if (reply == 0) 1863 reply = do_read(common); 1864 break; 1865 1866 case READ_12: 1867 common->data_size_from_cmnd = 1868 get_unaligned_be32(&common->cmnd[6]); 1869 reply = check_command_size_in_blocks(common, 12, 1870 DATA_DIR_TO_HOST, 1871 (1<<1) | (0xf<<2) | (0xf<<6), 1, 1872 "READ(12)"); 1873 if (reply == 0) 1874 reply = do_read(common); 1875 break; 1876 1877 case READ_CAPACITY: 1878 common->data_size_from_cmnd = 8; 1879 reply = check_command(common, 10, DATA_DIR_TO_HOST, 1880 (0xf<<2) | (1<<8), 1, 1881 "READ CAPACITY"); 1882 if (reply == 0) 1883 reply = do_read_capacity(common, bh); 1884 break; 1885 1886 case READ_HEADER: 1887 if (!common->curlun || !common->curlun->cdrom) 1888 goto unknown_cmnd; 1889 common->data_size_from_cmnd = 1890 get_unaligned_be16(&common->cmnd[7]); 1891 reply = check_command(common, 10, DATA_DIR_TO_HOST, 1892 (3<<7) | (0x1f<<1), 1, 1893 "READ HEADER"); 1894 if (reply == 0) 1895 reply = do_read_header(common, bh); 1896 break; 1897 1898 case READ_TOC: 1899 if (!common->curlun || !common->curlun->cdrom) 1900 goto unknown_cmnd; 1901 common->data_size_from_cmnd = 1902 get_unaligned_be16(&common->cmnd[7]); 1903 reply = check_command(common, 10, DATA_DIR_TO_HOST, 1904 (7<<6) | (1<<1), 1, 1905 "READ TOC"); 1906 if (reply == 0) 1907 reply = do_read_toc(common, bh); 1908 break; 1909 1910 case READ_FORMAT_CAPACITIES: 1911 common->data_size_from_cmnd = 1912 get_unaligned_be16(&common->cmnd[7]); 1913 reply = check_command(common, 10, DATA_DIR_TO_HOST, 1914 (3<<7), 1, 1915 "READ FORMAT CAPACITIES"); 1916 if (reply == 0) 1917 reply = do_read_format_capacities(common, bh); 1918 break; 1919 1920 case REQUEST_SENSE: 1921 common->data_size_from_cmnd = common->cmnd[4]; 1922 reply = check_command(common, 6, DATA_DIR_TO_HOST, 1923 (1<<4), 0, 1924 "REQUEST SENSE"); 1925 if (reply == 0) 1926 reply = do_request_sense(common, bh); 1927 break; 1928 1929 case START_STOP: 1930 common->data_size_from_cmnd = 0; 1931 reply = check_command(common, 6, DATA_DIR_NONE, 1932 (1<<1) | (1<<4), 0, 1933 "START-STOP UNIT"); 1934 if (reply == 0) 1935 reply = do_start_stop(common); 1936 break; 1937 1938 case SYNCHRONIZE_CACHE: 1939 common->data_size_from_cmnd = 0; 1940 reply = check_command(common, 10, DATA_DIR_NONE, 1941 (0xf<<2) | (3<<7), 1, 1942 "SYNCHRONIZE CACHE"); 1943 if (reply == 0) 1944 reply = do_synchronize_cache(common); 1945 break; 1946 1947 case TEST_UNIT_READY: 1948 common->data_size_from_cmnd = 0; 1949 reply = check_command(common, 6, DATA_DIR_NONE, 1950 0, 1, 1951 "TEST UNIT READY"); 1952 break; 1953 1954 /* 1955 * Although optional, this command is used by MS-Windows. We 1956 * support a minimal version: BytChk must be 0. 1957 */ 1958 case VERIFY: 1959 common->data_size_from_cmnd = 0; 1960 reply = check_command(common, 10, DATA_DIR_NONE, 1961 (1<<1) | (0xf<<2) | (3<<7), 1, 1962 "VERIFY"); 1963 if (reply == 0) 1964 reply = do_verify(common); 1965 break; 1966 1967 case WRITE_6: 1968 i = common->cmnd[4]; 1969 common->data_size_from_cmnd = (i == 0) ? 
256 : i; 1970 reply = check_command_size_in_blocks(common, 6, 1971 DATA_DIR_FROM_HOST, 1972 (7<<1) | (1<<4), 1, 1973 "WRITE(6)"); 1974 if (reply == 0) 1975 reply = do_write(common); 1976 break; 1977 1978 case WRITE_10: 1979 common->data_size_from_cmnd = 1980 get_unaligned_be16(&common->cmnd[7]); 1981 reply = check_command_size_in_blocks(common, 10, 1982 DATA_DIR_FROM_HOST, 1983 (1<<1) | (0xf<<2) | (3<<7), 1, 1984 "WRITE(10)"); 1985 if (reply == 0) 1986 reply = do_write(common); 1987 break; 1988 1989 case WRITE_12: 1990 common->data_size_from_cmnd = 1991 get_unaligned_be32(&common->cmnd[6]); 1992 reply = check_command_size_in_blocks(common, 12, 1993 DATA_DIR_FROM_HOST, 1994 (1<<1) | (0xf<<2) | (0xf<<6), 1, 1995 "WRITE(12)"); 1996 if (reply == 0) 1997 reply = do_write(common); 1998 break; 1999 2000 /* 2001 * Some mandatory commands that we recognize but don't implement. 2002 * They don't mean much in this setting. It's left as an exercise 2003 * for anyone interested to implement RESERVE and RELEASE in terms 2004 * of Posix locks. 2005 */ 2006 case FORMAT_UNIT: 2007 case RELEASE: 2008 case RESERVE: 2009 case SEND_DIAGNOSTIC: 2010 2011 default: 2012 unknown_cmnd: 2013 common->data_size_from_cmnd = 0; 2014 sprintf(unknown, "Unknown x%02x", common->cmnd[0]); 2015 reply = check_command(common, common->cmnd_size, 2016 DATA_DIR_UNKNOWN, ~0, 0, unknown); 2017 if (reply == 0) { 2018 common->curlun->sense_data = SS_INVALID_COMMAND; 2019 reply = -EINVAL; 2020 } 2021 break; 2022 } 2023 up_read(&common->filesem); 2024 2025 if (reply == -EINTR || signal_pending(current)) 2026 return -EINTR; 2027 2028 /* Set up the single reply buffer for finish_reply() */ 2029 if (reply == -EINVAL) 2030 reply = 0; /* Error reply length */ 2031 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) { 2032 reply = min((u32)reply, common->data_size_from_cmnd); 2033 bh->inreq->length = reply; 2034 bh->state = BUF_STATE_FULL; 2035 common->residue -= reply; 2036 } /* Otherwise it's already set */ 2037 2038 return 0; 2039 } 2040 2041 2042 /*-------------------------------------------------------------------------*/ 2043 2044 static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2045 { 2046 struct usb_request *req = bh->outreq; 2047 struct bulk_cb_wrap *cbw = req->buf; 2048 struct fsg_common *common = fsg->common; 2049 2050 /* Was this a real packet? Should it be ignored? */ 2051 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags)) 2052 return -EINVAL; 2053 2054 /* Is the CBW valid? */ 2055 if (req->actual != US_BULK_CB_WRAP_LEN || 2056 cbw->Signature != cpu_to_le32( 2057 US_BULK_CB_SIGN)) { 2058 DBG(fsg, "invalid CBW: len %u sig 0x%x\n", 2059 req->actual, 2060 le32_to_cpu(cbw->Signature)); 2061 2062 /* 2063 * The Bulk-only spec says we MUST stall the IN endpoint 2064 * (6.6.1), so it's unavoidable. It also says we must 2065 * retain this state until the next reset, but there's 2066 * no way to tell the controller driver it should ignore 2067 * Clear-Feature(HALT) requests. 2068 * 2069 * We aren't required to halt the OUT endpoint; instead 2070 * we can simply accept and discard any data received 2071 * until the next reset. 2072 */ 2073 wedge_bulk_in_endpoint(fsg); 2074 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2075 return -EINVAL; 2076 } 2077 2078 /* Is the CBW meaningful? 
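* (The LUN number must be within range, only the direction bit may be set
* in Flags, and the CDB length must lie between 1 and MAX_COMMAND_SIZE.)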
*/ 2079 if (cbw->Lun >= ARRAY_SIZE(common->luns) || 2080 cbw->Flags & ~US_BULK_FLAG_IN || cbw->Length <= 0 || 2081 cbw->Length > MAX_COMMAND_SIZE) { 2082 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, " 2083 "cmdlen %u\n", 2084 cbw->Lun, cbw->Flags, cbw->Length); 2085 2086 /* 2087 * We can do anything we want here, so let's stall the 2088 * bulk pipes if we are allowed to. 2089 */ 2090 if (common->can_stall) { 2091 fsg_set_halt(fsg, fsg->bulk_out); 2092 halt_bulk_in_endpoint(fsg); 2093 } 2094 return -EINVAL; 2095 } 2096 2097 /* Save the command for later */ 2098 common->cmnd_size = cbw->Length; 2099 memcpy(common->cmnd, cbw->CDB, common->cmnd_size); 2100 if (cbw->Flags & US_BULK_FLAG_IN) 2101 common->data_dir = DATA_DIR_TO_HOST; 2102 else 2103 common->data_dir = DATA_DIR_FROM_HOST; 2104 common->data_size = le32_to_cpu(cbw->DataTransferLength); 2105 if (common->data_size == 0) 2106 common->data_dir = DATA_DIR_NONE; 2107 common->lun = cbw->Lun; 2108 if (common->lun < ARRAY_SIZE(common->luns)) 2109 common->curlun = common->luns[common->lun]; 2110 else 2111 common->curlun = NULL; 2112 common->tag = cbw->Tag; 2113 return 0; 2114 } 2115 2116 static int get_next_command(struct fsg_common *common) 2117 { 2118 struct fsg_buffhd *bh; 2119 int rc = 0; 2120 2121 /* Wait for the next buffer to become available */ 2122 bh = common->next_buffhd_to_fill; 2123 rc = sleep_thread(common, true, bh); 2124 if (rc) 2125 return rc; 2126 2127 /* Queue a request to read a Bulk-only CBW */ 2128 set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN); 2129 if (!start_out_transfer(common, bh)) 2130 /* Don't know what to do if common->fsg is NULL */ 2131 return -EIO; 2132 2133 /* 2134 * We will drain the buffer in software, which means we 2135 * can reuse it for the next filling. No need to advance 2136 * next_buffhd_to_fill. 2137 */ 2138 2139 /* Wait for the CBW to arrive */ 2140 rc = sleep_thread(common, true, bh); 2141 if (rc) 2142 return rc; 2143 2144 rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO; 2145 bh->state = BUF_STATE_EMPTY; 2146 2147 return rc; 2148 } 2149 2150 2151 /*-------------------------------------------------------------------------*/ 2152 2153 static int alloc_request(struct fsg_common *common, struct usb_ep *ep, 2154 struct usb_request **preq) 2155 { 2156 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC); 2157 if (*preq) 2158 return 0; 2159 ERROR(common, "can't allocate request for %s\n", ep->name); 2160 return -ENOMEM; 2161 } 2162 2163 /* Reset interface setting and re-init endpoint state (toggle etc). 
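* Called with new_fsg == NULL this only tears the current interface down
* (deallocating the requests and disabling the endpoints); with a non-NULL
* new_fsg it then re-enables the endpoints, reallocates the requests and
* flags a RESET_OCCURRED unit attention on every LUN.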
*/ 2164 static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg) 2165 { 2166 struct fsg_dev *fsg; 2167 int i, rc = 0; 2168 2169 if (common->running) 2170 DBG(common, "reset interface\n"); 2171 2172 reset: 2173 /* Deallocate the requests */ 2174 if (common->fsg) { 2175 fsg = common->fsg; 2176 2177 for (i = 0; i < common->fsg_num_buffers; ++i) { 2178 struct fsg_buffhd *bh = &common->buffhds[i]; 2179 2180 if (bh->inreq) { 2181 usb_ep_free_request(fsg->bulk_in, bh->inreq); 2182 bh->inreq = NULL; 2183 } 2184 if (bh->outreq) { 2185 usb_ep_free_request(fsg->bulk_out, bh->outreq); 2186 bh->outreq = NULL; 2187 } 2188 } 2189 2190 /* Disable the endpoints */ 2191 if (fsg->bulk_in_enabled) { 2192 usb_ep_disable(fsg->bulk_in); 2193 fsg->bulk_in_enabled = 0; 2194 } 2195 if (fsg->bulk_out_enabled) { 2196 usb_ep_disable(fsg->bulk_out); 2197 fsg->bulk_out_enabled = 0; 2198 } 2199 2200 common->fsg = NULL; 2201 wake_up(&common->fsg_wait); 2202 } 2203 2204 common->running = 0; 2205 if (!new_fsg || rc) 2206 return rc; 2207 2208 common->fsg = new_fsg; 2209 fsg = common->fsg; 2210 2211 /* Enable the endpoints */ 2212 rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in); 2213 if (rc) 2214 goto reset; 2215 rc = usb_ep_enable(fsg->bulk_in); 2216 if (rc) 2217 goto reset; 2218 fsg->bulk_in->driver_data = common; 2219 fsg->bulk_in_enabled = 1; 2220 2221 rc = config_ep_by_speed(common->gadget, &(fsg->function), 2222 fsg->bulk_out); 2223 if (rc) 2224 goto reset; 2225 rc = usb_ep_enable(fsg->bulk_out); 2226 if (rc) 2227 goto reset; 2228 fsg->bulk_out->driver_data = common; 2229 fsg->bulk_out_enabled = 1; 2230 common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc); 2231 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2232 2233 /* Allocate the requests */ 2234 for (i = 0; i < common->fsg_num_buffers; ++i) { 2235 struct fsg_buffhd *bh = &common->buffhds[i]; 2236 2237 rc = alloc_request(common, fsg->bulk_in, &bh->inreq); 2238 if (rc) 2239 goto reset; 2240 rc = alloc_request(common, fsg->bulk_out, &bh->outreq); 2241 if (rc) 2242 goto reset; 2243 bh->inreq->buf = bh->outreq->buf = bh->buf; 2244 bh->inreq->context = bh->outreq->context = bh; 2245 bh->inreq->complete = bulk_in_complete; 2246 bh->outreq->complete = bulk_out_complete; 2247 } 2248 2249 common->running = 1; 2250 for (i = 0; i < ARRAY_SIZE(common->luns); ++i) 2251 if (common->luns[i]) 2252 common->luns[i]->unit_attention_data = 2253 SS_RESET_OCCURRED; 2254 return rc; 2255 } 2256 2257 2258 /****************************** ALT CONFIGS ******************************/ 2259 2260 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 2261 { 2262 struct fsg_dev *fsg = fsg_from_func(f); 2263 2264 __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg); 2265 return USB_GADGET_DELAYED_STATUS; 2266 } 2267 2268 static void fsg_disable(struct usb_function *f) 2269 { 2270 struct fsg_dev *fsg = fsg_from_func(f); 2271 2272 __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL); 2273 } 2274 2275 2276 /*-------------------------------------------------------------------------*/ 2277 2278 static void handle_exception(struct fsg_common *common) 2279 { 2280 int i; 2281 struct fsg_buffhd *bh; 2282 enum fsg_state old_state; 2283 struct fsg_lun *curlun; 2284 unsigned int exception_req_tag; 2285 struct fsg_dev *new_fsg; 2286 2287 /* 2288 * Clear the existing signals. Anything but SIGUSR1 is converted 2289 * into a high-priority EXIT exception. 
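* (fsg_main_thread() only unblocks INT, TERM, KILL and USR1, so those are
* the only signals that can be pending here.)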
2290 */ 2291 for (;;) { 2292 int sig = kernel_dequeue_signal(); 2293 if (!sig) 2294 break; 2295 if (sig != SIGUSR1) { 2296 spin_lock_irq(&common->lock); 2297 if (common->state < FSG_STATE_EXIT) 2298 DBG(common, "Main thread exiting on signal\n"); 2299 common->state = FSG_STATE_EXIT; 2300 spin_unlock_irq(&common->lock); 2301 } 2302 } 2303 2304 /* Cancel all the pending transfers */ 2305 if (likely(common->fsg)) { 2306 for (i = 0; i < common->fsg_num_buffers; ++i) { 2307 bh = &common->buffhds[i]; 2308 if (bh->state == BUF_STATE_SENDING) 2309 usb_ep_dequeue(common->fsg->bulk_in, bh->inreq); 2310 if (bh->state == BUF_STATE_RECEIVING) 2311 usb_ep_dequeue(common->fsg->bulk_out, 2312 bh->outreq); 2313 2314 /* Wait for a transfer to become idle */ 2315 if (sleep_thread(common, false, bh)) 2316 return; 2317 } 2318 2319 /* Clear out the controller's fifos */ 2320 if (common->fsg->bulk_in_enabled) 2321 usb_ep_fifo_flush(common->fsg->bulk_in); 2322 if (common->fsg->bulk_out_enabled) 2323 usb_ep_fifo_flush(common->fsg->bulk_out); 2324 } 2325 2326 /* 2327 * Reset the I/O buffer states and pointers, the SCSI 2328 * state, and the exception. Then invoke the handler. 2329 */ 2330 spin_lock_irq(&common->lock); 2331 2332 for (i = 0; i < common->fsg_num_buffers; ++i) { 2333 bh = &common->buffhds[i]; 2334 bh->state = BUF_STATE_EMPTY; 2335 } 2336 common->next_buffhd_to_fill = &common->buffhds[0]; 2337 common->next_buffhd_to_drain = &common->buffhds[0]; 2338 exception_req_tag = common->exception_req_tag; 2339 new_fsg = common->exception_arg; 2340 old_state = common->state; 2341 common->state = FSG_STATE_NORMAL; 2342 2343 if (old_state != FSG_STATE_ABORT_BULK_OUT) { 2344 for (i = 0; i < ARRAY_SIZE(common->luns); ++i) { 2345 curlun = common->luns[i]; 2346 if (!curlun) 2347 continue; 2348 curlun->prevent_medium_removal = 0; 2349 curlun->sense_data = SS_NO_SENSE; 2350 curlun->unit_attention_data = SS_NO_SENSE; 2351 curlun->sense_data_info = 0; 2352 curlun->info_valid = 0; 2353 } 2354 } 2355 spin_unlock_irq(&common->lock); 2356 2357 /* Carry out any extra actions required for the exception */ 2358 switch (old_state) { 2359 case FSG_STATE_NORMAL: 2360 break; 2361 2362 case FSG_STATE_ABORT_BULK_OUT: 2363 send_status(common); 2364 break; 2365 2366 case FSG_STATE_PROTOCOL_RESET: 2367 /* 2368 * In case we were forced against our will to halt a 2369 * bulk endpoint, clear the halt now. (The SuperH UDC 2370 * requires this.) 2371 */ 2372 if (!fsg_is_set(common)) 2373 break; 2374 if (test_and_clear_bit(IGNORE_BULK_OUT, 2375 &common->fsg->atomic_bitflags)) 2376 usb_ep_clear_halt(common->fsg->bulk_in); 2377 2378 if (common->ep0_req_tag == exception_req_tag) 2379 ep0_queue(common); /* Complete the status stage */ 2380 2381 /* 2382 * Technically this should go here, but it would only be 2383 * a waste of time. Ditto for the INTERFACE_CHANGE and 2384 * CONFIG_CHANGE cases. 
2385 */ 2386 /* for (i = 0; i < common->ARRAY_SIZE(common->luns); ++i) */ 2387 /* if (common->luns[i]) */ 2388 /* common->luns[i]->unit_attention_data = */ 2389 /* SS_RESET_OCCURRED; */ 2390 break; 2391 2392 case FSG_STATE_CONFIG_CHANGE: 2393 do_set_interface(common, new_fsg); 2394 if (new_fsg) 2395 usb_composite_setup_continue(common->cdev); 2396 break; 2397 2398 case FSG_STATE_EXIT: 2399 do_set_interface(common, NULL); /* Free resources */ 2400 spin_lock_irq(&common->lock); 2401 common->state = FSG_STATE_TERMINATED; /* Stop the thread */ 2402 spin_unlock_irq(&common->lock); 2403 break; 2404 2405 case FSG_STATE_TERMINATED: 2406 break; 2407 } 2408 } 2409 2410 2411 /*-------------------------------------------------------------------------*/ 2412 2413 static int fsg_main_thread(void *common_) 2414 { 2415 struct fsg_common *common = common_; 2416 int i; 2417 2418 /* 2419 * Allow the thread to be killed by a signal, but set the signal mask 2420 * to block everything but INT, TERM, KILL, and USR1. 2421 */ 2422 allow_signal(SIGINT); 2423 allow_signal(SIGTERM); 2424 allow_signal(SIGKILL); 2425 allow_signal(SIGUSR1); 2426 2427 /* Allow the thread to be frozen */ 2428 set_freezable(); 2429 2430 /* The main loop */ 2431 while (common->state != FSG_STATE_TERMINATED) { 2432 if (exception_in_progress(common) || signal_pending(current)) { 2433 handle_exception(common); 2434 continue; 2435 } 2436 2437 if (!common->running) { 2438 sleep_thread(common, true, NULL); 2439 continue; 2440 } 2441 2442 if (get_next_command(common) || exception_in_progress(common)) 2443 continue; 2444 if (do_scsi_command(common) || exception_in_progress(common)) 2445 continue; 2446 if (finish_reply(common) || exception_in_progress(common)) 2447 continue; 2448 send_status(common); 2449 } 2450 2451 spin_lock_irq(&common->lock); 2452 common->thread_task = NULL; 2453 spin_unlock_irq(&common->lock); 2454 2455 /* Eject media from all LUNs */ 2456 2457 down_write(&common->filesem); 2458 for (i = 0; i < ARRAY_SIZE(common->luns); i++) { 2459 struct fsg_lun *curlun = common->luns[i]; 2460 2461 if (curlun && fsg_lun_is_open(curlun)) 2462 fsg_lun_close(curlun); 2463 } 2464 up_write(&common->filesem); 2465 2466 /* Let fsg_unbind() know the thread has exited */ 2467 complete_and_exit(&common->thread_notifier, 0); 2468 } 2469 2470 2471 /*************************** DEVICE ATTRIBUTES ***************************/ 2472 2473 static ssize_t ro_show(struct device *dev, struct device_attribute *attr, char *buf) 2474 { 2475 struct fsg_lun *curlun = fsg_lun_from_dev(dev); 2476 2477 return fsg_show_ro(curlun, buf); 2478 } 2479 2480 static ssize_t nofua_show(struct device *dev, struct device_attribute *attr, 2481 char *buf) 2482 { 2483 struct fsg_lun *curlun = fsg_lun_from_dev(dev); 2484 2485 return fsg_show_nofua(curlun, buf); 2486 } 2487 2488 static ssize_t file_show(struct device *dev, struct device_attribute *attr, 2489 char *buf) 2490 { 2491 struct fsg_lun *curlun = fsg_lun_from_dev(dev); 2492 struct rw_semaphore *filesem = dev_get_drvdata(dev); 2493 2494 return fsg_show_file(curlun, filesem, buf); 2495 } 2496 2497 static ssize_t ro_store(struct device *dev, struct device_attribute *attr, 2498 const char *buf, size_t count) 2499 { 2500 struct fsg_lun *curlun = fsg_lun_from_dev(dev); 2501 struct rw_semaphore *filesem = dev_get_drvdata(dev); 2502 2503 return fsg_store_ro(curlun, filesem, buf, count); 2504 } 2505 2506 static ssize_t nofua_store(struct device *dev, struct device_attribute *attr, 2507 const char *buf, size_t count) 2508 { 2509 struct 
fsg_lun *curlun = fsg_lun_from_dev(dev);
2510
2511 return fsg_store_nofua(curlun, buf, count);
2512 }
2513
2514 static ssize_t file_store(struct device *dev, struct device_attribute *attr,
2515 const char *buf, size_t count)
2516 {
2517 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2518 struct rw_semaphore *filesem = dev_get_drvdata(dev);
2519
2520 return fsg_store_file(curlun, filesem, buf, count);
2521 }
2522
2523 static DEVICE_ATTR_RW(nofua);
2524 /* mode will be set in fsg_lun_dev_is_visible() */
2525 static DEVICE_ATTR(ro, 0, ro_show, ro_store);
2526 static DEVICE_ATTR(file, 0, file_show, file_store);
2527
2528 /****************************** FSG COMMON ******************************/
2529
2530 static void fsg_lun_release(struct device *dev)
2531 {
2532 /* Nothing needs to be done */
2533 }
2534
2535 static struct fsg_common *fsg_common_setup(struct fsg_common *common)
2536 {
2537 if (!common) {
2538 common = kzalloc(sizeof(*common), GFP_KERNEL);
2539 if (!common)
2540 return ERR_PTR(-ENOMEM);
2541 common->free_storage_on_release = 1;
2542 } else {
2543 common->free_storage_on_release = 0;
2544 }
2545 init_rwsem(&common->filesem);
2546 spin_lock_init(&common->lock);
2547 init_completion(&common->thread_notifier);
2548 init_waitqueue_head(&common->io_wait);
2549 init_waitqueue_head(&common->fsg_wait);
2550 common->state = FSG_STATE_TERMINATED;
2551 memset(common->luns, 0, sizeof(common->luns));
2552
2553 return common;
2554 }
2555
2556 void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs)
2557 {
2558 common->sysfs = sysfs;
2559 }
2560 EXPORT_SYMBOL_GPL(fsg_common_set_sysfs);
2561
2562 static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n)
2563 {
2564 if (buffhds) {
2565 struct fsg_buffhd *bh = buffhds;
2566 while (n--) {
2567 kfree(bh->buf);
2568 ++bh;
2569 }
2570 kfree(buffhds);
2571 }
2572 }
2573
2574 int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n)
2575 {
2576 struct fsg_buffhd *bh, *buffhds;
2577 int i;
2578
2579 buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL);
2580 if (!buffhds)
2581 return -ENOMEM;
2582
2583 /* Data buffers cyclic list */
2584 bh = buffhds;
2585 i = n;
2586 goto buffhds_first_it;
2587 do {
2588 bh->next = bh + 1;
2589 ++bh;
2590 buffhds_first_it:
2591 bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
2592 if (unlikely(!bh->buf))
2593 goto error_release;
2594 } while (--i);
2595 bh->next = buffhds;
2596
2597 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
2598 common->fsg_num_buffers = n;
2599 common->buffhds = buffhds;
2600
2601 return 0;
2602
2603 error_release:
2604 /*
2605 * The "buf" pointers of the heads past the first n - i entries are
2606 * still NULL, so freeing them does no harm.
2607 */
2608 _fsg_common_free_buffers(buffhds, n);
2609
2610 return -ENOMEM;
2611 }
2612 EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers);
2613
2614 void fsg_common_remove_lun(struct fsg_lun *lun)
2615 {
2616 if (device_is_registered(&lun->dev))
2617 device_unregister(&lun->dev);
2618 fsg_lun_close(lun);
2619 kfree(lun);
2620 }
2621 EXPORT_SYMBOL_GPL(fsg_common_remove_lun);
2622
2623 static void _fsg_common_remove_luns(struct fsg_common *common, int n)
2624 {
2625 int i;
2626
2627 for (i = 0; i < n; ++i)
2628 if (common->luns[i]) {
2629 fsg_common_remove_lun(common->luns[i]);
2630 common->luns[i] = NULL;
2631 }
2632 }
2633
2634 void fsg_common_remove_luns(struct fsg_common *common)
2635 {
2636 _fsg_common_remove_luns(common, ARRAY_SIZE(common->luns));
2637 }
2638 EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
2639
2640 void
fsg_common_free_buffers(struct fsg_common *common) 2641 { 2642 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers); 2643 common->buffhds = NULL; 2644 } 2645 EXPORT_SYMBOL_GPL(fsg_common_free_buffers); 2646 2647 int fsg_common_set_cdev(struct fsg_common *common, 2648 struct usb_composite_dev *cdev, bool can_stall) 2649 { 2650 struct usb_string *us; 2651 2652 common->gadget = cdev->gadget; 2653 common->ep0 = cdev->gadget->ep0; 2654 common->ep0req = cdev->req; 2655 common->cdev = cdev; 2656 2657 us = usb_gstrings_attach(cdev, fsg_strings_array, 2658 ARRAY_SIZE(fsg_strings)); 2659 if (IS_ERR(us)) 2660 return PTR_ERR(us); 2661 2662 fsg_intf_desc.iInterface = us[FSG_STRING_INTERFACE].id; 2663 2664 /* 2665 * Some peripheral controllers are known not to be able to 2666 * halt bulk endpoints correctly. If one of them is present, 2667 * disable stalls. 2668 */ 2669 common->can_stall = can_stall && 2670 gadget_is_stall_supported(common->gadget); 2671 2672 return 0; 2673 } 2674 EXPORT_SYMBOL_GPL(fsg_common_set_cdev); 2675 2676 static struct attribute *fsg_lun_dev_attrs[] = { 2677 &dev_attr_ro.attr, 2678 &dev_attr_file.attr, 2679 &dev_attr_nofua.attr, 2680 NULL 2681 }; 2682 2683 static umode_t fsg_lun_dev_is_visible(struct kobject *kobj, 2684 struct attribute *attr, int idx) 2685 { 2686 struct device *dev = kobj_to_dev(kobj); 2687 struct fsg_lun *lun = fsg_lun_from_dev(dev); 2688 2689 if (attr == &dev_attr_ro.attr) 2690 return lun->cdrom ? S_IRUGO : (S_IWUSR | S_IRUGO); 2691 if (attr == &dev_attr_file.attr) 2692 return lun->removable ? (S_IWUSR | S_IRUGO) : S_IRUGO; 2693 return attr->mode; 2694 } 2695 2696 static const struct attribute_group fsg_lun_dev_group = { 2697 .attrs = fsg_lun_dev_attrs, 2698 .is_visible = fsg_lun_dev_is_visible, 2699 }; 2700 2701 static const struct attribute_group *fsg_lun_dev_groups[] = { 2702 &fsg_lun_dev_group, 2703 NULL 2704 }; 2705 2706 int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg, 2707 unsigned int id, const char *name, 2708 const char **name_pfx) 2709 { 2710 struct fsg_lun *lun; 2711 char *pathbuf, *p; 2712 int rc = -ENOMEM; 2713 2714 if (id >= ARRAY_SIZE(common->luns)) 2715 return -ENODEV; 2716 2717 if (common->luns[id]) 2718 return -EBUSY; 2719 2720 if (!cfg->filename && !cfg->removable) { 2721 pr_err("no file given for LUN%d\n", id); 2722 return -EINVAL; 2723 } 2724 2725 lun = kzalloc(sizeof(*lun), GFP_KERNEL); 2726 if (!lun) 2727 return -ENOMEM; 2728 2729 lun->name_pfx = name_pfx; 2730 2731 lun->cdrom = !!cfg->cdrom; 2732 lun->ro = cfg->cdrom || cfg->ro; 2733 lun->initially_ro = lun->ro; 2734 lun->removable = !!cfg->removable; 2735 2736 if (!common->sysfs) { 2737 /* we DON'T own the name!*/ 2738 lun->name = name; 2739 } else { 2740 lun->dev.release = fsg_lun_release; 2741 lun->dev.parent = &common->gadget->dev; 2742 lun->dev.groups = fsg_lun_dev_groups; 2743 dev_set_drvdata(&lun->dev, &common->filesem); 2744 dev_set_name(&lun->dev, "%s", name); 2745 lun->name = dev_name(&lun->dev); 2746 2747 rc = device_register(&lun->dev); 2748 if (rc) { 2749 pr_info("failed to register LUN%d: %d\n", id, rc); 2750 put_device(&lun->dev); 2751 goto error_sysfs; 2752 } 2753 } 2754 2755 common->luns[id] = lun; 2756 2757 if (cfg->filename) { 2758 rc = fsg_lun_open(lun, cfg->filename); 2759 if (rc) 2760 goto error_lun; 2761 } 2762 2763 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); 2764 p = "(no medium)"; 2765 if (fsg_lun_is_open(lun)) { 2766 p = "(error)"; 2767 if (pathbuf) { 2768 p = file_path(lun->filp, pathbuf, PATH_MAX); 2769 if 
(IS_ERR(p)) 2770 p = "(error)"; 2771 } 2772 } 2773 pr_info("LUN: %s%s%sfile: %s\n", 2774 lun->removable ? "removable " : "", 2775 lun->ro ? "read only " : "", 2776 lun->cdrom ? "CD-ROM " : "", 2777 p); 2778 kfree(pathbuf); 2779 2780 return 0; 2781 2782 error_lun: 2783 if (device_is_registered(&lun->dev)) 2784 device_unregister(&lun->dev); 2785 fsg_lun_close(lun); 2786 common->luns[id] = NULL; 2787 error_sysfs: 2788 kfree(lun); 2789 return rc; 2790 } 2791 EXPORT_SYMBOL_GPL(fsg_common_create_lun); 2792 2793 int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg) 2794 { 2795 char buf[8]; /* enough for 100000000 different numbers, decimal */ 2796 int i, rc; 2797 2798 fsg_common_remove_luns(common); 2799 2800 for (i = 0; i < cfg->nluns; ++i) { 2801 snprintf(buf, sizeof(buf), "lun%d", i); 2802 rc = fsg_common_create_lun(common, &cfg->luns[i], i, buf, NULL); 2803 if (rc) 2804 goto fail; 2805 } 2806 2807 pr_info("Number of LUNs=%d\n", cfg->nluns); 2808 2809 return 0; 2810 2811 fail: 2812 _fsg_common_remove_luns(common, i); 2813 return rc; 2814 } 2815 EXPORT_SYMBOL_GPL(fsg_common_create_luns); 2816 2817 void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn, 2818 const char *pn) 2819 { 2820 int i; 2821 2822 /* Prepare inquiryString */ 2823 i = get_default_bcdDevice(); 2824 snprintf(common->inquiry_string, sizeof(common->inquiry_string), 2825 "%-8s%-16s%04x", vn ?: "Linux", 2826 /* Assume product name dependent on the first LUN */ 2827 pn ?: ((*common->luns)->cdrom 2828 ? "File-CD Gadget" 2829 : "File-Stor Gadget"), 2830 i); 2831 } 2832 EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string); 2833 2834 static void fsg_common_release(struct fsg_common *common) 2835 { 2836 int i; 2837 2838 /* If the thread isn't already dead, tell it to exit now */ 2839 if (common->state != FSG_STATE_TERMINATED) { 2840 raise_exception(common, FSG_STATE_EXIT); 2841 wait_for_completion(&common->thread_notifier); 2842 } 2843 2844 for (i = 0; i < ARRAY_SIZE(common->luns); ++i) { 2845 struct fsg_lun *lun = common->luns[i]; 2846 if (!lun) 2847 continue; 2848 fsg_lun_close(lun); 2849 if (device_is_registered(&lun->dev)) 2850 device_unregister(&lun->dev); 2851 kfree(lun); 2852 } 2853 2854 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers); 2855 if (common->free_storage_on_release) 2856 kfree(common); 2857 } 2858 2859 2860 /*-------------------------------------------------------------------------*/ 2861 2862 static int fsg_bind(struct usb_configuration *c, struct usb_function *f) 2863 { 2864 struct fsg_dev *fsg = fsg_from_func(f); 2865 struct fsg_common *common = fsg->common; 2866 struct usb_gadget *gadget = c->cdev->gadget; 2867 int i; 2868 struct usb_ep *ep; 2869 unsigned max_burst; 2870 int ret; 2871 struct fsg_opts *opts; 2872 2873 /* Don't allow to bind if we don't have at least one LUN */ 2874 ret = _fsg_common_get_max_lun(common); 2875 if (ret < 0) { 2876 pr_err("There should be at least one LUN.\n"); 2877 return -EINVAL; 2878 } 2879 2880 opts = fsg_opts_from_func_inst(f->fi); 2881 if (!opts->no_configfs) { 2882 ret = fsg_common_set_cdev(fsg->common, c->cdev, 2883 fsg->common->can_stall); 2884 if (ret) 2885 return ret; 2886 fsg_common_set_inquiry_string(fsg->common, NULL, NULL); 2887 } 2888 2889 if (!common->thread_task) { 2890 common->state = FSG_STATE_NORMAL; 2891 common->thread_task = 2892 kthread_create(fsg_main_thread, common, "file-storage"); 2893 if (IS_ERR(common->thread_task)) { 2894 ret = PTR_ERR(common->thread_task); 2895 common->thread_task = NULL; 2896 
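/* Creation failed: go back to TERMINATED so that fsg_common_release()
* will not wait for a thread that never started. */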
common->state = FSG_STATE_TERMINATED; 2897 return ret; 2898 } 2899 DBG(common, "I/O thread pid: %d\n", 2900 task_pid_nr(common->thread_task)); 2901 wake_up_process(common->thread_task); 2902 } 2903 2904 fsg->gadget = gadget; 2905 2906 /* New interface */ 2907 i = usb_interface_id(c, f); 2908 if (i < 0) 2909 goto fail; 2910 fsg_intf_desc.bInterfaceNumber = i; 2911 fsg->interface_number = i; 2912 2913 /* Find all the endpoints we will use */ 2914 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc); 2915 if (!ep) 2916 goto autoconf_fail; 2917 fsg->bulk_in = ep; 2918 2919 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc); 2920 if (!ep) 2921 goto autoconf_fail; 2922 fsg->bulk_out = ep; 2923 2924 /* Assume endpoint addresses are the same for both speeds */ 2925 fsg_hs_bulk_in_desc.bEndpointAddress = 2926 fsg_fs_bulk_in_desc.bEndpointAddress; 2927 fsg_hs_bulk_out_desc.bEndpointAddress = 2928 fsg_fs_bulk_out_desc.bEndpointAddress; 2929 2930 /* Calculate bMaxBurst, we know packet size is 1024 */ 2931 max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15); 2932 2933 fsg_ss_bulk_in_desc.bEndpointAddress = 2934 fsg_fs_bulk_in_desc.bEndpointAddress; 2935 fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst; 2936 2937 fsg_ss_bulk_out_desc.bEndpointAddress = 2938 fsg_fs_bulk_out_desc.bEndpointAddress; 2939 fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst; 2940 2941 ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function, 2942 fsg_ss_function, fsg_ss_function); 2943 if (ret) 2944 goto autoconf_fail; 2945 2946 return 0; 2947 2948 autoconf_fail: 2949 ERROR(fsg, "unable to autoconfigure all endpoints\n"); 2950 i = -ENOTSUPP; 2951 fail: 2952 /* terminate the thread */ 2953 if (fsg->common->state != FSG_STATE_TERMINATED) { 2954 raise_exception(fsg->common, FSG_STATE_EXIT); 2955 wait_for_completion(&fsg->common->thread_notifier); 2956 } 2957 return i; 2958 } 2959 2960 /****************************** ALLOCATE FUNCTION *************************/ 2961 2962 static void fsg_unbind(struct usb_configuration *c, struct usb_function *f) 2963 { 2964 struct fsg_dev *fsg = fsg_from_func(f); 2965 struct fsg_common *common = fsg->common; 2966 2967 DBG(fsg, "unbind\n"); 2968 if (fsg->common->fsg == fsg) { 2969 __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL); 2970 /* FIXME: make interruptible or killable somehow? 
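* (The wait below finishes once handle_exception() has called
* do_set_interface(common, NULL), which clears common->fsg and wakes
* fsg_wait.)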
*/ 2971 wait_event(common->fsg_wait, common->fsg != fsg); 2972 } 2973 2974 usb_free_all_descriptors(&fsg->function); 2975 } 2976 2977 static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item) 2978 { 2979 return container_of(to_config_group(item), struct fsg_lun_opts, group); 2980 } 2981 2982 static inline struct fsg_opts *to_fsg_opts(struct config_item *item) 2983 { 2984 return container_of(to_config_group(item), struct fsg_opts, 2985 func_inst.group); 2986 } 2987 2988 static void fsg_lun_attr_release(struct config_item *item) 2989 { 2990 struct fsg_lun_opts *lun_opts; 2991 2992 lun_opts = to_fsg_lun_opts(item); 2993 kfree(lun_opts); 2994 } 2995 2996 static struct configfs_item_operations fsg_lun_item_ops = { 2997 .release = fsg_lun_attr_release, 2998 }; 2999 3000 static ssize_t fsg_lun_opts_file_show(struct config_item *item, char *page) 3001 { 3002 struct fsg_lun_opts *opts = to_fsg_lun_opts(item); 3003 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent); 3004 3005 return fsg_show_file(opts->lun, &fsg_opts->common->filesem, page); 3006 } 3007 3008 static ssize_t fsg_lun_opts_file_store(struct config_item *item, 3009 const char *page, size_t len) 3010 { 3011 struct fsg_lun_opts *opts = to_fsg_lun_opts(item); 3012 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent); 3013 3014 return fsg_store_file(opts->lun, &fsg_opts->common->filesem, page, len); 3015 } 3016 3017 CONFIGFS_ATTR(fsg_lun_opts_, file); 3018 3019 static ssize_t fsg_lun_opts_ro_show(struct config_item *item, char *page) 3020 { 3021 return fsg_show_ro(to_fsg_lun_opts(item)->lun, page); 3022 } 3023 3024 static ssize_t fsg_lun_opts_ro_store(struct config_item *item, 3025 const char *page, size_t len) 3026 { 3027 struct fsg_lun_opts *opts = to_fsg_lun_opts(item); 3028 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent); 3029 3030 return fsg_store_ro(opts->lun, &fsg_opts->common->filesem, page, len); 3031 } 3032 3033 CONFIGFS_ATTR(fsg_lun_opts_, ro); 3034 3035 static ssize_t fsg_lun_opts_removable_show(struct config_item *item, 3036 char *page) 3037 { 3038 return fsg_show_removable(to_fsg_lun_opts(item)->lun, page); 3039 } 3040 3041 static ssize_t fsg_lun_opts_removable_store(struct config_item *item, 3042 const char *page, size_t len) 3043 { 3044 return fsg_store_removable(to_fsg_lun_opts(item)->lun, page, len); 3045 } 3046 3047 CONFIGFS_ATTR(fsg_lun_opts_, removable); 3048 3049 static ssize_t fsg_lun_opts_cdrom_show(struct config_item *item, char *page) 3050 { 3051 return fsg_show_cdrom(to_fsg_lun_opts(item)->lun, page); 3052 } 3053 3054 static ssize_t fsg_lun_opts_cdrom_store(struct config_item *item, 3055 const char *page, size_t len) 3056 { 3057 struct fsg_lun_opts *opts = to_fsg_lun_opts(item); 3058 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent); 3059 3060 return fsg_store_cdrom(opts->lun, &fsg_opts->common->filesem, page, 3061 len); 3062 } 3063 3064 CONFIGFS_ATTR(fsg_lun_opts_, cdrom); 3065 3066 static ssize_t fsg_lun_opts_nofua_show(struct config_item *item, char *page) 3067 { 3068 return fsg_show_nofua(to_fsg_lun_opts(item)->lun, page); 3069 } 3070 3071 static ssize_t fsg_lun_opts_nofua_store(struct config_item *item, 3072 const char *page, size_t len) 3073 { 3074 return fsg_store_nofua(to_fsg_lun_opts(item)->lun, page, len); 3075 } 3076 3077 CONFIGFS_ATTR(fsg_lun_opts_, nofua); 3078 3079 static ssize_t fsg_lun_opts_inquiry_string_show(struct config_item *item, 3080 char *page) 3081 { 3082 return 
fsg_show_inquiry_string(to_fsg_lun_opts(item)->lun, page); 3083 } 3084 3085 static ssize_t fsg_lun_opts_inquiry_string_store(struct config_item *item, 3086 const char *page, size_t len) 3087 { 3088 return fsg_store_inquiry_string(to_fsg_lun_opts(item)->lun, page, len); 3089 } 3090 3091 CONFIGFS_ATTR(fsg_lun_opts_, inquiry_string); 3092 3093 static struct configfs_attribute *fsg_lun_attrs[] = { 3094 &fsg_lun_opts_attr_file, 3095 &fsg_lun_opts_attr_ro, 3096 &fsg_lun_opts_attr_removable, 3097 &fsg_lun_opts_attr_cdrom, 3098 &fsg_lun_opts_attr_nofua, 3099 &fsg_lun_opts_attr_inquiry_string, 3100 NULL, 3101 }; 3102 3103 static const struct config_item_type fsg_lun_type = { 3104 .ct_item_ops = &fsg_lun_item_ops, 3105 .ct_attrs = fsg_lun_attrs, 3106 .ct_owner = THIS_MODULE, 3107 }; 3108 3109 static struct config_group *fsg_lun_make(struct config_group *group, 3110 const char *name) 3111 { 3112 struct fsg_lun_opts *opts; 3113 struct fsg_opts *fsg_opts; 3114 struct fsg_lun_config config; 3115 char *num_str; 3116 u8 num; 3117 int ret; 3118 3119 num_str = strchr(name, '.'); 3120 if (!num_str) { 3121 pr_err("Unable to locate . in LUN.NUMBER\n"); 3122 return ERR_PTR(-EINVAL); 3123 } 3124 num_str++; 3125 3126 ret = kstrtou8(num_str, 0, &num); 3127 if (ret) 3128 return ERR_PTR(ret); 3129 3130 fsg_opts = to_fsg_opts(&group->cg_item); 3131 if (num >= FSG_MAX_LUNS) 3132 return ERR_PTR(-ERANGE); 3133 num = array_index_nospec(num, FSG_MAX_LUNS); 3134 3135 mutex_lock(&fsg_opts->lock); 3136 if (fsg_opts->refcnt || fsg_opts->common->luns[num]) { 3137 ret = -EBUSY; 3138 goto out; 3139 } 3140 3141 opts = kzalloc(sizeof(*opts), GFP_KERNEL); 3142 if (!opts) { 3143 ret = -ENOMEM; 3144 goto out; 3145 } 3146 3147 memset(&config, 0, sizeof(config)); 3148 config.removable = true; 3149 3150 ret = fsg_common_create_lun(fsg_opts->common, &config, num, name, 3151 (const char **)&group->cg_item.ci_name); 3152 if (ret) { 3153 kfree(opts); 3154 goto out; 3155 } 3156 opts->lun = fsg_opts->common->luns[num]; 3157 opts->lun_id = num; 3158 mutex_unlock(&fsg_opts->lock); 3159 3160 config_group_init_type_name(&opts->group, name, &fsg_lun_type); 3161 3162 return &opts->group; 3163 out: 3164 mutex_unlock(&fsg_opts->lock); 3165 return ERR_PTR(ret); 3166 } 3167 3168 static void fsg_lun_drop(struct config_group *group, struct config_item *item) 3169 { 3170 struct fsg_lun_opts *lun_opts; 3171 struct fsg_opts *fsg_opts; 3172 3173 lun_opts = to_fsg_lun_opts(item); 3174 fsg_opts = to_fsg_opts(&group->cg_item); 3175 3176 mutex_lock(&fsg_opts->lock); 3177 if (fsg_opts->refcnt) { 3178 struct config_item *gadget; 3179 3180 gadget = group->cg_item.ci_parent->ci_parent; 3181 unregister_gadget_item(gadget); 3182 } 3183 3184 fsg_common_remove_lun(lun_opts->lun); 3185 fsg_opts->common->luns[lun_opts->lun_id] = NULL; 3186 lun_opts->lun_id = 0; 3187 mutex_unlock(&fsg_opts->lock); 3188 3189 config_item_put(item); 3190 } 3191 3192 static void fsg_attr_release(struct config_item *item) 3193 { 3194 struct fsg_opts *opts = to_fsg_opts(item); 3195 3196 usb_put_function_instance(&opts->func_inst); 3197 } 3198 3199 static struct configfs_item_operations fsg_item_ops = { 3200 .release = fsg_attr_release, 3201 }; 3202 3203 static ssize_t fsg_opts_stall_show(struct config_item *item, char *page) 3204 { 3205 struct fsg_opts *opts = to_fsg_opts(item); 3206 int result; 3207 3208 mutex_lock(&opts->lock); 3209 result = sprintf(page, "%d", opts->common->can_stall); 3210 mutex_unlock(&opts->lock); 3211 3212 return result; 3213 } 3214 3215 static ssize_t 
fsg_opts_stall_store(struct config_item *item, const char *page, 3216 size_t len) 3217 { 3218 struct fsg_opts *opts = to_fsg_opts(item); 3219 int ret; 3220 bool stall; 3221 3222 mutex_lock(&opts->lock); 3223 3224 if (opts->refcnt) { 3225 mutex_unlock(&opts->lock); 3226 return -EBUSY; 3227 } 3228 3229 ret = strtobool(page, &stall); 3230 if (!ret) { 3231 opts->common->can_stall = stall; 3232 ret = len; 3233 } 3234 3235 mutex_unlock(&opts->lock); 3236 3237 return ret; 3238 } 3239 3240 CONFIGFS_ATTR(fsg_opts_, stall); 3241 3242 #ifdef CONFIG_USB_GADGET_DEBUG_FILES 3243 static ssize_t fsg_opts_num_buffers_show(struct config_item *item, char *page) 3244 { 3245 struct fsg_opts *opts = to_fsg_opts(item); 3246 int result; 3247 3248 mutex_lock(&opts->lock); 3249 result = sprintf(page, "%d", opts->common->fsg_num_buffers); 3250 mutex_unlock(&opts->lock); 3251 3252 return result; 3253 } 3254 3255 static ssize_t fsg_opts_num_buffers_store(struct config_item *item, 3256 const char *page, size_t len) 3257 { 3258 struct fsg_opts *opts = to_fsg_opts(item); 3259 int ret; 3260 u8 num; 3261 3262 mutex_lock(&opts->lock); 3263 if (opts->refcnt) { 3264 ret = -EBUSY; 3265 goto end; 3266 } 3267 ret = kstrtou8(page, 0, &num); 3268 if (ret) 3269 goto end; 3270 3271 ret = fsg_common_set_num_buffers(opts->common, num); 3272 if (ret) 3273 goto end; 3274 ret = len; 3275 3276 end: 3277 mutex_unlock(&opts->lock); 3278 return ret; 3279 } 3280 3281 CONFIGFS_ATTR(fsg_opts_, num_buffers); 3282 #endif 3283 3284 static struct configfs_attribute *fsg_attrs[] = { 3285 &fsg_opts_attr_stall, 3286 #ifdef CONFIG_USB_GADGET_DEBUG_FILES 3287 &fsg_opts_attr_num_buffers, 3288 #endif 3289 NULL, 3290 }; 3291 3292 static struct configfs_group_operations fsg_group_ops = { 3293 .make_group = fsg_lun_make, 3294 .drop_item = fsg_lun_drop, 3295 }; 3296 3297 static const struct config_item_type fsg_func_type = { 3298 .ct_item_ops = &fsg_item_ops, 3299 .ct_group_ops = &fsg_group_ops, 3300 .ct_attrs = fsg_attrs, 3301 .ct_owner = THIS_MODULE, 3302 }; 3303 3304 static void fsg_free_inst(struct usb_function_instance *fi) 3305 { 3306 struct fsg_opts *opts; 3307 3308 opts = fsg_opts_from_func_inst(fi); 3309 fsg_common_release(opts->common); 3310 kfree(opts); 3311 } 3312 3313 static struct usb_function_instance *fsg_alloc_inst(void) 3314 { 3315 struct fsg_opts *opts; 3316 struct fsg_lun_config config; 3317 int rc; 3318 3319 opts = kzalloc(sizeof(*opts), GFP_KERNEL); 3320 if (!opts) 3321 return ERR_PTR(-ENOMEM); 3322 mutex_init(&opts->lock); 3323 opts->func_inst.free_func_inst = fsg_free_inst; 3324 opts->common = fsg_common_setup(opts->common); 3325 if (IS_ERR(opts->common)) { 3326 rc = PTR_ERR(opts->common); 3327 goto release_opts; 3328 } 3329 3330 rc = fsg_common_set_num_buffers(opts->common, 3331 CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS); 3332 if (rc) 3333 goto release_common; 3334 3335 pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n"); 3336 3337 memset(&config, 0, sizeof(config)); 3338 config.removable = true; 3339 rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0", 3340 (const char **)&opts->func_inst.group.cg_item.ci_name); 3341 if (rc) 3342 goto release_buffers; 3343 3344 opts->lun0.lun = opts->common->luns[0]; 3345 opts->lun0.lun_id = 0; 3346 3347 config_group_init_type_name(&opts->func_inst.group, "", &fsg_func_type); 3348 3349 config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type); 3350 configfs_add_default_group(&opts->lun0.group, &opts->func_inst.group); 3351 3352 return &opts->func_inst; 3353 3354 
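/* Error unwinding: undo the allocations in the reverse order they were made */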
release_buffers: 3355 fsg_common_free_buffers(opts->common); 3356 release_common: 3357 kfree(opts->common); 3358 release_opts: 3359 kfree(opts); 3360 return ERR_PTR(rc); 3361 } 3362 3363 static void fsg_free(struct usb_function *f) 3364 { 3365 struct fsg_dev *fsg; 3366 struct fsg_opts *opts; 3367 3368 fsg = container_of(f, struct fsg_dev, function); 3369 opts = container_of(f->fi, struct fsg_opts, func_inst); 3370 3371 mutex_lock(&opts->lock); 3372 opts->refcnt--; 3373 mutex_unlock(&opts->lock); 3374 3375 kfree(fsg); 3376 } 3377 3378 static struct usb_function *fsg_alloc(struct usb_function_instance *fi) 3379 { 3380 struct fsg_opts *opts = fsg_opts_from_func_inst(fi); 3381 struct fsg_common *common = opts->common; 3382 struct fsg_dev *fsg; 3383 3384 fsg = kzalloc(sizeof(*fsg), GFP_KERNEL); 3385 if (unlikely(!fsg)) 3386 return ERR_PTR(-ENOMEM); 3387 3388 mutex_lock(&opts->lock); 3389 opts->refcnt++; 3390 mutex_unlock(&opts->lock); 3391 3392 fsg->function.name = FSG_DRIVER_DESC; 3393 fsg->function.bind = fsg_bind; 3394 fsg->function.unbind = fsg_unbind; 3395 fsg->function.setup = fsg_setup; 3396 fsg->function.set_alt = fsg_set_alt; 3397 fsg->function.disable = fsg_disable; 3398 fsg->function.free_func = fsg_free; 3399 3400 fsg->common = common; 3401 3402 return &fsg->function; 3403 } 3404 3405 DECLARE_USB_FUNCTION_INIT(mass_storage, fsg_alloc_inst, fsg_alloc); 3406 MODULE_LICENSE("GPL"); 3407 MODULE_AUTHOR("Michal Nazarewicz"); 3408 3409 /************************* Module parameters *************************/ 3410 3411 3412 void fsg_config_from_params(struct fsg_config *cfg, 3413 const struct fsg_module_parameters *params, 3414 unsigned int fsg_num_buffers) 3415 { 3416 struct fsg_lun_config *lun; 3417 unsigned i; 3418 3419 /* Configure LUNs */ 3420 cfg->nluns = 3421 min(params->luns ?: (params->file_count ?: 1u), 3422 (unsigned)FSG_MAX_LUNS); 3423 for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) { 3424 lun->ro = !!params->ro[i]; 3425 lun->cdrom = !!params->cdrom[i]; 3426 lun->removable = !!params->removable[i]; 3427 lun->filename = 3428 params->file_count > i && params->file[i][0] 3429 ? params->file[i] 3430 : NULL; 3431 } 3432 3433 /* Let MSF use defaults */ 3434 cfg->vendor_name = NULL; 3435 cfg->product_name = NULL; 3436 3437 cfg->ops = NULL; 3438 cfg->private_data = NULL; 3439 3440 /* Finalise */ 3441 cfg->can_stall = params->stall; 3442 cfg->fsg_num_buffers = fsg_num_buffers; 3443 } 3444 EXPORT_SYMBOL_GPL(fsg_config_from_params); 3445
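/*
 * Illustrative sketch only (not part of the driver and not compiled as-is):
 * a legacy gadget that owns a struct fsg_module_parameters "params" would
 * typically consume the helpers exported above roughly like this:
 *
 *	struct fsg_config cfg;
 *
 *	fsg_config_from_params(&cfg, &params, fsg_num_buffers);
 *	fsg_common_set_num_buffers(common, fsg_num_buffers);
 *	fsg_common_set_cdev(common, cdev, cfg.can_stall);
 *	fsg_common_set_sysfs(common, true);
 *	fsg_common_create_luns(common, &cfg);
 *	fsg_common_set_inquiry_string(common, cfg.vendor_name,
 *				      cfg.product_name);
 *
 * The exact order and the error handling depend on the gadget; "params",
 * "common", "cdev" and "fsg_num_buffers" are assumed to exist in the caller.
 */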