/*
 * f_mass_storage.c -- Mass Storage USB Composite Function
 *
 * Copyright (C) 2003-2008 Alan Stern
 * Copyright (C) 2009 Samsung Electronics
 *                    Author: Michal Nazarewicz <mina86@mina86.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The Mass Storage Function acts as a USB Mass Storage device,
 * appearing to the host as a disk drive or as a CD-ROM drive.  In
 * addition to providing an example of a genuinely useful composite
 * function for a USB device, it also illustrates a technique of
 * double-buffering for increased throughput.
 *
 * For more information about MSF and in particular its module
 * parameters and sysfs interface read the
 * <Documentation/usb/mass-storage.txt> file.
 */

/*
 * MSF is configured by specifying an fsg_config structure.  It has the
 * following fields:
 *
 *	nluns		Number of LUNs the function has (anywhere from 1
 *				to FSG_MAX_LUNS).
 *	luns		An array of LUN configuration values.  This
 *				should be filled for each LUN that the
 *				function will include (i.e. for "nluns"
 *				LUNs).  Each element of the array has
 *				the following fields:
 *	->filename	The path to the backing file for the LUN.
 *				Required if the LUN is not marked as
 *				removable.
 *	->ro		Flag specifying that access to the LUN shall be
 *				read-only.  This is implied if CD-ROM
 *				emulation is enabled, and also when it
 *				was impossible to open "filename" in
 *				R/W mode.
 *	->removable	Flag specifying that the LUN shall be indicated
 *				as being removable.
 *	->cdrom		Flag specifying that the LUN shall be reported
 *				as being a CD-ROM.
 *	->nofua		Flag specifying that the FUA flag in SCSI
 *				WRITE(10,12) commands for this LUN shall
 *				be ignored.
 *
 *	vendor_name
 *	product_name
 *	release		Information used as a reply to the INQUIRY
 *				request.  To use the defaults, set these
 *				to NULL, NULL, and 0xffff respectively.
 *				The first field should be 8 characters
 *				or less, the second 16 characters or less.
 *
 *	can_stall	Set to permit the function to halt bulk endpoints.
 *				Disabled on some USB devices known not
 *				to work correctly.  You should set it
 *				to true.
 *
 * If "removable" is not set for a LUN then a backing file must be
 * specified.  If it is set, then a NULL filename means the LUN's medium
 * is not loaded (an empty string as "filename" in the fsg_config
 * structure causes an error).  The CD-ROM emulation includes a single
 * data track and no audio tracks; hence there need be only one
 * backing file per LUN.  A short illustrative sketch of filling in this
 * structure can be found at the end of the "Driver Design" comment below.
 *
 * This function is heavily based on "File-backed Storage Gadget" by
 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
 * Brownell.  The driver's SCSI command interface was based on the
 * "Information technology - Small Computer System Interface - 2"
 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
 * was based on the "Universal Serial Bus Mass Storage Class UFI
 * Command Specification" document, Revision 1.0, December 14, 1998,
 * available at
 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
 */

/*
 * Driver Design
 *
 * The MSF is fairly straightforward.  There is a main kernel
 * thread that handles most of the work.  Interrupt routines field
 * callbacks from the controller driver: bulk- and interrupt-request
 * completion notifications, endpoint-0 events, and disconnect events.
 * Completion events are passed to the main thread by wakeup calls.  Many
 * ep0 requests are handled at interrupt time, but SetInterface,
 * SetConfiguration, and device reset requests are forwarded to the
 * thread in the form of "exceptions" using SIGUSR1 signals (since they
 * should interrupt any ongoing file I/O operations).
 *
 * The thread's main routine implements the standard command/data/status
 * parts of a SCSI interaction.  It and its subroutines are full of tests
 * for pending signals/exceptions -- all this polling is necessary since
 * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
 * indication that the driver really wants to be running in userspace.)
 * An important point is that so long as the thread is alive it keeps an
 * open reference to the backing file.  This will prevent unmounting
 * the backing file's underlying filesystem and could cause problems
 * during system shutdown, for example.  To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
 *
 * In normal operation the main thread is started during the gadget's
 * fsg_bind() callback and stopped during fsg_unbind().  But it can
 * also exit when it receives a signal, and there's no point leaving
 * the gadget running when the thread is dead.  As of this moment, MSF
 * provides no way to deregister the gadget when the thread dies -- maybe
 * a callback function is needed.
 *
 * To provide maximum throughput, the driver uses a circular pipeline of
 * buffer heads (struct fsg_buffhd).
 * In principle the pipeline can be arbitrarily long; in practice the
 * benefits don't justify having more than 2 stages (i.e., double
 * buffering).  But it helps to think of the pipeline as being a long
 * one.  Each buffer head contains a bulk-in and a bulk-out request
 * pointer (since the buffer can be used for both output and input --
 * directions always are given from the host's point of view) as well
 * as a pointer to the buffer and various state variables.
 *
 * Use of the pipeline follows a simple protocol.  There is a variable
 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
 * At any time that buffer head may still be in use from an earlier
 * request, so each buffer head has a state variable indicating whether
 * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
 * buffer head to be EMPTY, filling the buffer either by file I/O or by
 * USB I/O (during which the buffer head is BUSY), and marking the buffer
 * head FULL when the I/O is complete.  Then the buffer will be emptied
 * (again possibly by USB I/O, during which it is marked BUSY) and
 * finally marked EMPTY again (possibly by a completion routine).
 *
 * A module parameter tells the driver to avoid stalling the bulk
 * endpoints wherever the transport specification allows.  This is
 * necessary for some UDCs like the SuperH, which cannot reliably clear a
 * halt on a bulk endpoint.  However, under certain circumstances the
 * Bulk-only specification requires a stall.  In such cases the driver
 * will halt the endpoint and set a flag indicating that it should clear
 * the halt in software during the next device reset.  Hopefully this
 * will permit everything to work correctly.  Furthermore, although the
 * specification allows the bulk-out endpoint to halt when the host sends
 * too much data, implementing this would cause an unavoidable race.
 * The driver will always use the "no-stall" approach for OUT transfers.
 *
 * One subtle point concerns sending status-stage responses for ep0
 * requests.  Some of these requests, such as device reset, can involve
 * interrupting an ongoing file I/O operation, which might take an
 * arbitrarily long time.  During that delay the host might give up on
 * the original ep0 request and issue a new one.  When that happens the
 * driver should not notify the host about completion of the original
 * request, as the host will no longer be waiting for it.  So the driver
 * assigns to each ep0 request a unique tag, and it keeps track of the
 * tag value of the request associated with a long-running exception
 * (device-reset, interface-change, or configuration-change).  When the
 * exception handler is finished, the status-stage response is submitted
 * only if the current ep0 request tag is equal to the exception request
 * tag.  Thus only the most recently received ep0 request will get a
 * status-stage response.
 *
 * Warning: This driver source file is too long.  It ought to be split up
 * into a header file plus about 3 separate .c files, to handle the details
 * of the Gadget, USB Mass Storage, and SCSI protocols.
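 *
 * To make the configuration description at the top of this file more
 * concrete, here is a small, purely illustrative sketch (not code used
 * by this driver) of how a gadget might fill in an fsg_config structure
 * for a single removable, read-only CD-ROM LUN.  The field names follow
 * the description above; the backing-file path is made up, and whatever
 * helper is used to turn the structure into a live function is omitted:
 *
 *	struct fsg_config cfg;
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.nluns             = 1;
 *	cfg.luns[0].filename  = "/data/cdrom.iso";
 *	cfg.luns[0].ro        = 1;	(implied by ->cdrom anyway)
 *	cfg.luns[0].removable = 1;
 *	cfg.luns[0].cdrom     = 1;
 *	cfg.luns[0].nofua     = 0;
 *	cfg.vendor_name       = NULL;	(use the default)
 *	cfg.product_name      = NULL;	(use the default)
 *	cfg.release           = 0xffff;	(use the default)
 *	cfg.can_stall         = 1;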
 */


/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */

#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/dcache.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/limits.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include "configfs.h"


/*------------------------------------------------------------------------*/

#define FSG_DRIVER_DESC		"Mass Storage Function"
#define FSG_DRIVER_VERSION	"2009/09/11"

static const char fsg_string_interface[] = "Mass Storage";

#include "storage_common.h"
#include "f_mass_storage.h"

/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
static struct usb_string		fsg_strings[] = {
	{FSG_STRING_INTERFACE,		fsg_string_interface},
	{}
};

static struct usb_gadget_strings	fsg_stringtab = {
	.language	= 0x0409,		/* en-us */
	.strings	= fsg_strings,
};

static struct usb_gadget_strings *fsg_strings_array[] = {
	&fsg_stringtab,
	NULL,
};

/*-------------------------------------------------------------------------*/

struct fsg_dev;
struct fsg_common;

/* Data shared by all the FSG instances. */
struct fsg_common {
	struct usb_gadget	*gadget;
	struct usb_composite_dev *cdev;
	struct fsg_dev		*fsg, *new_fsg;
	wait_queue_head_t	fsg_wait;

	/* filesem protects: backing files in use */
	struct rw_semaphore	filesem;

	/* lock protects: state, all the req_busy's */
	spinlock_t		lock;

	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
	struct usb_request	*ep0req;	/* Copy of cdev->req */
	unsigned int		ep0_req_tag;

	struct fsg_buffhd	*next_buffhd_to_fill;
	struct fsg_buffhd	*next_buffhd_to_drain;
	struct fsg_buffhd	*buffhds;
	unsigned int		fsg_num_buffers;

	int			cmnd_size;
	u8			cmnd[MAX_COMMAND_SIZE];

	unsigned int		lun;
	struct fsg_lun		*luns[FSG_MAX_LUNS];
	struct fsg_lun		*curlun;

	unsigned int		bulk_out_maxpacket;
	enum fsg_state		state;		/* For exception handling */
	unsigned int		exception_req_tag;

	enum data_direction	data_dir;
	u32			data_size;
	u32			data_size_from_cmnd;
	u32			tag;
	u32			residue;
	u32			usb_amount_left;

	unsigned int		can_stall:1;
	unsigned int		free_storage_on_release:1;
	unsigned int		phase_error:1;
	unsigned int		short_packet_received:1;
	unsigned int		bad_lun_okay:1;
	unsigned int		running:1;
	unsigned int		sysfs:1;

	int			thread_wakeup_needed;
	struct completion	thread_notifier;
	struct task_struct	*thread_task;

	/* Callback functions. */
	const struct fsg_operations	*ops;
	/* Gadget's private data. */
	void			*private_data;

	char inquiry_string[INQUIRY_STRING_LEN];

	struct kref		ref;
};

struct fsg_dev {
	struct usb_function	function;
	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
	struct fsg_common	*common;

	u16			interface_number;

	unsigned int		bulk_in_enabled:1;
	unsigned int		bulk_out_enabled:1;

	unsigned long		atomic_bitflags;
#define IGNORE_BULK_OUT		0

	struct usb_ep		*bulk_in;
	struct usb_ep		*bulk_out;
};

static inline int __fsg_is_set(struct fsg_common *common,
			       const char *func, unsigned line)
{
	if (common->fsg)
		return 1;
	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
	WARN_ON(1);
	return 0;
}

#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))

static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
{
	return container_of(f, struct fsg_dev, function);
}

typedef void (*fsg_routine_t)(struct fsg_dev *);

static int exception_in_progress(struct fsg_common *common)
{
	return common->state > FSG_STATE_IDLE;
}

/* Make bulk-out requests be divisible by the maxpacket size */
static void set_bulk_out_req_length(struct fsg_common *common,
				    struct fsg_buffhd *bh, unsigned int length)
{
	unsigned int	rem;

	bh->bulk_out_intended_length = length;
	rem = length % common->bulk_out_maxpacket;
	if (rem > 0)
		length += common->bulk_out_maxpacket - rem;
	bh->outreq->length = length;
}


/*-------------------------------------------------------------------------*/

static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
	const char	*name;

	if (ep == fsg->bulk_in)
		name = "bulk-in";
	else if (ep == fsg->bulk_out)
		name = "bulk-out";
	else
		name = ep->name;
	DBG(fsg, "%s set halt\n", name);
	return usb_ep_set_halt(ep);
}


/*-------------------------------------------------------------------------*/

/* These routines may be called in process context or in_irq */

/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common)
{
	smp_wmb();	/* ensure the write of bh->state is complete */
	/* Tell the main thread that something has happened */
	common->thread_wakeup_needed = 1;
	if (common->thread_task)
		wake_up_process(common->thread_task);
}

static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
{
	unsigned long		flags;

	/*
	 * Do nothing if a higher-priority exception is already in progress.
	 * If a lower-or-equal priority exception is in progress, preempt it
	 * and notify the main thread by sending it a signal.
414 */ 415 spin_lock_irqsave(&common->lock, flags); 416 if (common->state <= new_state) { 417 common->exception_req_tag = common->ep0_req_tag; 418 common->state = new_state; 419 if (common->thread_task) 420 send_sig_info(SIGUSR1, SEND_SIG_FORCED, 421 common->thread_task); 422 } 423 spin_unlock_irqrestore(&common->lock, flags); 424 } 425 426 427 /*-------------------------------------------------------------------------*/ 428 429 static int ep0_queue(struct fsg_common *common) 430 { 431 int rc; 432 433 rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC); 434 common->ep0->driver_data = common; 435 if (rc != 0 && rc != -ESHUTDOWN) { 436 /* We can't do much more than wait for a reset */ 437 WARNING(common, "error in submission: %s --> %d\n", 438 common->ep0->name, rc); 439 } 440 return rc; 441 } 442 443 444 /*-------------------------------------------------------------------------*/ 445 446 /* Completion handlers. These always run in_irq. */ 447 448 static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req) 449 { 450 struct fsg_common *common = ep->driver_data; 451 struct fsg_buffhd *bh = req->context; 452 453 if (req->status || req->actual != req->length) 454 DBG(common, "%s --> %d, %u/%u\n", __func__, 455 req->status, req->actual, req->length); 456 if (req->status == -ECONNRESET) /* Request was cancelled */ 457 usb_ep_fifo_flush(ep); 458 459 /* Hold the lock while we update the request and buffer states */ 460 smp_wmb(); 461 spin_lock(&common->lock); 462 bh->inreq_busy = 0; 463 bh->state = BUF_STATE_EMPTY; 464 wakeup_thread(common); 465 spin_unlock(&common->lock); 466 } 467 468 static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req) 469 { 470 struct fsg_common *common = ep->driver_data; 471 struct fsg_buffhd *bh = req->context; 472 473 dump_msg(common, "bulk-out", req->buf, req->actual); 474 if (req->status || req->actual != bh->bulk_out_intended_length) 475 DBG(common, "%s --> %d, %u/%u\n", __func__, 476 req->status, req->actual, bh->bulk_out_intended_length); 477 if (req->status == -ECONNRESET) /* Request was cancelled */ 478 usb_ep_fifo_flush(ep); 479 480 /* Hold the lock while we update the request and buffer states */ 481 smp_wmb(); 482 spin_lock(&common->lock); 483 bh->outreq_busy = 0; 484 bh->state = BUF_STATE_FULL; 485 wakeup_thread(common); 486 spin_unlock(&common->lock); 487 } 488 489 static int _fsg_common_get_max_lun(struct fsg_common *common) 490 { 491 int i = ARRAY_SIZE(common->luns) - 1; 492 493 while (i >= 0 && !common->luns[i]) 494 --i; 495 496 return i; 497 } 498 499 static int fsg_setup(struct usb_function *f, 500 const struct usb_ctrlrequest *ctrl) 501 { 502 struct fsg_dev *fsg = fsg_from_func(f); 503 struct usb_request *req = fsg->common->ep0req; 504 u16 w_index = le16_to_cpu(ctrl->wIndex); 505 u16 w_value = le16_to_cpu(ctrl->wValue); 506 u16 w_length = le16_to_cpu(ctrl->wLength); 507 508 if (!fsg_is_set(fsg->common)) 509 return -EOPNOTSUPP; 510 511 ++fsg->common->ep0_req_tag; /* Record arrival of a new request */ 512 req->context = NULL; 513 req->length = 0; 514 dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl)); 515 516 switch (ctrl->bRequest) { 517 518 case US_BULK_RESET_REQUEST: 519 if (ctrl->bRequestType != 520 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 521 break; 522 if (w_index != fsg->interface_number || w_value != 0 || 523 w_length != 0) 524 return -EDOM; 525 526 /* 527 * Raise an exception to stop the current operation 528 * and reinitialize our state. 
529 */ 530 DBG(fsg, "bulk reset request\n"); 531 raise_exception(fsg->common, FSG_STATE_RESET); 532 return USB_GADGET_DELAYED_STATUS; 533 534 case US_BULK_GET_MAX_LUN: 535 if (ctrl->bRequestType != 536 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 537 break; 538 if (w_index != fsg->interface_number || w_value != 0 || 539 w_length != 1) 540 return -EDOM; 541 VDBG(fsg, "get max LUN\n"); 542 *(u8 *)req->buf = _fsg_common_get_max_lun(fsg->common); 543 544 /* Respond with data/status */ 545 req->length = min((u16)1, w_length); 546 return ep0_queue(fsg->common); 547 } 548 549 VDBG(fsg, 550 "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n", 551 ctrl->bRequestType, ctrl->bRequest, 552 le16_to_cpu(ctrl->wValue), w_index, w_length); 553 return -EOPNOTSUPP; 554 } 555 556 557 /*-------------------------------------------------------------------------*/ 558 559 /* All the following routines run in process context */ 560 561 /* Use this for bulk or interrupt transfers, not ep0 */ 562 static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep, 563 struct usb_request *req, int *pbusy, 564 enum fsg_buffer_state *state) 565 { 566 int rc; 567 568 if (ep == fsg->bulk_in) 569 dump_msg(fsg, "bulk-in", req->buf, req->length); 570 571 spin_lock_irq(&fsg->common->lock); 572 *pbusy = 1; 573 *state = BUF_STATE_BUSY; 574 spin_unlock_irq(&fsg->common->lock); 575 576 rc = usb_ep_queue(ep, req, GFP_KERNEL); 577 if (rc == 0) 578 return; /* All good, we're done */ 579 580 *pbusy = 0; 581 *state = BUF_STATE_EMPTY; 582 583 /* We can't do much more than wait for a reset */ 584 585 /* 586 * Note: currently the net2280 driver fails zero-length 587 * submissions if DMA is enabled. 588 */ 589 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP && req->length == 0)) 590 WARNING(fsg, "error in submission: %s --> %d\n", ep->name, rc); 591 } 592 593 static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh) 594 { 595 if (!fsg_is_set(common)) 596 return false; 597 start_transfer(common->fsg, common->fsg->bulk_in, 598 bh->inreq, &bh->inreq_busy, &bh->state); 599 return true; 600 } 601 602 static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh) 603 { 604 if (!fsg_is_set(common)) 605 return false; 606 start_transfer(common->fsg, common->fsg->bulk_out, 607 bh->outreq, &bh->outreq_busy, &bh->state); 608 return true; 609 } 610 611 static int sleep_thread(struct fsg_common *common, bool can_freeze) 612 { 613 int rc = 0; 614 615 /* Wait until a signal arrives or we are woken up */ 616 for (;;) { 617 if (can_freeze) 618 try_to_freeze(); 619 set_current_state(TASK_INTERRUPTIBLE); 620 if (signal_pending(current)) { 621 rc = -EINTR; 622 break; 623 } 624 if (common->thread_wakeup_needed) 625 break; 626 schedule(); 627 } 628 __set_current_state(TASK_RUNNING); 629 common->thread_wakeup_needed = 0; 630 smp_rmb(); /* ensure the latest bh->state is visible */ 631 return rc; 632 } 633 634 635 /*-------------------------------------------------------------------------*/ 636 637 static int do_read(struct fsg_common *common) 638 { 639 struct fsg_lun *curlun = common->curlun; 640 u32 lba; 641 struct fsg_buffhd *bh; 642 int rc; 643 u32 amount_left; 644 loff_t file_offset, file_offset_tmp; 645 unsigned int amount; 646 ssize_t nread; 647 648 /* 649 * Get the starting Logical Block Address and check that it's 650 * not too big. 
651 */ 652 if (common->cmnd[0] == READ_6) 653 lba = get_unaligned_be24(&common->cmnd[1]); 654 else { 655 lba = get_unaligned_be32(&common->cmnd[2]); 656 657 /* 658 * We allow DPO (Disable Page Out = don't save data in the 659 * cache) and FUA (Force Unit Access = don't read from the 660 * cache), but we don't implement them. 661 */ 662 if ((common->cmnd[1] & ~0x18) != 0) { 663 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 664 return -EINVAL; 665 } 666 } 667 if (lba >= curlun->num_sectors) { 668 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 669 return -EINVAL; 670 } 671 file_offset = ((loff_t) lba) << curlun->blkbits; 672 673 /* Carry out the file reads */ 674 amount_left = common->data_size_from_cmnd; 675 if (unlikely(amount_left == 0)) 676 return -EIO; /* No default reply */ 677 678 for (;;) { 679 /* 680 * Figure out how much we need to read: 681 * Try to read the remaining amount. 682 * But don't read more than the buffer size. 683 * And don't try to read past the end of the file. 684 */ 685 amount = min(amount_left, FSG_BUFLEN); 686 amount = min((loff_t)amount, 687 curlun->file_length - file_offset); 688 689 /* Wait for the next buffer to become available */ 690 bh = common->next_buffhd_to_fill; 691 while (bh->state != BUF_STATE_EMPTY) { 692 rc = sleep_thread(common, false); 693 if (rc) 694 return rc; 695 } 696 697 /* 698 * If we were asked to read past the end of file, 699 * end with an empty buffer. 700 */ 701 if (amount == 0) { 702 curlun->sense_data = 703 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 704 curlun->sense_data_info = 705 file_offset >> curlun->blkbits; 706 curlun->info_valid = 1; 707 bh->inreq->length = 0; 708 bh->state = BUF_STATE_FULL; 709 break; 710 } 711 712 /* Perform the read */ 713 file_offset_tmp = file_offset; 714 nread = vfs_read(curlun->filp, 715 (char __user *)bh->buf, 716 amount, &file_offset_tmp); 717 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, 718 (unsigned long long)file_offset, (int)nread); 719 if (signal_pending(current)) 720 return -EINTR; 721 722 if (nread < 0) { 723 LDBG(curlun, "error in file read: %d\n", (int)nread); 724 nread = 0; 725 } else if (nread < amount) { 726 LDBG(curlun, "partial file read: %d/%u\n", 727 (int)nread, amount); 728 nread = round_down(nread, curlun->blksize); 729 } 730 file_offset += nread; 731 amount_left -= nread; 732 common->residue -= nread; 733 734 /* 735 * Except at the end of the transfer, nread will be 736 * equal to the buffer size, which is divisible by the 737 * bulk-in maxpacket size. 
738 */ 739 bh->inreq->length = nread; 740 bh->state = BUF_STATE_FULL; 741 742 /* If an error occurred, report it and its position */ 743 if (nread < amount) { 744 curlun->sense_data = SS_UNRECOVERED_READ_ERROR; 745 curlun->sense_data_info = 746 file_offset >> curlun->blkbits; 747 curlun->info_valid = 1; 748 break; 749 } 750 751 if (amount_left == 0) 752 break; /* No more left to read */ 753 754 /* Send this buffer and go read some more */ 755 bh->inreq->zero = 0; 756 if (!start_in_transfer(common, bh)) 757 /* Don't know what to do if common->fsg is NULL */ 758 return -EIO; 759 common->next_buffhd_to_fill = bh->next; 760 } 761 762 return -EIO; /* No default reply */ 763 } 764 765 766 /*-------------------------------------------------------------------------*/ 767 768 static int do_write(struct fsg_common *common) 769 { 770 struct fsg_lun *curlun = common->curlun; 771 u32 lba; 772 struct fsg_buffhd *bh; 773 int get_some_more; 774 u32 amount_left_to_req, amount_left_to_write; 775 loff_t usb_offset, file_offset, file_offset_tmp; 776 unsigned int amount; 777 ssize_t nwritten; 778 int rc; 779 780 if (curlun->ro) { 781 curlun->sense_data = SS_WRITE_PROTECTED; 782 return -EINVAL; 783 } 784 spin_lock(&curlun->filp->f_lock); 785 curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */ 786 spin_unlock(&curlun->filp->f_lock); 787 788 /* 789 * Get the starting Logical Block Address and check that it's 790 * not too big 791 */ 792 if (common->cmnd[0] == WRITE_6) 793 lba = get_unaligned_be24(&common->cmnd[1]); 794 else { 795 lba = get_unaligned_be32(&common->cmnd[2]); 796 797 /* 798 * We allow DPO (Disable Page Out = don't save data in the 799 * cache) and FUA (Force Unit Access = write directly to the 800 * medium). We don't implement DPO; we implement FUA by 801 * performing synchronous output. 802 */ 803 if (common->cmnd[1] & ~0x18) { 804 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 805 return -EINVAL; 806 } 807 if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */ 808 spin_lock(&curlun->filp->f_lock); 809 curlun->filp->f_flags |= O_SYNC; 810 spin_unlock(&curlun->filp->f_lock); 811 } 812 } 813 if (lba >= curlun->num_sectors) { 814 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 815 return -EINVAL; 816 } 817 818 /* Carry out the file writes */ 819 get_some_more = 1; 820 file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits; 821 amount_left_to_req = common->data_size_from_cmnd; 822 amount_left_to_write = common->data_size_from_cmnd; 823 824 while (amount_left_to_write > 0) { 825 826 /* Queue a request for more data from the host */ 827 bh = common->next_buffhd_to_fill; 828 if (bh->state == BUF_STATE_EMPTY && get_some_more) { 829 830 /* 831 * Figure out how much we want to get: 832 * Try to get the remaining amount, 833 * but not more than the buffer size. 834 */ 835 amount = min(amount_left_to_req, FSG_BUFLEN); 836 837 /* Beyond the end of the backing file? */ 838 if (usb_offset >= curlun->file_length) { 839 get_some_more = 0; 840 curlun->sense_data = 841 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 842 curlun->sense_data_info = 843 usb_offset >> curlun->blkbits; 844 curlun->info_valid = 1; 845 continue; 846 } 847 848 /* Get the next buffer */ 849 usb_offset += amount; 850 common->usb_amount_left -= amount; 851 amount_left_to_req -= amount; 852 if (amount_left_to_req == 0) 853 get_some_more = 0; 854 855 /* 856 * Except at the end of the transfer, amount will be 857 * equal to the buffer size, which is divisible by 858 * the bulk-out maxpacket size. 
859 */ 860 set_bulk_out_req_length(common, bh, amount); 861 if (!start_out_transfer(common, bh)) 862 /* Dunno what to do if common->fsg is NULL */ 863 return -EIO; 864 common->next_buffhd_to_fill = bh->next; 865 continue; 866 } 867 868 /* Write the received data to the backing file */ 869 bh = common->next_buffhd_to_drain; 870 if (bh->state == BUF_STATE_EMPTY && !get_some_more) 871 break; /* We stopped early */ 872 if (bh->state == BUF_STATE_FULL) { 873 smp_rmb(); 874 common->next_buffhd_to_drain = bh->next; 875 bh->state = BUF_STATE_EMPTY; 876 877 /* Did something go wrong with the transfer? */ 878 if (bh->outreq->status != 0) { 879 curlun->sense_data = SS_COMMUNICATION_FAILURE; 880 curlun->sense_data_info = 881 file_offset >> curlun->blkbits; 882 curlun->info_valid = 1; 883 break; 884 } 885 886 amount = bh->outreq->actual; 887 if (curlun->file_length - file_offset < amount) { 888 LERROR(curlun, 889 "write %u @ %llu beyond end %llu\n", 890 amount, (unsigned long long)file_offset, 891 (unsigned long long)curlun->file_length); 892 amount = curlun->file_length - file_offset; 893 } 894 895 /* Don't accept excess data. The spec doesn't say 896 * what to do in this case. We'll ignore the error. 897 */ 898 amount = min(amount, bh->bulk_out_intended_length); 899 900 /* Don't write a partial block */ 901 amount = round_down(amount, curlun->blksize); 902 if (amount == 0) 903 goto empty_write; 904 905 /* Perform the write */ 906 file_offset_tmp = file_offset; 907 nwritten = vfs_write(curlun->filp, 908 (char __user *)bh->buf, 909 amount, &file_offset_tmp); 910 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount, 911 (unsigned long long)file_offset, (int)nwritten); 912 if (signal_pending(current)) 913 return -EINTR; /* Interrupted! */ 914 915 if (nwritten < 0) { 916 LDBG(curlun, "error in file write: %d\n", 917 (int)nwritten); 918 nwritten = 0; 919 } else if (nwritten < amount) { 920 LDBG(curlun, "partial file write: %d/%u\n", 921 (int)nwritten, amount); 922 nwritten = round_down(nwritten, curlun->blksize); 923 } 924 file_offset += nwritten; 925 amount_left_to_write -= nwritten; 926 common->residue -= nwritten; 927 928 /* If an error occurred, report it and its position */ 929 if (nwritten < amount) { 930 curlun->sense_data = SS_WRITE_ERROR; 931 curlun->sense_data_info = 932 file_offset >> curlun->blkbits; 933 curlun->info_valid = 1; 934 break; 935 } 936 937 empty_write: 938 /* Did the host decide to stop early? */ 939 if (bh->outreq->actual < bh->bulk_out_intended_length) { 940 common->short_packet_received = 1; 941 break; 942 } 943 continue; 944 } 945 946 /* Wait for something to happen */ 947 rc = sleep_thread(common, false); 948 if (rc) 949 return rc; 950 } 951 952 return -EIO; /* No default reply */ 953 } 954 955 956 /*-------------------------------------------------------------------------*/ 957 958 static int do_synchronize_cache(struct fsg_common *common) 959 { 960 struct fsg_lun *curlun = common->curlun; 961 int rc; 962 963 /* We ignore the requested LBA and write out all file's 964 * dirty data buffers. 
*/ 965 rc = fsg_lun_fsync_sub(curlun); 966 if (rc) 967 curlun->sense_data = SS_WRITE_ERROR; 968 return 0; 969 } 970 971 972 /*-------------------------------------------------------------------------*/ 973 974 static void invalidate_sub(struct fsg_lun *curlun) 975 { 976 struct file *filp = curlun->filp; 977 struct inode *inode = file_inode(filp); 978 unsigned long rc; 979 980 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); 981 VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc); 982 } 983 984 static int do_verify(struct fsg_common *common) 985 { 986 struct fsg_lun *curlun = common->curlun; 987 u32 lba; 988 u32 verification_length; 989 struct fsg_buffhd *bh = common->next_buffhd_to_fill; 990 loff_t file_offset, file_offset_tmp; 991 u32 amount_left; 992 unsigned int amount; 993 ssize_t nread; 994 995 /* 996 * Get the starting Logical Block Address and check that it's 997 * not too big. 998 */ 999 lba = get_unaligned_be32(&common->cmnd[2]); 1000 if (lba >= curlun->num_sectors) { 1001 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1002 return -EINVAL; 1003 } 1004 1005 /* 1006 * We allow DPO (Disable Page Out = don't save data in the 1007 * cache) but we don't implement it. 1008 */ 1009 if (common->cmnd[1] & ~0x10) { 1010 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1011 return -EINVAL; 1012 } 1013 1014 verification_length = get_unaligned_be16(&common->cmnd[7]); 1015 if (unlikely(verification_length == 0)) 1016 return -EIO; /* No default reply */ 1017 1018 /* Prepare to carry out the file verify */ 1019 amount_left = verification_length << curlun->blkbits; 1020 file_offset = ((loff_t) lba) << curlun->blkbits; 1021 1022 /* Write out all the dirty buffers before invalidating them */ 1023 fsg_lun_fsync_sub(curlun); 1024 if (signal_pending(current)) 1025 return -EINTR; 1026 1027 invalidate_sub(curlun); 1028 if (signal_pending(current)) 1029 return -EINTR; 1030 1031 /* Just try to read the requested blocks */ 1032 while (amount_left > 0) { 1033 /* 1034 * Figure out how much we need to read: 1035 * Try to read the remaining amount, but not more than 1036 * the buffer size. 1037 * And don't try to read past the end of the file. 
1038 */ 1039 amount = min(amount_left, FSG_BUFLEN); 1040 amount = min((loff_t)amount, 1041 curlun->file_length - file_offset); 1042 if (amount == 0) { 1043 curlun->sense_data = 1044 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1045 curlun->sense_data_info = 1046 file_offset >> curlun->blkbits; 1047 curlun->info_valid = 1; 1048 break; 1049 } 1050 1051 /* Perform the read */ 1052 file_offset_tmp = file_offset; 1053 nread = vfs_read(curlun->filp, 1054 (char __user *) bh->buf, 1055 amount, &file_offset_tmp); 1056 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, 1057 (unsigned long long) file_offset, 1058 (int) nread); 1059 if (signal_pending(current)) 1060 return -EINTR; 1061 1062 if (nread < 0) { 1063 LDBG(curlun, "error in file verify: %d\n", (int)nread); 1064 nread = 0; 1065 } else if (nread < amount) { 1066 LDBG(curlun, "partial file verify: %d/%u\n", 1067 (int)nread, amount); 1068 nread = round_down(nread, curlun->blksize); 1069 } 1070 if (nread == 0) { 1071 curlun->sense_data = SS_UNRECOVERED_READ_ERROR; 1072 curlun->sense_data_info = 1073 file_offset >> curlun->blkbits; 1074 curlun->info_valid = 1; 1075 break; 1076 } 1077 file_offset += nread; 1078 amount_left -= nread; 1079 } 1080 return 0; 1081 } 1082 1083 1084 /*-------------------------------------------------------------------------*/ 1085 1086 static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh) 1087 { 1088 struct fsg_lun *curlun = common->curlun; 1089 u8 *buf = (u8 *) bh->buf; 1090 1091 if (!curlun) { /* Unsupported LUNs are okay */ 1092 common->bad_lun_okay = 1; 1093 memset(buf, 0, 36); 1094 buf[0] = TYPE_NO_LUN; /* Unsupported, no device-type */ 1095 buf[4] = 31; /* Additional length */ 1096 return 36; 1097 } 1098 1099 buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK; 1100 buf[1] = curlun->removable ? 0x80 : 0; 1101 buf[2] = 2; /* ANSI SCSI level 2 */ 1102 buf[3] = 2; /* SCSI-2 INQUIRY data format */ 1103 buf[4] = 31; /* Additional length */ 1104 buf[5] = 0; /* No special options */ 1105 buf[6] = 0; 1106 buf[7] = 0; 1107 if (curlun->inquiry_string[0]) 1108 memcpy(buf + 8, curlun->inquiry_string, 1109 sizeof(curlun->inquiry_string)); 1110 else 1111 memcpy(buf + 8, common->inquiry_string, 1112 sizeof(common->inquiry_string)); 1113 return 36; 1114 } 1115 1116 static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh) 1117 { 1118 struct fsg_lun *curlun = common->curlun; 1119 u8 *buf = (u8 *) bh->buf; 1120 u32 sd, sdinfo; 1121 int valid; 1122 1123 /* 1124 * From the SCSI-2 spec., section 7.9 (Unit attention condition): 1125 * 1126 * If a REQUEST SENSE command is received from an initiator 1127 * with a pending unit attention condition (before the target 1128 * generates the contingent allegiance condition), then the 1129 * target shall either: 1130 * a) report any pending sense data and preserve the unit 1131 * attention condition on the logical unit, or, 1132 * b) report the unit attention condition, may discard any 1133 * pending sense data, and clear the unit attention 1134 * condition on the logical unit for that initiator. 1135 * 1136 * FSG normally uses option a); enable this code to use option b). 
1137 */ 1138 #if 0 1139 if (curlun && curlun->unit_attention_data != SS_NO_SENSE) { 1140 curlun->sense_data = curlun->unit_attention_data; 1141 curlun->unit_attention_data = SS_NO_SENSE; 1142 } 1143 #endif 1144 1145 if (!curlun) { /* Unsupported LUNs are okay */ 1146 common->bad_lun_okay = 1; 1147 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED; 1148 sdinfo = 0; 1149 valid = 0; 1150 } else { 1151 sd = curlun->sense_data; 1152 sdinfo = curlun->sense_data_info; 1153 valid = curlun->info_valid << 7; 1154 curlun->sense_data = SS_NO_SENSE; 1155 curlun->sense_data_info = 0; 1156 curlun->info_valid = 0; 1157 } 1158 1159 memset(buf, 0, 18); 1160 buf[0] = valid | 0x70; /* Valid, current error */ 1161 buf[2] = SK(sd); 1162 put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */ 1163 buf[7] = 18 - 8; /* Additional sense length */ 1164 buf[12] = ASC(sd); 1165 buf[13] = ASCQ(sd); 1166 return 18; 1167 } 1168 1169 static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh) 1170 { 1171 struct fsg_lun *curlun = common->curlun; 1172 u32 lba = get_unaligned_be32(&common->cmnd[2]); 1173 int pmi = common->cmnd[8]; 1174 u8 *buf = (u8 *)bh->buf; 1175 1176 /* Check the PMI and LBA fields */ 1177 if (pmi > 1 || (pmi == 0 && lba != 0)) { 1178 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1179 return -EINVAL; 1180 } 1181 1182 put_unaligned_be32(curlun->num_sectors - 1, &buf[0]); 1183 /* Max logical block */ 1184 put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */ 1185 return 8; 1186 } 1187 1188 static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh) 1189 { 1190 struct fsg_lun *curlun = common->curlun; 1191 int msf = common->cmnd[1] & 0x02; 1192 u32 lba = get_unaligned_be32(&common->cmnd[2]); 1193 u8 *buf = (u8 *)bh->buf; 1194 1195 if (common->cmnd[1] & ~0x02) { /* Mask away MSF */ 1196 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1197 return -EINVAL; 1198 } 1199 if (lba >= curlun->num_sectors) { 1200 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1201 return -EINVAL; 1202 } 1203 1204 memset(buf, 0, 8); 1205 buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */ 1206 store_cdrom_address(&buf[4], msf, lba); 1207 return 8; 1208 } 1209 1210 static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) 1211 { 1212 struct fsg_lun *curlun = common->curlun; 1213 int msf = common->cmnd[1] & 0x02; 1214 int start_track = common->cmnd[6]; 1215 u8 *buf = (u8 *)bh->buf; 1216 1217 if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */ 1218 start_track > 1) { 1219 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1220 return -EINVAL; 1221 } 1222 1223 memset(buf, 0, 20); 1224 buf[1] = (20-2); /* TOC data length */ 1225 buf[2] = 1; /* First track number */ 1226 buf[3] = 1; /* Last track number */ 1227 buf[5] = 0x16; /* Data track, copying allowed */ 1228 buf[6] = 0x01; /* Only track is number 1 */ 1229 store_cdrom_address(&buf[8], msf, 0); 1230 1231 buf[13] = 0x16; /* Lead-out track is data */ 1232 buf[14] = 0xAA; /* Lead-out track number */ 1233 store_cdrom_address(&buf[16], msf, curlun->num_sectors); 1234 return 20; 1235 } 1236 1237 static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh) 1238 { 1239 struct fsg_lun *curlun = common->curlun; 1240 int mscmnd = common->cmnd[0]; 1241 u8 *buf = (u8 *) bh->buf; 1242 u8 *buf0 = buf; 1243 int pc, page_code; 1244 int changeable_values, all_pages; 1245 int valid_page = 0; 1246 int len, limit; 1247 1248 if ((common->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */ 1249 curlun->sense_data = 
SS_INVALID_FIELD_IN_CDB; 1250 return -EINVAL; 1251 } 1252 pc = common->cmnd[2] >> 6; 1253 page_code = common->cmnd[2] & 0x3f; 1254 if (pc == 3) { 1255 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED; 1256 return -EINVAL; 1257 } 1258 changeable_values = (pc == 1); 1259 all_pages = (page_code == 0x3f); 1260 1261 /* 1262 * Write the mode parameter header. Fixed values are: default 1263 * medium type, no cache control (DPOFUA), and no block descriptors. 1264 * The only variable value is the WriteProtect bit. We will fill in 1265 * the mode data length later. 1266 */ 1267 memset(buf, 0, 8); 1268 if (mscmnd == MODE_SENSE) { 1269 buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */ 1270 buf += 4; 1271 limit = 255; 1272 } else { /* MODE_SENSE_10 */ 1273 buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */ 1274 buf += 8; 1275 limit = 65535; /* Should really be FSG_BUFLEN */ 1276 } 1277 1278 /* No block descriptors */ 1279 1280 /* 1281 * The mode pages, in numerical order. The only page we support 1282 * is the Caching page. 1283 */ 1284 if (page_code == 0x08 || all_pages) { 1285 valid_page = 1; 1286 buf[0] = 0x08; /* Page code */ 1287 buf[1] = 10; /* Page length */ 1288 memset(buf+2, 0, 10); /* None of the fields are changeable */ 1289 1290 if (!changeable_values) { 1291 buf[2] = 0x04; /* Write cache enable, */ 1292 /* Read cache not disabled */ 1293 /* No cache retention priorities */ 1294 put_unaligned_be16(0xffff, &buf[4]); 1295 /* Don't disable prefetch */ 1296 /* Minimum prefetch = 0 */ 1297 put_unaligned_be16(0xffff, &buf[8]); 1298 /* Maximum prefetch */ 1299 put_unaligned_be16(0xffff, &buf[10]); 1300 /* Maximum prefetch ceiling */ 1301 } 1302 buf += 12; 1303 } 1304 1305 /* 1306 * Check that a valid page was requested and the mode data length 1307 * isn't too long. 1308 */ 1309 len = buf - buf0; 1310 if (!valid_page || len > limit) { 1311 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1312 return -EINVAL; 1313 } 1314 1315 /* Store the mode data length */ 1316 if (mscmnd == MODE_SENSE) 1317 buf0[0] = len - 1; 1318 else 1319 put_unaligned_be16(len - 2, buf0); 1320 return len; 1321 } 1322 1323 static int do_start_stop(struct fsg_common *common) 1324 { 1325 struct fsg_lun *curlun = common->curlun; 1326 int loej, start; 1327 1328 if (!curlun) { 1329 return -EINVAL; 1330 } else if (!curlun->removable) { 1331 curlun->sense_data = SS_INVALID_COMMAND; 1332 return -EINVAL; 1333 } else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */ 1334 (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */ 1335 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1336 return -EINVAL; 1337 } 1338 1339 loej = common->cmnd[4] & 0x02; 1340 start = common->cmnd[4] & 0x01; 1341 1342 /* 1343 * Our emulation doesn't support mounting; the medium is 1344 * available for use as soon as it is loaded. 1345 */ 1346 if (start) { 1347 if (!fsg_lun_is_open(curlun)) { 1348 curlun->sense_data = SS_MEDIUM_NOT_PRESENT; 1349 return -EINVAL; 1350 } 1351 return 0; 1352 } 1353 1354 /* Are we allowed to unload the media? 
*/ 1355 if (curlun->prevent_medium_removal) { 1356 LDBG(curlun, "unload attempt prevented\n"); 1357 curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED; 1358 return -EINVAL; 1359 } 1360 1361 if (!loej) 1362 return 0; 1363 1364 up_read(&common->filesem); 1365 down_write(&common->filesem); 1366 fsg_lun_close(curlun); 1367 up_write(&common->filesem); 1368 down_read(&common->filesem); 1369 1370 return 0; 1371 } 1372 1373 static int do_prevent_allow(struct fsg_common *common) 1374 { 1375 struct fsg_lun *curlun = common->curlun; 1376 int prevent; 1377 1378 if (!common->curlun) { 1379 return -EINVAL; 1380 } else if (!common->curlun->removable) { 1381 common->curlun->sense_data = SS_INVALID_COMMAND; 1382 return -EINVAL; 1383 } 1384 1385 prevent = common->cmnd[4] & 0x01; 1386 if ((common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */ 1387 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1388 return -EINVAL; 1389 } 1390 1391 if (curlun->prevent_medium_removal && !prevent) 1392 fsg_lun_fsync_sub(curlun); 1393 curlun->prevent_medium_removal = prevent; 1394 return 0; 1395 } 1396 1397 static int do_read_format_capacities(struct fsg_common *common, 1398 struct fsg_buffhd *bh) 1399 { 1400 struct fsg_lun *curlun = common->curlun; 1401 u8 *buf = (u8 *) bh->buf; 1402 1403 buf[0] = buf[1] = buf[2] = 0; 1404 buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */ 1405 buf += 4; 1406 1407 put_unaligned_be32(curlun->num_sectors, &buf[0]); 1408 /* Number of blocks */ 1409 put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */ 1410 buf[4] = 0x02; /* Current capacity */ 1411 return 12; 1412 } 1413 1414 static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh) 1415 { 1416 struct fsg_lun *curlun = common->curlun; 1417 1418 /* We don't support MODE SELECT */ 1419 if (curlun) 1420 curlun->sense_data = SS_INVALID_COMMAND; 1421 return -EINVAL; 1422 } 1423 1424 1425 /*-------------------------------------------------------------------------*/ 1426 1427 static int halt_bulk_in_endpoint(struct fsg_dev *fsg) 1428 { 1429 int rc; 1430 1431 rc = fsg_set_halt(fsg, fsg->bulk_in); 1432 if (rc == -EAGAIN) 1433 VDBG(fsg, "delayed bulk-in endpoint halt\n"); 1434 while (rc != 0) { 1435 if (rc != -EAGAIN) { 1436 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc); 1437 rc = 0; 1438 break; 1439 } 1440 1441 /* Wait for a short time and then try again */ 1442 if (msleep_interruptible(100) != 0) 1443 return -EINTR; 1444 rc = usb_ep_set_halt(fsg->bulk_in); 1445 } 1446 return rc; 1447 } 1448 1449 static int wedge_bulk_in_endpoint(struct fsg_dev *fsg) 1450 { 1451 int rc; 1452 1453 DBG(fsg, "bulk-in set wedge\n"); 1454 rc = usb_ep_set_wedge(fsg->bulk_in); 1455 if (rc == -EAGAIN) 1456 VDBG(fsg, "delayed bulk-in endpoint wedge\n"); 1457 while (rc != 0) { 1458 if (rc != -EAGAIN) { 1459 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc); 1460 rc = 0; 1461 break; 1462 } 1463 1464 /* Wait for a short time and then try again */ 1465 if (msleep_interruptible(100) != 0) 1466 return -EINTR; 1467 rc = usb_ep_set_wedge(fsg->bulk_in); 1468 } 1469 return rc; 1470 } 1471 1472 static int throw_away_data(struct fsg_common *common) 1473 { 1474 struct fsg_buffhd *bh; 1475 u32 amount; 1476 int rc; 1477 1478 for (bh = common->next_buffhd_to_drain; 1479 bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0; 1480 bh = common->next_buffhd_to_drain) { 1481 1482 /* Throw away the data in a filled buffer */ 1483 if (bh->state == BUF_STATE_FULL) { 1484 smp_rmb(); 1485 bh->state = BUF_STATE_EMPTY; 1486 common->next_buffhd_to_drain = bh->next; 1487 
1488 /* A short packet or an error ends everything */ 1489 if (bh->outreq->actual < bh->bulk_out_intended_length || 1490 bh->outreq->status != 0) { 1491 raise_exception(common, 1492 FSG_STATE_ABORT_BULK_OUT); 1493 return -EINTR; 1494 } 1495 continue; 1496 } 1497 1498 /* Try to submit another request if we need one */ 1499 bh = common->next_buffhd_to_fill; 1500 if (bh->state == BUF_STATE_EMPTY 1501 && common->usb_amount_left > 0) { 1502 amount = min(common->usb_amount_left, FSG_BUFLEN); 1503 1504 /* 1505 * Except at the end of the transfer, amount will be 1506 * equal to the buffer size, which is divisible by 1507 * the bulk-out maxpacket size. 1508 */ 1509 set_bulk_out_req_length(common, bh, amount); 1510 if (!start_out_transfer(common, bh)) 1511 /* Dunno what to do if common->fsg is NULL */ 1512 return -EIO; 1513 common->next_buffhd_to_fill = bh->next; 1514 common->usb_amount_left -= amount; 1515 continue; 1516 } 1517 1518 /* Otherwise wait for something to happen */ 1519 rc = sleep_thread(common, true); 1520 if (rc) 1521 return rc; 1522 } 1523 return 0; 1524 } 1525 1526 static int finish_reply(struct fsg_common *common) 1527 { 1528 struct fsg_buffhd *bh = common->next_buffhd_to_fill; 1529 int rc = 0; 1530 1531 switch (common->data_dir) { 1532 case DATA_DIR_NONE: 1533 break; /* Nothing to send */ 1534 1535 /* 1536 * If we don't know whether the host wants to read or write, 1537 * this must be CB or CBI with an unknown command. We mustn't 1538 * try to send or receive any data. So stall both bulk pipes 1539 * if we can and wait for a reset. 1540 */ 1541 case DATA_DIR_UNKNOWN: 1542 if (!common->can_stall) { 1543 /* Nothing */ 1544 } else if (fsg_is_set(common)) { 1545 fsg_set_halt(common->fsg, common->fsg->bulk_out); 1546 rc = halt_bulk_in_endpoint(common->fsg); 1547 } else { 1548 /* Don't know what to do if common->fsg is NULL */ 1549 rc = -EIO; 1550 } 1551 break; 1552 1553 /* All but the last buffer of data must have already been sent */ 1554 case DATA_DIR_TO_HOST: 1555 if (common->data_size == 0) { 1556 /* Nothing to send */ 1557 1558 /* Don't know what to do if common->fsg is NULL */ 1559 } else if (!fsg_is_set(common)) { 1560 rc = -EIO; 1561 1562 /* If there's no residue, simply send the last buffer */ 1563 } else if (common->residue == 0) { 1564 bh->inreq->zero = 0; 1565 if (!start_in_transfer(common, bh)) 1566 return -EIO; 1567 common->next_buffhd_to_fill = bh->next; 1568 1569 /* 1570 * For Bulk-only, mark the end of the data with a short 1571 * packet. If we are allowed to stall, halt the bulk-in 1572 * endpoint. (Note: This violates the Bulk-Only Transport 1573 * specification, which requires us to pad the data if we 1574 * don't halt the endpoint. Presumably nobody will mind.) 1575 */ 1576 } else { 1577 bh->inreq->zero = 1; 1578 if (!start_in_transfer(common, bh)) 1579 rc = -EIO; 1580 common->next_buffhd_to_fill = bh->next; 1581 if (common->can_stall) 1582 rc = halt_bulk_in_endpoint(common->fsg); 1583 } 1584 break; 1585 1586 /* 1587 * We have processed all we want from the data the host has sent. 1588 * There may still be outstanding bulk-out requests. 1589 */ 1590 case DATA_DIR_FROM_HOST: 1591 if (common->residue == 0) { 1592 /* Nothing to receive */ 1593 1594 /* Did the host stop sending unexpectedly early? */ 1595 } else if (common->short_packet_received) { 1596 raise_exception(common, FSG_STATE_ABORT_BULK_OUT); 1597 rc = -EINTR; 1598 1599 /* 1600 * We haven't processed all the incoming data. Even though 1601 * we may be allowed to stall, doing so would cause a race. 
1602 * The controller may already have ACK'ed all the remaining 1603 * bulk-out packets, in which case the host wouldn't see a 1604 * STALL. Not realizing the endpoint was halted, it wouldn't 1605 * clear the halt -- leading to problems later on. 1606 */ 1607 #if 0 1608 } else if (common->can_stall) { 1609 if (fsg_is_set(common)) 1610 fsg_set_halt(common->fsg, 1611 common->fsg->bulk_out); 1612 raise_exception(common, FSG_STATE_ABORT_BULK_OUT); 1613 rc = -EINTR; 1614 #endif 1615 1616 /* 1617 * We can't stall. Read in the excess data and throw it 1618 * all away. 1619 */ 1620 } else { 1621 rc = throw_away_data(common); 1622 } 1623 break; 1624 } 1625 return rc; 1626 } 1627 1628 static int send_status(struct fsg_common *common) 1629 { 1630 struct fsg_lun *curlun = common->curlun; 1631 struct fsg_buffhd *bh; 1632 struct bulk_cs_wrap *csw; 1633 int rc; 1634 u8 status = US_BULK_STAT_OK; 1635 u32 sd, sdinfo = 0; 1636 1637 /* Wait for the next buffer to become available */ 1638 bh = common->next_buffhd_to_fill; 1639 while (bh->state != BUF_STATE_EMPTY) { 1640 rc = sleep_thread(common, true); 1641 if (rc) 1642 return rc; 1643 } 1644 1645 if (curlun) { 1646 sd = curlun->sense_data; 1647 sdinfo = curlun->sense_data_info; 1648 } else if (common->bad_lun_okay) 1649 sd = SS_NO_SENSE; 1650 else 1651 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED; 1652 1653 if (common->phase_error) { 1654 DBG(common, "sending phase-error status\n"); 1655 status = US_BULK_STAT_PHASE; 1656 sd = SS_INVALID_COMMAND; 1657 } else if (sd != SS_NO_SENSE) { 1658 DBG(common, "sending command-failure status\n"); 1659 status = US_BULK_STAT_FAIL; 1660 VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;" 1661 " info x%x\n", 1662 SK(sd), ASC(sd), ASCQ(sd), sdinfo); 1663 } 1664 1665 /* Store and send the Bulk-only CSW */ 1666 csw = (void *)bh->buf; 1667 1668 csw->Signature = cpu_to_le32(US_BULK_CS_SIGN); 1669 csw->Tag = common->tag; 1670 csw->Residue = cpu_to_le32(common->residue); 1671 csw->Status = status; 1672 1673 bh->inreq->length = US_BULK_CS_WRAP_LEN; 1674 bh->inreq->zero = 0; 1675 if (!start_in_transfer(common, bh)) 1676 /* Don't know what to do if common->fsg is NULL */ 1677 return -EIO; 1678 1679 common->next_buffhd_to_fill = bh->next; 1680 return 0; 1681 } 1682 1683 1684 /*-------------------------------------------------------------------------*/ 1685 1686 /* 1687 * Check whether the command is properly formed and whether its data size 1688 * and direction agree with the values we already have. 1689 */ 1690 static int check_command(struct fsg_common *common, int cmnd_size, 1691 enum data_direction data_dir, unsigned int mask, 1692 int needs_medium, const char *name) 1693 { 1694 int i; 1695 unsigned int lun = common->cmnd[1] >> 5; 1696 static const char dirletter[4] = {'u', 'o', 'i', 'n'}; 1697 char hdlen[20]; 1698 struct fsg_lun *curlun; 1699 1700 hdlen[0] = 0; 1701 if (common->data_dir != DATA_DIR_UNKNOWN) 1702 sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir], 1703 common->data_size); 1704 VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n", 1705 name, cmnd_size, dirletter[(int) data_dir], 1706 common->data_size_from_cmnd, common->cmnd_size, hdlen); 1707 1708 /* 1709 * We can't reply at all until we know the correct data direction 1710 * and size. 1711 */ 1712 if (common->data_size_from_cmnd == 0) 1713 data_dir = DATA_DIR_NONE; 1714 if (common->data_size < common->data_size_from_cmnd) { 1715 /* 1716 * Host data size < Device data size is a phase error. 
1717 * Carry out the command, but only transfer as much as 1718 * we are allowed. 1719 */ 1720 common->data_size_from_cmnd = common->data_size; 1721 common->phase_error = 1; 1722 } 1723 common->residue = common->data_size; 1724 common->usb_amount_left = common->data_size; 1725 1726 /* Conflicting data directions is a phase error */ 1727 if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) { 1728 common->phase_error = 1; 1729 return -EINVAL; 1730 } 1731 1732 /* Verify the length of the command itself */ 1733 if (cmnd_size != common->cmnd_size) { 1734 1735 /* 1736 * Special case workaround: There are plenty of buggy SCSI 1737 * implementations. Many have issues with cbw->Length 1738 * field passing a wrong command size. For those cases we 1739 * always try to work around the problem by using the length 1740 * sent by the host side provided it is at least as large 1741 * as the correct command length. 1742 * Examples of such cases would be MS-Windows, which issues 1743 * REQUEST SENSE with cbw->Length == 12 where it should 1744 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and 1745 * REQUEST SENSE with cbw->Length == 10 where it should 1746 * be 6 as well. 1747 */ 1748 if (cmnd_size <= common->cmnd_size) { 1749 DBG(common, "%s is buggy! Expected length %d " 1750 "but we got %d\n", name, 1751 cmnd_size, common->cmnd_size); 1752 cmnd_size = common->cmnd_size; 1753 } else { 1754 common->phase_error = 1; 1755 return -EINVAL; 1756 } 1757 } 1758 1759 /* Check that the LUN values are consistent */ 1760 if (common->lun != lun) 1761 DBG(common, "using LUN %u from CBW, not LUN %u from CDB\n", 1762 common->lun, lun); 1763 1764 /* Check the LUN */ 1765 curlun = common->curlun; 1766 if (curlun) { 1767 if (common->cmnd[0] != REQUEST_SENSE) { 1768 curlun->sense_data = SS_NO_SENSE; 1769 curlun->sense_data_info = 0; 1770 curlun->info_valid = 0; 1771 } 1772 } else { 1773 common->bad_lun_okay = 0; 1774 1775 /* 1776 * INQUIRY and REQUEST SENSE commands are explicitly allowed 1777 * to use unsupported LUNs; all others may not. 1778 */ 1779 if (common->cmnd[0] != INQUIRY && 1780 common->cmnd[0] != REQUEST_SENSE) { 1781 DBG(common, "unsupported LUN %u\n", common->lun); 1782 return -EINVAL; 1783 } 1784 } 1785 1786 /* 1787 * If a unit attention condition exists, only INQUIRY and 1788 * REQUEST SENSE commands are allowed; anything else must fail. 1789 */ 1790 if (curlun && curlun->unit_attention_data != SS_NO_SENSE && 1791 common->cmnd[0] != INQUIRY && 1792 common->cmnd[0] != REQUEST_SENSE) { 1793 curlun->sense_data = curlun->unit_attention_data; 1794 curlun->unit_attention_data = SS_NO_SENSE; 1795 return -EINVAL; 1796 } 1797 1798 /* Check that only command bytes listed in the mask are non-zero */ 1799 common->cmnd[1] &= 0x1f; /* Mask away the LUN */ 1800 for (i = 1; i < cmnd_size; ++i) { 1801 if (common->cmnd[i] && !(mask & (1 << i))) { 1802 if (curlun) 1803 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1804 return -EINVAL; 1805 } 1806 } 1807 1808 /* If the medium isn't mounted and the command needs to access 1809 * it, return an error. 
*/ 1810 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) { 1811 curlun->sense_data = SS_MEDIUM_NOT_PRESENT; 1812 return -EINVAL; 1813 } 1814 1815 return 0; 1816 } 1817 1818 /* wrapper of check_command for data size in blocks handling */ 1819 static int check_command_size_in_blocks(struct fsg_common *common, 1820 int cmnd_size, enum data_direction data_dir, 1821 unsigned int mask, int needs_medium, const char *name) 1822 { 1823 if (common->curlun) 1824 common->data_size_from_cmnd <<= common->curlun->blkbits; 1825 return check_command(common, cmnd_size, data_dir, 1826 mask, needs_medium, name); 1827 } 1828 1829 static int do_scsi_command(struct fsg_common *common) 1830 { 1831 struct fsg_buffhd *bh; 1832 int rc; 1833 int reply = -EINVAL; 1834 int i; 1835 static char unknown[16]; 1836 1837 dump_cdb(common); 1838 1839 /* Wait for the next buffer to become available for data or status */ 1840 bh = common->next_buffhd_to_fill; 1841 common->next_buffhd_to_drain = bh; 1842 while (bh->state != BUF_STATE_EMPTY) { 1843 rc = sleep_thread(common, true); 1844 if (rc) 1845 return rc; 1846 } 1847 common->phase_error = 0; 1848 common->short_packet_received = 0; 1849 1850 down_read(&common->filesem); /* We're using the backing file */ 1851 switch (common->cmnd[0]) { 1852 1853 case INQUIRY: 1854 common->data_size_from_cmnd = common->cmnd[4]; 1855 reply = check_command(common, 6, DATA_DIR_TO_HOST, 1856 (1<<4), 0, 1857 "INQUIRY"); 1858 if (reply == 0) 1859 reply = do_inquiry(common, bh); 1860 break; 1861 1862 case MODE_SELECT: 1863 common->data_size_from_cmnd = common->cmnd[4]; 1864 reply = check_command(common, 6, DATA_DIR_FROM_HOST, 1865 (1<<1) | (1<<4), 0, 1866 "MODE SELECT(6)"); 1867 if (reply == 0) 1868 reply = do_mode_select(common, bh); 1869 break; 1870 1871 case MODE_SELECT_10: 1872 common->data_size_from_cmnd = 1873 get_unaligned_be16(&common->cmnd[7]); 1874 reply = check_command(common, 10, DATA_DIR_FROM_HOST, 1875 (1<<1) | (3<<7), 0, 1876 "MODE SELECT(10)"); 1877 if (reply == 0) 1878 reply = do_mode_select(common, bh); 1879 break; 1880 1881 case MODE_SENSE: 1882 common->data_size_from_cmnd = common->cmnd[4]; 1883 reply = check_command(common, 6, DATA_DIR_TO_HOST, 1884 (1<<1) | (1<<2) | (1<<4), 0, 1885 "MODE SENSE(6)"); 1886 if (reply == 0) 1887 reply = do_mode_sense(common, bh); 1888 break; 1889 1890 case MODE_SENSE_10: 1891 common->data_size_from_cmnd = 1892 get_unaligned_be16(&common->cmnd[7]); 1893 reply = check_command(common, 10, DATA_DIR_TO_HOST, 1894 (1<<1) | (1<<2) | (3<<7), 0, 1895 "MODE SENSE(10)"); 1896 if (reply == 0) 1897 reply = do_mode_sense(common, bh); 1898 break; 1899 1900 case ALLOW_MEDIUM_REMOVAL: 1901 common->data_size_from_cmnd = 0; 1902 reply = check_command(common, 6, DATA_DIR_NONE, 1903 (1<<4), 0, 1904 "PREVENT-ALLOW MEDIUM REMOVAL"); 1905 if (reply == 0) 1906 reply = do_prevent_allow(common); 1907 break; 1908 1909 case READ_6: 1910 i = common->cmnd[4]; 1911 common->data_size_from_cmnd = (i == 0) ? 
256 : i; 1912 reply = check_command_size_in_blocks(common, 6, 1913 DATA_DIR_TO_HOST, 1914 (7<<1) | (1<<4), 1, 1915 "READ(6)"); 1916 if (reply == 0) 1917 reply = do_read(common); 1918 break; 1919 1920 case READ_10: 1921 common->data_size_from_cmnd = 1922 get_unaligned_be16(&common->cmnd[7]); 1923 reply = check_command_size_in_blocks(common, 10, 1924 DATA_DIR_TO_HOST, 1925 (1<<1) | (0xf<<2) | (3<<7), 1, 1926 "READ(10)"); 1927 if (reply == 0) 1928 reply = do_read(common); 1929 break; 1930 1931 case READ_12: 1932 common->data_size_from_cmnd = 1933 get_unaligned_be32(&common->cmnd[6]); 1934 reply = check_command_size_in_blocks(common, 12, 1935 DATA_DIR_TO_HOST, 1936 (1<<1) | (0xf<<2) | (0xf<<6), 1, 1937 "READ(12)"); 1938 if (reply == 0) 1939 reply = do_read(common); 1940 break; 1941 1942 case READ_CAPACITY: 1943 common->data_size_from_cmnd = 8; 1944 reply = check_command(common, 10, DATA_DIR_TO_HOST, 1945 (0xf<<2) | (1<<8), 1, 1946 "READ CAPACITY"); 1947 if (reply == 0) 1948 reply = do_read_capacity(common, bh); 1949 break; 1950 1951 case READ_HEADER: 1952 if (!common->curlun || !common->curlun->cdrom) 1953 goto unknown_cmnd; 1954 common->data_size_from_cmnd = 1955 get_unaligned_be16(&common->cmnd[7]); 1956 reply = check_command(common, 10, DATA_DIR_TO_HOST, 1957 (3<<7) | (0x1f<<1), 1, 1958 "READ HEADER"); 1959 if (reply == 0) 1960 reply = do_read_header(common, bh); 1961 break; 1962 1963 case READ_TOC: 1964 if (!common->curlun || !common->curlun->cdrom) 1965 goto unknown_cmnd; 1966 common->data_size_from_cmnd = 1967 get_unaligned_be16(&common->cmnd[7]); 1968 reply = check_command(common, 10, DATA_DIR_TO_HOST, 1969 (7<<6) | (1<<1), 1, 1970 "READ TOC"); 1971 if (reply == 0) 1972 reply = do_read_toc(common, bh); 1973 break; 1974 1975 case READ_FORMAT_CAPACITIES: 1976 common->data_size_from_cmnd = 1977 get_unaligned_be16(&common->cmnd[7]); 1978 reply = check_command(common, 10, DATA_DIR_TO_HOST, 1979 (3<<7), 1, 1980 "READ FORMAT CAPACITIES"); 1981 if (reply == 0) 1982 reply = do_read_format_capacities(common, bh); 1983 break; 1984 1985 case REQUEST_SENSE: 1986 common->data_size_from_cmnd = common->cmnd[4]; 1987 reply = check_command(common, 6, DATA_DIR_TO_HOST, 1988 (1<<4), 0, 1989 "REQUEST SENSE"); 1990 if (reply == 0) 1991 reply = do_request_sense(common, bh); 1992 break; 1993 1994 case START_STOP: 1995 common->data_size_from_cmnd = 0; 1996 reply = check_command(common, 6, DATA_DIR_NONE, 1997 (1<<1) | (1<<4), 0, 1998 "START-STOP UNIT"); 1999 if (reply == 0) 2000 reply = do_start_stop(common); 2001 break; 2002 2003 case SYNCHRONIZE_CACHE: 2004 common->data_size_from_cmnd = 0; 2005 reply = check_command(common, 10, DATA_DIR_NONE, 2006 (0xf<<2) | (3<<7), 1, 2007 "SYNCHRONIZE CACHE"); 2008 if (reply == 0) 2009 reply = do_synchronize_cache(common); 2010 break; 2011 2012 case TEST_UNIT_READY: 2013 common->data_size_from_cmnd = 0; 2014 reply = check_command(common, 6, DATA_DIR_NONE, 2015 0, 1, 2016 "TEST UNIT READY"); 2017 break; 2018 2019 /* 2020 * Although optional, this command is used by MS-Windows. We 2021 * support a minimal version: BytChk must be 0. 2022 */ 2023 case VERIFY: 2024 common->data_size_from_cmnd = 0; 2025 reply = check_command(common, 10, DATA_DIR_NONE, 2026 (1<<1) | (0xf<<2) | (3<<7), 1, 2027 "VERIFY"); 2028 if (reply == 0) 2029 reply = do_verify(common); 2030 break; 2031 2032 case WRITE_6: 2033 i = common->cmnd[4]; 2034 common->data_size_from_cmnd = (i == 0) ? 
256 : i; 2035 reply = check_command_size_in_blocks(common, 6, 2036 DATA_DIR_FROM_HOST, 2037 (7<<1) | (1<<4), 1, 2038 "WRITE(6)"); 2039 if (reply == 0) 2040 reply = do_write(common); 2041 break; 2042 2043 case WRITE_10: 2044 common->data_size_from_cmnd = 2045 get_unaligned_be16(&common->cmnd[7]); 2046 reply = check_command_size_in_blocks(common, 10, 2047 DATA_DIR_FROM_HOST, 2048 (1<<1) | (0xf<<2) | (3<<7), 1, 2049 "WRITE(10)"); 2050 if (reply == 0) 2051 reply = do_write(common); 2052 break; 2053 2054 case WRITE_12: 2055 common->data_size_from_cmnd = 2056 get_unaligned_be32(&common->cmnd[6]); 2057 reply = check_command_size_in_blocks(common, 12, 2058 DATA_DIR_FROM_HOST, 2059 (1<<1) | (0xf<<2) | (0xf<<6), 1, 2060 "WRITE(12)"); 2061 if (reply == 0) 2062 reply = do_write(common); 2063 break; 2064 2065 /* 2066 * Some mandatory commands that we recognize but don't implement. 2067 * They don't mean much in this setting. It's left as an exercise 2068 * for anyone interested to implement RESERVE and RELEASE in terms 2069 * of Posix locks. 2070 */ 2071 case FORMAT_UNIT: 2072 case RELEASE: 2073 case RESERVE: 2074 case SEND_DIAGNOSTIC: 2075 /* Fall through */ 2076 2077 default: 2078 unknown_cmnd: 2079 common->data_size_from_cmnd = 0; 2080 sprintf(unknown, "Unknown x%02x", common->cmnd[0]); 2081 reply = check_command(common, common->cmnd_size, 2082 DATA_DIR_UNKNOWN, ~0, 0, unknown); 2083 if (reply == 0) { 2084 common->curlun->sense_data = SS_INVALID_COMMAND; 2085 reply = -EINVAL; 2086 } 2087 break; 2088 } 2089 up_read(&common->filesem); 2090 2091 if (reply == -EINTR || signal_pending(current)) 2092 return -EINTR; 2093 2094 /* Set up the single reply buffer for finish_reply() */ 2095 if (reply == -EINVAL) 2096 reply = 0; /* Error reply length */ 2097 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) { 2098 reply = min((u32)reply, common->data_size_from_cmnd); 2099 bh->inreq->length = reply; 2100 bh->state = BUF_STATE_FULL; 2101 common->residue -= reply; 2102 } /* Otherwise it's already set */ 2103 2104 return 0; 2105 } 2106 2107 2108 /*-------------------------------------------------------------------------*/ 2109 2110 static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2111 { 2112 struct usb_request *req = bh->outreq; 2113 struct bulk_cb_wrap *cbw = req->buf; 2114 struct fsg_common *common = fsg->common; 2115 2116 /* Was this a real packet? Should it be ignored? */ 2117 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags)) 2118 return -EINVAL; 2119 2120 /* Is the CBW valid? */ 2121 if (req->actual != US_BULK_CB_WRAP_LEN || 2122 cbw->Signature != cpu_to_le32( 2123 US_BULK_CB_SIGN)) { 2124 DBG(fsg, "invalid CBW: len %u sig 0x%x\n", 2125 req->actual, 2126 le32_to_cpu(cbw->Signature)); 2127 2128 /* 2129 * The Bulk-only spec says we MUST stall the IN endpoint 2130 * (6.6.1), so it's unavoidable. It also says we must 2131 * retain this state until the next reset, but there's 2132 * no way to tell the controller driver it should ignore 2133 * Clear-Feature(HALT) requests. 2134 * 2135 * We aren't required to halt the OUT endpoint; instead 2136 * we can simply accept and discard any data received 2137 * until the next reset. 2138 */ 2139 wedge_bulk_in_endpoint(fsg); 2140 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2141 return -EINVAL; 2142 } 2143 2144 /* Is the CBW meaningful? 
*/ 2145 if (cbw->Lun >= ARRAY_SIZE(common->luns) || 2146 cbw->Flags & ~US_BULK_FLAG_IN || cbw->Length <= 0 || 2147 cbw->Length > MAX_COMMAND_SIZE) { 2148 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, " 2149 "cmdlen %u\n", 2150 cbw->Lun, cbw->Flags, cbw->Length); 2151 2152 /* 2153 * We can do anything we want here, so let's stall the 2154 * bulk pipes if we are allowed to. 2155 */ 2156 if (common->can_stall) { 2157 fsg_set_halt(fsg, fsg->bulk_out); 2158 halt_bulk_in_endpoint(fsg); 2159 } 2160 return -EINVAL; 2161 } 2162 2163 /* Save the command for later */ 2164 common->cmnd_size = cbw->Length; 2165 memcpy(common->cmnd, cbw->CDB, common->cmnd_size); 2166 if (cbw->Flags & US_BULK_FLAG_IN) 2167 common->data_dir = DATA_DIR_TO_HOST; 2168 else 2169 common->data_dir = DATA_DIR_FROM_HOST; 2170 common->data_size = le32_to_cpu(cbw->DataTransferLength); 2171 if (common->data_size == 0) 2172 common->data_dir = DATA_DIR_NONE; 2173 common->lun = cbw->Lun; 2174 if (common->lun < ARRAY_SIZE(common->luns)) 2175 common->curlun = common->luns[common->lun]; 2176 else 2177 common->curlun = NULL; 2178 common->tag = cbw->Tag; 2179 return 0; 2180 } 2181 2182 static int get_next_command(struct fsg_common *common) 2183 { 2184 struct fsg_buffhd *bh; 2185 int rc = 0; 2186 2187 /* Wait for the next buffer to become available */ 2188 bh = common->next_buffhd_to_fill; 2189 while (bh->state != BUF_STATE_EMPTY) { 2190 rc = sleep_thread(common, true); 2191 if (rc) 2192 return rc; 2193 } 2194 2195 /* Queue a request to read a Bulk-only CBW */ 2196 set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN); 2197 if (!start_out_transfer(common, bh)) 2198 /* Don't know what to do if common->fsg is NULL */ 2199 return -EIO; 2200 2201 /* 2202 * We will drain the buffer in software, which means we 2203 * can reuse it for the next filling. No need to advance 2204 * next_buffhd_to_fill. 2205 */ 2206 2207 /* Wait for the CBW to arrive */ 2208 while (bh->state != BUF_STATE_FULL) { 2209 rc = sleep_thread(common, true); 2210 if (rc) 2211 return rc; 2212 } 2213 smp_rmb(); 2214 rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO; 2215 bh->state = BUF_STATE_EMPTY; 2216 2217 return rc; 2218 } 2219 2220 2221 /*-------------------------------------------------------------------------*/ 2222 2223 static int alloc_request(struct fsg_common *common, struct usb_ep *ep, 2224 struct usb_request **preq) 2225 { 2226 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC); 2227 if (*preq) 2228 return 0; 2229 ERROR(common, "can't allocate request for %s\n", ep->name); 2230 return -ENOMEM; 2231 } 2232 2233 /* Reset interface setting and re-init endpoint state (toggle etc). 
*/ 2234 static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg) 2235 { 2236 struct fsg_dev *fsg; 2237 int i, rc = 0; 2238 2239 if (common->running) 2240 DBG(common, "reset interface\n"); 2241 2242 reset: 2243 /* Deallocate the requests */ 2244 if (common->fsg) { 2245 fsg = common->fsg; 2246 2247 for (i = 0; i < common->fsg_num_buffers; ++i) { 2248 struct fsg_buffhd *bh = &common->buffhds[i]; 2249 2250 if (bh->inreq) { 2251 usb_ep_free_request(fsg->bulk_in, bh->inreq); 2252 bh->inreq = NULL; 2253 } 2254 if (bh->outreq) { 2255 usb_ep_free_request(fsg->bulk_out, bh->outreq); 2256 bh->outreq = NULL; 2257 } 2258 } 2259 2260 /* Disable the endpoints */ 2261 if (fsg->bulk_in_enabled) { 2262 usb_ep_disable(fsg->bulk_in); 2263 fsg->bulk_in_enabled = 0; 2264 } 2265 if (fsg->bulk_out_enabled) { 2266 usb_ep_disable(fsg->bulk_out); 2267 fsg->bulk_out_enabled = 0; 2268 } 2269 2270 common->fsg = NULL; 2271 wake_up(&common->fsg_wait); 2272 } 2273 2274 common->running = 0; 2275 if (!new_fsg || rc) 2276 return rc; 2277 2278 common->fsg = new_fsg; 2279 fsg = common->fsg; 2280 2281 /* Enable the endpoints */ 2282 rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in); 2283 if (rc) 2284 goto reset; 2285 rc = usb_ep_enable(fsg->bulk_in); 2286 if (rc) 2287 goto reset; 2288 fsg->bulk_in->driver_data = common; 2289 fsg->bulk_in_enabled = 1; 2290 2291 rc = config_ep_by_speed(common->gadget, &(fsg->function), 2292 fsg->bulk_out); 2293 if (rc) 2294 goto reset; 2295 rc = usb_ep_enable(fsg->bulk_out); 2296 if (rc) 2297 goto reset; 2298 fsg->bulk_out->driver_data = common; 2299 fsg->bulk_out_enabled = 1; 2300 common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc); 2301 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2302 2303 /* Allocate the requests */ 2304 for (i = 0; i < common->fsg_num_buffers; ++i) { 2305 struct fsg_buffhd *bh = &common->buffhds[i]; 2306 2307 rc = alloc_request(common, fsg->bulk_in, &bh->inreq); 2308 if (rc) 2309 goto reset; 2310 rc = alloc_request(common, fsg->bulk_out, &bh->outreq); 2311 if (rc) 2312 goto reset; 2313 bh->inreq->buf = bh->outreq->buf = bh->buf; 2314 bh->inreq->context = bh->outreq->context = bh; 2315 bh->inreq->complete = bulk_in_complete; 2316 bh->outreq->complete = bulk_out_complete; 2317 } 2318 2319 common->running = 1; 2320 for (i = 0; i < ARRAY_SIZE(common->luns); ++i) 2321 if (common->luns[i]) 2322 common->luns[i]->unit_attention_data = 2323 SS_RESET_OCCURRED; 2324 return rc; 2325 } 2326 2327 2328 /****************************** ALT CONFIGS ******************************/ 2329 2330 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 2331 { 2332 struct fsg_dev *fsg = fsg_from_func(f); 2333 fsg->common->new_fsg = fsg; 2334 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2335 return USB_GADGET_DELAYED_STATUS; 2336 } 2337 2338 static void fsg_disable(struct usb_function *f) 2339 { 2340 struct fsg_dev *fsg = fsg_from_func(f); 2341 fsg->common->new_fsg = NULL; 2342 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2343 } 2344 2345 2346 /*-------------------------------------------------------------------------*/ 2347 2348 static void handle_exception(struct fsg_common *common) 2349 { 2350 int i; 2351 struct fsg_buffhd *bh; 2352 enum fsg_state old_state; 2353 struct fsg_lun *curlun; 2354 unsigned int exception_req_tag; 2355 2356 /* 2357 * Clear the existing signals. Anything but SIGUSR1 is converted 2358 * into a high-priority EXIT exception. 
2359 */ 2360 for (;;) { 2361 int sig = kernel_dequeue_signal(NULL); 2362 if (!sig) 2363 break; 2364 if (sig != SIGUSR1) { 2365 if (common->state < FSG_STATE_EXIT) 2366 DBG(common, "Main thread exiting on signal\n"); 2367 raise_exception(common, FSG_STATE_EXIT); 2368 } 2369 } 2370 2371 /* Cancel all the pending transfers */ 2372 if (likely(common->fsg)) { 2373 for (i = 0; i < common->fsg_num_buffers; ++i) { 2374 bh = &common->buffhds[i]; 2375 if (bh->inreq_busy) 2376 usb_ep_dequeue(common->fsg->bulk_in, bh->inreq); 2377 if (bh->outreq_busy) 2378 usb_ep_dequeue(common->fsg->bulk_out, 2379 bh->outreq); 2380 } 2381 2382 /* Wait until everything is idle */ 2383 for (;;) { 2384 int num_active = 0; 2385 for (i = 0; i < common->fsg_num_buffers; ++i) { 2386 bh = &common->buffhds[i]; 2387 num_active += bh->inreq_busy + bh->outreq_busy; 2388 } 2389 if (num_active == 0) 2390 break; 2391 if (sleep_thread(common, true)) 2392 return; 2393 } 2394 2395 /* Clear out the controller's fifos */ 2396 if (common->fsg->bulk_in_enabled) 2397 usb_ep_fifo_flush(common->fsg->bulk_in); 2398 if (common->fsg->bulk_out_enabled) 2399 usb_ep_fifo_flush(common->fsg->bulk_out); 2400 } 2401 2402 /* 2403 * Reset the I/O buffer states and pointers, the SCSI 2404 * state, and the exception. Then invoke the handler. 2405 */ 2406 spin_lock_irq(&common->lock); 2407 2408 for (i = 0; i < common->fsg_num_buffers; ++i) { 2409 bh = &common->buffhds[i]; 2410 bh->state = BUF_STATE_EMPTY; 2411 } 2412 common->next_buffhd_to_fill = &common->buffhds[0]; 2413 common->next_buffhd_to_drain = &common->buffhds[0]; 2414 exception_req_tag = common->exception_req_tag; 2415 old_state = common->state; 2416 2417 if (old_state == FSG_STATE_ABORT_BULK_OUT) 2418 common->state = FSG_STATE_STATUS_PHASE; 2419 else { 2420 for (i = 0; i < ARRAY_SIZE(common->luns); ++i) { 2421 curlun = common->luns[i]; 2422 if (!curlun) 2423 continue; 2424 curlun->prevent_medium_removal = 0; 2425 curlun->sense_data = SS_NO_SENSE; 2426 curlun->unit_attention_data = SS_NO_SENSE; 2427 curlun->sense_data_info = 0; 2428 curlun->info_valid = 0; 2429 } 2430 common->state = FSG_STATE_IDLE; 2431 } 2432 spin_unlock_irq(&common->lock); 2433 2434 /* Carry out any extra actions required for the exception */ 2435 switch (old_state) { 2436 case FSG_STATE_ABORT_BULK_OUT: 2437 send_status(common); 2438 spin_lock_irq(&common->lock); 2439 if (common->state == FSG_STATE_STATUS_PHASE) 2440 common->state = FSG_STATE_IDLE; 2441 spin_unlock_irq(&common->lock); 2442 break; 2443 2444 case FSG_STATE_RESET: 2445 /* 2446 * In case we were forced against our will to halt a 2447 * bulk endpoint, clear the halt now. (The SuperH UDC 2448 * requires this.) 2449 */ 2450 if (!fsg_is_set(common)) 2451 break; 2452 if (test_and_clear_bit(IGNORE_BULK_OUT, 2453 &common->fsg->atomic_bitflags)) 2454 usb_ep_clear_halt(common->fsg->bulk_in); 2455 2456 if (common->ep0_req_tag == exception_req_tag) 2457 ep0_queue(common); /* Complete the status stage */ 2458 2459 /* 2460 * Technically this should go here, but it would only be 2461 * a waste of time. Ditto for the INTERFACE_CHANGE and 2462 * CONFIG_CHANGE cases. 
2463 */ 2464 /* for (i = 0; i < ARRAY_SIZE(common->luns); ++i) */ 2465 /* if (common->luns[i]) */ 2466 /* common->luns[i]->unit_attention_data = */ 2467 /* SS_RESET_OCCURRED; */ 2468 break; 2469 2470 case FSG_STATE_CONFIG_CHANGE: 2471 do_set_interface(common, common->new_fsg); 2472 if (common->new_fsg) 2473 usb_composite_setup_continue(common->cdev); 2474 break; 2475 2476 case FSG_STATE_EXIT: 2477 case FSG_STATE_TERMINATED: 2478 do_set_interface(common, NULL); /* Free resources */ 2479 spin_lock_irq(&common->lock); 2480 common->state = FSG_STATE_TERMINATED; /* Stop the thread */ 2481 spin_unlock_irq(&common->lock); 2482 break; 2483 2484 case FSG_STATE_INTERFACE_CHANGE: 2485 case FSG_STATE_DISCONNECT: 2486 case FSG_STATE_COMMAND_PHASE: 2487 case FSG_STATE_DATA_PHASE: 2488 case FSG_STATE_STATUS_PHASE: 2489 case FSG_STATE_IDLE: 2490 break; 2491 } 2492 } 2493 2494 2495 /*-------------------------------------------------------------------------*/ 2496 2497 static int fsg_main_thread(void *common_) 2498 { 2499 struct fsg_common *common = common_; 2500 2501 /* 2502 * Allow the thread to be killed by a signal, but set the signal mask 2503 * to block everything but INT, TERM, KILL, and USR1. 2504 */ 2505 allow_signal(SIGINT); 2506 allow_signal(SIGTERM); 2507 allow_signal(SIGKILL); 2508 allow_signal(SIGUSR1); 2509 2510 /* Allow the thread to be frozen */ 2511 set_freezable(); 2512 2513 /* 2514 * Arrange for userspace references to be interpreted as kernel 2515 * pointers. That way we can pass a kernel pointer to a routine 2516 * that expects a __user pointer and it will work okay. 2517 */ 2518 set_fs(get_ds()); 2519 2520 /* The main loop */ 2521 while (common->state != FSG_STATE_TERMINATED) { 2522 if (exception_in_progress(common) || signal_pending(current)) { 2523 handle_exception(common); 2524 continue; 2525 } 2526 2527 if (!common->running) { 2528 sleep_thread(common, true); 2529 continue; 2530 } 2531 2532 if (get_next_command(common)) 2533 continue; 2534 2535 spin_lock_irq(&common->lock); 2536 if (!exception_in_progress(common)) 2537 common->state = FSG_STATE_DATA_PHASE; 2538 spin_unlock_irq(&common->lock); 2539 2540 if (do_scsi_command(common) || finish_reply(common)) 2541 continue; 2542 2543 spin_lock_irq(&common->lock); 2544 if (!exception_in_progress(common)) 2545 common->state = FSG_STATE_STATUS_PHASE; 2546 spin_unlock_irq(&common->lock); 2547 2548 if (send_status(common)) 2549 continue; 2550 2551 spin_lock_irq(&common->lock); 2552 if (!exception_in_progress(common)) 2553 common->state = FSG_STATE_IDLE; 2554 spin_unlock_irq(&common->lock); 2555 } 2556 2557 spin_lock_irq(&common->lock); 2558 common->thread_task = NULL; 2559 spin_unlock_irq(&common->lock); 2560 2561 if (!common->ops || !common->ops->thread_exits 2562 || common->ops->thread_exits(common) < 0) { 2563 int i; 2564 2565 down_write(&common->filesem); 2566 for (i = 0; i < ARRAY_SIZE(common->luns); ++i) { 2567 struct fsg_lun *curlun = common->luns[i]; 2568 if (!curlun || !fsg_lun_is_open(curlun)) 2569 continue; 2570 2571 fsg_lun_close(curlun); 2572 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT; 2573 } 2574 up_write(&common->filesem); 2575 } 2576 2577 /* Let fsg_unbind() know the thread has exited */ 2578 complete_and_exit(&common->thread_notifier, 0); 2579 } 2580 2581 2582 /*************************** DEVICE ATTRIBUTES ***************************/ 2583
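/*
 * The show/store helpers below back the per-LUN sysfs attribute files
 * ("ro", "nofua" and "file") registered when the LUN devices are created
 * with sysfs support enabled (see fsg_common_create_lun()). Illustrative
 * userspace access only -- the parent path depends on the UDC and gadget
 * in use, so treat these paths as an example, not a contract:
 *
 *	cat /sys/devices/<udc>/gadget/lun0/ro
 *	echo /tmp/backing.img > /sys/devices/<udc>/gadget/lun0/file
 *	echo 1 > /sys/devices/<udc>/gadget/lun0/nofua
 */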
2584 static ssize_t ro_show(struct device *dev, struct device_attribute *attr, char *buf) 2585 { 2586 struct fsg_lun *curlun = fsg_lun_from_dev(dev); 2587 2588 return fsg_show_ro(curlun, buf); 2589 } 2590 2591 static ssize_t nofua_show(struct device *dev, struct device_attribute *attr, 2592 char *buf) 2593 { 2594 struct fsg_lun *curlun = fsg_lun_from_dev(dev); 2595 2596 return fsg_show_nofua(curlun, buf); 2597 } 2598 2599 static ssize_t file_show(struct device *dev, struct device_attribute *attr, 2600 char *buf) 2601 { 2602 struct fsg_lun *curlun = fsg_lun_from_dev(dev); 2603 struct rw_semaphore *filesem = dev_get_drvdata(dev); 2604 2605 return fsg_show_file(curlun, filesem, buf); 2606 } 2607 2608 static ssize_t ro_store(struct device *dev, struct device_attribute *attr, 2609 const char *buf, size_t count) 2610 { 2611 struct fsg_lun *curlun = fsg_lun_from_dev(dev); 2612 struct rw_semaphore *filesem = dev_get_drvdata(dev); 2613 2614 return fsg_store_ro(curlun, filesem, buf, count); 2615 } 2616 2617 static ssize_t nofua_store(struct device *dev, struct device_attribute *attr, 2618 const char *buf, size_t count) 2619 { 2620 struct fsg_lun *curlun = fsg_lun_from_dev(dev); 2621 2622 return fsg_store_nofua(curlun, buf, count); 2623 } 2624 2625 static ssize_t file_store(struct device *dev, struct device_attribute *attr, 2626 const char *buf, size_t count) 2627 { 2628 struct fsg_lun *curlun = fsg_lun_from_dev(dev); 2629 struct rw_semaphore *filesem = dev_get_drvdata(dev); 2630 2631 return fsg_store_file(curlun, filesem, buf, count); 2632 } 2633 2634 static DEVICE_ATTR_RW(nofua); 2635 /* mode will be set in fsg_lun_dev_is_visible() */ 2636 static DEVICE_ATTR(ro, 0, ro_show, ro_store); 2637 static DEVICE_ATTR(file, 0, file_show, file_store); 2638 2639 /****************************** FSG COMMON ******************************/ 2640 2641 static void fsg_common_release(struct kref *ref); 2642 2643 static void fsg_lun_release(struct device *dev) 2644 { 2645 /* Nothing needs to be done */ 2646 } 2647 2648 void fsg_common_get(struct fsg_common *common) 2649 { 2650 kref_get(&common->ref); 2651 } 2652 EXPORT_SYMBOL_GPL(fsg_common_get); 2653 2654 void fsg_common_put(struct fsg_common *common) 2655 { 2656 kref_put(&common->ref, fsg_common_release); 2657 } 2658 EXPORT_SYMBOL_GPL(fsg_common_put); 2659 2660 static struct fsg_common *fsg_common_setup(struct fsg_common *common) 2661 { 2662 if (!common) { 2663 common = kzalloc(sizeof(*common), GFP_KERNEL); 2664 if (!common) 2665 return ERR_PTR(-ENOMEM); 2666 common->free_storage_on_release = 1; 2667 } else { 2668 common->free_storage_on_release = 0; 2669 } 2670 init_rwsem(&common->filesem); 2671 spin_lock_init(&common->lock); 2672 kref_init(&common->ref); 2673 init_completion(&common->thread_notifier); 2674 init_waitqueue_head(&common->fsg_wait); 2675 common->state = FSG_STATE_TERMINATED; 2676 memset(common->luns, 0, sizeof(common->luns)); 2677 2678 return common; 2679 } 2680 2681 void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs) 2682 { 2683 common->sysfs = sysfs; 2684 } 2685 EXPORT_SYMBOL_GPL(fsg_common_set_sysfs); 2686 2687 static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n) 2688 { 2689 if (buffhds) { 2690 struct fsg_buffhd *bh = buffhds; 2691 while (n--) { 2692 kfree(bh->buf); 2693 ++bh; 2694 } 2695 kfree(buffhds); 2696 } 2697 } 2698 2699 int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n) 2700 { 2701 struct fsg_buffhd *bh, *buffhds; 2702 int i; 2703 2704 buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL); 2705 if (!buffhds) 2706 return -ENOMEM; 2707 2708 /* Data buffers cyclic list */ 2709 bh = buffhds; 2710 i = n; 2711 goto buffhds_first_it; 2712 do { 2713 
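/*
 * The first pass enters at buffhds_first_it and only allocates the data
 * buffer for head 0; every later pass first links the previously filled
 * head to the current one and advances, then allocates the current head's
 * buffer. The assignment after the loop points the last head back at the
 * first, closing the ring.
 */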
bh->next = bh + 1; 2714 ++bh; 2715 buffhds_first_it: 2716 bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL); 2717 if (unlikely(!bh->buf)) 2718 goto error_release; 2719 } while (--i); 2720 bh->next = buffhds; 2721 2722 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers); 2723 common->fsg_num_buffers = n; 2724 common->buffhds = buffhds; 2725 2726 return 0; 2727 2728 error_release: 2729 /* 2730 * "buf"s pointed to by heads after n - i are NULL 2731 * so releasing them won't hurt 2732 */ 2733 _fsg_common_free_buffers(buffhds, n); 2734 2735 return -ENOMEM; 2736 } 2737 EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers); 2738 2739 void fsg_common_remove_lun(struct fsg_lun *lun) 2740 { 2741 if (device_is_registered(&lun->dev)) 2742 device_unregister(&lun->dev); 2743 fsg_lun_close(lun); 2744 kfree(lun); 2745 } 2746 EXPORT_SYMBOL_GPL(fsg_common_remove_lun); 2747 2748 static void _fsg_common_remove_luns(struct fsg_common *common, int n) 2749 { 2750 int i; 2751 2752 for (i = 0; i < n; ++i) 2753 if (common->luns[i]) { 2754 fsg_common_remove_lun(common->luns[i]); 2755 common->luns[i] = NULL; 2756 } 2757 } 2758 2759 void fsg_common_remove_luns(struct fsg_common *common) 2760 { 2761 _fsg_common_remove_luns(common, ARRAY_SIZE(common->luns)); 2762 } 2763 EXPORT_SYMBOL_GPL(fsg_common_remove_luns); 2764 2765 void fsg_common_set_ops(struct fsg_common *common, 2766 const struct fsg_operations *ops) 2767 { 2768 common->ops = ops; 2769 } 2770 EXPORT_SYMBOL_GPL(fsg_common_set_ops); 2771 2772 void fsg_common_free_buffers(struct fsg_common *common) 2773 { 2774 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers); 2775 common->buffhds = NULL; 2776 } 2777 EXPORT_SYMBOL_GPL(fsg_common_free_buffers); 2778 2779 int fsg_common_set_cdev(struct fsg_common *common, 2780 struct usb_composite_dev *cdev, bool can_stall) 2781 { 2782 struct usb_string *us; 2783 2784 common->gadget = cdev->gadget; 2785 common->ep0 = cdev->gadget->ep0; 2786 common->ep0req = cdev->req; 2787 common->cdev = cdev; 2788 2789 us = usb_gstrings_attach(cdev, fsg_strings_array, 2790 ARRAY_SIZE(fsg_strings)); 2791 if (IS_ERR(us)) 2792 return PTR_ERR(us); 2793 2794 fsg_intf_desc.iInterface = us[FSG_STRING_INTERFACE].id; 2795 2796 /* 2797 * Some peripheral controllers are known not to be able to 2798 * halt bulk endpoints correctly. If one of them is present, 2799 * disable stalls. 2800 */ 2801 common->can_stall = can_stall && 2802 gadget_is_stall_supported(common->gadget); 2803 2804 return 0; 2805 } 2806 EXPORT_SYMBOL_GPL(fsg_common_set_cdev); 2807 2808 static struct attribute *fsg_lun_dev_attrs[] = { 2809 &dev_attr_ro.attr, 2810 &dev_attr_file.attr, 2811 &dev_attr_nofua.attr, 2812 NULL 2813 }; 2814 2815 static umode_t fsg_lun_dev_is_visible(struct kobject *kobj, 2816 struct attribute *attr, int idx) 2817 { 2818 struct device *dev = kobj_to_dev(kobj); 2819 struct fsg_lun *lun = fsg_lun_from_dev(dev); 2820 2821 if (attr == &dev_attr_ro.attr) 2822 return lun->cdrom ? S_IRUGO : (S_IWUSR | S_IRUGO); 2823 if (attr == &dev_attr_file.attr) 2824 return lun->removable ? 
(S_IWUSR | S_IRUGO) : S_IRUGO; 2825 return attr->mode; 2826 } 2827 2828 static const struct attribute_group fsg_lun_dev_group = { 2829 .attrs = fsg_lun_dev_attrs, 2830 .is_visible = fsg_lun_dev_is_visible, 2831 }; 2832 2833 static const struct attribute_group *fsg_lun_dev_groups[] = { 2834 &fsg_lun_dev_group, 2835 NULL 2836 }; 2837 2838 int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg, 2839 unsigned int id, const char *name, 2840 const char **name_pfx) 2841 { 2842 struct fsg_lun *lun; 2843 char *pathbuf, *p; 2844 int rc = -ENOMEM; 2845 2846 if (id >= ARRAY_SIZE(common->luns)) 2847 return -ENODEV; 2848 2849 if (common->luns[id]) 2850 return -EBUSY; 2851 2852 if (!cfg->filename && !cfg->removable) { 2853 pr_err("no file given for LUN%d\n", id); 2854 return -EINVAL; 2855 } 2856 2857 lun = kzalloc(sizeof(*lun), GFP_KERNEL); 2858 if (!lun) 2859 return -ENOMEM; 2860 2861 lun->name_pfx = name_pfx; 2862 2863 lun->cdrom = !!cfg->cdrom; 2864 lun->ro = cfg->cdrom || cfg->ro; 2865 lun->initially_ro = lun->ro; 2866 lun->removable = !!cfg->removable; 2867 2868 if (!common->sysfs) { 2869 /* we DON'T own the name!*/ 2870 lun->name = name; 2871 } else { 2872 lun->dev.release = fsg_lun_release; 2873 lun->dev.parent = &common->gadget->dev; 2874 lun->dev.groups = fsg_lun_dev_groups; 2875 dev_set_drvdata(&lun->dev, &common->filesem); 2876 dev_set_name(&lun->dev, "%s", name); 2877 lun->name = dev_name(&lun->dev); 2878 2879 rc = device_register(&lun->dev); 2880 if (rc) { 2881 pr_info("failed to register LUN%d: %d\n", id, rc); 2882 put_device(&lun->dev); 2883 goto error_sysfs; 2884 } 2885 } 2886 2887 common->luns[id] = lun; 2888 2889 if (cfg->filename) { 2890 rc = fsg_lun_open(lun, cfg->filename); 2891 if (rc) 2892 goto error_lun; 2893 } 2894 2895 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); 2896 p = "(no medium)"; 2897 if (fsg_lun_is_open(lun)) { 2898 p = "(error)"; 2899 if (pathbuf) { 2900 p = file_path(lun->filp, pathbuf, PATH_MAX); 2901 if (IS_ERR(p)) 2902 p = "(error)"; 2903 } 2904 } 2905 pr_info("LUN: %s%s%sfile: %s\n", 2906 lun->removable ? "removable " : "", 2907 lun->ro ? "read only " : "", 2908 lun->cdrom ? "CD-ROM " : "", 2909 p); 2910 kfree(pathbuf); 2911 2912 return 0; 2913 2914 error_lun: 2915 if (device_is_registered(&lun->dev)) 2916 device_unregister(&lun->dev); 2917 fsg_lun_close(lun); 2918 common->luns[id] = NULL; 2919 error_sysfs: 2920 kfree(lun); 2921 return rc; 2922 } 2923 EXPORT_SYMBOL_GPL(fsg_common_create_lun); 2924 2925 int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg) 2926 { 2927 char buf[8]; /* enough for 100000000 different numbers, decimal */ 2928 int i, rc; 2929 2930 fsg_common_remove_luns(common); 2931 2932 for (i = 0; i < cfg->nluns; ++i) { 2933 snprintf(buf, sizeof(buf), "lun%d", i); 2934 rc = fsg_common_create_lun(common, &cfg->luns[i], i, buf, NULL); 2935 if (rc) 2936 goto fail; 2937 } 2938 2939 pr_info("Number of LUNs=%d\n", cfg->nluns); 2940 2941 return 0; 2942 2943 fail: 2944 _fsg_common_remove_luns(common, i); 2945 return rc; 2946 } 2947 EXPORT_SYMBOL_GPL(fsg_common_create_luns); 2948 2949 void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn, 2950 const char *pn) 2951 { 2952 int i; 2953 2954 /* Prepare inquiryString */ 2955 i = get_default_bcdDevice(); 2956 snprintf(common->inquiry_string, sizeof(common->inquiry_string), 2957 "%-8s%-16s%04x", vn ?: "Linux", 2958 /* Assume product name dependent on the first LUN */ 2959 pn ?: ((*common->luns)->cdrom 2960 ? 
"File-CD Gadget" 2961 : "File-Stor Gadget"), 2962 i); 2963 } 2964 EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string); 2965 2966 static void fsg_common_release(struct kref *ref) 2967 { 2968 struct fsg_common *common = container_of(ref, struct fsg_common, ref); 2969 int i; 2970 2971 /* If the thread isn't already dead, tell it to exit now */ 2972 if (common->state != FSG_STATE_TERMINATED) { 2973 raise_exception(common, FSG_STATE_EXIT); 2974 wait_for_completion(&common->thread_notifier); 2975 common->thread_task = NULL; 2976 } 2977 2978 for (i = 0; i < ARRAY_SIZE(common->luns); ++i) { 2979 struct fsg_lun *lun = common->luns[i]; 2980 if (!lun) 2981 continue; 2982 fsg_lun_close(lun); 2983 if (device_is_registered(&lun->dev)) 2984 device_unregister(&lun->dev); 2985 kfree(lun); 2986 } 2987 2988 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers); 2989 if (common->free_storage_on_release) 2990 kfree(common); 2991 } 2992 2993 2994 /*-------------------------------------------------------------------------*/ 2995 2996 static int fsg_bind(struct usb_configuration *c, struct usb_function *f) 2997 { 2998 struct fsg_dev *fsg = fsg_from_func(f); 2999 struct fsg_common *common = fsg->common; 3000 struct usb_gadget *gadget = c->cdev->gadget; 3001 int i; 3002 struct usb_ep *ep; 3003 unsigned max_burst; 3004 int ret; 3005 struct fsg_opts *opts; 3006 3007 /* Don't allow to bind if we don't have at least one LUN */ 3008 ret = _fsg_common_get_max_lun(common); 3009 if (ret < 0) { 3010 pr_err("There should be at least one LUN.\n"); 3011 return -EINVAL; 3012 } 3013 3014 opts = fsg_opts_from_func_inst(f->fi); 3015 if (!opts->no_configfs) { 3016 ret = fsg_common_set_cdev(fsg->common, c->cdev, 3017 fsg->common->can_stall); 3018 if (ret) 3019 return ret; 3020 fsg_common_set_inquiry_string(fsg->common, NULL, NULL); 3021 } 3022 3023 if (!common->thread_task) { 3024 common->state = FSG_STATE_IDLE; 3025 common->thread_task = 3026 kthread_create(fsg_main_thread, common, "file-storage"); 3027 if (IS_ERR(common->thread_task)) { 3028 int ret = PTR_ERR(common->thread_task); 3029 common->thread_task = NULL; 3030 common->state = FSG_STATE_TERMINATED; 3031 return ret; 3032 } 3033 DBG(common, "I/O thread pid: %d\n", 3034 task_pid_nr(common->thread_task)); 3035 wake_up_process(common->thread_task); 3036 } 3037 3038 fsg->gadget = gadget; 3039 3040 /* New interface */ 3041 i = usb_interface_id(c, f); 3042 if (i < 0) 3043 goto fail; 3044 fsg_intf_desc.bInterfaceNumber = i; 3045 fsg->interface_number = i; 3046 3047 /* Find all the endpoints we will use */ 3048 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc); 3049 if (!ep) 3050 goto autoconf_fail; 3051 fsg->bulk_in = ep; 3052 3053 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc); 3054 if (!ep) 3055 goto autoconf_fail; 3056 fsg->bulk_out = ep; 3057 3058 /* Assume endpoint addresses are the same for both speeds */ 3059 fsg_hs_bulk_in_desc.bEndpointAddress = 3060 fsg_fs_bulk_in_desc.bEndpointAddress; 3061 fsg_hs_bulk_out_desc.bEndpointAddress = 3062 fsg_fs_bulk_out_desc.bEndpointAddress; 3063 3064 /* Calculate bMaxBurst, we know packet size is 1024 */ 3065 max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15); 3066 3067 fsg_ss_bulk_in_desc.bEndpointAddress = 3068 fsg_fs_bulk_in_desc.bEndpointAddress; 3069 fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst; 3070 3071 fsg_ss_bulk_out_desc.bEndpointAddress = 3072 fsg_fs_bulk_out_desc.bEndpointAddress; 3073 fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst; 3074 3075 ret = usb_assign_descriptors(f, fsg_fs_function, 
fsg_hs_function, 3076 fsg_ss_function, fsg_ss_function); 3077 if (ret) 3078 goto autoconf_fail; 3079 3080 return 0; 3081 3082 autoconf_fail: 3083 ERROR(fsg, "unable to autoconfigure all endpoints\n"); 3084 i = -ENOTSUPP; 3085 fail: 3086 /* terminate the thread */ 3087 if (fsg->common->state != FSG_STATE_TERMINATED) { 3088 raise_exception(fsg->common, FSG_STATE_EXIT); 3089 wait_for_completion(&fsg->common->thread_notifier); 3090 } 3091 return i; 3092 } 3093 3094 /****************************** ALLOCATE FUNCTION *************************/ 3095 3096 static void fsg_unbind(struct usb_configuration *c, struct usb_function *f) 3097 { 3098 struct fsg_dev *fsg = fsg_from_func(f); 3099 struct fsg_common *common = fsg->common; 3100 3101 DBG(fsg, "unbind\n"); 3102 if (fsg->common->fsg == fsg) { 3103 fsg->common->new_fsg = NULL; 3104 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 3105 /* FIXME: make interruptible or killable somehow? */ 3106 wait_event(common->fsg_wait, common->fsg != fsg); 3107 } 3108 3109 usb_free_all_descriptors(&fsg->function); 3110 } 3111 3112 static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item) 3113 { 3114 return container_of(to_config_group(item), struct fsg_lun_opts, group); 3115 } 3116 3117 static inline struct fsg_opts *to_fsg_opts(struct config_item *item) 3118 { 3119 return container_of(to_config_group(item), struct fsg_opts, 3120 func_inst.group); 3121 } 3122 3123 static void fsg_lun_attr_release(struct config_item *item) 3124 { 3125 struct fsg_lun_opts *lun_opts; 3126 3127 lun_opts = to_fsg_lun_opts(item); 3128 kfree(lun_opts); 3129 } 3130 3131 static struct configfs_item_operations fsg_lun_item_ops = { 3132 .release = fsg_lun_attr_release, 3133 }; 3134 3135 static ssize_t fsg_lun_opts_file_show(struct config_item *item, char *page) 3136 { 3137 struct fsg_lun_opts *opts = to_fsg_lun_opts(item); 3138 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent); 3139 3140 return fsg_show_file(opts->lun, &fsg_opts->common->filesem, page); 3141 } 3142 3143 static ssize_t fsg_lun_opts_file_store(struct config_item *item, 3144 const char *page, size_t len) 3145 { 3146 struct fsg_lun_opts *opts = to_fsg_lun_opts(item); 3147 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent); 3148 3149 return fsg_store_file(opts->lun, &fsg_opts->common->filesem, page, len); 3150 } 3151 3152 CONFIGFS_ATTR(fsg_lun_opts_, file); 3153 3154 static ssize_t fsg_lun_opts_ro_show(struct config_item *item, char *page) 3155 { 3156 return fsg_show_ro(to_fsg_lun_opts(item)->lun, page); 3157 } 3158 3159 static ssize_t fsg_lun_opts_ro_store(struct config_item *item, 3160 const char *page, size_t len) 3161 { 3162 struct fsg_lun_opts *opts = to_fsg_lun_opts(item); 3163 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent); 3164 3165 return fsg_store_ro(opts->lun, &fsg_opts->common->filesem, page, len); 3166 } 3167 3168 CONFIGFS_ATTR(fsg_lun_opts_, ro); 3169 3170 static ssize_t fsg_lun_opts_removable_show(struct config_item *item, 3171 char *page) 3172 { 3173 return fsg_show_removable(to_fsg_lun_opts(item)->lun, page); 3174 } 3175 3176 static ssize_t fsg_lun_opts_removable_store(struct config_item *item, 3177 const char *page, size_t len) 3178 { 3179 return fsg_store_removable(to_fsg_lun_opts(item)->lun, page, len); 3180 } 3181 3182 CONFIGFS_ATTR(fsg_lun_opts_, removable); 3183 3184 static ssize_t fsg_lun_opts_cdrom_show(struct config_item *item, char *page) 3185 { 3186 return fsg_show_cdrom(to_fsg_lun_opts(item)->lun, page); 3187 } 
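/*
 * Illustrative configfs usage for the lun.N attributes defined in this
 * block (the directory layout is provided by the gadget configfs framework;
 * the instance and LUN names below are examples, not requirements):
 *
 *	mkdir functions/mass_storage.usb0/lun.1
 *	echo /root/backing.img > functions/mass_storage.usb0/lun.1/file
 *	echo 1 > functions/mass_storage.usb0/lun.1/removable
 *	cat functions/mass_storage.usb0/lun.1/ro
 */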
3188 3189 static ssize_t fsg_lun_opts_cdrom_store(struct config_item *item, 3190 const char *page, size_t len) 3191 { 3192 struct fsg_lun_opts *opts = to_fsg_lun_opts(item); 3193 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent); 3194 3195 return fsg_store_cdrom(opts->lun, &fsg_opts->common->filesem, page, 3196 len); 3197 } 3198 3199 CONFIGFS_ATTR(fsg_lun_opts_, cdrom); 3200 3201 static ssize_t fsg_lun_opts_nofua_show(struct config_item *item, char *page) 3202 { 3203 return fsg_show_nofua(to_fsg_lun_opts(item)->lun, page); 3204 } 3205 3206 static ssize_t fsg_lun_opts_nofua_store(struct config_item *item, 3207 const char *page, size_t len) 3208 { 3209 return fsg_store_nofua(to_fsg_lun_opts(item)->lun, page, len); 3210 } 3211 3212 CONFIGFS_ATTR(fsg_lun_opts_, nofua); 3213 3214 static ssize_t fsg_lun_opts_inquiry_string_show(struct config_item *item, 3215 char *page) 3216 { 3217 return fsg_show_inquiry_string(to_fsg_lun_opts(item)->lun, page); 3218 } 3219 3220 static ssize_t fsg_lun_opts_inquiry_string_store(struct config_item *item, 3221 const char *page, size_t len) 3222 { 3223 return fsg_store_inquiry_string(to_fsg_lun_opts(item)->lun, page, len); 3224 } 3225 3226 CONFIGFS_ATTR(fsg_lun_opts_, inquiry_string); 3227 3228 static struct configfs_attribute *fsg_lun_attrs[] = { 3229 &fsg_lun_opts_attr_file, 3230 &fsg_lun_opts_attr_ro, 3231 &fsg_lun_opts_attr_removable, 3232 &fsg_lun_opts_attr_cdrom, 3233 &fsg_lun_opts_attr_nofua, 3234 &fsg_lun_opts_attr_inquiry_string, 3235 NULL, 3236 }; 3237 3238 static struct config_item_type fsg_lun_type = { 3239 .ct_item_ops = &fsg_lun_item_ops, 3240 .ct_attrs = fsg_lun_attrs, 3241 .ct_owner = THIS_MODULE, 3242 }; 3243 3244 static struct config_group *fsg_lun_make(struct config_group *group, 3245 const char *name) 3246 { 3247 struct fsg_lun_opts *opts; 3248 struct fsg_opts *fsg_opts; 3249 struct fsg_lun_config config; 3250 char *num_str; 3251 u8 num; 3252 int ret; 3253 3254 num_str = strchr(name, '.'); 3255 if (!num_str) { 3256 pr_err("Unable to locate . 
in LUN.NUMBER\n"); 3257 return ERR_PTR(-EINVAL); 3258 } 3259 num_str++; 3260 3261 ret = kstrtou8(num_str, 0, &num); 3262 if (ret) 3263 return ERR_PTR(ret); 3264 3265 fsg_opts = to_fsg_opts(&group->cg_item); 3266 if (num >= FSG_MAX_LUNS) 3267 return ERR_PTR(-ERANGE); 3268 3269 mutex_lock(&fsg_opts->lock); 3270 if (fsg_opts->refcnt || fsg_opts->common->luns[num]) { 3271 ret = -EBUSY; 3272 goto out; 3273 } 3274 3275 opts = kzalloc(sizeof(*opts), GFP_KERNEL); 3276 if (!opts) { 3277 ret = -ENOMEM; 3278 goto out; 3279 } 3280 3281 memset(&config, 0, sizeof(config)); 3282 config.removable = true; 3283 3284 ret = fsg_common_create_lun(fsg_opts->common, &config, num, name, 3285 (const char **)&group->cg_item.ci_name); 3286 if (ret) { 3287 kfree(opts); 3288 goto out; 3289 } 3290 opts->lun = fsg_opts->common->luns[num]; 3291 opts->lun_id = num; 3292 mutex_unlock(&fsg_opts->lock); 3293 3294 config_group_init_type_name(&opts->group, name, &fsg_lun_type); 3295 3296 return &opts->group; 3297 out: 3298 mutex_unlock(&fsg_opts->lock); 3299 return ERR_PTR(ret); 3300 } 3301 3302 static void fsg_lun_drop(struct config_group *group, struct config_item *item) 3303 { 3304 struct fsg_lun_opts *lun_opts; 3305 struct fsg_opts *fsg_opts; 3306 3307 lun_opts = to_fsg_lun_opts(item); 3308 fsg_opts = to_fsg_opts(&group->cg_item); 3309 3310 mutex_lock(&fsg_opts->lock); 3311 if (fsg_opts->refcnt) { 3312 struct config_item *gadget; 3313 3314 gadget = group->cg_item.ci_parent->ci_parent; 3315 unregister_gadget_item(gadget); 3316 } 3317 3318 fsg_common_remove_lun(lun_opts->lun); 3319 fsg_opts->common->luns[lun_opts->lun_id] = NULL; 3320 lun_opts->lun_id = 0; 3321 mutex_unlock(&fsg_opts->lock); 3322 3323 config_item_put(item); 3324 } 3325 3326 static void fsg_attr_release(struct config_item *item) 3327 { 3328 struct fsg_opts *opts = to_fsg_opts(item); 3329 3330 usb_put_function_instance(&opts->func_inst); 3331 } 3332 3333 static struct configfs_item_operations fsg_item_ops = { 3334 .release = fsg_attr_release, 3335 }; 3336 3337 static ssize_t fsg_opts_stall_show(struct config_item *item, char *page) 3338 { 3339 struct fsg_opts *opts = to_fsg_opts(item); 3340 int result; 3341 3342 mutex_lock(&opts->lock); 3343 result = sprintf(page, "%d", opts->common->can_stall); 3344 mutex_unlock(&opts->lock); 3345 3346 return result; 3347 } 3348 3349 static ssize_t fsg_opts_stall_store(struct config_item *item, const char *page, 3350 size_t len) 3351 { 3352 struct fsg_opts *opts = to_fsg_opts(item); 3353 int ret; 3354 bool stall; 3355 3356 mutex_lock(&opts->lock); 3357 3358 if (opts->refcnt) { 3359 mutex_unlock(&opts->lock); 3360 return -EBUSY; 3361 } 3362 3363 ret = strtobool(page, &stall); 3364 if (!ret) { 3365 opts->common->can_stall = stall; 3366 ret = len; 3367 } 3368 3369 mutex_unlock(&opts->lock); 3370 3371 return ret; 3372 } 3373 3374 CONFIGFS_ATTR(fsg_opts_, stall); 3375 3376 #ifdef CONFIG_USB_GADGET_DEBUG_FILES 3377 static ssize_t fsg_opts_num_buffers_show(struct config_item *item, char *page) 3378 { 3379 struct fsg_opts *opts = to_fsg_opts(item); 3380 int result; 3381 3382 mutex_lock(&opts->lock); 3383 result = sprintf(page, "%d", opts->common->fsg_num_buffers); 3384 mutex_unlock(&opts->lock); 3385 3386 return result; 3387 } 3388 3389 static ssize_t fsg_opts_num_buffers_store(struct config_item *item, 3390 const char *page, size_t len) 3391 { 3392 struct fsg_opts *opts = to_fsg_opts(item); 3393 int ret; 3394 u8 num; 3395 3396 mutex_lock(&opts->lock); 3397 if (opts->refcnt) { 3398 ret = -EBUSY; 3399 goto end; 3400 } 3401 ret = 
kstrtou8(page, 0, &num); 3402 if (ret) 3403 goto end; 3404 3405 fsg_common_set_num_buffers(opts->common, num); 3406 ret = len; 3407 3408 end: 3409 mutex_unlock(&opts->lock); 3410 return ret; 3411 } 3412 3413 CONFIGFS_ATTR(fsg_opts_, num_buffers); 3414 #endif 3415 3416 static struct configfs_attribute *fsg_attrs[] = { 3417 &fsg_opts_attr_stall, 3418 #ifdef CONFIG_USB_GADGET_DEBUG_FILES 3419 &fsg_opts_attr_num_buffers, 3420 #endif 3421 NULL, 3422 }; 3423 3424 static struct configfs_group_operations fsg_group_ops = { 3425 .make_group = fsg_lun_make, 3426 .drop_item = fsg_lun_drop, 3427 }; 3428 3429 static struct config_item_type fsg_func_type = { 3430 .ct_item_ops = &fsg_item_ops, 3431 .ct_group_ops = &fsg_group_ops, 3432 .ct_attrs = fsg_attrs, 3433 .ct_owner = THIS_MODULE, 3434 }; 3435 3436 static void fsg_free_inst(struct usb_function_instance *fi) 3437 { 3438 struct fsg_opts *opts; 3439 3440 opts = fsg_opts_from_func_inst(fi); 3441 fsg_common_put(opts->common); 3442 kfree(opts); 3443 } 3444 3445 static struct usb_function_instance *fsg_alloc_inst(void) 3446 { 3447 struct fsg_opts *opts; 3448 struct fsg_lun_config config; 3449 int rc; 3450 3451 opts = kzalloc(sizeof(*opts), GFP_KERNEL); 3452 if (!opts) 3453 return ERR_PTR(-ENOMEM); 3454 mutex_init(&opts->lock); 3455 opts->func_inst.free_func_inst = fsg_free_inst; 3456 opts->common = fsg_common_setup(opts->common); 3457 if (IS_ERR(opts->common)) { 3458 rc = PTR_ERR(opts->common); 3459 goto release_opts; 3460 } 3461 3462 rc = fsg_common_set_num_buffers(opts->common, 3463 CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS); 3464 if (rc) 3465 goto release_opts; 3466 3467 pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n"); 3468 3469 memset(&config, 0, sizeof(config)); 3470 config.removable = true; 3471 rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0", 3472 (const char **)&opts->func_inst.group.cg_item.ci_name); 3473 if (rc) 3474 goto release_buffers; 3475 3476 opts->lun0.lun = opts->common->luns[0]; 3477 opts->lun0.lun_id = 0; 3478 3479 config_group_init_type_name(&opts->func_inst.group, "", &fsg_func_type); 3480 3481 config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type); 3482 configfs_add_default_group(&opts->lun0.group, &opts->func_inst.group); 3483 3484 return &opts->func_inst; 3485 3486 release_buffers: 3487 fsg_common_free_buffers(opts->common); 3488 release_opts: 3489 kfree(opts); 3490 return ERR_PTR(rc); 3491 } 3492 3493 static void fsg_free(struct usb_function *f) 3494 { 3495 struct fsg_dev *fsg; 3496 struct fsg_opts *opts; 3497 3498 fsg = container_of(f, struct fsg_dev, function); 3499 opts = container_of(f->fi, struct fsg_opts, func_inst); 3500 3501 mutex_lock(&opts->lock); 3502 opts->refcnt--; 3503 mutex_unlock(&opts->lock); 3504 3505 kfree(fsg); 3506 } 3507 3508 static struct usb_function *fsg_alloc(struct usb_function_instance *fi) 3509 { 3510 struct fsg_opts *opts = fsg_opts_from_func_inst(fi); 3511 struct fsg_common *common = opts->common; 3512 struct fsg_dev *fsg; 3513 3514 fsg = kzalloc(sizeof(*fsg), GFP_KERNEL); 3515 if (unlikely(!fsg)) 3516 return ERR_PTR(-ENOMEM); 3517 3518 mutex_lock(&opts->lock); 3519 opts->refcnt++; 3520 mutex_unlock(&opts->lock); 3521 3522 fsg->function.name = FSG_DRIVER_DESC; 3523 fsg->function.bind = fsg_bind; 3524 fsg->function.unbind = fsg_unbind; 3525 fsg->function.setup = fsg_setup; 3526 fsg->function.set_alt = fsg_set_alt; 3527 fsg->function.disable = fsg_disable; 3528 fsg->function.free_func = fsg_free; 3529 3530 fsg->common = common; 3531 3532 return &fsg->function; 
3533 } 3534 3535 DECLARE_USB_FUNCTION_INIT(mass_storage, fsg_alloc_inst, fsg_alloc); 3536 MODULE_LICENSE("GPL"); 3537 MODULE_AUTHOR("Michal Nazarewicz"); 3538 3539 /************************* Module parameters *************************/ 3540 3541 3542 void fsg_config_from_params(struct fsg_config *cfg, 3543 const struct fsg_module_parameters *params, 3544 unsigned int fsg_num_buffers) 3545 { 3546 struct fsg_lun_config *lun; 3547 unsigned i; 3548 3549 /* Configure LUNs */ 3550 cfg->nluns = 3551 min(params->luns ?: (params->file_count ?: 1u), 3552 (unsigned)FSG_MAX_LUNS); 3553 for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) { 3554 lun->ro = !!params->ro[i]; 3555 lun->cdrom = !!params->cdrom[i]; 3556 lun->removable = !!params->removable[i]; 3557 lun->filename = 3558 params->file_count > i && params->file[i][0] 3559 ? params->file[i] 3560 : NULL; 3561 } 3562 3563 /* Let MSF use defaults */ 3564 cfg->vendor_name = NULL; 3565 cfg->product_name = NULL; 3566 3567 cfg->ops = NULL; 3568 cfg->private_data = NULL; 3569 3570 /* Finalise */ 3571 cfg->can_stall = params->stall; 3572 cfg->fsg_num_buffers = fsg_num_buffers; 3573 } 3574 EXPORT_SYMBOL_GPL(fsg_config_from_params); 3575
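/*
 * Usage sketch (not part of this file; the surrounding names are modeled on
 * the legacy mass_storage gadget and should be treated as assumptions): a
 * non-configfs caller typically exposes a struct fsg_module_parameters via
 * the FSG_MODULE_PARAMETERS() macro and converts it once while binding:
 *
 *	static struct fsg_module_parameters mod_data = { .stall = 1 };
 *	FSG_MODULE_PARAMETERS(, mod_data);
 *	...
 *	struct fsg_config config;
 *	fsg_config_from_params(&config, &mod_data, fsg_num_buffers);
 *
 * where fsg_num_buffers stands for whatever buffer count the caller uses
 * (e.g. CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS).
 */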