// SPDX-License-Identifier: GPL-2.0-only
/*
 *	sd.c Copyright (C) 1992 Drew Eckhardt
 *	     Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 *	Linux scsi disk driver
 *		Initial versions: Drew Eckhardt
 *		Subsequent revisions: Eric Youngdale
 *	Modification history:
 *	 - Drew Eckhardt <drew@colorado.edu> original
 *	 - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
 *	   outstanding request, and other enhancements.
 *	   Support loadable low-level scsi drivers.
 *	 - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
 *	   eight major numbers.
 *	 - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
 *	 - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
 *	   sd_init and cleanups.
 *	 - Alex Davis <letmein@erols.com> Fix problem where partition info
 *	   not being read in sd_open. Fix problem where removable media
 *	   could be ejected after sd_open.
 *	 - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
 *	 - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
 *	   <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
 *	   Support 32k/1M disks.
 *
 *	Logging policy (needs CONFIG_SCSI_LOGGING defined):
 *	 - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
 *	 - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
 *	 - entering sd_ioctl: SCSI_LOG_IOCTL level 1
 *	 - entering other commands: SCSI_LOG_HLQUEUE level 3
 *	Note: when the logging level is set by the user, it must be greater
 *	than the level indicated above to trigger output.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/slab.h>
#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
#include <linux/pr.h>
#include <linux/t10-pi.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>

#include "sd.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

MODULE_AUTHOR("Eric Youngdale");
MODULE_DESCRIPTION("SCSI disk (sd) driver");
MODULE_LICENSE("GPL");

MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
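/*
 * Module aliases for the SCSI device types this driver handles: regular
 * disks, magneto-optical devices, reduced block command devices and
 * host-managed zoned block devices.
 */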
MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);

#define SD_MINORS	16

static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int  sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static int  sd_probe(struct device *);
static int  sd_remove(struct device *);
static void sd_shutdown(struct device *);
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
static int sd_resume_system(struct device *);
static int sd_resume_runtime(struct device *);
static void sd_rescan(struct device *);
static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
static int sd_done(struct scsi_cmnd *);
static void sd_eh_reset(struct scsi_cmnd *);
static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);

static DEFINE_IDA(sd_index_ida);

/* This semaphore is used to mediate the 0->1 reference get in the
 * face of object destruction (i.e. we can't allow a get on an
 * object after last put) */
static DEFINE_MUTEX(sd_ref_mutex);

static struct kmem_cache *sd_cdb_cache;
static mempool_t *sd_cdb_pool;
static mempool_t *sd_page_pool;
static struct lock_class_key sd_bio_compl_lkclass;

static const char *sd_cache_types[] = {
	"write through", "none", "write back",
	"write back, no read (daft)"
};

static void sd_set_flush_flag(struct scsi_disk *sdkp)
{
	bool wc = false, fua = false;

	if (sdkp->WCE) {
		wc = true;
		if (sdkp->DPOFUA)
			fua = true;
	}

	blk_queue_write_cache(sdkp->disk->queue, wc, fua);
}

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	int ct, rcd, wce, sp;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	char buffer[64];
	char *buffer_data;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	static const char temp[] = "temporary ";
	int len;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		/* no cache control on RBC devices; theoretically they
		 * can do it, but there's probably so many exceptions
		 * it's not worth the risk */
		return -EINVAL;

	if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
		buf += sizeof(temp) - 1;
		sdkp->cache_override = 1;
	} else {
		sdkp->cache_override = 0;
	}

	ct = sysfs_match_string(sd_cache_types, buf);
	if (ct < 0)
		return -EINVAL;

	rcd = ct & 0x01 ? 1 : 0;
	wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;

	if (sdkp->cache_override) {
		sdkp->WCE = wce;
		sdkp->RCD = rcd;
		sd_set_flush_flag(sdkp);
		return count;
	}

	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
			    sdkp->max_retries, &data, NULL))
		return -EINVAL;
	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
		    data.block_descriptor_length);
	buffer_data = buffer + data.header_length +
		data.block_descriptor_length;
	buffer_data[2] &= ~0x05;
	buffer_data[2] |= wce << 2 | rcd;
	sp = buffer_data[0] & 0x80 ? 1 : 0;
	buffer_data[0] &= ~0x80;

	/*
	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
	 * received mode parameter buffer before doing MODE SELECT.
	 */
	data.device_specific = 0;

	if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
			     sdkp->max_retries, &data, &sshdr)) {
		if (scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);
		return -EINVAL;
	}
	sd_revalidate_disk(sdkp->disk);
	return count;
}

static ssize_t
manage_start_stop_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	return sprintf(buf, "%u\n", sdp->manage_start_stop);
}

static ssize_t
manage_start_stop_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	bool v;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->manage_start_stop = v;

	return count;
}
static DEVICE_ATTR_RW(manage_start_stop);

static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->device->allow_restart);
}

static ssize_t
allow_restart_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	bool v;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->allow_restart = v;

	return count;
}
static DEVICE_ATTR_RW(allow_restart);

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int ct = sdkp->RCD + 2*sdkp->WCE;

	return sprintf(buf, "%s\n", sd_cache_types[ct]);
}
static DEVICE_ATTR_RW(cache_type);

static ssize_t
FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->DPOFUA);
}
static DEVICE_ATTR_RO(FUA);

static ssize_t
protection_type_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->protection_type);
}

static ssize_t
protection_type_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	unsigned int val;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &val);

	if (err)
		return err;

	if (val <= T10_PI_TYPE3_PROTECTION)
		sdkp->protection_type = val;

	return count;
}
static DEVICE_ATTR_RW(protection_type);

static ssize_t
protection_mode_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);

	if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
		dif = 0;
		dix = 1;
	}

	if (!dif && !dix)
		return sprintf(buf, "none\n");

	return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
}
static DEVICE_ATTR_RO(protection_mode);

static ssize_t
app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->ATO);
}
static DEVICE_ATTR_RO(app_tag_own);

static ssize_t
thin_provisioning_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->lbpme);
}
static DEVICE_ATTR_RO(thin_provisioning);

/* sysfs_match_string() requires dense arrays */
static const char *lbp_mode[] = {
	[SD_LBP_FULL]		= "full",
	[SD_LBP_UNMAP]		= "unmap",
	[SD_LBP_WS16]		= "writesame_16",
	[SD_LBP_WS10]		= "writesame_10",
	[SD_LBP_ZERO]		= "writesame_zero",
	[SD_LBP_DISABLE]	= "disabled",
};

static ssize_t
provisioning_mode_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
}

static ssize_t
provisioning_mode_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	int mode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sd_is_zoned(sdkp)) {
		sd_config_discard(sdkp, SD_LBP_DISABLE);
		return count;
	}

	if (sdp->type != TYPE_DISK)
		return -EINVAL;

	mode = sysfs_match_string(lbp_mode, buf);
	if (mode < 0)
		return -EINVAL;

	sd_config_discard(sdkp, mode);

	return count;
}
static DEVICE_ATTR_RW(provisioning_mode);

/* sysfs_match_string() requires dense arrays */
static const char *zeroing_mode[] = {
	[SD_ZERO_WRITE]		= "write",
	[SD_ZERO_WS]		= "writesame",
	[SD_ZERO_WS16_UNMAP]	= "writesame_16_unmap",
	[SD_ZERO_WS10_UNMAP]	= "writesame_10_unmap",
};

static ssize_t
zeroing_mode_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
}

static ssize_t
zeroing_mode_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int mode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	mode = sysfs_match_string(zeroing_mode, buf);
	if (mode < 0)
		return -EINVAL;
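
	/* The selected mode is consulted by sd_setup_write_zeroes_cmnd(). */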

	sdkp->zeroing_mode = mode;

	return count;
}
static DEVICE_ATTR_RW(zeroing_mode);

static ssize_t
max_medium_access_timeouts_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
}

static ssize_t
max_medium_access_timeouts_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);

	return err ? err : count;
}
static DEVICE_ATTR_RW(max_medium_access_timeouts);

static ssize_t
max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
}

static ssize_t
max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned long max;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	err = kstrtoul(buf, 10, &max);

	if (err)
		return err;

	if (max == 0)
		sdp->no_write_same = 1;
	else if (max <= SD_MAX_WS16_BLOCKS) {
		sdp->no_write_same = 0;
		sdkp->max_ws_blocks = max;
	}

	sd_config_write_same(sdkp);

	return count;
}
static DEVICE_ATTR_RW(max_write_same_blocks);

static ssize_t
zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	if (sdkp->device->type == TYPE_ZBC)
		return sprintf(buf, "host-managed\n");
	if (sdkp->zoned == 1)
		return sprintf(buf, "host-aware\n");
	if (sdkp->zoned == 2)
		return sprintf(buf, "drive-managed\n");
	return sprintf(buf, "none\n");
}
static DEVICE_ATTR_RO(zoned_cap);

static ssize_t
max_retries_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdev = sdkp->device;
	int retries, err;

	err = kstrtoint(buf, 10, &retries);
	if (err)
		return err;

	if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) {
		sdkp->max_retries = retries;
		return count;
	}

	sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
		    SD_MAX_RETRIES);
	return -EINVAL;
}

static ssize_t
max_retries_show(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%d\n", sdkp->max_retries);
}

static DEVICE_ATTR_RW(max_retries);

static struct attribute *sd_disk_attrs[] = {
	&dev_attr_cache_type.attr,
	&dev_attr_FUA.attr,
	&dev_attr_allow_restart.attr,
	&dev_attr_manage_start_stop.attr,
	&dev_attr_protection_type.attr,
	&dev_attr_protection_mode.attr,
	&dev_attr_app_tag_own.attr,
	&dev_attr_thin_provisioning.attr,
	&dev_attr_provisioning_mode.attr,
	&dev_attr_zeroing_mode.attr,
	&dev_attr_max_write_same_blocks.attr,
	&dev_attr_max_medium_access_timeouts.attr,
	&dev_attr_zoned_cap.attr,
	&dev_attr_max_retries.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sd_disk);

static struct class sd_disk_class = {
	.name		= "scsi_disk",
	.owner		= THIS_MODULE,
	.dev_release	= scsi_disk_release,
	.dev_groups	= sd_disk_groups,
};

static const struct dev_pm_ops sd_pm_ops = {
	.suspend		= sd_suspend_system,
	.resume			= sd_resume_system,
	.poweroff		= sd_suspend_system,
	.restore		= sd_resume_system,
	.runtime_suspend	= sd_suspend_runtime,
	.runtime_resume		= sd_resume_runtime,
};

static struct scsi_driver sd_template = {
	.gendrv = {
		.name		= "sd",
		.owner		= THIS_MODULE,
		.probe		= sd_probe,
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.remove		= sd_remove,
		.shutdown	= sd_shutdown,
		.pm		= &sd_pm_ops,
	},
	.rescan			= sd_rescan,
	.init_command		= sd_init_command,
	.uninit_command		= sd_uninit_command,
	.done			= sd_done,
	.eh_action		= sd_eh_action,
	.eh_reset		= sd_eh_reset,
};

/*
 * Don't request a new module, as that could deadlock in multipath
 * environment.
 */
static void sd_default_probe(dev_t devt)
{
}

/*
 * Device no to disk mapping:
 *
 *       major         disc2     disc  p1
 *   |............|.............|....|....| <- dev_t
 *    31        20 19          8 7  4 3  0
 *
 * Inside a major, we have 16k disks, however mapped non-
 * contiguously. The first 16 disks are for major0, the next
 * ones with major1, ... Disk 256 is for major0 again, disk 272
 * for major1, ...
 * As we stay compatible with our numbering scheme, we can reuse
 * the well-known SCSI majors 8, 65--71, 136--143.
 */
static int sd_major(int major_idx)
{
	switch (major_idx) {
	case 0:
		return SCSI_DISK0_MAJOR;
	case 1 ... 7:
		return SCSI_DISK1_MAJOR + major_idx - 1;
	case 8 ... 15:
		return SCSI_DISK8_MAJOR + major_idx - 8;
	default:
		BUG();
		return 0;	/* shut up gcc */
	}
}

static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
{
	struct scsi_disk *sdkp = NULL;

	mutex_lock(&sd_ref_mutex);

	if (disk->private_data) {
		sdkp = scsi_disk(disk);
		if (scsi_device_get(sdkp->device) == 0)
			get_device(&sdkp->dev);
		else
			sdkp = NULL;
	}
	mutex_unlock(&sd_ref_mutex);
	return sdkp;
}

static void scsi_disk_put(struct scsi_disk *sdkp)
{
	struct scsi_device *sdev = sdkp->device;

	mutex_lock(&sd_ref_mutex);
	put_device(&sdkp->dev);
	scsi_device_put(sdev);
	mutex_unlock(&sd_ref_mutex);
}

#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
		size_t len, bool send)
{
	struct scsi_disk *sdkp = data;
	struct scsi_device *sdev = sdkp->device;
	u8 cdb[12] = { 0, };
	int ret;

	cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
	cdb[1] = secp;
	put_unaligned_be16(spsp, &cdb[2]);
	put_unaligned_be32(len, &cdb[6]);

	ret = scsi_execute(sdev, cdb, send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
		buffer, len, NULL, NULL, SD_TIMEOUT, sdkp->max_retries, 0,
		RQF_PM, NULL);
	return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */

/*
 * Look up the DIX operation based on whether the command is read or
 * write and whether dix and dif are enabled.
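 *
 * For example, a write with dif set but dix clear maps to
 * SCSI_PROT_WRITE_INSERT, while a read with both dix and dif set maps to
 * SCSI_PROT_READ_PASS (see the lookup table below).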
 */
static unsigned int sd_prot_op(bool write, bool dix, bool dif)
{
	/* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
	static const unsigned int ops[] = {	/* wrt dix dif */
		SCSI_PROT_NORMAL,		/*  0	0   0  */
		SCSI_PROT_READ_STRIP,		/*  0	0   1  */
		SCSI_PROT_READ_INSERT,		/*  0	1   0  */
		SCSI_PROT_READ_PASS,		/*  0	1   1  */
		SCSI_PROT_NORMAL,		/*  1	0   0  */
		SCSI_PROT_WRITE_INSERT,		/*  1	0   1  */
		SCSI_PROT_WRITE_STRIP,		/*  1	1   0  */
		SCSI_PROT_WRITE_PASS,		/*  1	1   1  */
	};

	return ops[write << 2 | dix << 1 | dif];
}

/*
 * Returns a mask of the protection flags that are valid for a given DIX
 * operation.
 */
static unsigned int sd_prot_flag_mask(unsigned int prot_op)
{
	static const unsigned int flag_mask[] = {
		[SCSI_PROT_NORMAL]		= 0,

		[SCSI_PROT_READ_STRIP]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_READ_INSERT]		= SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_READ_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_INSERT]	= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_WRITE_STRIP]		= SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,
	};

	return flag_mask[prot_op];
}

static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
					   unsigned int dix, unsigned int dif)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct bio *bio = rq->bio;
	unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
	unsigned int protect = 0;

	if (dix) {				/* DIX Type 0, 1, 2, 3 */
		if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
			scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
	}

	if (dif != T10_PI_TYPE3_PROTECTION) {	/* DIX/DIF Type 0, 1, 2 */
		scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_REF_CHECK;
	}

	if (dif) {				/* DIX/DIF Type 1, 2, 3 */
		scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;

		if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
			protect = 3 << 5;	/* Disable target PI checking */
		else
			protect = 1 << 5;	/* Enable target PI checking */
	}

	scsi_set_prot_op(scmd, prot_op);
	scsi_set_prot_type(scmd, dif);
	scmd->prot_flags &= sd_prot_flag_mask(prot_op);

	return protect;
}

static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;
	unsigned int max_blocks = 0;

	q->limits.discard_alignment =
		sdkp->unmap_alignment * logical_block_size;
	q->limits.discard_granularity =
		max(sdkp->physical_block_size,
		    sdkp->unmap_granularity * logical_block_size);
	sdkp->provisioning_mode = mode;

	switch (mode) {

	case SD_LBP_FULL:
	case SD_LBP_DISABLE:
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
		return;

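	/*
	 * For the remaining modes, cap the number of blocks per discard by
	 * what the chosen command format can express and by the limits the
	 * device reported.
	 */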
	case SD_LBP_UNMAP:
		max_blocks = min_not_zero(sdkp->max_unmap_blocks,
					  (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS16:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS10:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
		break;

	case SD_LBP_ZERO:
		max_blocks = min_not_zero(sdkp->max_ws_blocks,
					  (u32)SD_MAX_WS10_BLOCKS);
		break;
	}

	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}

static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int data_len = 24;
	char *buf;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = UNMAP;
	cmd->cmnd[8] = 24;

	buf = bvec_virt(&rq->special_vec);
	put_unaligned_be16(6 + 16, &buf[0]);
	put_unaligned_be16(16, &buf[2]);
	put_unaligned_be64(lba, &buf[8]);
	put_unaligned_be32(nr_blocks, &buf[16]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = SD_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}

static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 16;
	cmd->cmnd[0] = WRITE_SAME_16;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}

static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = WRITE_SAME;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}

static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));

	if (!(rq->cmd_flags & REQ_NOUNMAP)) {
		switch (sdkp->zeroing_mode) {
		case SD_ZERO_WS16_UNMAP:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_ZERO_WS10_UNMAP:
			return sd_setup_write_same10_cmnd(cmd, true);
		}
	}

	if (sdp->no_write_same) {
		rq->rq_flags |= RQF_QUIET;
		return BLK_STS_TARGET;
	}

	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
		return sd_setup_write_same16_cmnd(cmd, false);

	return sd_setup_write_same10_cmnd(cmd, false);
}

static void sd_config_write_same(struct scsi_disk *sdkp)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;

	if (sdkp->device->no_write_same) {
		sdkp->max_ws_blocks = 0;
		goto out;
	}

	/* Some devices can not handle block counts above 0xffff despite
	 * supporting WRITE SAME(16). Consequently we default to 64k
	 * blocks per I/O unless the device explicitly advertises a
	 * bigger limit.
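	 * Which limit applies is decided below from the ws16/ws10
	 * indicators and the no_report_opcodes flag.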
	 */
	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS16_BLOCKS);
	else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS10_BLOCKS);
	else {
		sdkp->device->no_write_same = 1;
		sdkp->max_ws_blocks = 0;
	}

	if (sdkp->lbprz && sdkp->lbpws)
		sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
	else if (sdkp->lbprz && sdkp->lbpws10)
		sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
	else if (sdkp->max_ws_blocks)
		sdkp->zeroing_mode = SD_ZERO_WS;
	else
		sdkp->zeroing_mode = SD_ZERO_WRITE;

	if (sdkp->max_ws_blocks &&
	    sdkp->physical_block_size > logical_block_size) {
		/*
		 * Reporting a maximum number of blocks that is not aligned
		 * on the device physical size would cause a large write same
		 * request to be split into physically unaligned chunks by
		 * __blkdev_issue_write_zeroes() even if the caller of this
		 * functions took care to align the large request. So make sure
		 * the maximum reported is aligned to the device physical block
		 * size. This is only an optional optimization for regular
		 * disks, but this is mandatory to avoid failure of large write
		 * same requests directed at sequential write required zones of
		 * host-managed ZBC disks.
		 */
		sdkp->max_ws_blocks =
			round_down(sdkp->max_ws_blocks,
				   bytes_to_logical(sdkp->device,
						    sdkp->physical_block_size));
	}

out:
	blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
					   (logical_block_size >> 9));
}

static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);

	/* flush requests don't perform I/O, zero the S/G table */
	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	cmd->cmnd[0] = SYNCHRONIZE_CACHE;
	cmd->cmd_len = 10;
	cmd->transfersize = 0;
	cmd->allowed = sdkp->max_retries;

	rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
	if (unlikely(cmd->cmnd == NULL))
		return BLK_STS_RESOURCE;

	cmd->cmd_len = SD_EXT_CDB_SIZE;
	memset(cmd->cmnd, 0, cmd->cmd_len);

	cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
	cmd->cmnd[7] = 0x18; /* Additional CDB len */
	cmd->cmnd[9] = write ? WRITE_32 : READ_32;
	cmd->cmnd[10] = flags;
	put_unaligned_be64(lba, &cmd->cmnd[12]);
	put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
	put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len = 16;
	cmd->cmnd[0] = write ? WRITE_16 : READ_16;
	cmd->cmnd[1] = flags;
	cmd->cmnd[14] = 0;
	cmd->cmnd[15] = 0;
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len = 10;
	cmd->cmnd[0] = write ? WRITE_10 : READ_10;
	cmd->cmnd[1] = flags;
	cmd->cmnd[6] = 0;
	cmd->cmnd[9] = 0;
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
				      sector_t lba, unsigned int nr_blocks,
				      unsigned char flags)
{
	/* Avoid that 0 blocks gets translated into 256 blocks. */
	if (WARN_ON_ONCE(nr_blocks == 0))
		return BLK_STS_IOERR;

	if (unlikely(flags & 0x8)) {
		/*
		 * This happens only if this drive failed 10byte rw
		 * command with ILLEGAL_REQUEST during operation and
		 * thus turned off use_10_for_rw.
		 */
		scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
		return BLK_STS_IOERR;
	}

	cmd->cmd_len = 6;
	cmd->cmnd[0] = write ? WRITE_6 : READ_6;
	cmd->cmnd[1] = (lba >> 16) & 0x1f;
	cmd->cmnd[2] = (lba >> 8) & 0xff;
	cmd->cmnd[3] = lba & 0xff;
	cmd->cmnd[4] = nr_blocks;
	cmd->cmnd[5] = 0;

	return BLK_STS_OK;
}

static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	sector_t threshold;
	unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int mask = logical_to_sectors(sdp, 1) - 1;
	bool write = rq_data_dir(rq) == WRITE;
	unsigned char protect, fua;
	blk_status_t ret;
	unsigned int dif;
	bool dix;

	ret = scsi_alloc_sgtables(cmd);
	if (ret != BLK_STS_OK)
		return ret;

	ret = BLK_STS_IOERR;
	if (!scsi_device_online(sdp) || sdp->changed) {
		scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
		goto fail;
	}

	if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
		scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
		goto fail;
	}

	if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
		scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
		goto fail;
	}

	/*
	 * Some SD card readers can't handle accesses which touch the
	 * last one or two logical blocks. Split accesses as needed.
	 */
	threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;

	if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
		if (lba < threshold) {
			/* Access up to the threshold but not beyond */
			nr_blocks = threshold - lba;
		} else {
			/* Access only a single logical block */
			nr_blocks = 1;
		}
	}

	if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
		if (ret)
			goto fail;
	}

	fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
	dix = scsi_prot_sg_count(cmd);
	dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);

	if (dif || dix)
		protect = sd_setup_protect_cmnd(cmd, dix, dif);
	else
		protect = 0;

	if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
		ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua);
	} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
		ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua);
	} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
		   sdp->use_10_for_rw || protect) {
		ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua);
	} else {
		ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
					protect | fua);
	}

	if (unlikely(ret != BLK_STS_OK))
		goto fail;

	/*
	 * We shouldn't disconnect in the middle of a sector, so with a dumb
	 * host adapter, it's safe to assume that we can at least transfer
	 * this many bytes between each connect / disconnect.
	 */
	cmd->transfersize = sdp->sector_size;
	cmd->underflow = nr_blocks << 9;
	cmd->allowed = sdkp->max_retries;
	cmd->sdb.length = nr_blocks * sdp->sector_size;

	SCSI_LOG_HLQUEUE(1,
			 scmd_printk(KERN_INFO, cmd,
				     "%s: block=%llu, count=%d\n", __func__,
				     (unsigned long long)blk_rq_pos(rq),
				     blk_rq_sectors(rq)));
	SCSI_LOG_HLQUEUE(2,
			 scmd_printk(KERN_INFO, cmd,
				     "%s %d/%u 512 byte blocks.\n",
				     write ? "writing" : "reading", nr_blocks,
				     blk_rq_sectors(rq)));

	/*
	 * This indicates that the command is ready from our end to be queued.
	 */
	return BLK_STS_OK;
fail:
	scsi_free_sgtables(cmd);
	return ret;
}

static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
		switch (scsi_disk(rq->q->disk)->provisioning_mode) {
		case SD_LBP_UNMAP:
			return sd_setup_unmap_cmnd(cmd);
		case SD_LBP_WS16:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_LBP_WS10:
			return sd_setup_write_same10_cmnd(cmd, true);
		case SD_LBP_ZERO:
			return sd_setup_write_same10_cmnd(cmd, false);
		default:
			return BLK_STS_TARGET;
		}
	case REQ_OP_WRITE_ZEROES:
		return sd_setup_write_zeroes_cmnd(cmd);
	case REQ_OP_FLUSH:
		return sd_setup_flush_cmnd(cmd);
	case REQ_OP_READ:
	case REQ_OP_WRITE:
	case REQ_OP_ZONE_APPEND:
		return sd_setup_read_write_cmnd(cmd);
	case REQ_OP_ZONE_RESET:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   false);
	case REQ_OP_ZONE_RESET_ALL:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   true);
	case REQ_OP_ZONE_OPEN:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
	case REQ_OP_ZONE_CLOSE:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
	case REQ_OP_ZONE_FINISH:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_NOTSUPP;
	}
}

static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
	struct request *rq = scsi_cmd_to_rq(SCpnt);
	u8 *cmnd;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		mempool_free(rq->special_vec.bv_page, sd_page_pool);

	if (SCpnt->cmnd != scsi_req(rq)->cmd) {
		cmnd = SCpnt->cmnd;
		SCpnt->cmnd = NULL;
		SCpnt->cmd_len = 0;
		mempool_free(cmnd, sd_cdb_pool);
	}
}

static bool sd_need_revalidate(struct block_device *bdev,
		struct scsi_disk *sdkp)
{
	if (sdkp->device->removable || sdkp->write_prot) {
		if (bdev_check_media_change(bdev))
			return true;
	}

	/*
	 * Force a full rescan after ioctl(BLKRRPART). While the disk state has
	 * nothing to do with partitions, BLKRRPART is used to force a full
	 * revalidate after things like a format for historical reasons.
	 */
	return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
}

/**
 *	sd_open - open a scsi disk device
 *	@bdev: Block device of the scsi disk to open
 *	@mode: FMODE_* mask
 *
 *	Returns 0 if successful. Returns a negated errno value in case
 *	of error.
 *
 *	Note: This can be called from a user context (e.g. fsck(1) )
 *	or from within the kernel (e.g. as a result of a mount(1) ).
 *	In the latter case @inode and @filp carry an abridged amount
 *	of information as noted above.
 *
 *	Locking: called with bdev->bd_disk->open_mutex held.
 **/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
	struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
	struct scsi_device *sdev;
	int retval;

	if (!sdkp)
		return -ENXIO;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));

	sdev = sdkp->device;

	/*
	 * If the device is in error recovery, wait until it is done.
	 * If the device is offline, then disallow any access to it.
	 */
	retval = -ENXIO;
	if (!scsi_block_when_processing_errors(sdev))
		goto error_out;

	if (sd_need_revalidate(bdev, sdkp))
		sd_revalidate_disk(bdev->bd_disk);

	/*
	 * If the drive is empty, just let the open fail.
	 */
	retval = -ENOMEDIUM;
	if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
		goto error_out;

	/*
	 * If the device has the write protect tab set, have the open fail
	 * if the user expects to be able to write to the thing.
	 */
	retval = -EROFS;
	if (sdkp->write_prot && (mode & FMODE_WRITE))
		goto error_out;

	/*
	 * It is possible that the disk changing stuff resulted in
	 * the device being taken offline. If this is the case,
	 * report this to the user, and don't pretend that the
	 * open actually succeeded.
	 */
	retval = -ENXIO;
	if (!scsi_device_online(sdev))
		goto error_out;

	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
	}

	return 0;

error_out:
	scsi_disk_put(sdkp);
	return retval;
}

/**
 *	sd_release - invoked when the (last) close(2) is called on this
 *	scsi disk.
 *	@disk: disk to release
 *	@mode: FMODE_* mask
 *
 *	Returns 0.
 *
 *	Note: may block (uninterruptible) if error recovery is underway
 *	on this disk.
 *
 *	Locking: called with bdev->bd_disk->open_mutex held.
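 *
 *	Note: the reference on the scsi_disk taken by scsi_disk_get() in
 *	sd_open() is dropped here via scsi_disk_put().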
 **/
static void sd_release(struct gendisk *disk, fmode_t mode)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdev = sdkp->device;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));

	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
	}

	scsi_disk_put(sdkp);
}

static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdp = sdkp->device;
	struct Scsi_Host *host = sdp->host;
	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
	int diskinfo[4];

	/* default to most commonly used values */
	diskinfo[0] = 0x40;	/* 1 << 6 */
	diskinfo[1] = 0x20;	/* 1 << 5 */
	diskinfo[2] = capacity >> 11;

	/* override with calculated, extended default, or driver values */
	if (host->hostt->bios_param)
		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
	else
		scsicam_bios_param(bdev, capacity, diskinfo);

	geo->heads = diskinfo[0];
	geo->sectors = diskinfo[1];
	geo->cylinders = diskinfo[2];
	return 0;
}

/**
 *	sd_ioctl - process an ioctl
 *	@bdev: target block device
 *	@mode: FMODE_* mask
 *	@cmd: ioctl command number
 *	@arg: this is third argument given to ioctl(2) system call.
 *	Often contains a pointer.
 *
 *	Returns 0 if successful (some ioctls return positive numbers on
 *	success as well). Returns a negated errno value in case of error.
 *
 *	Note: most ioctls are forwarded onto the block subsystem or further
 *	down in the scsi subsystem.
 **/
static int sd_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	void __user *p = (void __user *)arg;
	int error;

	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
				    "cmd=0x%x\n", disk->disk_name, cmd));

	if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
		return -ENOIOCTLCMD;

	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device. Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
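	 * That check is done by scsi_ioctl_block_when_processing_errors()
	 * below; SED OPAL security ioctls are then routed to sed_ioctl()
	 * and all remaining ones to scsi_ioctl().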
	 */
	error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
			(mode & FMODE_NDELAY) != 0);
	if (error)
		return error;

	if (is_sed_ioctl(cmd))
		return sed_ioctl(sdkp->opal_dev, cmd, p);
	return scsi_ioctl(sdp, mode, cmd, p);
}

static void set_media_not_present(struct scsi_disk *sdkp)
{
	if (sdkp->media_present)
		sdkp->device->changed = 1;

	if (sdkp->device->removable) {
		sdkp->media_present = 0;
		sdkp->capacity = 0;
	}
}

static int media_not_present(struct scsi_disk *sdkp,
			     struct scsi_sense_hdr *sshdr)
{
	if (!scsi_sense_valid(sshdr))
		return 0;

	/* not invoked for commands that could return deferred errors */
	switch (sshdr->sense_key) {
	case UNIT_ATTENTION:
	case NOT_READY:
		/* medium not present */
		if (sshdr->asc == 0x3A) {
			set_media_not_present(sdkp);
			return 1;
		}
	}
	return 0;
}

/**
 *	sd_check_events - check media events
 *	@disk: kernel device descriptor
 *	@clearing: disk events currently being cleared
 *
 *	Returns mask of DISK_EVENT_*.
 *
 *	Note: this function is invoked from the block subsystem.
 **/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
	struct scsi_disk *sdkp = scsi_disk_get(disk);
	struct scsi_device *sdp;
	int retval;
	bool disk_changed;

	if (!sdkp)
		return 0;

	sdp = sdkp->device;
	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));

	/*
	 * If the device is offline, don't send any commands - just pretend as
	 * if the command failed. If the device ever comes back online, we
	 * can deal with it then. It is only because of unrecoverable errors
	 * that we would ever take a device offline in the first place.
	 */
	if (!scsi_device_online(sdp)) {
		set_media_not_present(sdkp);
		goto out;
	}

	/*
	 * Using TEST_UNIT_READY enables differentiation between drive with
	 * no cartridge loaded - NOT READY, drive with changed cartridge -
	 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
	 *
	 * Drives that auto spin down, e.g. iomega jaz 1G, will be started
	 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
	 * sd_revalidate() is called.
	 */
	if (scsi_block_when_processing_errors(sdp)) {
		struct scsi_sense_hdr sshdr = { 0, };

		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
					      &sshdr);

		/* failed to execute TUR, assume media not present */
		if (retval < 0 || host_byte(retval)) {
			set_media_not_present(sdkp);
			goto out;
		}

		if (media_not_present(sdkp, &sshdr))
			goto out;
	}

	/*
	 * For removable scsi disk we have to recognise the presence
	 * of a disk in the drive.
	 */
	if (!sdkp->media_present)
		sdp->changed = 1;
	sdkp->media_present = 1;
out:
	/*
	 * sdp->changed is set under the following conditions:
	 *
	 *	Medium present state has changed in either direction.
	 *	Device has indicated UNIT_ATTENTION.
	 */
	disk_changed = sdp->changed;
	sdp->changed = 0;
	scsi_disk_put(sdkp);
	return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}

static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
{
	int retries, res;
	struct scsi_device *sdp = sdkp->device;
	const int timeout = sdp->request_queue->rq_timeout
		* SD_FLUSH_TIMEOUT_MULTIPLIER;
	struct scsi_sense_hdr my_sshdr;

	if (!scsi_device_online(sdp))
		return -ENODEV;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

	for (retries = 3; retries > 0; --retries) {
		unsigned char cmd[10] = { 0 };

		cmd[0] = SYNCHRONIZE_CACHE;
		/*
		 * Leave the rest of the command zero to indicate
		 * flush everything.
		 */
		res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
				   timeout, sdkp->max_retries, 0, RQF_PM, NULL);
		if (res == 0)
			break;
	}

	if (res) {
		sd_print_result(sdkp, "Synchronize Cache(10) failed", res);

		if (res < 0)
			return res;

		if (scsi_status_is_check_condition(res) &&
		    scsi_sense_valid(sshdr)) {
			sd_print_sense_hdr(sdkp, sshdr);

			/* we need to evaluate the error return */
			if (sshdr->asc == 0x3a ||	/* medium not present */
			    sshdr->asc == 0x20 ||	/* invalid command */
			    (sshdr->asc == 0x74 && sshdr->ascq == 0x71))	/* drive is password locked */
				/* this is no error here */
				return 0;
		}

		switch (host_byte(res)) {
		/* ignore errors due to racing a disconnection */
		case DID_BAD_TARGET:
		case DID_NO_CONNECT:
			return 0;
		/* signal the upper layer it might try again */
		case DID_BUS_BUSY:
		case DID_IMM_RETRY:
		case DID_REQUEUE:
		case DID_SOFT_ERROR:
			return -EBUSY;
		default:
			return -EIO;
		}
	}
	return 0;
}

static void sd_rescan(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	sd_revalidate_disk(sdkp->disk);
}

static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
		enum blk_unique_id type)
{
	struct scsi_device *sdev = scsi_disk(disk)->device;
	const struct scsi_vpd *vpd;
	const unsigned char *d;
	int ret = -ENXIO, len;

	rcu_read_lock();
	vpd = rcu_dereference(sdev->vpd_pg83);
	if (!vpd)
		goto out_unlock;

	ret = -EINVAL;
	for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
		/* we only care about designators with LU association */
		if (((d[1] >> 4) & 0x3) != 0x00)
			continue;
		if ((d[1] & 0xf) != type)
			continue;

		/*
		 * Only exit early if a 16-byte descriptor was found. Otherwise
		 * keep looking as one with more entropy might still show up.
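		 * Only designators of 8, 12 or 16 bytes are copied out;
		 * anything else is skipped.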
		 */
		len = d[3];
		if (len != 8 && len != 12 && len != 16)
			continue;
		ret = len;
		memcpy(id, d + 4, len);
		if (len == 16)
			break;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}

static char sd_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 0x01;
	case PR_EXCLUSIVE_ACCESS:
		return 0x03;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 0x05;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 0x06;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 0x07;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 0x08;
	default:
		return 0;
	}
};

static int sd_pr_command(struct block_device *bdev, u8 sa,
		u64 key, u64 sa_key, u8 type, u8 flags)
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdev = sdkp->device;
	struct scsi_sense_hdr sshdr;
	int result;
	u8 cmd[16] = { 0, };
	u8 data[24] = { 0, };

	cmd[0] = PERSISTENT_RESERVE_OUT;
	cmd[1] = sa;
	cmd[2] = type;
	put_unaligned_be32(sizeof(data), &cmd[5]);

	put_unaligned_be64(key, &data[0]);
	put_unaligned_be64(sa_key, &data[8]);
	data[20] = flags;

	result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
			&sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);

	if (scsi_status_is_check_condition(result) &&
	    scsi_sense_valid(&sshdr)) {
		sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
		scsi_print_sense_hdr(sdev, NULL, &sshdr);
	}

	return result;
}

static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
		u32 flags)
{
	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;
	return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
			old_key, new_key, 0,
			(1 << 0) /* APTPL */);
}

static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
		u32 flags)
{
	if (flags)
		return -EOPNOTSUPP;
	return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
}

static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
}

static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
		enum pr_type type, bool abort)
{
	return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
			sd_pr_type(type), 0);
}

static int sd_pr_clear(struct block_device *bdev, u64 key)
{
	return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
}

static const struct pr_ops sd_pr_ops = {
	.pr_register	= sd_pr_register,
	.pr_reserve	= sd_pr_reserve,
	.pr_release	= sd_pr_release,
	.pr_preempt	= sd_pr_preempt,
	.pr_clear	= sd_pr_clear,
};

static const struct block_device_operations sd_fops = {
	.owner			= THIS_MODULE,
	.open			= sd_open,
	.release		= sd_release,
	.ioctl			= sd_ioctl,
	.getgeo			= sd_getgeo,
	.compat_ioctl		= blkdev_compat_ptr_ioctl,
	.check_events		= sd_check_events,
	.unlock_native_capacity	= sd_unlock_native_capacity,
	.report_zones		= sd_zbc_report_zones,
	.get_unique_id		= sd_get_unique_id,
	.pr_ops			= &sd_pr_ops,
};

/**
 *	sd_eh_reset - reset error handling callback
 *	@scmd:		sd-issued command that has failed
 *
 *	This function is called by the SCSI midlayer before starting
 *	SCSI EH. When counting medium access failures we have to be
 *	careful to register it only once per device and SCSI EH run;
 *	there might be several timed out commands which will cause the
 *	'max_medium_access_timeouts' counter to trigger after the first
 *	SCSI EH run already and set the device to offline.
 *	So this function resets the internal counter before starting SCSI EH.
 **/
static void sd_eh_reset(struct scsi_cmnd *scmd)
{
	struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);

	/* New SCSI EH run, reset gate variable */
	sdkp->ignore_medium_access_errors = false;
}

/**
 *	sd_eh_action - error handling callback
 *	@scmd:		sd-issued command that has failed
 *	@eh_disp:	The recovery disposition suggested by the midlayer
 *
 *	This function is called by the SCSI midlayer upon completion of an
 *	error test command (currently TEST UNIT READY). The result of sending
 *	the eh command is passed in eh_disp. We're looking for devices that
 *	fail medium access commands but are OK with non access commands like
 *	test unit ready (so wrongly see the device as having a successful
 *	recovery)
 **/
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
	struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
	struct scsi_device *sdev = scmd->device;

	if (!scsi_device_online(sdev) ||
	    !scsi_medium_access_command(scmd) ||
	    host_byte(scmd->result) != DID_TIME_OUT ||
	    eh_disp != SUCCESS)
		return eh_disp;

	/*
	 * The device has timed out executing a medium access command.
	 * However, the TEST UNIT READY command sent during error
	 * handling completed successfully. Either the device is in the
	 * process of recovering or it has suffered an internal failure
	 * that prevents access to the storage medium.
	 */
	if (!sdkp->ignore_medium_access_errors) {
		sdkp->medium_access_timed_out++;
		sdkp->ignore_medium_access_errors = true;
	}

	/*
	 * If the device keeps failing read/write commands but TEST UNIT
	 * READY always completes successfully we assume that medium
	 * access is no longer possible and take the device offline.
	 */
	if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
		scmd_printk(KERN_ERR, scmd,
			    "Medium access timeout failure. "
Offlining disk!\n"); 1908 mutex_lock(&sdev->state_mutex); 1909 scsi_device_set_state(sdev, SDEV_OFFLINE); 1910 mutex_unlock(&sdev->state_mutex); 1911 1912 return SUCCESS; 1913 } 1914 1915 return eh_disp; 1916 } 1917 1918 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) 1919 { 1920 struct request *req = scsi_cmd_to_rq(scmd); 1921 struct scsi_device *sdev = scmd->device; 1922 unsigned int transferred, good_bytes; 1923 u64 start_lba, end_lba, bad_lba; 1924 1925 /* 1926 * Some commands have a payload smaller than the device logical 1927 * block size (e.g. INQUIRY on a 4K disk). 1928 */ 1929 if (scsi_bufflen(scmd) <= sdev->sector_size) 1930 return 0; 1931 1932 /* Check if we have a 'bad_lba' information */ 1933 if (!scsi_get_sense_info_fld(scmd->sense_buffer, 1934 SCSI_SENSE_BUFFERSIZE, 1935 &bad_lba)) 1936 return 0; 1937 1938 /* 1939 * If the bad lba was reported incorrectly, we have no idea where 1940 * the error is. 1941 */ 1942 start_lba = sectors_to_logical(sdev, blk_rq_pos(req)); 1943 end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd)); 1944 if (bad_lba < start_lba || bad_lba >= end_lba) 1945 return 0; 1946 1947 /* 1948 * resid is optional but mostly filled in. When it's unused, 1949 * its value is zero, so we assume the whole buffer transferred 1950 */ 1951 transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); 1952 1953 /* This computation should always be done in terms of the 1954 * resolution of the device's medium. 1955 */ 1956 good_bytes = logical_to_bytes(sdev, bad_lba - start_lba); 1957 1958 return min(good_bytes, transferred); 1959 } 1960 1961 /** 1962 * sd_done - bottom half handler: called when the lower level 1963 * driver has completed (successfully or otherwise) a scsi command. 1964 * @SCpnt: mid-level's per command structure. 1965 * 1966 * Note: potentially run from within an ISR. Must not block. 1967 **/ 1968 static int sd_done(struct scsi_cmnd *SCpnt) 1969 { 1970 int result = SCpnt->result; 1971 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); 1972 unsigned int sector_size = SCpnt->device->sector_size; 1973 unsigned int resid; 1974 struct scsi_sense_hdr sshdr; 1975 struct request *req = scsi_cmd_to_rq(SCpnt); 1976 struct scsi_disk *sdkp = scsi_disk(req->q->disk); 1977 int sense_valid = 0; 1978 int sense_deferred = 0; 1979 1980 switch (req_op(req)) { 1981 case REQ_OP_DISCARD: 1982 case REQ_OP_WRITE_ZEROES: 1983 case REQ_OP_ZONE_RESET: 1984 case REQ_OP_ZONE_RESET_ALL: 1985 case REQ_OP_ZONE_OPEN: 1986 case REQ_OP_ZONE_CLOSE: 1987 case REQ_OP_ZONE_FINISH: 1988 if (!result) { 1989 good_bytes = blk_rq_bytes(req); 1990 scsi_set_resid(SCpnt, 0); 1991 } else { 1992 good_bytes = 0; 1993 scsi_set_resid(SCpnt, blk_rq_bytes(req)); 1994 } 1995 break; 1996 default: 1997 /* 1998 * In case of bogus fw or device, we could end up having 1999 * an unaligned partial completion. Check this here and force 2000 * alignment. 
2001 */ 2002 resid = scsi_get_resid(SCpnt); 2003 if (resid & (sector_size - 1)) { 2004 sd_printk(KERN_INFO, sdkp, 2005 "Unaligned partial completion (resid=%u, sector_sz=%u)\n", 2006 resid, sector_size); 2007 scsi_print_command(SCpnt); 2008 resid = min(scsi_bufflen(SCpnt), 2009 round_up(resid, sector_size)); 2010 scsi_set_resid(SCpnt, resid); 2011 } 2012 } 2013 2014 if (result) { 2015 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); 2016 if (sense_valid) 2017 sense_deferred = scsi_sense_is_deferred(&sshdr); 2018 } 2019 sdkp->medium_access_timed_out = 0; 2020 2021 if (!scsi_status_is_check_condition(result) && 2022 (!sense_valid || sense_deferred)) 2023 goto out; 2024 2025 switch (sshdr.sense_key) { 2026 case HARDWARE_ERROR: 2027 case MEDIUM_ERROR: 2028 good_bytes = sd_completed_bytes(SCpnt); 2029 break; 2030 case RECOVERED_ERROR: 2031 good_bytes = scsi_bufflen(SCpnt); 2032 break; 2033 case NO_SENSE: 2034 /* This indicates a false check condition, so ignore it. An 2035 * unknown amount of data was transferred so treat it as an 2036 * error. 2037 */ 2038 SCpnt->result = 0; 2039 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2040 break; 2041 case ABORTED_COMMAND: 2042 if (sshdr.asc == 0x10) /* DIF: Target detected corruption */ 2043 good_bytes = sd_completed_bytes(SCpnt); 2044 break; 2045 case ILLEGAL_REQUEST: 2046 switch (sshdr.asc) { 2047 case 0x10: /* DIX: Host detected corruption */ 2048 good_bytes = sd_completed_bytes(SCpnt); 2049 break; 2050 case 0x20: /* INVALID COMMAND OPCODE */ 2051 case 0x24: /* INVALID FIELD IN CDB */ 2052 switch (SCpnt->cmnd[0]) { 2053 case UNMAP: 2054 sd_config_discard(sdkp, SD_LBP_DISABLE); 2055 break; 2056 case WRITE_SAME_16: 2057 case WRITE_SAME: 2058 if (SCpnt->cmnd[1] & 8) { /* UNMAP */ 2059 sd_config_discard(sdkp, SD_LBP_DISABLE); 2060 } else { 2061 sdkp->device->no_write_same = 1; 2062 sd_config_write_same(sdkp); 2063 req->rq_flags |= RQF_QUIET; 2064 } 2065 break; 2066 } 2067 } 2068 break; 2069 default: 2070 break; 2071 } 2072 2073 out: 2074 if (sd_is_zoned(sdkp)) 2075 good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr); 2076 2077 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt, 2078 "sd_done: completed %d of %d bytes\n", 2079 good_bytes, scsi_bufflen(SCpnt))); 2080 2081 return good_bytes; 2082 } 2083 2084 /* 2085 * spinup disk - called only in sd_revalidate_disk() 2086 */ 2087 static void 2088 sd_spinup_disk(struct scsi_disk *sdkp) 2089 { 2090 unsigned char cmd[10]; 2091 unsigned long spintime_expire = 0; 2092 int retries, spintime; 2093 unsigned int the_result; 2094 struct scsi_sense_hdr sshdr; 2095 int sense_valid = 0; 2096 2097 spintime = 0; 2098 2099 /* Spin up drives, as required. Only do this at boot time */ 2100 /* Spinup needs to be done for module loads too. */ 2101 do { 2102 retries = 0; 2103 2104 do { 2105 bool media_was_present = sdkp->media_present; 2106 2107 cmd[0] = TEST_UNIT_READY; 2108 memset((void *) &cmd[1], 0, 9); 2109 2110 the_result = scsi_execute_req(sdkp->device, cmd, 2111 DMA_NONE, NULL, 0, 2112 &sshdr, SD_TIMEOUT, 2113 sdkp->max_retries, NULL); 2114 2115 /* 2116 * If the drive has indicated to us that it 2117 * doesn't have any media in it, don't bother 2118 * with any more polling. 
2119 */ 2120 if (media_not_present(sdkp, &sshdr)) { 2121 if (media_was_present) 2122 sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n"); 2123 return; 2124 } 2125 2126 if (the_result) 2127 sense_valid = scsi_sense_valid(&sshdr); 2128 retries++; 2129 } while (retries < 3 && 2130 (!scsi_status_is_good(the_result) || 2131 (scsi_status_is_check_condition(the_result) && 2132 sense_valid && sshdr.sense_key == UNIT_ATTENTION))); 2133 2134 if (!scsi_status_is_check_condition(the_result)) { 2135 /* no sense, TUR either succeeded or failed 2136 * with a status error */ 2137 if(!spintime && !scsi_status_is_good(the_result)) { 2138 sd_print_result(sdkp, "Test Unit Ready failed", 2139 the_result); 2140 } 2141 break; 2142 } 2143 2144 /* 2145 * The device does not want the automatic start to be issued. 2146 */ 2147 if (sdkp->device->no_start_on_add) 2148 break; 2149 2150 if (sense_valid && sshdr.sense_key == NOT_READY) { 2151 if (sshdr.asc == 4 && sshdr.ascq == 3) 2152 break; /* manual intervention required */ 2153 if (sshdr.asc == 4 && sshdr.ascq == 0xb) 2154 break; /* standby */ 2155 if (sshdr.asc == 4 && sshdr.ascq == 0xc) 2156 break; /* unavailable */ 2157 if (sshdr.asc == 4 && sshdr.ascq == 0x1b) 2158 break; /* sanitize in progress */ 2159 /* 2160 * Issue command to spin up drive when not ready 2161 */ 2162 if (!spintime) { 2163 sd_printk(KERN_NOTICE, sdkp, "Spinning up disk..."); 2164 cmd[0] = START_STOP; 2165 cmd[1] = 1; /* Return immediately */ 2166 memset((void *) &cmd[2], 0, 8); 2167 cmd[4] = 1; /* Start spin cycle */ 2168 if (sdkp->device->start_stop_pwr_cond) 2169 cmd[4] |= 1 << 4; 2170 scsi_execute_req(sdkp->device, cmd, DMA_NONE, 2171 NULL, 0, &sshdr, 2172 SD_TIMEOUT, sdkp->max_retries, 2173 NULL); 2174 spintime_expire = jiffies + 100 * HZ; 2175 spintime = 1; 2176 } 2177 /* Wait 1 second for next try */ 2178 msleep(1000); 2179 printk(KERN_CONT "."); 2180 2181 /* 2182 * Wait for USB flash devices with slow firmware. 2183 * Yes, this sense key/ASC combination shouldn't 2184 * occur here. It's characteristic of these devices. 2185 */ 2186 } else if (sense_valid && 2187 sshdr.sense_key == UNIT_ATTENTION && 2188 sshdr.asc == 0x28) { 2189 if (!spintime) { 2190 spintime_expire = jiffies + 5 * HZ; 2191 spintime = 1; 2192 } 2193 /* Wait 1 second for next try */ 2194 msleep(1000); 2195 } else { 2196 /* we don't understand the sense code, so it's 2197 * probably pointless to loop */ 2198 if(!spintime) { 2199 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n"); 2200 sd_print_sense_hdr(sdkp, &sshdr); 2201 } 2202 break; 2203 } 2204 2205 } while (spintime && time_before_eq(jiffies, spintime_expire)); 2206 2207 if (spintime) { 2208 if (scsi_status_is_good(the_result)) 2209 printk(KERN_CONT "ready\n"); 2210 else 2211 printk(KERN_CONT "not responding...\n"); 2212 } 2213 } 2214 2215 /* 2216 * Determine whether disk supports Data Integrity Field. 
2217 */ 2218 static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer) 2219 { 2220 struct scsi_device *sdp = sdkp->device; 2221 u8 type; 2222 int ret = 0; 2223 2224 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { 2225 sdkp->protection_type = 0; 2226 return ret; 2227 } 2228 2229 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ 2230 2231 if (type > T10_PI_TYPE3_PROTECTION) 2232 ret = -ENODEV; 2233 else if (scsi_host_dif_capable(sdp->host, type)) 2234 ret = 1; 2235 2236 if (sdkp->first_scan || type != sdkp->protection_type) 2237 switch (ret) { 2238 case -ENODEV: 2239 sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \ 2240 " protection type %u. Disabling disk!\n", 2241 type); 2242 break; 2243 case 1: 2244 sd_printk(KERN_NOTICE, sdkp, 2245 "Enabling DIF Type %u protection\n", type); 2246 break; 2247 case 0: 2248 sd_printk(KERN_NOTICE, sdkp, 2249 "Disabling DIF Type %u protection\n", type); 2250 break; 2251 } 2252 2253 sdkp->protection_type = type; 2254 2255 return ret; 2256 } 2257 2258 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, 2259 struct scsi_sense_hdr *sshdr, int sense_valid, 2260 int the_result) 2261 { 2262 if (sense_valid) 2263 sd_print_sense_hdr(sdkp, sshdr); 2264 else 2265 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n"); 2266 2267 /* 2268 * Set dirty bit for removable devices if not ready - 2269 * sometimes drives will not report this properly. 2270 */ 2271 if (sdp->removable && 2272 sense_valid && sshdr->sense_key == NOT_READY) 2273 set_media_not_present(sdkp); 2274 2275 /* 2276 * We used to set media_present to 0 here to indicate no media 2277 * in the drive, but some drives fail read capacity even with 2278 * media present, so we can't do that. 
2279 */ 2280 sdkp->capacity = 0; /* unknown mapped to zero - as usual */ 2281 } 2282 2283 #define RC16_LEN 32 2284 #if RC16_LEN > SD_BUF_SIZE 2285 #error RC16_LEN must not be more than SD_BUF_SIZE 2286 #endif 2287 2288 #define READ_CAPACITY_RETRIES_ON_RESET 10 2289 2290 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, 2291 unsigned char *buffer) 2292 { 2293 unsigned char cmd[16]; 2294 struct scsi_sense_hdr sshdr; 2295 int sense_valid = 0; 2296 int the_result; 2297 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; 2298 unsigned int alignment; 2299 unsigned long long lba; 2300 unsigned sector_size; 2301 2302 if (sdp->no_read_capacity_16) 2303 return -EINVAL; 2304 2305 do { 2306 memset(cmd, 0, 16); 2307 cmd[0] = SERVICE_ACTION_IN_16; 2308 cmd[1] = SAI_READ_CAPACITY_16; 2309 cmd[13] = RC16_LEN; 2310 memset(buffer, 0, RC16_LEN); 2311 2312 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, 2313 buffer, RC16_LEN, &sshdr, 2314 SD_TIMEOUT, sdkp->max_retries, NULL); 2315 2316 if (media_not_present(sdkp, &sshdr)) 2317 return -ENODEV; 2318 2319 if (the_result > 0) { 2320 sense_valid = scsi_sense_valid(&sshdr); 2321 if (sense_valid && 2322 sshdr.sense_key == ILLEGAL_REQUEST && 2323 (sshdr.asc == 0x20 || sshdr.asc == 0x24) && 2324 sshdr.ascq == 0x00) 2325 /* Invalid Command Operation Code or 2326 * Invalid Field in CDB, just retry 2327 * silently with RC10 */ 2328 return -EINVAL; 2329 if (sense_valid && 2330 sshdr.sense_key == UNIT_ATTENTION && 2331 sshdr.asc == 0x29 && sshdr.ascq == 0x00) 2332 /* Device reset might occur several times, 2333 * give it one more chance */ 2334 if (--reset_retries > 0) 2335 continue; 2336 } 2337 retries--; 2338 2339 } while (the_result && retries); 2340 2341 if (the_result) { 2342 sd_print_result(sdkp, "Read Capacity(16) failed", the_result); 2343 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); 2344 return -EINVAL; 2345 } 2346 2347 sector_size = get_unaligned_be32(&buffer[8]); 2348 lba = get_unaligned_be64(&buffer[0]); 2349 2350 if (sd_read_protection_type(sdkp, buffer) < 0) { 2351 sdkp->capacity = 0; 2352 return -ENODEV; 2353 } 2354 2355 /* Logical blocks per physical block exponent */ 2356 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size; 2357 2358 /* RC basis */ 2359 sdkp->rc_basis = (buffer[12] >> 4) & 0x3; 2360 2361 /* Lowest aligned logical block */ 2362 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; 2363 blk_queue_alignment_offset(sdp->request_queue, alignment); 2364 if (alignment && sdkp->first_scan) 2365 sd_printk(KERN_NOTICE, sdkp, 2366 "physical block alignment offset: %u\n", alignment); 2367 2368 if (buffer[14] & 0x80) { /* LBPME */ 2369 sdkp->lbpme = 1; 2370 2371 if (buffer[14] & 0x40) /* LBPRZ */ 2372 sdkp->lbprz = 1; 2373 2374 sd_config_discard(sdkp, SD_LBP_WS16); 2375 } 2376 2377 sdkp->capacity = lba + 1; 2378 return sector_size; 2379 } 2380 2381 static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, 2382 unsigned char *buffer) 2383 { 2384 unsigned char cmd[16]; 2385 struct scsi_sense_hdr sshdr; 2386 int sense_valid = 0; 2387 int the_result; 2388 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; 2389 sector_t lba; 2390 unsigned sector_size; 2391 2392 do { 2393 cmd[0] = READ_CAPACITY; 2394 memset(&cmd[1], 0, 9); 2395 memset(buffer, 0, 8); 2396 2397 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, 2398 buffer, 8, &sshdr, 2399 SD_TIMEOUT, sdkp->max_retries, NULL); 2400 2401 if (media_not_present(sdkp, &sshdr)) 
2402 return -ENODEV; 2403 2404 if (the_result > 0) { 2405 sense_valid = scsi_sense_valid(&sshdr); 2406 if (sense_valid && 2407 sshdr.sense_key == UNIT_ATTENTION && 2408 sshdr.asc == 0x29 && sshdr.ascq == 0x00) 2409 /* Device reset might occur several times, 2410 * give it one more chance */ 2411 if (--reset_retries > 0) 2412 continue; 2413 } 2414 retries--; 2415 2416 } while (the_result && retries); 2417 2418 if (the_result) { 2419 sd_print_result(sdkp, "Read Capacity(10) failed", the_result); 2420 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); 2421 return -EINVAL; 2422 } 2423 2424 sector_size = get_unaligned_be32(&buffer[4]); 2425 lba = get_unaligned_be32(&buffer[0]); 2426 2427 if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) { 2428 /* Some buggy (USB card reader) devices return an LBA of 2429 0xffffffff when they want to report a size of 0 (by 2430 which they really mean no media is present) */ 2431 sdkp->capacity = 0; 2432 sdkp->physical_block_size = sector_size; 2433 return sector_size; 2434 } 2435 2436 sdkp->capacity = lba + 1; 2437 sdkp->physical_block_size = sector_size; 2438 return sector_size; 2439 } 2440 2441 static int sd_try_rc16_first(struct scsi_device *sdp) 2442 { 2443 if (sdp->host->max_cmd_len < 16) 2444 return 0; 2445 if (sdp->try_rc_10_first) 2446 return 0; 2447 if (sdp->scsi_level > SCSI_SPC_2) 2448 return 1; 2449 if (scsi_device_protection(sdp)) 2450 return 1; 2451 return 0; 2452 } 2453 2454 /* 2455 * read disk capacity 2456 */ 2457 static void 2458 sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) 2459 { 2460 int sector_size; 2461 struct scsi_device *sdp = sdkp->device; 2462 2463 if (sd_try_rc16_first(sdp)) { 2464 sector_size = read_capacity_16(sdkp, sdp, buffer); 2465 if (sector_size == -EOVERFLOW) 2466 goto got_data; 2467 if (sector_size == -ENODEV) 2468 return; 2469 if (sector_size < 0) 2470 sector_size = read_capacity_10(sdkp, sdp, buffer); 2471 if (sector_size < 0) 2472 return; 2473 } else { 2474 sector_size = read_capacity_10(sdkp, sdp, buffer); 2475 if (sector_size == -EOVERFLOW) 2476 goto got_data; 2477 if (sector_size < 0) 2478 return; 2479 if ((sizeof(sdkp->capacity) > 4) && 2480 (sdkp->capacity > 0xffffffffULL)) { 2481 int old_sector_size = sector_size; 2482 sd_printk(KERN_NOTICE, sdkp, "Very big device. " 2483 "Trying to use READ CAPACITY(16).\n"); 2484 sector_size = read_capacity_16(sdkp, sdp, buffer); 2485 if (sector_size < 0) { 2486 sd_printk(KERN_NOTICE, sdkp, 2487 "Using 0xffffffff as device size\n"); 2488 sdkp->capacity = 1 + (sector_t) 0xffffffff; 2489 sector_size = old_sector_size; 2490 goto got_data; 2491 } 2492 /* Remember that READ CAPACITY(16) succeeded */ 2493 sdp->try_rc_10_first = 0; 2494 } 2495 } 2496 2497 /* Some devices are known to return the total number of blocks, 2498 * not the highest block number. Some devices have versions 2499 * which do this and others which do not. Some devices we might 2500 * suspect of doing this but we don't know for certain. 2501 * 2502 * If we know the reported capacity is wrong, decrement it. If 2503 * we can only guess, then assume the number of blocks is even 2504 * (usually true but not always) and err on the side of lowering 2505 * the capacity. 
2506 */ 2507 if (sdp->fix_capacity || 2508 (sdp->guess_capacity && (sdkp->capacity & 0x01))) { 2509 sd_printk(KERN_INFO, sdkp, "Adjusting the sector count " 2510 "from its reported value: %llu\n", 2511 (unsigned long long) sdkp->capacity); 2512 --sdkp->capacity; 2513 } 2514 2515 got_data: 2516 if (sector_size == 0) { 2517 sector_size = 512; 2518 sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, " 2519 "assuming 512.\n"); 2520 } 2521 2522 if (sector_size != 512 && 2523 sector_size != 1024 && 2524 sector_size != 2048 && 2525 sector_size != 4096) { 2526 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", 2527 sector_size); 2528 /* 2529 * The user might want to re-format the drive with 2530 * a supported sectorsize. Once this happens, it 2531 * would be relatively trivial to set the thing up. 2532 * For this reason, we leave the thing in the table. 2533 */ 2534 sdkp->capacity = 0; 2535 /* 2536 * set a bogus sector size so the normal read/write 2537 * logic in the block layer will eventually refuse any 2538 * request on this device without tripping over power 2539 * of two sector size assumptions 2540 */ 2541 sector_size = 512; 2542 } 2543 blk_queue_logical_block_size(sdp->request_queue, sector_size); 2544 blk_queue_physical_block_size(sdp->request_queue, 2545 sdkp->physical_block_size); 2546 sdkp->device->sector_size = sector_size; 2547 2548 if (sdkp->capacity > 0xffffffff) 2549 sdp->use_16_for_rw = 1; 2550 2551 } 2552 2553 /* 2554 * Print disk capacity 2555 */ 2556 static void 2557 sd_print_capacity(struct scsi_disk *sdkp, 2558 sector_t old_capacity) 2559 { 2560 int sector_size = sdkp->device->sector_size; 2561 char cap_str_2[10], cap_str_10[10]; 2562 2563 if (!sdkp->first_scan && old_capacity == sdkp->capacity) 2564 return; 2565 2566 string_get_size(sdkp->capacity, sector_size, 2567 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); 2568 string_get_size(sdkp->capacity, sector_size, 2569 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); 2570 2571 sd_printk(KERN_NOTICE, sdkp, 2572 "%llu %d-byte logical blocks: (%s/%s)\n", 2573 (unsigned long long)sdkp->capacity, 2574 sector_size, cap_str_10, cap_str_2); 2575 2576 if (sdkp->physical_block_size != sector_size) 2577 sd_printk(KERN_NOTICE, sdkp, 2578 "%u-byte physical blocks\n", 2579 sdkp->physical_block_size); 2580 } 2581 2582 /* called with buffer of length 512 */ 2583 static inline int 2584 sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage, 2585 unsigned char *buffer, int len, struct scsi_mode_data *data, 2586 struct scsi_sense_hdr *sshdr) 2587 { 2588 /* 2589 * If we must use MODE SENSE(10), make sure that the buffer length 2590 * is at least 8 bytes so that the mode sense header fits. 
2591 */ 2592 if (sdkp->device->use_10_for_ms && len < 8) 2593 len = 8; 2594 2595 return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len, 2596 SD_TIMEOUT, sdkp->max_retries, data, 2597 sshdr); 2598 } 2599 2600 /* 2601 * read write protect setting, if possible - called only in sd_revalidate_disk() 2602 * called with buffer of length SD_BUF_SIZE 2603 */ 2604 static void 2605 sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) 2606 { 2607 int res; 2608 struct scsi_device *sdp = sdkp->device; 2609 struct scsi_mode_data data; 2610 int old_wp = sdkp->write_prot; 2611 2612 set_disk_ro(sdkp->disk, 0); 2613 if (sdp->skip_ms_page_3f) { 2614 sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n"); 2615 return; 2616 } 2617 2618 if (sdp->use_192_bytes_for_3f) { 2619 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL); 2620 } else { 2621 /* 2622 * First attempt: ask for all pages (0x3F), but only 4 bytes. 2623 * We have to start carefully: some devices hang if we ask 2624 * for more than is available. 2625 */ 2626 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL); 2627 2628 /* 2629 * Second attempt: ask for page 0 When only page 0 is 2630 * implemented, a request for page 3F may return Sense Key 2631 * 5: Illegal Request, Sense Code 24: Invalid field in 2632 * CDB. 2633 */ 2634 if (res < 0) 2635 res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL); 2636 2637 /* 2638 * Third attempt: ask 255 bytes, as we did earlier. 2639 */ 2640 if (res < 0) 2641 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255, 2642 &data, NULL); 2643 } 2644 2645 if (res < 0) { 2646 sd_first_printk(KERN_WARNING, sdkp, 2647 "Test WP failed, assume Write Enabled\n"); 2648 } else { 2649 sdkp->write_prot = ((data.device_specific & 0x80) != 0); 2650 set_disk_ro(sdkp->disk, sdkp->write_prot); 2651 if (sdkp->first_scan || old_wp != sdkp->write_prot) { 2652 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", 2653 sdkp->write_prot ? "on" : "off"); 2654 sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer); 2655 } 2656 } 2657 } 2658 2659 /* 2660 * sd_read_cache_type - called only from sd_revalidate_disk() 2661 * called with buffer of length SD_BUF_SIZE 2662 */ 2663 static void 2664 sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) 2665 { 2666 int len = 0, res; 2667 struct scsi_device *sdp = sdkp->device; 2668 2669 int dbd; 2670 int modepage; 2671 int first_len; 2672 struct scsi_mode_data data; 2673 struct scsi_sense_hdr sshdr; 2674 int old_wce = sdkp->WCE; 2675 int old_rcd = sdkp->RCD; 2676 int old_dpofua = sdkp->DPOFUA; 2677 2678 2679 if (sdkp->cache_override) 2680 return; 2681 2682 first_len = 4; 2683 if (sdp->skip_ms_page_8) { 2684 if (sdp->type == TYPE_RBC) 2685 goto defaults; 2686 else { 2687 if (sdp->skip_ms_page_3f) 2688 goto defaults; 2689 modepage = 0x3F; 2690 if (sdp->use_192_bytes_for_3f) 2691 first_len = 192; 2692 dbd = 0; 2693 } 2694 } else if (sdp->type == TYPE_RBC) { 2695 modepage = 6; 2696 dbd = 8; 2697 } else { 2698 modepage = 8; 2699 dbd = 0; 2700 } 2701 2702 /* cautiously ask */ 2703 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len, 2704 &data, &sshdr); 2705 2706 if (res < 0) 2707 goto bad_sense; 2708 2709 if (!data.header_length) { 2710 modepage = 6; 2711 first_len = 0; 2712 sd_first_printk(KERN_ERR, sdkp, 2713 "Missing header in MODE_SENSE response\n"); 2714 } 2715 2716 /* that went OK, now ask for the proper length */ 2717 len = data.length; 2718 2719 /* 2720 * We're only interested in the first three bytes, actually. 
2721 * But the data cache page is defined for the first 20. 2722 */ 2723 if (len < 3) 2724 goto bad_sense; 2725 else if (len > SD_BUF_SIZE) { 2726 sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " 2727 "data from %d to %d bytes\n", len, SD_BUF_SIZE); 2728 len = SD_BUF_SIZE; 2729 } 2730 if (modepage == 0x3F && sdp->use_192_bytes_for_3f) 2731 len = 192; 2732 2733 /* Get the data */ 2734 if (len > first_len) 2735 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len, 2736 &data, &sshdr); 2737 2738 if (!res) { 2739 int offset = data.header_length + data.block_descriptor_length; 2740 2741 while (offset < len) { 2742 u8 page_code = buffer[offset] & 0x3F; 2743 u8 spf = buffer[offset] & 0x40; 2744 2745 if (page_code == 8 || page_code == 6) { 2746 /* We're interested only in the first 3 bytes. 2747 */ 2748 if (len - offset <= 2) { 2749 sd_first_printk(KERN_ERR, sdkp, 2750 "Incomplete mode parameter " 2751 "data\n"); 2752 goto defaults; 2753 } else { 2754 modepage = page_code; 2755 goto Page_found; 2756 } 2757 } else { 2758 /* Go to the next page */ 2759 if (spf && len - offset > 3) 2760 offset += 4 + (buffer[offset+2] << 8) + 2761 buffer[offset+3]; 2762 else if (!spf && len - offset > 1) 2763 offset += 2 + buffer[offset+1]; 2764 else { 2765 sd_first_printk(KERN_ERR, sdkp, 2766 "Incomplete mode " 2767 "parameter data\n"); 2768 goto defaults; 2769 } 2770 } 2771 } 2772 2773 sd_first_printk(KERN_WARNING, sdkp, 2774 "No Caching mode page found\n"); 2775 goto defaults; 2776 2777 Page_found: 2778 if (modepage == 8) { 2779 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); 2780 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); 2781 } else { 2782 sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0); 2783 sdkp->RCD = 0; 2784 } 2785 2786 sdkp->DPOFUA = (data.device_specific & 0x10) != 0; 2787 if (sdp->broken_fua) { 2788 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); 2789 sdkp->DPOFUA = 0; 2790 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && 2791 !sdkp->device->use_16_for_rw) { 2792 sd_first_printk(KERN_NOTICE, sdkp, 2793 "Uses READ/WRITE(6), disabling FUA\n"); 2794 sdkp->DPOFUA = 0; 2795 } 2796 2797 /* No cache flush allowed for write protected devices */ 2798 if (sdkp->WCE && sdkp->write_prot) 2799 sdkp->WCE = 0; 2800 2801 if (sdkp->first_scan || old_wce != sdkp->WCE || 2802 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA) 2803 sd_printk(KERN_NOTICE, sdkp, 2804 "Write cache: %s, read cache: %s, %s\n", 2805 sdkp->WCE ? "enabled" : "disabled", 2806 sdkp->RCD ? "disabled" : "enabled", 2807 sdkp->DPOFUA ? "supports DPO and FUA" 2808 : "doesn't support DPO or FUA"); 2809 2810 return; 2811 } 2812 2813 bad_sense: 2814 if (scsi_sense_valid(&sshdr) && 2815 sshdr.sense_key == ILLEGAL_REQUEST && 2816 sshdr.asc == 0x24 && sshdr.ascq == 0x0) 2817 /* Invalid field in CDB */ 2818 sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n"); 2819 else 2820 sd_first_printk(KERN_ERR, sdkp, 2821 "Asking for cache data failed\n"); 2822 2823 defaults: 2824 if (sdp->wce_default_on) { 2825 sd_first_printk(KERN_NOTICE, sdkp, 2826 "Assuming drive cache: write back\n"); 2827 sdkp->WCE = 1; 2828 } else { 2829 sd_first_printk(KERN_WARNING, sdkp, 2830 "Assuming drive cache: write through\n"); 2831 sdkp->WCE = 0; 2832 } 2833 sdkp->RCD = 0; 2834 sdkp->DPOFUA = 0; 2835 } 2836 2837 /* 2838 * The ATO bit indicates whether the DIF application tag is available 2839 * for use by the operating system. 
2840 */ 2841 static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) 2842 { 2843 int res, offset; 2844 struct scsi_device *sdp = sdkp->device; 2845 struct scsi_mode_data data; 2846 struct scsi_sense_hdr sshdr; 2847 2848 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 2849 return; 2850 2851 if (sdkp->protection_type == 0) 2852 return; 2853 2854 res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT, 2855 sdkp->max_retries, &data, &sshdr); 2856 2857 if (res < 0 || !data.header_length || 2858 data.length < 6) { 2859 sd_first_printk(KERN_WARNING, sdkp, 2860 "getting Control mode page failed, assume no ATO\n"); 2861 2862 if (scsi_sense_valid(&sshdr)) 2863 sd_print_sense_hdr(sdkp, &sshdr); 2864 2865 return; 2866 } 2867 2868 offset = data.header_length + data.block_descriptor_length; 2869 2870 if ((buffer[offset] & 0x3f) != 0x0a) { 2871 sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n"); 2872 return; 2873 } 2874 2875 if ((buffer[offset + 5] & 0x80) == 0) 2876 return; 2877 2878 sdkp->ATO = 1; 2879 2880 return; 2881 } 2882 2883 /** 2884 * sd_read_block_limits - Query disk device for preferred I/O sizes. 2885 * @sdkp: disk to query 2886 */ 2887 static void sd_read_block_limits(struct scsi_disk *sdkp) 2888 { 2889 unsigned int sector_sz = sdkp->device->sector_size; 2890 const int vpd_len = 64; 2891 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); 2892 2893 if (!buffer || 2894 /* Block Limits VPD */ 2895 scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len)) 2896 goto out; 2897 2898 blk_queue_io_min(sdkp->disk->queue, 2899 get_unaligned_be16(&buffer[6]) * sector_sz); 2900 2901 sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]); 2902 sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]); 2903 2904 if (buffer[3] == 0x3c) { 2905 unsigned int lba_count, desc_count; 2906 2907 sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]); 2908 2909 if (!sdkp->lbpme) 2910 goto out; 2911 2912 lba_count = get_unaligned_be32(&buffer[20]); 2913 desc_count = get_unaligned_be32(&buffer[24]); 2914 2915 if (lba_count && desc_count) 2916 sdkp->max_unmap_blocks = lba_count; 2917 2918 sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]); 2919 2920 if (buffer[32] & 0x80) 2921 sdkp->unmap_alignment = 2922 get_unaligned_be32(&buffer[32]) & ~(1 << 31); 2923 2924 if (!sdkp->lbpvpd) { /* LBP VPD page not provided */ 2925 2926 if (sdkp->max_unmap_blocks) 2927 sd_config_discard(sdkp, SD_LBP_UNMAP); 2928 else 2929 sd_config_discard(sdkp, SD_LBP_WS16); 2930 2931 } else { /* LBP VPD page tells us what to use */ 2932 if (sdkp->lbpu && sdkp->max_unmap_blocks) 2933 sd_config_discard(sdkp, SD_LBP_UNMAP); 2934 else if (sdkp->lbpws) 2935 sd_config_discard(sdkp, SD_LBP_WS16); 2936 else if (sdkp->lbpws10) 2937 sd_config_discard(sdkp, SD_LBP_WS10); 2938 else 2939 sd_config_discard(sdkp, SD_LBP_DISABLE); 2940 } 2941 } 2942 2943 out: 2944 kfree(buffer); 2945 } 2946 2947 /** 2948 * sd_read_block_characteristics - Query block dev. 
characteristics 2949 * @sdkp: disk to query 2950 */ 2951 static void sd_read_block_characteristics(struct scsi_disk *sdkp) 2952 { 2953 struct request_queue *q = sdkp->disk->queue; 2954 unsigned char *buffer; 2955 u16 rot; 2956 const int vpd_len = 64; 2957 2958 buffer = kmalloc(vpd_len, GFP_KERNEL); 2959 2960 if (!buffer || 2961 /* Block Device Characteristics VPD */ 2962 scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len)) 2963 goto out; 2964 2965 rot = get_unaligned_be16(&buffer[4]); 2966 2967 if (rot == 1) { 2968 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 2969 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); 2970 } 2971 2972 if (sdkp->device->type == TYPE_ZBC) { 2973 /* Host-managed */ 2974 blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM); 2975 } else { 2976 sdkp->zoned = (buffer[8] >> 4) & 3; 2977 if (sdkp->zoned == 1) { 2978 /* Host-aware */ 2979 blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA); 2980 } else { 2981 /* Regular disk or drive managed disk */ 2982 blk_queue_set_zoned(sdkp->disk, BLK_ZONED_NONE); 2983 } 2984 } 2985 2986 if (!sdkp->first_scan) 2987 goto out; 2988 2989 if (blk_queue_is_zoned(q)) { 2990 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", 2991 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); 2992 } else { 2993 if (sdkp->zoned == 1) 2994 sd_printk(KERN_NOTICE, sdkp, 2995 "Host-aware SMR disk used as regular disk\n"); 2996 else if (sdkp->zoned == 2) 2997 sd_printk(KERN_NOTICE, sdkp, 2998 "Drive-managed SMR disk\n"); 2999 } 3000 3001 out: 3002 kfree(buffer); 3003 } 3004 3005 /** 3006 * sd_read_block_provisioning - Query provisioning VPD page 3007 * @sdkp: disk to query 3008 */ 3009 static void sd_read_block_provisioning(struct scsi_disk *sdkp) 3010 { 3011 unsigned char *buffer; 3012 const int vpd_len = 8; 3013 3014 if (sdkp->lbpme == 0) 3015 return; 3016 3017 buffer = kmalloc(vpd_len, GFP_KERNEL); 3018 3019 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len)) 3020 goto out; 3021 3022 sdkp->lbpvpd = 1; 3023 sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */ 3024 sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */ 3025 sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */ 3026 3027 out: 3028 kfree(buffer); 3029 } 3030 3031 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) 3032 { 3033 struct scsi_device *sdev = sdkp->device; 3034 3035 if (sdev->host->no_write_same) { 3036 sdev->no_write_same = 1; 3037 3038 return; 3039 } 3040 3041 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) { 3042 /* too large values might cause issues with arcmsr */ 3043 int vpd_buf_len = 64; 3044 3045 sdev->no_report_opcodes = 1; 3046 3047 /* Disable WRITE SAME if REPORT SUPPORTED OPERATION 3048 * CODES is unsupported and the device has an ATA 3049 * Information VPD page (SAT). 
3050 */ 3051 if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len)) 3052 sdev->no_write_same = 1; 3053 } 3054 3055 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1) 3056 sdkp->ws16 = 1; 3057 3058 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1) 3059 sdkp->ws10 = 1; 3060 } 3061 3062 static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer) 3063 { 3064 struct scsi_device *sdev = sdkp->device; 3065 3066 if (!sdev->security_supported) 3067 return; 3068 3069 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, 3070 SECURITY_PROTOCOL_IN) == 1 && 3071 scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, 3072 SECURITY_PROTOCOL_OUT) == 1) 3073 sdkp->security = 1; 3074 } 3075 3076 static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf) 3077 { 3078 return logical_to_sectors(sdkp->device, get_unaligned_be64(buf)); 3079 } 3080 3081 /** 3082 * sd_read_cpr - Query concurrent positioning ranges 3083 * @sdkp: disk to query 3084 */ 3085 static void sd_read_cpr(struct scsi_disk *sdkp) 3086 { 3087 struct blk_independent_access_ranges *iars = NULL; 3088 unsigned char *buffer = NULL; 3089 unsigned int nr_cpr = 0; 3090 int i, vpd_len, buf_len = SD_BUF_SIZE; 3091 u8 *desc; 3092 3093 /* 3094 * We need to have the capacity set first for the block layer to be 3095 * able to check the ranges. 3096 */ 3097 if (sdkp->first_scan) 3098 return; 3099 3100 if (!sdkp->capacity) 3101 goto out; 3102 3103 /* 3104 * Concurrent Positioning Ranges VPD: there can be at most 256 ranges, 3105 * leading to a maximum page size of 64 + 256*32 bytes. 3106 */ 3107 buf_len = 64 + 256*32; 3108 buffer = kmalloc(buf_len, GFP_KERNEL); 3109 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len)) 3110 goto out; 3111 3112 /* We must have at least a 64B header and one 32B range descriptor */ 3113 vpd_len = get_unaligned_be16(&buffer[2]) + 3; 3114 if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) { 3115 sd_printk(KERN_ERR, sdkp, 3116 "Invalid Concurrent Positioning Ranges VPD page\n"); 3117 goto out; 3118 } 3119 3120 nr_cpr = (vpd_len - 64) / 32; 3121 if (nr_cpr == 1) { 3122 nr_cpr = 0; 3123 goto out; 3124 } 3125 3126 iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr); 3127 if (!iars) { 3128 nr_cpr = 0; 3129 goto out; 3130 } 3131 3132 desc = &buffer[64]; 3133 for (i = 0; i < nr_cpr; i++, desc += 32) { 3134 if (desc[0] != i) { 3135 sd_printk(KERN_ERR, sdkp, 3136 "Invalid Concurrent Positioning Range number\n"); 3137 nr_cpr = 0; 3138 break; 3139 } 3140 3141 iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8); 3142 iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16); 3143 } 3144 3145 out: 3146 disk_set_independent_access_ranges(sdkp->disk, iars); 3147 if (nr_cpr && sdkp->nr_actuators != nr_cpr) { 3148 sd_printk(KERN_NOTICE, sdkp, 3149 "%u concurrent positioning ranges\n", nr_cpr); 3150 sdkp->nr_actuators = nr_cpr; 3151 } 3152 3153 kfree(buffer); 3154 } 3155 3156 /* 3157 * Determine the device's preferred I/O size for reads and writes 3158 * unless the reported value is unreasonably small, large, not a 3159 * multiple of the physical block size, or simply garbage. 
3160 */ 3161 static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, 3162 unsigned int dev_max) 3163 { 3164 struct scsi_device *sdp = sdkp->device; 3165 unsigned int opt_xfer_bytes = 3166 logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 3167 3168 if (sdkp->opt_xfer_blocks == 0) 3169 return false; 3170 3171 if (sdkp->opt_xfer_blocks > dev_max) { 3172 sd_first_printk(KERN_WARNING, sdkp, 3173 "Optimal transfer size %u logical blocks " \ 3174 "> dev_max (%u logical blocks)\n", 3175 sdkp->opt_xfer_blocks, dev_max); 3176 return false; 3177 } 3178 3179 if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) { 3180 sd_first_printk(KERN_WARNING, sdkp, 3181 "Optimal transfer size %u logical blocks " \ 3182 "> sd driver limit (%u logical blocks)\n", 3183 sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS); 3184 return false; 3185 } 3186 3187 if (opt_xfer_bytes < PAGE_SIZE) { 3188 sd_first_printk(KERN_WARNING, sdkp, 3189 "Optimal transfer size %u bytes < " \ 3190 "PAGE_SIZE (%u bytes)\n", 3191 opt_xfer_bytes, (unsigned int)PAGE_SIZE); 3192 return false; 3193 } 3194 3195 if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) { 3196 sd_first_printk(KERN_WARNING, sdkp, 3197 "Optimal transfer size %u bytes not a " \ 3198 "multiple of physical block size (%u bytes)\n", 3199 opt_xfer_bytes, sdkp->physical_block_size); 3200 return false; 3201 } 3202 3203 sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n", 3204 opt_xfer_bytes); 3205 return true; 3206 } 3207 3208 /** 3209 * sd_revalidate_disk - called the first time a new disk is seen, 3210 * performs disk spin up, read_capacity, etc. 3211 * @disk: struct gendisk we care about 3212 **/ 3213 static int sd_revalidate_disk(struct gendisk *disk) 3214 { 3215 struct scsi_disk *sdkp = scsi_disk(disk); 3216 struct scsi_device *sdp = sdkp->device; 3217 struct request_queue *q = sdkp->disk->queue; 3218 sector_t old_capacity = sdkp->capacity; 3219 unsigned char *buffer; 3220 unsigned int dev_max, rw_max; 3221 3222 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, 3223 "sd_revalidate_disk\n")); 3224 3225 /* 3226 * If the device is offline, don't try and read capacity or any 3227 * of the other niceties. 3228 */ 3229 if (!scsi_device_online(sdp)) 3230 goto out; 3231 3232 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL); 3233 if (!buffer) { 3234 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory " 3235 "allocation failure.\n"); 3236 goto out; 3237 } 3238 3239 sd_spinup_disk(sdkp); 3240 3241 /* 3242 * Without media there is no reason to ask; moreover, some devices 3243 * react badly if we do. 3244 */ 3245 if (sdkp->media_present) { 3246 sd_read_capacity(sdkp, buffer); 3247 3248 /* 3249 * set the default to rotational. All non-rotational devices 3250 * support the block characteristics VPD page, which will 3251 * cause this to be updated correctly and any device which 3252 * doesn't support it should be treated as rotational. 
3253 */ 3254 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); 3255 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); 3256 3257 if (scsi_device_supports_vpd(sdp)) { 3258 sd_read_block_provisioning(sdkp); 3259 sd_read_block_limits(sdkp); 3260 sd_read_block_characteristics(sdkp); 3261 sd_zbc_read_zones(sdkp, buffer); 3262 } 3263 3264 sd_print_capacity(sdkp, old_capacity); 3265 3266 sd_read_write_protect_flag(sdkp, buffer); 3267 sd_read_cache_type(sdkp, buffer); 3268 sd_read_app_tag_own(sdkp, buffer); 3269 sd_read_write_same(sdkp, buffer); 3270 sd_read_security(sdkp, buffer); 3271 sd_read_cpr(sdkp); 3272 } 3273 3274 /* 3275 * We now have all cache related info, determine how we deal 3276 * with flush requests. 3277 */ 3278 sd_set_flush_flag(sdkp); 3279 3280 /* Initial block count limit based on CDB TRANSFER LENGTH field size. */ 3281 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; 3282 3283 /* Some devices report a maximum block count for READ/WRITE requests. */ 3284 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); 3285 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); 3286 3287 if (sd_validate_opt_xfer_size(sdkp, dev_max)) { 3288 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 3289 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); 3290 } else { 3291 q->limits.io_opt = 0; 3292 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), 3293 (sector_t)BLK_DEF_MAX_SECTORS); 3294 } 3295 3296 /* Do not exceed controller limit */ 3297 rw_max = min(rw_max, queue_max_hw_sectors(q)); 3298 3299 /* 3300 * Only update max_sectors if previously unset or if the current value 3301 * exceeds the capabilities of the hardware. 3302 */ 3303 if (sdkp->first_scan || 3304 q->limits.max_sectors > q->limits.max_dev_sectors || 3305 q->limits.max_sectors > q->limits.max_hw_sectors) 3306 q->limits.max_sectors = rw_max; 3307 3308 sdkp->first_scan = 0; 3309 3310 set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity)); 3311 sd_config_write_same(sdkp); 3312 kfree(buffer); 3313 3314 /* 3315 * For a zoned drive, revalidating the zones can be done only once 3316 * the gendisk capacity is set. So if this fails, set back the gendisk 3317 * capacity to 0. 3318 */ 3319 if (sd_zbc_revalidate_zones(sdkp)) 3320 set_capacity_and_notify(disk, 0); 3321 3322 out: 3323 return 0; 3324 } 3325 3326 /** 3327 * sd_unlock_native_capacity - unlock native capacity 3328 * @disk: struct gendisk to set capacity for 3329 * 3330 * Block layer calls this function if it detects that partitions 3331 * on @disk reach beyond the end of the device. If the SCSI host 3332 * implements ->unlock_native_capacity() method, it's invoked to 3333 * give it a chance to adjust the device capacity. 3334 * 3335 * CONTEXT: 3336 * Defined by block layer. Might sleep. 3337 */ 3338 static void sd_unlock_native_capacity(struct gendisk *disk) 3339 { 3340 struct scsi_device *sdev = scsi_disk(disk)->device; 3341 3342 if (sdev->host->hostt->unlock_native_capacity) 3343 sdev->host->hostt->unlock_native_capacity(sdev); 3344 } 3345 3346 /** 3347 * sd_format_disk_name - format disk name 3348 * @prefix: name prefix - ie. "sd" for SCSI disks 3349 * @index: index of the disk to format name for 3350 * @buf: output buffer 3351 * @buflen: length of the output buffer 3352 * 3353 * SCSI disk names starts at sda. The 26th device is sdz and the 3354 * 27th is sdaa. The last one for two lettered suffix is sdzz 3355 * which is followed by sdaaa. 
3356 * 3357 * This is basically 26 base counting with one extra 'nil' entry 3358 * at the beginning from the second digit on and can be 3359 * determined using similar method as 26 base conversion with the 3360 * index shifted -1 after each digit is computed. 3361 * 3362 * CONTEXT: 3363 * Don't care. 3364 * 3365 * RETURNS: 3366 * 0 on success, -errno on failure. 3367 */ 3368 static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen) 3369 { 3370 const int base = 'z' - 'a' + 1; 3371 char *begin = buf + strlen(prefix); 3372 char *end = buf + buflen; 3373 char *p; 3374 int unit; 3375 3376 p = end - 1; 3377 *p = '\0'; 3378 unit = base; 3379 do { 3380 if (p == begin) 3381 return -EINVAL; 3382 *--p = 'a' + (index % unit); 3383 index = (index / unit) - 1; 3384 } while (index >= 0); 3385 3386 memmove(begin, p, end - p); 3387 memcpy(buf, prefix, strlen(prefix)); 3388 3389 return 0; 3390 } 3391 3392 /** 3393 * sd_probe - called during driver initialization and whenever a 3394 * new scsi device is attached to the system. It is called once 3395 * for each scsi device (not just disks) present. 3396 * @dev: pointer to device object 3397 * 3398 * Returns 0 if successful (or not interested in this scsi device 3399 * (e.g. scanner)); 1 when there is an error. 3400 * 3401 * Note: this function is invoked from the scsi mid-level. 3402 * This function sets up the mapping between a given 3403 * <host,channel,id,lun> (found in sdp) and new device name 3404 * (e.g. /dev/sda). More precisely it is the block device major 3405 * and minor number that is chosen here. 3406 * 3407 * Assume sd_probe is not re-entrant (for time being) 3408 * Also think about sd_probe() and sd_remove() running coincidentally. 3409 **/ 3410 static int sd_probe(struct device *dev) 3411 { 3412 struct scsi_device *sdp = to_scsi_device(dev); 3413 struct scsi_disk *sdkp; 3414 struct gendisk *gd; 3415 int index; 3416 int error; 3417 3418 scsi_autopm_get_device(sdp); 3419 error = -ENODEV; 3420 if (sdp->type != TYPE_DISK && 3421 sdp->type != TYPE_ZBC && 3422 sdp->type != TYPE_MOD && 3423 sdp->type != TYPE_RBC) 3424 goto out; 3425 3426 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) { 3427 sdev_printk(KERN_WARNING, sdp, 3428 "Unsupported ZBC host-managed device.\n"); 3429 goto out; 3430 } 3431 3432 SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp, 3433 "sd_probe\n")); 3434 3435 error = -ENOMEM; 3436 sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL); 3437 if (!sdkp) 3438 goto out; 3439 3440 gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE, 3441 &sd_bio_compl_lkclass); 3442 if (!gd) 3443 goto out_free; 3444 3445 index = ida_alloc(&sd_index_ida, GFP_KERNEL); 3446 if (index < 0) { 3447 sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n"); 3448 goto out_put; 3449 } 3450 3451 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); 3452 if (error) { 3453 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); 3454 goto out_free_index; 3455 } 3456 3457 sdkp->device = sdp; 3458 sdkp->driver = &sd_template; 3459 sdkp->disk = gd; 3460 sdkp->index = index; 3461 sdkp->max_retries = SD_MAX_RETRIES; 3462 atomic_set(&sdkp->openers, 0); 3463 atomic_set(&sdkp->device->ioerr_cnt, 0); 3464 3465 if (!sdp->request_queue->rq_timeout) { 3466 if (sdp->type != TYPE_MOD) 3467 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); 3468 else 3469 blk_queue_rq_timeout(sdp->request_queue, 3470 SD_MOD_TIMEOUT); 3471 } 3472 3473 device_initialize(&sdkp->dev); 3474 sdkp->dev.parent = get_device(dev); 3475 
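/* sdkp->dev is registered as a child of the SCSI device under sd_disk_class; the final put_device() on it ends up in scsi_disk_release(), which frees the scsi_disk. */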
sdkp->dev.class = &sd_disk_class; 3476 dev_set_name(&sdkp->dev, "%s", dev_name(dev)); 3477 3478 error = device_add(&sdkp->dev); 3479 if (error) { 3480 put_device(&sdkp->dev); 3481 goto out; 3482 } 3483 3484 dev_set_drvdata(dev, sdkp); 3485 3486 gd->major = sd_major((index & 0xf0) >> 4); 3487 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 3488 gd->minors = SD_MINORS; 3489 3490 gd->fops = &sd_fops; 3491 gd->private_data = &sdkp->driver; 3492 3493 /* defaults, until the device tells us otherwise */ 3494 sdp->sector_size = 512; 3495 sdkp->capacity = 0; 3496 sdkp->media_present = 1; 3497 sdkp->write_prot = 0; 3498 sdkp->cache_override = 0; 3499 sdkp->WCE = 0; 3500 sdkp->RCD = 0; 3501 sdkp->ATO = 0; 3502 sdkp->first_scan = 1; 3503 sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS; 3504 3505 sd_revalidate_disk(gd); 3506 3507 if (sdp->removable) { 3508 gd->flags |= GENHD_FL_REMOVABLE; 3509 gd->events |= DISK_EVENT_MEDIA_CHANGE; 3510 gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT; 3511 } 3512 3513 blk_pm_runtime_init(sdp->request_queue, dev); 3514 if (sdp->rpm_autosuspend) { 3515 pm_runtime_set_autosuspend_delay(dev, 3516 sdp->host->hostt->rpm_autosuspend_delay); 3517 } 3518 3519 error = device_add_disk(dev, gd, NULL); 3520 if (error) { 3521 put_device(&sdkp->dev); 3522 goto out; 3523 } 3524 3525 if (sdkp->capacity) 3526 sd_dif_config_host(sdkp); 3527 3528 sd_revalidate_disk(gd); 3529 3530 if (sdkp->security) { 3531 sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit); 3532 if (sdkp->opal_dev) 3533 sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n"); 3534 } 3535 3536 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 3537 sdp->removable ? "removable " : ""); 3538 scsi_autopm_put_device(sdp); 3539 3540 return 0; 3541 3542 out_free_index: 3543 ida_free(&sd_index_ida, index); 3544 out_put: 3545 put_disk(gd); 3546 out_free: 3547 sd_zbc_release_disk(sdkp); 3548 kfree(sdkp); 3549 out: 3550 scsi_autopm_put_device(sdp); 3551 return error; 3552 } 3553 3554 /** 3555 * sd_remove - called whenever a scsi disk (previously recognized by 3556 * sd_probe) is detached from the system. It is called (potentially 3557 * multiple times) during sd module unload. 3558 * @dev: pointer to device object 3559 * 3560 * Note: this function is invoked from the scsi mid-level. 3561 * This function potentially frees up a device name (e.g. /dev/sdc) 3562 * that could be re-used by a subsequent sd_probe(). 3563 * This function is not called when the built-in sd driver is "exit-ed". 3564 **/ 3565 static int sd_remove(struct device *dev) 3566 { 3567 struct scsi_disk *sdkp; 3568 3569 sdkp = dev_get_drvdata(dev); 3570 scsi_autopm_get_device(sdkp->device); 3571 3572 device_del(&sdkp->dev); 3573 del_gendisk(sdkp->disk); 3574 sd_shutdown(dev); 3575 3576 free_opal_dev(sdkp->opal_dev); 3577 3578 mutex_lock(&sd_ref_mutex); 3579 dev_set_drvdata(dev, NULL); 3580 put_device(&sdkp->dev); 3581 mutex_unlock(&sd_ref_mutex); 3582 3583 return 0; 3584 } 3585 3586 /** 3587 * scsi_disk_release - Called to free the scsi_disk structure 3588 * @dev: pointer to embedded class device 3589 * 3590 * sd_ref_mutex must be held entering this routine. Because it is 3591 * called on last put, you should always use the scsi_disk_get() 3592 * scsi_disk_put() helpers which manipulate the semaphore directly 3593 * and never do a direct put_device. 
3594 **/ 3595 static void scsi_disk_release(struct device *dev) 3596 { 3597 struct scsi_disk *sdkp = to_scsi_disk(dev); 3598 struct gendisk *disk = sdkp->disk; 3599 struct request_queue *q = disk->queue; 3600 3601 ida_free(&sd_index_ida, sdkp->index); 3602 3603 /* 3604 * Wait until all requests that are in progress have completed. 3605 * This is necessary to avoid that e.g. scsi_end_request() crashes 3606 * due to clearing the disk->private_data pointer. Wait from inside 3607 * scsi_disk_release() instead of from sd_release() to avoid that 3608 * freezing and unfreezing the request queue affects user space I/O 3609 * in case multiple processes open a /dev/sd... node concurrently. 3610 */ 3611 blk_mq_freeze_queue(q); 3612 blk_mq_unfreeze_queue(q); 3613 3614 disk->private_data = NULL; 3615 put_disk(disk); 3616 put_device(&sdkp->device->sdev_gendev); 3617 3618 sd_zbc_release_disk(sdkp); 3619 3620 kfree(sdkp); 3621 } 3622 3623 static int sd_start_stop_device(struct scsi_disk *sdkp, int start) 3624 { 3625 unsigned char cmd[6] = { START_STOP }; /* START_VALID */ 3626 struct scsi_sense_hdr sshdr; 3627 struct scsi_device *sdp = sdkp->device; 3628 int res; 3629 3630 if (start) 3631 cmd[4] |= 1; /* START */ 3632 3633 if (sdp->start_stop_pwr_cond) 3634 cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */ 3635 3636 if (!scsi_device_online(sdp)) 3637 return -ENODEV; 3638 3639 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, 3640 SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL); 3641 if (res) { 3642 sd_print_result(sdkp, "Start/Stop Unit failed", res); 3643 if (res > 0 && scsi_sense_valid(&sshdr)) { 3644 sd_print_sense_hdr(sdkp, &sshdr); 3645 /* 0x3a is medium not present */ 3646 if (sshdr.asc == 0x3a) 3647 res = 0; 3648 } 3649 } 3650 3651 /* SCSI error codes must not go to the generic layer */ 3652 if (res) 3653 return -EIO; 3654 3655 return 0; 3656 } 3657 3658 /* 3659 * Send a SYNCHRONIZE CACHE instruction down to the device through 3660 * the normal SCSI command structure. Wait for the command to 3661 * complete. 3662 */ 3663 static void sd_shutdown(struct device *dev) 3664 { 3665 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3666 3667 if (!sdkp) 3668 return; /* this can happen */ 3669 3670 if (pm_runtime_suspended(dev)) 3671 return; 3672 3673 if (sdkp->WCE && sdkp->media_present) { 3674 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3675 sd_sync_cache(sdkp, NULL); 3676 } 3677 3678 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { 3679 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 3680 sd_start_stop_device(sdkp, 0); 3681 } 3682 } 3683 3684 static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) 3685 { 3686 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3687 struct scsi_sense_hdr sshdr; 3688 int ret = 0; 3689 3690 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ 3691 return 0; 3692 3693 if (sdkp->WCE && sdkp->media_present) { 3694 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3695 ret = sd_sync_cache(sdkp, &sshdr); 3696 3697 if (ret) { 3698 /* ignore OFFLINE device */ 3699 if (ret == -ENODEV) 3700 return 0; 3701 3702 if (!scsi_sense_valid(&sshdr) || 3703 sshdr.sense_key != ILLEGAL_REQUEST) 3704 return ret; 3705 3706 /* 3707 * sshdr.sense_key == ILLEGAL_REQUEST means this drive 3708 * doesn't support sync. There's not much to do and 3709 * suspend shouldn't fail. 
3710 */ 3711 ret = 0; 3712 } 3713 } 3714 3715 if (sdkp->device->manage_start_stop) { 3716 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 3717 /* an error is not worth aborting a system sleep */ 3718 ret = sd_start_stop_device(sdkp, 0); 3719 if (ignore_stop_errors) 3720 ret = 0; 3721 } 3722 3723 return ret; 3724 } 3725 3726 static int sd_suspend_system(struct device *dev) 3727 { 3728 if (pm_runtime_suspended(dev)) 3729 return 0; 3730 3731 return sd_suspend_common(dev, true); 3732 } 3733 3734 static int sd_suspend_runtime(struct device *dev) 3735 { 3736 return sd_suspend_common(dev, false); 3737 } 3738 3739 static int sd_resume(struct device *dev) 3740 { 3741 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3742 int ret; 3743 3744 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ 3745 return 0; 3746 3747 if (!sdkp->device->manage_start_stop) 3748 return 0; 3749 3750 sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); 3751 ret = sd_start_stop_device(sdkp, 1); 3752 if (!ret) 3753 opal_unlock_from_suspend(sdkp->opal_dev); 3754 return ret; 3755 } 3756 3757 static int sd_resume_system(struct device *dev) 3758 { 3759 if (pm_runtime_suspended(dev)) 3760 return 0; 3761 3762 return sd_resume(dev); 3763 } 3764 3765 static int sd_resume_runtime(struct device *dev) 3766 { 3767 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3768 struct scsi_device *sdp; 3769 3770 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ 3771 return 0; 3772 3773 sdp = sdkp->device; 3774 3775 if (sdp->ignore_media_change) { 3776 /* clear the device's sense data */ 3777 static const u8 cmd[10] = { REQUEST_SENSE }; 3778 3779 if (scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, 3780 NULL, sdp->request_queue->rq_timeout, 1, 0, 3781 RQF_PM, NULL)) 3782 sd_printk(KERN_NOTICE, sdkp, 3783 "Failed to clear sense data\n"); 3784 } 3785 3786 return sd_resume(dev); 3787 } 3788 3789 /** 3790 * init_sd - entry point for this driver (both when built in or when 3791 * a module). 3792 * 3793 * Note: this function registers this driver with the scsi mid-level. 
3794 **/ 3795 static int __init init_sd(void) 3796 { 3797 int majors = 0, i, err; 3798 3799 SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n")); 3800 3801 for (i = 0; i < SD_MAJORS; i++) { 3802 if (__register_blkdev(sd_major(i), "sd", sd_default_probe)) 3803 continue; 3804 majors++; 3805 } 3806 3807 if (!majors) 3808 return -ENODEV; 3809 3810 err = class_register(&sd_disk_class); 3811 if (err) 3812 goto err_out; 3813 3814 sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE, 3815 0, 0, NULL); 3816 if (!sd_cdb_cache) { 3817 printk(KERN_ERR "sd: can't init extended cdb cache\n"); 3818 err = -ENOMEM; 3819 goto err_out_class; 3820 } 3821 3822 sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache); 3823 if (!sd_cdb_pool) { 3824 printk(KERN_ERR "sd: can't init extended cdb pool\n"); 3825 err = -ENOMEM; 3826 goto err_out_cache; 3827 } 3828 3829 sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0); 3830 if (!sd_page_pool) { 3831 printk(KERN_ERR "sd: can't init discard page pool\n"); 3832 err = -ENOMEM; 3833 goto err_out_ppool; 3834 } 3835 3836 err = scsi_register_driver(&sd_template.gendrv); 3837 if (err) 3838 goto err_out_driver; 3839 3840 return 0; 3841 3842 err_out_driver: 3843 mempool_destroy(sd_page_pool); 3844 3845 err_out_ppool: 3846 mempool_destroy(sd_cdb_pool); 3847 3848 err_out_cache: 3849 kmem_cache_destroy(sd_cdb_cache); 3850 3851 err_out_class: 3852 class_unregister(&sd_disk_class); 3853 err_out: 3854 for (i = 0; i < SD_MAJORS; i++) 3855 unregister_blkdev(sd_major(i), "sd"); 3856 return err; 3857 } 3858 3859 /** 3860 * exit_sd - exit point for this driver (when it is a module). 3861 * 3862 * Note: this function unregisters this driver from the scsi mid-level. 3863 **/ 3864 static void __exit exit_sd(void) 3865 { 3866 int i; 3867 3868 SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n")); 3869 3870 scsi_unregister_driver(&sd_template.gendrv); 3871 mempool_destroy(sd_cdb_pool); 3872 mempool_destroy(sd_page_pool); 3873 kmem_cache_destroy(sd_cdb_cache); 3874 3875 class_unregister(&sd_disk_class); 3876 3877 for (i = 0; i < SD_MAJORS; i++) 3878 unregister_blkdev(sd_major(i), "sd"); 3879 } 3880 3881 module_init(init_sd); 3882 module_exit(exit_sd); 3883 3884 void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) 3885 { 3886 scsi_print_sense_hdr(sdkp->device, 3887 sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr); 3888 } 3889 3890 void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result) 3891 { 3892 const char *hb_string = scsi_hostbyte_string(result); 3893 3894 if (hb_string) 3895 sd_printk(KERN_INFO, sdkp, 3896 "%s: Result: hostbyte=%s driverbyte=%s\n", msg, 3897 hb_string ? hb_string : "invalid", 3898 "DRIVER_OK"); 3899 else 3900 sd_printk(KERN_INFO, sdkp, 3901 "%s: Result: hostbyte=0x%02x driverbyte=%s\n", 3902 msg, host_byte(result), "DRIVER_OK"); 3903 } 3904