// SPDX-License-Identifier: GPL-2.0-only
/*
 *	sd.c Copyright (C) 1992 Drew Eckhardt
 *	     Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 *	Linux scsi disk driver
 *		Initial versions: Drew Eckhardt
 *		Subsequent revisions: Eric Youngdale
 *	Modification history:
 *	 - Drew Eckhardt <drew@colorado.edu> original
 *	 - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
 *	   outstanding request, and other enhancements.
 *	   Support loadable low-level scsi drivers.
 *	 - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
 *	   eight major numbers.
 *	 - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
 *	 - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
 *	   sd_init and cleanups.
 *	 - Alex Davis <letmein@erols.com> Fix problem where partition info
 *	   not being read in sd_open. Fix problem where removable media
 *	   could be ejected after sd_open.
 *	 - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
 *	 - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
 *	   <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
 *	   Support 32k/1M disks.
 *
 *	Logging policy (needs CONFIG_SCSI_LOGGING defined):
 *	 - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
 *	 - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
 *	 - entering sd_ioctl: SCSI_LOG_IOCTL level 1
 *	 - entering other commands: SCSI_LOG_HLQUEUE level 3
 *	Note: when the logging level is set by the user, it must be greater
 *	than the level indicated above to trigger output.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/slab.h>
#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
#include <linux/pr.h>
#include <linux/t10-pi.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>

#include "sd.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

MODULE_AUTHOR("Eric Youngdale");
MODULE_DESCRIPTION("SCSI disk (sd) driver");
MODULE_LICENSE("GPL");

MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);

MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);

#define SD_MINORS	16

static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int  sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static int  sd_probe(struct device *);
static int  sd_remove(struct device *);
static void sd_shutdown(struct device *);
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
static int sd_resume_system(struct device *);
static int sd_resume_runtime(struct device *);
static void sd_rescan(struct device *);
static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
static int sd_done(struct scsi_cmnd *);
static void sd_eh_reset(struct scsi_cmnd *);
static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);

static DEFINE_IDA(sd_index_ida);

static struct kmem_cache *sd_cdb_cache;
static mempool_t *sd_page_pool;
static struct lock_class_key sd_bio_compl_lkclass;

static const char *sd_cache_types[] = {
	"write through", "none", "write back",
	"write back, no read (daft)"
};

static void sd_set_flush_flag(struct scsi_disk *sdkp)
{
	bool wc = false, fua = false;

	if (sdkp->WCE) {
		wc = true;
		if (sdkp->DPOFUA)
			fua = true;
	}

	blk_queue_write_cache(sdkp->disk->queue, wc, fua);
}

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	int ct, rcd, wce, sp;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	char buffer[64];
	char *buffer_data;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	static const char temp[] = "temporary ";
	int len;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		/* no cache control on RBC devices; theoretically they
		 * can do it, but there are probably so many exceptions
		 * it's not worth the risk */
		return -EINVAL;

	if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
		buf += sizeof(temp) - 1;
		sdkp->cache_override = 1;
	} else {
		sdkp->cache_override = 0;
	}

	ct = sysfs_match_string(sd_cache_types, buf);
	if (ct < 0)
		return -EINVAL;

	rcd = ct & 0x01 ? 1 : 0;
	wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;

	if (sdkp->cache_override) {
		sdkp->WCE = wce;
		sdkp->RCD = rcd;
		sd_set_flush_flag(sdkp);
		return count;
	}

	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
			    sdkp->max_retries, &data, NULL))
		return -EINVAL;
	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
		    data.block_descriptor_length);
	buffer_data = buffer + data.header_length +
		      data.block_descriptor_length;
	buffer_data[2] &= ~0x05;
	buffer_data[2] |= wce << 2 | rcd;
	sp = buffer_data[0] & 0x80 ? 1 : 0;
	buffer_data[0] &= ~0x80;

	/*
	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
	 * received mode parameter buffer before doing MODE SELECT.
	 */
	data.device_specific = 0;

	if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
			     sdkp->max_retries, &data, &sshdr)) {
		if (scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);
		return -EINVAL;
	}
	sd_revalidate_disk(sdkp->disk);
	return count;
}
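
/*
 * For example, the cache type can be changed at run time through sysfs,
 * e.g. "echo 'write back' > /sys/class/scsi_disk/<h:c:t:l>/cache_type",
 * which updates the caching mode page on the device via MODE SELECT.
 * Prefixing the value with "temporary " (e.g. "temporary write back") only
 * changes the kernel's view of the cache (cache_override) without touching
 * the device.
 */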

static ssize_t
manage_start_stop_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	return sprintf(buf, "%u\n", sdp->manage_start_stop);
}

static ssize_t
manage_start_stop_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	bool v;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->manage_start_stop = v;

	return count;
}
static DEVICE_ATTR_RW(manage_start_stop);

static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->device->allow_restart);
}

static ssize_t
allow_restart_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	bool v;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->allow_restart = v;

	return count;
}
static DEVICE_ATTR_RW(allow_restart);

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int ct = sdkp->RCD + 2*sdkp->WCE;

	return sprintf(buf, "%s\n", sd_cache_types[ct]);
}
static DEVICE_ATTR_RW(cache_type);

static ssize_t
FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->DPOFUA);
}
static DEVICE_ATTR_RO(FUA);

static ssize_t
protection_type_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->protection_type);
}

static ssize_t
protection_type_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	unsigned int val;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &val);

	if (err)
		return err;

	if (val <= T10_PI_TYPE3_PROTECTION)
		sdkp->protection_type = val;

	return count;
}
static DEVICE_ATTR_RW(protection_type);

static ssize_t
protection_mode_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);

	if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
		dif = 0;
		dix = 1;
	}

	if (!dif && !dix)
		return sprintf(buf, "none\n");

	return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
}
static DEVICE_ATTR_RO(protection_mode);

static ssize_t
app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->ATO);
}
static DEVICE_ATTR_RO(app_tag_own);

static ssize_t
thin_provisioning_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->lbpme);
}
static DEVICE_ATTR_RO(thin_provisioning);

/* sysfs_match_string() requires dense arrays */
static const char *lbp_mode[] = {
	[SD_LBP_FULL]		= "full",
	[SD_LBP_UNMAP]		= "unmap",
	[SD_LBP_WS16]		= "writesame_16",
	[SD_LBP_WS10]		= "writesame_10",
	[SD_LBP_ZERO]		= "writesame_zero",
	[SD_LBP_DISABLE]	= "disabled",
};

static ssize_t
provisioning_mode_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
}

static ssize_t
provisioning_mode_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	int mode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sd_is_zoned(sdkp)) {
		sd_config_discard(sdkp, SD_LBP_DISABLE);
		return count;
	}

	if (sdp->type != TYPE_DISK)
		return -EINVAL;

	mode = sysfs_match_string(lbp_mode, buf);
	if (mode < 0)
		return -EINVAL;

	sd_config_discard(sdkp, mode);

	return count;
}
static DEVICE_ATTR_RW(provisioning_mode);

/* sysfs_match_string() requires dense arrays */
static const char *zeroing_mode[] = {
	[SD_ZERO_WRITE]		= "write",
	[SD_ZERO_WS]		= "writesame",
	[SD_ZERO_WS16_UNMAP]	= "writesame_16_unmap",
	[SD_ZERO_WS10_UNMAP]	= "writesame_10_unmap",
};

static ssize_t
zeroing_mode_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
}

static ssize_t
zeroing_mode_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int mode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	mode = sysfs_match_string(zeroing_mode, buf);
	if (mode < 0)
		return -EINVAL;

	sdkp->zeroing_mode = mode;

	return count;
}
static DEVICE_ATTR_RW(zeroing_mode);

static ssize_t
max_medium_access_timeouts_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
}

static ssize_t
max_medium_access_timeouts_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);

	return err ? err : count;
}
static DEVICE_ATTR_RW(max_medium_access_timeouts);

static ssize_t
max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
}

static ssize_t
max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned long max;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	err = kstrtoul(buf, 10, &max);

	if (err)
		return err;

	if (max == 0)
		sdp->no_write_same = 1;
	else if (max <= SD_MAX_WS16_BLOCKS) {
		sdp->no_write_same = 0;
		sdkp->max_ws_blocks = max;
	}

	sd_config_write_same(sdkp);

	return count;
}
static DEVICE_ATTR_RW(max_write_same_blocks);

static ssize_t
zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	if (sdkp->device->type == TYPE_ZBC)
		return sprintf(buf, "host-managed\n");
	if (sdkp->zoned == 1)
		return sprintf(buf, "host-aware\n");
	if (sdkp->zoned == 2)
		return sprintf(buf, "drive-managed\n");
	return sprintf(buf, "none\n");
}
static DEVICE_ATTR_RO(zoned_cap);

static ssize_t
max_retries_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdev = sdkp->device;
	int retries, err;

	err = kstrtoint(buf, 10, &retries);
	if (err)
		return err;

	if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) {
		sdkp->max_retries = retries;
		return count;
	}

	sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
		    SD_MAX_RETRIES);
	return -EINVAL;
}

static ssize_t
max_retries_show(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%d\n", sdkp->max_retries);
}

static DEVICE_ATTR_RW(max_retries);

static struct attribute *sd_disk_attrs[] = {
	&dev_attr_cache_type.attr,
	&dev_attr_FUA.attr,
	&dev_attr_allow_restart.attr,
	&dev_attr_manage_start_stop.attr,
	&dev_attr_protection_type.attr,
	&dev_attr_protection_mode.attr,
	&dev_attr_app_tag_own.attr,
	&dev_attr_thin_provisioning.attr,
	&dev_attr_provisioning_mode.attr,
	&dev_attr_zeroing_mode.attr,
	&dev_attr_max_write_same_blocks.attr,
	&dev_attr_max_medium_access_timeouts.attr,
	&dev_attr_zoned_cap.attr,
	&dev_attr_max_retries.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sd_disk);

static struct class sd_disk_class = {
	.name		= "scsi_disk",
	.owner		= THIS_MODULE,
	.dev_release	= scsi_disk_release,
	.dev_groups	= sd_disk_groups,
};

static const struct dev_pm_ops sd_pm_ops = {
	.suspend		= sd_suspend_system,
	.resume			= sd_resume_system,
	.poweroff		= sd_suspend_system,
	.restore		= sd_resume_system,
	.runtime_suspend	= sd_suspend_runtime,
	.runtime_resume		= sd_resume_runtime,
};

static struct scsi_driver sd_template = {
	.gendrv = {
		.name		= "sd",
		.owner		= THIS_MODULE,
		.probe		= sd_probe,
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.remove		= sd_remove,
		.shutdown	= sd_shutdown,
		.pm		= &sd_pm_ops,
	},
	.rescan			= sd_rescan,
	.init_command		= sd_init_command,
	.uninit_command		= sd_uninit_command,
	.done			= sd_done,
	.eh_action		= sd_eh_action,
	.eh_reset		= sd_eh_reset,
};

/*
 * Don't request a new module, as that could deadlock in multipath
 * environment.
 */
static void sd_default_probe(dev_t devt)
{
}

/*
 * Device no to disk mapping:
 *
 *       major         disc2     disc  p1
 *   |............|.............|....|....| <- dev_t
 *        31        20       19  8 7  4 3 0
 *
 * Inside a major, we have 16k disks, however mapped non-
 * contiguously. The first 16 disks are for major0, the next
 * ones with major1, ... Disk 256 is for major0 again, disk 272
 * for major1, ...
 * As we stay compatible with our numbering scheme, we can reuse
 * the well-known SCSI majors 8, 65--71, 128--135.
 */
static int sd_major(int major_idx)
{
	switch (major_idx) {
	case 0:
		return SCSI_DISK0_MAJOR;
	case 1 ... 7:
		return SCSI_DISK1_MAJOR + major_idx - 1;
	case 8 ... 15:
		return SCSI_DISK8_MAJOR + major_idx - 8;
	default:
		BUG();
		return 0;	/* shut up gcc */
	}
}

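/*
 * For example, with SD_MINORS == 16 minors (partitions) per disk, disk
 * indices 0-15 land on SCSI_DISK0_MAJOR (/dev/sda..sdp), indices 16-31 on
 * SCSI_DISK1_MAJOR (/dev/sdq..), and so on up to SCSI_DISK15_MAJOR; disk
 * index 256 then wraps back to SCSI_DISK0_MAJOR using the extended minor
 * space described in the layout above.
 */
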
#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
		size_t len, bool send)
{
	struct scsi_disk *sdkp = data;
	struct scsi_device *sdev = sdkp->device;
	u8 cdb[12] = { 0, };
	int ret;

	cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
	cdb[1] = secp;
	put_unaligned_be16(spsp, &cdb[2]);
	put_unaligned_be32(len, &cdb[6]);

	ret = scsi_execute(sdev, cdb, send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
		buffer, len, NULL, NULL, SD_TIMEOUT, sdkp->max_retries, 0,
		RQF_PM, NULL);
	return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */

/*
 * Look up the DIX operation based on whether the command is read or
 * write and whether dix and dif are enabled.
 */
static unsigned int sd_prot_op(bool write, bool dix, bool dif)
{
	/* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
	static const unsigned int ops[] = {	/* wrt dix dif */
		SCSI_PROT_NORMAL,		/*  0	0   0  */
		SCSI_PROT_READ_STRIP,		/*  0	0   1  */
		SCSI_PROT_READ_INSERT,		/*  0	1   0  */
		SCSI_PROT_READ_PASS,		/*  0	1   1  */
		SCSI_PROT_NORMAL,		/*  1	0   0  */
		SCSI_PROT_WRITE_INSERT,		/*  1	0   1  */
		SCSI_PROT_WRITE_STRIP,		/*  1	1   0  */
		SCSI_PROT_WRITE_PASS,		/*  1	1   1  */
	};

	return ops[write << 2 | dix << 1 | dif];
}

/*
 * Returns a mask of the protection flags that are valid for a given DIX
 * operation.
 */
static unsigned int sd_prot_flag_mask(unsigned int prot_op)
{
	static const unsigned int flag_mask[] = {
		[SCSI_PROT_NORMAL]		= 0,

		[SCSI_PROT_READ_STRIP]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_READ_INSERT]		= SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_READ_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_INSERT]	= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_WRITE_STRIP]		= SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,
	};

	return flag_mask[prot_op];
}

static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
					   unsigned int dix, unsigned int dif)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct bio *bio = rq->bio;
	unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
	unsigned int protect = 0;

	if (dix) {				/* DIX Type 0, 1, 2, 3 */
		if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
			scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
	}

	if (dif != T10_PI_TYPE3_PROTECTION) {	/* DIX/DIF Type 0, 1, 2 */
		scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_REF_CHECK;
	}

	if (dif) {				/* DIX/DIF Type 1, 2, 3 */
		scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;

		if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
			protect = 3 << 5;	/* Disable target PI checking */
		else
			protect = 1 << 5;	/* Enable target PI checking */
	}

	scsi_set_prot_op(scmd, prot_op);
	scsi_set_prot_type(scmd, dif);
	scmd->prot_flags &= sd_prot_flag_mask(prot_op);

	return protect;
}

static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;
	unsigned int max_blocks = 0;

	q->limits.discard_alignment =
		sdkp->unmap_alignment * logical_block_size;
	q->limits.discard_granularity =
		max(sdkp->physical_block_size,
		    sdkp->unmap_granularity * logical_block_size);
	sdkp->provisioning_mode = mode;

	switch (mode) {

	case SD_LBP_FULL:
	case SD_LBP_DISABLE:
		blk_queue_max_discard_sectors(q, 0);
		return;

	case SD_LBP_UNMAP:
		max_blocks = min_not_zero(sdkp->max_unmap_blocks,
					  (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS16:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS10:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
		break;

	case SD_LBP_ZERO:
		max_blocks = min_not_zero(sdkp->max_ws_blocks,
					  (u32)SD_MAX_WS10_BLOCKS);
		break;
	}
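
	/*
	 * max_blocks is counted in device logical blocks; the block layer
	 * limit set below is in 512-byte sectors, hence the shift.  For
	 * example, with a 4096-byte logical block size each block
	 * contributes 4096 >> 9 = 8 sectors.
	 */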
	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
}

static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
{
	struct page *page;

	page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!page)
		return NULL;
	clear_highpage(page);
	bvec_set_page(&rq->special_vec, page, data_len, 0);
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
	return bvec_virt(&rq->special_vec);
}

static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int data_len = 24;
	char *buf;

	buf = sd_set_special_bvec(rq, data_len);
	if (!buf)
		return BLK_STS_RESOURCE;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = UNMAP;
	cmd->cmnd[8] = 24;

	put_unaligned_be16(6 + 16, &buf[0]);
	put_unaligned_be16(16, &buf[2]);
	put_unaligned_be64(lba, &buf[8]);
	put_unaligned_be32(nr_blocks, &buf[16]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = SD_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}
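
/*
 * The 24-byte payload built above is an UNMAP parameter list with a single
 * block descriptor: bytes 0-1 hold the data length (6 + 16), bytes 2-3 the
 * block descriptor data length (16), and the descriptor itself carries the
 * starting LBA (bytes 8-15) and the number of logical blocks (bytes 16-19).
 */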

static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	if (!sd_set_special_bvec(rq, data_len))
		return BLK_STS_RESOURCE;

	cmd->cmd_len = 16;
	cmd->cmnd[0] = WRITE_SAME_16;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}

static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	if (!sd_set_special_bvec(rq, data_len))
		return BLK_STS_RESOURCE;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = WRITE_SAME;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}

static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));

	if (!(rq->cmd_flags & REQ_NOUNMAP)) {
		switch (sdkp->zeroing_mode) {
		case SD_ZERO_WS16_UNMAP:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_ZERO_WS10_UNMAP:
			return sd_setup_write_same10_cmnd(cmd, true);
		}
	}

	if (sdp->no_write_same) {
		rq->rq_flags |= RQF_QUIET;
		return BLK_STS_TARGET;
	}

	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
		return sd_setup_write_same16_cmnd(cmd, false);

	return sd_setup_write_same10_cmnd(cmd, false);
}

static void sd_config_write_same(struct scsi_disk *sdkp)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;

	if (sdkp->device->no_write_same) {
		sdkp->max_ws_blocks = 0;
		goto out;
	}

	/* Some devices can not handle block counts above 0xffff despite
	 * supporting WRITE SAME(16). Consequently we default to 64k
	 * blocks per I/O unless the device explicitly advertises a
	 * bigger limit.
	 */
	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS16_BLOCKS);
	else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS10_BLOCKS);
	else {
		sdkp->device->no_write_same = 1;
		sdkp->max_ws_blocks = 0;
	}

	if (sdkp->lbprz && sdkp->lbpws)
		sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
	else if (sdkp->lbprz && sdkp->lbpws10)
		sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
	else if (sdkp->max_ws_blocks)
		sdkp->zeroing_mode = SD_ZERO_WS;
	else
		sdkp->zeroing_mode = SD_ZERO_WRITE;

	if (sdkp->max_ws_blocks &&
	    sdkp->physical_block_size > logical_block_size) {
		/*
		 * Reporting a maximum number of blocks that is not aligned
		 * on the device physical size would cause a large write same
		 * request to be split into physically unaligned chunks by
		 * __blkdev_issue_write_zeroes() even if the caller of this
		 * function took care to align the large request. So make sure
		 * the maximum reported is aligned to the device physical block
		 * size. This is only an optional optimization for regular
		 * disks, but this is mandatory to avoid failure of large write
		 * same requests directed at sequential write required zones of
		 * host-managed ZBC disks.
		 */
		sdkp->max_ws_blocks =
			round_down(sdkp->max_ws_blocks,
				   bytes_to_logical(sdkp->device,
						    sdkp->physical_block_size));
	}

out:
	blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
					   (logical_block_size >> 9));
}

static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);

	/* flush requests don't perform I/O, zero the S/G table */
	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (cmd->device->use_16_for_sync) {
		cmd->cmnd[0] = SYNCHRONIZE_CACHE_16;
		cmd->cmd_len = 16;
	} else {
		cmd->cmnd[0] = SYNCHRONIZE_CACHE;
		cmd->cmd_len = 10;
	}
	cmd->transfersize = 0;
	cmd->allowed = sdkp->max_retries;

	rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len = SD_EXT_CDB_SIZE;
	cmd->cmnd[0]  = VARIABLE_LENGTH_CMD;
	cmd->cmnd[7]  = 0x18; /* Additional CDB len */
	cmd->cmnd[9]  = write ? WRITE_32 : READ_32;
	cmd->cmnd[10] = flags;
	put_unaligned_be64(lba, &cmd->cmnd[12]);
	put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
	put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len  = 16;
	cmd->cmnd[0]  = write ? WRITE_16 : READ_16;
	cmd->cmnd[1]  = flags;
	cmd->cmnd[14] = 0;
	cmd->cmnd[15] = 0;
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len = 10;
	cmd->cmnd[0] = write ? WRITE_10 : READ_10;
	cmd->cmnd[1] = flags;
	cmd->cmnd[6] = 0;
	cmd->cmnd[9] = 0;
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
				      sector_t lba, unsigned int nr_blocks,
				      unsigned char flags)
{
	/* Avoid a transfer length of 0 being translated into 256 blocks. */
	if (WARN_ON_ONCE(nr_blocks == 0))
		return BLK_STS_IOERR;

	if (unlikely(flags & 0x8)) {
		/*
		 * This happens only if this drive failed 10byte rw
		 * command with ILLEGAL_REQUEST during operation and
		 * thus turned off use_10_for_rw.
		 */
		scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
		return BLK_STS_IOERR;
	}

	cmd->cmd_len = 6;
	cmd->cmnd[0] = write ? WRITE_6 : READ_6;
	cmd->cmnd[1] = (lba >> 16) & 0x1f;
	cmd->cmnd[2] = (lba >> 8) & 0xff;
	cmd->cmnd[3] = lba & 0xff;
	cmd->cmnd[4] = nr_blocks;
	cmd->cmnd[5] = 0;

	return BLK_STS_OK;
}
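
/*
 * Roughly, sd_setup_read_write_cmnd() below picks the smallest CDB that can
 * express the request: READ/WRITE(32) when T10 PI Type 2 protection is in
 * use, READ/WRITE(16) when the LBA or block count no longer fits the fields
 * of the 10-byte CDB (e.g. disks larger than 2 TiB with 512-byte logical
 * blocks), READ/WRITE(10) for most other cases, and READ/WRITE(6) only for
 * small transfers on devices that rejected the 10-byte variants.
 */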

static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	sector_t threshold;
	unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int mask = logical_to_sectors(sdp, 1) - 1;
	bool write = rq_data_dir(rq) == WRITE;
	unsigned char protect, fua;
	blk_status_t ret;
	unsigned int dif;
	bool dix;

	ret = scsi_alloc_sgtables(cmd);
	if (ret != BLK_STS_OK)
		return ret;

	ret = BLK_STS_IOERR;
	if (!scsi_device_online(sdp) || sdp->changed) {
		scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
		goto fail;
	}

	if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
		scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
		goto fail;
	}

	if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
		scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
		goto fail;
	}

	/*
	 * Some SD card readers can't handle accesses which touch the
	 * last one or two logical blocks. Split accesses as needed.
	 */
	threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;

	if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
		if (lba < threshold) {
			/* Access up to the threshold but not beyond */
			nr_blocks = threshold - lba;
		} else {
			/* Access only a single logical block */
			nr_blocks = 1;
		}
	}

	if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
		if (ret)
			goto fail;
	}

	fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
	dix = scsi_prot_sg_count(cmd);
	dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);

	if (dif || dix)
		protect = sd_setup_protect_cmnd(cmd, dix, dif);
	else
		protect = 0;

	if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
		ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua);
	} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
		ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua);
	} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
		   sdp->use_10_for_rw || protect) {
		ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua);
	} else {
		ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
					protect | fua);
	}

	if (unlikely(ret != BLK_STS_OK))
		goto fail;

	/*
	 * We shouldn't disconnect in the middle of a sector, so with a dumb
	 * host adapter, it's safe to assume that we can at least transfer
	 * this many bytes between each connect / disconnect.
	 */
	cmd->transfersize = sdp->sector_size;
	cmd->underflow = nr_blocks << 9;
	cmd->allowed = sdkp->max_retries;
	cmd->sdb.length = nr_blocks * sdp->sector_size;

	SCSI_LOG_HLQUEUE(1,
			 scmd_printk(KERN_INFO, cmd,
				     "%s: block=%llu, count=%d\n", __func__,
				     (unsigned long long)blk_rq_pos(rq),
				     blk_rq_sectors(rq)));
	SCSI_LOG_HLQUEUE(2,
			 scmd_printk(KERN_INFO, cmd,
				     "%s %d/%u 512 byte blocks.\n",
				     write ? "writing" : "reading", nr_blocks,
				     blk_rq_sectors(rq)));

	/*
	 * This indicates that the command is ready from our end to be queued.
	 */
	return BLK_STS_OK;
fail:
	scsi_free_sgtables(cmd);
	return ret;
}

static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
		switch (scsi_disk(rq->q->disk)->provisioning_mode) {
		case SD_LBP_UNMAP:
			return sd_setup_unmap_cmnd(cmd);
		case SD_LBP_WS16:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_LBP_WS10:
			return sd_setup_write_same10_cmnd(cmd, true);
		case SD_LBP_ZERO:
			return sd_setup_write_same10_cmnd(cmd, false);
		default:
			return BLK_STS_TARGET;
		}
	case REQ_OP_WRITE_ZEROES:
		return sd_setup_write_zeroes_cmnd(cmd);
	case REQ_OP_FLUSH:
		return sd_setup_flush_cmnd(cmd);
	case REQ_OP_READ:
	case REQ_OP_WRITE:
	case REQ_OP_ZONE_APPEND:
		return sd_setup_read_write_cmnd(cmd);
	case REQ_OP_ZONE_RESET:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   false);
	case REQ_OP_ZONE_RESET_ALL:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   true);
	case REQ_OP_ZONE_OPEN:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
	case REQ_OP_ZONE_CLOSE:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
	case REQ_OP_ZONE_FINISH:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_NOTSUPP;
	}
}

static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
	struct request *rq = scsi_cmd_to_rq(SCpnt);

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		mempool_free(rq->special_vec.bv_page, sd_page_pool);
}

static bool sd_need_revalidate(struct block_device *bdev,
			       struct scsi_disk *sdkp)
{
	if (sdkp->device->removable || sdkp->write_prot) {
		if (bdev_check_media_change(bdev))
			return true;
	}

	/*
	 * Force a full rescan after ioctl(BLKRRPART).  While the disk state has
	 * nothing to do with partitions, BLKRRPART is used to force a full
	 * revalidate after things like a format for historical reasons.
	 */
	return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
}

/**
 * sd_open - open a scsi disk device
 * @bdev: Block device of the scsi disk to open
 * @mode: FMODE_* mask
 *
 * Returns 0 if successful. Returns a negated errno value in case
 * of error.
 *
 * Note: This can be called from a user context (e.g. fsck(1) )
 * or from within the kernel (e.g. as a result of a mount(1) ).
 * In the latter case @inode and @filp carry an abridged amount
 * of information as noted above.
 *
 * Locking: called with bdev->bd_disk->open_mutex held.
 **/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdev = sdkp->device;
	int retval;

	if (scsi_device_get(sdev))
		return -ENXIO;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));

	/*
	 * If the device is in error recovery, wait until it is done.
	 * If the device is offline, then disallow any access to it.
	 */
	retval = -ENXIO;
	if (!scsi_block_when_processing_errors(sdev))
		goto error_out;

	if (sd_need_revalidate(bdev, sdkp))
		sd_revalidate_disk(bdev->bd_disk);

	/*
	 * If the drive is empty, just let the open fail.
	 */
	retval = -ENOMEDIUM;
	if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
		goto error_out;

	/*
	 * If the device has the write protect tab set, have the open fail
	 * if the user expects to be able to write to the thing.
	 */
	retval = -EROFS;
	if (sdkp->write_prot && (mode & FMODE_WRITE))
		goto error_out;

	/*
	 * It is possible that the disk changing stuff resulted in
	 * the device being taken offline.  If this is the case,
	 * report this to the user, and don't pretend that the
	 * open actually succeeded.
	 */
	retval = -ENXIO;
	if (!scsi_device_online(sdev))
		goto error_out;

	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
	}

	return 0;

error_out:
	scsi_device_put(sdev);
	return retval;
}

/**
 * sd_release - invoked when the (last) close(2) is called on this
 * scsi disk.
 * @disk: disk to release
 * @mode: FMODE_* mask
 *
 * Returns 0.
 *
 * Note: may block (uninterruptible) if error recovery is underway
 * on this disk.
 *
 * Locking: called with bdev->bd_disk->open_mutex held.
 **/
static void sd_release(struct gendisk *disk, fmode_t mode)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdev = sdkp->device;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));

	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
	}

	scsi_device_put(sdev);
}

static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdp = sdkp->device;
	struct Scsi_Host *host = sdp->host;
	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
	int diskinfo[4];

	/* default to most commonly used values */
	diskinfo[0] = 0x40;	/* 1 << 6 */
	diskinfo[1] = 0x20;	/* 1 << 5 */
	diskinfo[2] = capacity >> 11;

	/* override with calculated, extended default, or driver values */
	if (host->hostt->bios_param)
		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
	else
		scsicam_bios_param(bdev, capacity, diskinfo);

	geo->heads = diskinfo[0];
	geo->sectors = diskinfo[1];
	geo->cylinders = diskinfo[2];
	return 0;
}
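
/*
 * The fallback geometry above corresponds to 64 heads and 32 sectors per
 * track; with 512-byte sectors that is 64 * 32 * 512 bytes = 1 MiB per
 * cylinder, which is why the cylinder count is capacity >> 11 (capacity is
 * expressed in 512-byte sectors).
 */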

/**
 * sd_ioctl - process an ioctl
 * @bdev: target block device
 * @mode: FMODE_* mask
 * @cmd: ioctl command number
 * @arg: this is third argument given to ioctl(2) system call.
 * Often contains a pointer.
 *
 * Returns 0 if successful (some ioctls return positive numbers on
 * success as well). Returns a negated errno value in case of error.
 *
 * Note: most ioctls are forwarded onto the block subsystem or further
 * down in the scsi subsystem.
 **/
static int sd_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	void __user *p = (void __user *)arg;
	int error;

	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
				    "cmd=0x%x\n", disk->disk_name, cmd));

	if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
		return -ENOIOCTLCMD;

	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device.  Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
	error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
			(mode & FMODE_NDELAY) != 0);
	if (error)
		return error;

	if (is_sed_ioctl(cmd))
		return sed_ioctl(sdkp->opal_dev, cmd, p);
	return scsi_ioctl(sdp, mode, cmd, p);
}

static void set_media_not_present(struct scsi_disk *sdkp)
{
	if (sdkp->media_present)
		sdkp->device->changed = 1;

	if (sdkp->device->removable) {
		sdkp->media_present = 0;
		sdkp->capacity = 0;
	}
}

static int media_not_present(struct scsi_disk *sdkp,
			     struct scsi_sense_hdr *sshdr)
{
	if (!scsi_sense_valid(sshdr))
		return 0;

	/* not invoked for commands that could return deferred errors */
	switch (sshdr->sense_key) {
	case UNIT_ATTENTION:
	case NOT_READY:
		/* medium not present */
		if (sshdr->asc == 0x3A) {
			set_media_not_present(sdkp);
			return 1;
		}
	}
	return 0;
}

/**
 * sd_check_events - check media events
 * @disk: kernel device descriptor
 * @clearing: disk events currently being cleared
 *
 * Returns mask of DISK_EVENT_*.
 *
 * Note: this function is invoked from the block subsystem.
 **/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
	struct scsi_disk *sdkp = disk->private_data;
	struct scsi_device *sdp;
	int retval;
	bool disk_changed;

	if (!sdkp)
		return 0;

	sdp = sdkp->device;
	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));

	/*
	 * If the device is offline, don't send any commands - just pretend as
	 * if the command failed.  If the device ever comes back online, we
	 * can deal with it then.  It is only because of unrecoverable errors
	 * that we would ever take a device offline in the first place.
	 */
	if (!scsi_device_online(sdp)) {
		set_media_not_present(sdkp);
		goto out;
	}

	/*
	 * Using TEST_UNIT_READY enables differentiation between drive with
	 * no cartridge loaded - NOT READY, drive with changed cartridge -
	 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
	 *
	 * Drives that auto spin down, e.g. the iomega jaz 1G, will be started
	 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
	 * sd_revalidate() is called.
	 */
	if (scsi_block_when_processing_errors(sdp)) {
		struct scsi_sense_hdr sshdr = { 0, };

		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
					      &sshdr);

		/* failed to execute TUR, assume media not present */
		if (retval < 0 || host_byte(retval)) {
			set_media_not_present(sdkp);
			goto out;
		}

		if (media_not_present(sdkp, &sshdr))
			goto out;
	}

	/*
	 * For removable scsi disks we have to recognise the presence
	 * of a disk in the drive.
	 */
	if (!sdkp->media_present)
		sdp->changed = 1;
	sdkp->media_present = 1;
out:
	/*
	 * sdp->changed is set under the following conditions:
	 *
	 * Medium present state has changed in either direction.
	 * Device has indicated UNIT_ATTENTION.
	 */
	disk_changed = sdp->changed;
	sdp->changed = 0;
	return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}

static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
{
	int retries, res;
	struct scsi_device *sdp = sdkp->device;
	const int timeout = sdp->request_queue->rq_timeout
		* SD_FLUSH_TIMEOUT_MULTIPLIER;
	struct scsi_sense_hdr my_sshdr;

	if (!scsi_device_online(sdp))
		return -ENODEV;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

	for (retries = 3; retries > 0; --retries) {
		unsigned char cmd[16] = { 0 };

		if (sdp->use_16_for_sync)
			cmd[0] = SYNCHRONIZE_CACHE_16;
		else
			cmd[0] = SYNCHRONIZE_CACHE;
		/*
		 * Leave the rest of the command zero to indicate
		 * flush everything.
		 */
		res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
				   timeout, sdkp->max_retries, 0, RQF_PM, NULL);
		if (res == 0)
			break;
	}

	if (res) {
		sd_print_result(sdkp, "Synchronize Cache(10) failed", res);

		if (res < 0)
			return res;

		if (scsi_status_is_check_condition(res) &&
		    scsi_sense_valid(sshdr)) {
			sd_print_sense_hdr(sdkp, sshdr);

			/* we need to evaluate the error return  */
			if (sshdr->asc == 0x3a ||	/* medium not present */
			    sshdr->asc == 0x20 ||	/* invalid command */
			    (sshdr->asc == 0x74 && sshdr->ascq == 0x71))	/* drive is password locked */
				/* this is no error here */
				return 0;
		}

		switch (host_byte(res)) {
		/* ignore errors due to racing a disconnection */
		case DID_BAD_TARGET:
		case DID_NO_CONNECT:
			return 0;
		/* signal the upper layer it might try again */
		case DID_BUS_BUSY:
		case DID_IMM_RETRY:
		case DID_REQUEUE:
		case DID_SOFT_ERROR:
			return -EBUSY;
		default:
			return -EIO;
		}
	}
	return 0;
}

static void sd_rescan(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	sd_revalidate_disk(sdkp->disk);
}

static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
		enum blk_unique_id type)
{
	struct scsi_device *sdev = scsi_disk(disk)->device;
	const struct scsi_vpd *vpd;
	const unsigned char *d;
	int ret = -ENXIO, len;

	rcu_read_lock();
	vpd = rcu_dereference(sdev->vpd_pg83);
	if (!vpd)
		goto out_unlock;

	ret = -EINVAL;
	for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
		/* we only care about designators with LU association */
		if (((d[1] >> 4) & 0x3) != 0x00)
			continue;
		if ((d[1] & 0xf) != type)
			continue;

		/*
		 * Only exit early if a 16-byte descriptor was found.  Otherwise
		 * keep looking as one with more entropy might still show up.
		 */
		len = d[3];
		if (len != 8 && len != 12 && len != 16)
			continue;
		ret = len;
		memcpy(id, d + 4, len);
		if (len == 16)
			break;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}

static char sd_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 0x01;
	case PR_EXCLUSIVE_ACCESS:
		return 0x03;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 0x05;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 0x06;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 0x07;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 0x08;
	default:
		return 0;
	}
};

static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result)
{
	switch (host_byte(result)) {
	case DID_TRANSPORT_MARGINAL:
	case DID_TRANSPORT_DISRUPTED:
	case DID_BUS_BUSY:
		return PR_STS_RETRY_PATH_FAILURE;
	case DID_NO_CONNECT:
		return PR_STS_PATH_FAILED;
	case DID_TRANSPORT_FAILFAST:
		return PR_STS_PATH_FAST_FAILED;
	}

	switch (status_byte(result)) {
	case SAM_STAT_RESERVATION_CONFLICT:
		return PR_STS_RESERVATION_CONFLICT;
	case SAM_STAT_CHECK_CONDITION:
		if (!scsi_sense_valid(sshdr))
			return PR_STS_IOERR;

		if (sshdr->sense_key == ILLEGAL_REQUEST &&
		    (sshdr->asc == 0x26 || sshdr->asc == 0x24))
			return -EINVAL;

		fallthrough;
	default:
		return PR_STS_IOERR;
	}
}

static int sd_pr_command(struct block_device *bdev, u8 sa,
		u64 key, u64 sa_key, u8 type, u8 flags)
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdev = sdkp->device;
	struct scsi_sense_hdr sshdr;
	int result;
	u8 cmd[16] = { 0, };
	u8 data[24] = { 0, };

	cmd[0] = PERSISTENT_RESERVE_OUT;
	cmd[1] = sa;
	cmd[2] = type;
	put_unaligned_be32(sizeof(data), &cmd[5]);

	put_unaligned_be64(key, &data[0]);
	put_unaligned_be64(sa_key, &data[8]);
	data[20] = flags;

	result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
			&sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);

	if (scsi_status_is_check_condition(result) &&
	    scsi_sense_valid(&sshdr)) {
		sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
		scsi_print_sense_hdr(sdev, NULL, &sshdr);
	}

	if (result <= 0)
		return result;

	return sd_scsi_to_pr_err(&sshdr, result);
}
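
/*
 * The @sa values passed by the pr_ops callbacks below are PERSISTENT
 * RESERVE OUT service actions: 0x00 REGISTER, 0x01 RESERVE, 0x02 RELEASE,
 * 0x03 CLEAR, 0x04 PREEMPT, 0x05 PREEMPT AND ABORT and 0x06 REGISTER AND
 * IGNORE EXISTING KEY.
 */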

static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
		u32 flags)
{
	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;
	return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
			old_key, new_key, 0,
			(1 << 0) /* APTPL */);
}

static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
		u32 flags)
{
	if (flags)
		return -EOPNOTSUPP;
	return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
}

static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
}

static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
		enum pr_type type, bool abort)
{
	return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
			sd_pr_type(type), 0);
}

static int sd_pr_clear(struct block_device *bdev, u64 key)
{
	return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
}

static const struct pr_ops sd_pr_ops = {
	.pr_register	= sd_pr_register,
	.pr_reserve	= sd_pr_reserve,
	.pr_release	= sd_pr_release,
	.pr_preempt	= sd_pr_preempt,
	.pr_clear	= sd_pr_clear,
};

static void scsi_disk_free_disk(struct gendisk *disk)
{
	struct scsi_disk *sdkp = scsi_disk(disk);

	put_device(&sdkp->disk_dev);
}

static const struct block_device_operations sd_fops = {
	.owner			= THIS_MODULE,
	.open			= sd_open,
	.release		= sd_release,
	.ioctl			= sd_ioctl,
	.getgeo			= sd_getgeo,
	.compat_ioctl		= blkdev_compat_ptr_ioctl,
	.check_events		= sd_check_events,
	.unlock_native_capacity	= sd_unlock_native_capacity,
	.report_zones		= sd_zbc_report_zones,
	.get_unique_id		= sd_get_unique_id,
	.free_disk		= scsi_disk_free_disk,
	.pr_ops			= &sd_pr_ops,
};

/**
 * sd_eh_reset - reset error handling callback
 * @scmd: sd-issued command that has failed
 *
 * This function is called by the SCSI midlayer before starting
 * SCSI EH. When counting medium access failures we have to be
 * careful to register it only once per device and SCSI EH run;
 * there might be several timed out commands which will cause the
 * 'max_medium_access_timeouts' counter to trigger after the first
 * SCSI EH run already and set the device to offline.
 * So this function resets the internal counter before starting SCSI EH.
 **/
static void sd_eh_reset(struct scsi_cmnd *scmd)
{
	struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);

	/* New SCSI EH run, reset gate variable */
	sdkp->ignore_medium_access_errors = false;
}

/**
 * sd_eh_action - error handling callback
 * @scmd: sd-issued command that has failed
 * @eh_disp: The recovery disposition suggested by the midlayer
 *
 * This function is called by the SCSI midlayer upon completion of an
 * error test command (currently TEST UNIT READY). The result of sending
 * the eh command is passed in eh_disp.  We're looking for devices that
 * fail medium access commands but are OK with non access commands like
 * test unit ready (so wrongly see the device as having a successful
 * recovery)
 **/
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
	struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
	struct scsi_device *sdev = scmd->device;

	if (!scsi_device_online(sdev) ||
	    !scsi_medium_access_command(scmd) ||
	    host_byte(scmd->result) != DID_TIME_OUT ||
	    eh_disp != SUCCESS)
		return eh_disp;

	/*
	 * The device has timed out executing a medium access command.
	 * However, the TEST UNIT READY command sent during error
	 * handling completed successfully. Either the device is in the
	 * process of recovering or it has suffered an internal failure
	 * that prevents access to the storage medium.
1888 */ 1889 if (!sdkp->ignore_medium_access_errors) { 1890 sdkp->medium_access_timed_out++; 1891 sdkp->ignore_medium_access_errors = true; 1892 } 1893 1894 /* 1895 * If the device keeps failing read/write commands but TEST UNIT 1896 * READY always completes successfully we assume that medium 1897 * access is no longer possible and take the device offline. 1898 */ 1899 if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) { 1900 scmd_printk(KERN_ERR, scmd, 1901 "Medium access timeout failure. Offlining disk!\n"); 1902 mutex_lock(&sdev->state_mutex); 1903 scsi_device_set_state(sdev, SDEV_OFFLINE); 1904 mutex_unlock(&sdev->state_mutex); 1905 1906 return SUCCESS; 1907 } 1908 1909 return eh_disp; 1910 } 1911 1912 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) 1913 { 1914 struct request *req = scsi_cmd_to_rq(scmd); 1915 struct scsi_device *sdev = scmd->device; 1916 unsigned int transferred, good_bytes; 1917 u64 start_lba, end_lba, bad_lba; 1918 1919 /* 1920 * Some commands have a payload smaller than the device logical 1921 * block size (e.g. INQUIRY on a 4K disk). 1922 */ 1923 if (scsi_bufflen(scmd) <= sdev->sector_size) 1924 return 0; 1925 1926 /* Check if we have a 'bad_lba' information */ 1927 if (!scsi_get_sense_info_fld(scmd->sense_buffer, 1928 SCSI_SENSE_BUFFERSIZE, 1929 &bad_lba)) 1930 return 0; 1931 1932 /* 1933 * If the bad lba was reported incorrectly, we have no idea where 1934 * the error is. 1935 */ 1936 start_lba = sectors_to_logical(sdev, blk_rq_pos(req)); 1937 end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd)); 1938 if (bad_lba < start_lba || bad_lba >= end_lba) 1939 return 0; 1940 1941 /* 1942 * resid is optional but mostly filled in. When it's unused, 1943 * its value is zero, so we assume the whole buffer transferred 1944 */ 1945 transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); 1946 1947 /* This computation should always be done in terms of the 1948 * resolution of the device's medium. 1949 */ 1950 good_bytes = logical_to_bytes(sdev, bad_lba - start_lba); 1951 1952 return min(good_bytes, transferred); 1953 } 1954 1955 /** 1956 * sd_done - bottom half handler: called when the lower level 1957 * driver has completed (successfully or otherwise) a scsi command. 1958 * @SCpnt: mid-level's per command structure. 1959 * 1960 * Note: potentially run from within an ISR. Must not block. 1961 **/ 1962 static int sd_done(struct scsi_cmnd *SCpnt) 1963 { 1964 int result = SCpnt->result; 1965 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); 1966 unsigned int sector_size = SCpnt->device->sector_size; 1967 unsigned int resid; 1968 struct scsi_sense_hdr sshdr; 1969 struct request *req = scsi_cmd_to_rq(SCpnt); 1970 struct scsi_disk *sdkp = scsi_disk(req->q->disk); 1971 int sense_valid = 0; 1972 int sense_deferred = 0; 1973 1974 switch (req_op(req)) { 1975 case REQ_OP_DISCARD: 1976 case REQ_OP_WRITE_ZEROES: 1977 case REQ_OP_ZONE_RESET: 1978 case REQ_OP_ZONE_RESET_ALL: 1979 case REQ_OP_ZONE_OPEN: 1980 case REQ_OP_ZONE_CLOSE: 1981 case REQ_OP_ZONE_FINISH: 1982 if (!result) { 1983 good_bytes = blk_rq_bytes(req); 1984 scsi_set_resid(SCpnt, 0); 1985 } else { 1986 good_bytes = 0; 1987 scsi_set_resid(SCpnt, blk_rq_bytes(req)); 1988 } 1989 break; 1990 default: 1991 /* 1992 * In case of bogus fw or device, we could end up having 1993 * an unaligned partial completion. Check this here and force 1994 * alignment. 
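 * For example, with 512-byte logical blocks a reported resid of 100 bytes is rounded up to 512 by the code below.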
1995 */ 1996 resid = scsi_get_resid(SCpnt); 1997 if (resid & (sector_size - 1)) { 1998 sd_printk(KERN_INFO, sdkp, 1999 "Unaligned partial completion (resid=%u, sector_sz=%u)\n", 2000 resid, sector_size); 2001 scsi_print_command(SCpnt); 2002 resid = min(scsi_bufflen(SCpnt), 2003 round_up(resid, sector_size)); 2004 scsi_set_resid(SCpnt, resid); 2005 } 2006 } 2007 2008 if (result) { 2009 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); 2010 if (sense_valid) 2011 sense_deferred = scsi_sense_is_deferred(&sshdr); 2012 } 2013 sdkp->medium_access_timed_out = 0; 2014 2015 if (!scsi_status_is_check_condition(result) && 2016 (!sense_valid || sense_deferred)) 2017 goto out; 2018 2019 switch (sshdr.sense_key) { 2020 case HARDWARE_ERROR: 2021 case MEDIUM_ERROR: 2022 good_bytes = sd_completed_bytes(SCpnt); 2023 break; 2024 case RECOVERED_ERROR: 2025 good_bytes = scsi_bufflen(SCpnt); 2026 break; 2027 case NO_SENSE: 2028 /* This indicates a false check condition, so ignore it. An 2029 * unknown amount of data was transferred so treat it as an 2030 * error. 2031 */ 2032 SCpnt->result = 0; 2033 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2034 break; 2035 case ABORTED_COMMAND: 2036 if (sshdr.asc == 0x10) /* DIF: Target detected corruption */ 2037 good_bytes = sd_completed_bytes(SCpnt); 2038 break; 2039 case ILLEGAL_REQUEST: 2040 switch (sshdr.asc) { 2041 case 0x10: /* DIX: Host detected corruption */ 2042 good_bytes = sd_completed_bytes(SCpnt); 2043 break; 2044 case 0x20: /* INVALID COMMAND OPCODE */ 2045 case 0x24: /* INVALID FIELD IN CDB */ 2046 switch (SCpnt->cmnd[0]) { 2047 case UNMAP: 2048 sd_config_discard(sdkp, SD_LBP_DISABLE); 2049 break; 2050 case WRITE_SAME_16: 2051 case WRITE_SAME: 2052 if (SCpnt->cmnd[1] & 8) { /* UNMAP */ 2053 sd_config_discard(sdkp, SD_LBP_DISABLE); 2054 } else { 2055 sdkp->device->no_write_same = 1; 2056 sd_config_write_same(sdkp); 2057 req->rq_flags |= RQF_QUIET; 2058 } 2059 break; 2060 } 2061 } 2062 break; 2063 default: 2064 break; 2065 } 2066 2067 out: 2068 if (sd_is_zoned(sdkp)) 2069 good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr); 2070 2071 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt, 2072 "sd_done: completed %d of %d bytes\n", 2073 good_bytes, scsi_bufflen(SCpnt))); 2074 2075 return good_bytes; 2076 } 2077 2078 /* 2079 * spinup disk - called only in sd_revalidate_disk() 2080 */ 2081 static void 2082 sd_spinup_disk(struct scsi_disk *sdkp) 2083 { 2084 unsigned char cmd[10]; 2085 unsigned long spintime_expire = 0; 2086 int retries, spintime; 2087 unsigned int the_result; 2088 struct scsi_sense_hdr sshdr; 2089 int sense_valid = 0; 2090 2091 spintime = 0; 2092 2093 /* Spin up drives, as required. Only do this at boot time */ 2094 /* Spinup needs to be done for module loads too. */ 2095 do { 2096 retries = 0; 2097 2098 do { 2099 bool media_was_present = sdkp->media_present; 2100 2101 cmd[0] = TEST_UNIT_READY; 2102 memset((void *) &cmd[1], 0, 9); 2103 2104 the_result = scsi_execute_req(sdkp->device, cmd, 2105 DMA_NONE, NULL, 0, 2106 &sshdr, SD_TIMEOUT, 2107 sdkp->max_retries, NULL); 2108 2109 /* 2110 * If the drive has indicated to us that it 2111 * doesn't have any media in it, don't bother 2112 * with any more polling. 
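 * The notice below is only printed when media was present on the previous check, so removal is reported just once.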
2113 */ 2114 if (media_not_present(sdkp, &sshdr)) { 2115 if (media_was_present) 2116 sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n"); 2117 return; 2118 } 2119 2120 if (the_result) 2121 sense_valid = scsi_sense_valid(&sshdr); 2122 retries++; 2123 } while (retries < 3 && 2124 (!scsi_status_is_good(the_result) || 2125 (scsi_status_is_check_condition(the_result) && 2126 sense_valid && sshdr.sense_key == UNIT_ATTENTION))); 2127 2128 if (!scsi_status_is_check_condition(the_result)) { 2129 /* no sense, TUR either succeeded or failed 2130 * with a status error */ 2131 if(!spintime && !scsi_status_is_good(the_result)) { 2132 sd_print_result(sdkp, "Test Unit Ready failed", 2133 the_result); 2134 } 2135 break; 2136 } 2137 2138 /* 2139 * The device does not want the automatic start to be issued. 2140 */ 2141 if (sdkp->device->no_start_on_add) 2142 break; 2143 2144 if (sense_valid && sshdr.sense_key == NOT_READY) { 2145 if (sshdr.asc == 4 && sshdr.ascq == 3) 2146 break; /* manual intervention required */ 2147 if (sshdr.asc == 4 && sshdr.ascq == 0xb) 2148 break; /* standby */ 2149 if (sshdr.asc == 4 && sshdr.ascq == 0xc) 2150 break; /* unavailable */ 2151 if (sshdr.asc == 4 && sshdr.ascq == 0x1b) 2152 break; /* sanitize in progress */ 2153 /* 2154 * Issue command to spin up drive when not ready 2155 */ 2156 if (!spintime) { 2157 sd_printk(KERN_NOTICE, sdkp, "Spinning up disk..."); 2158 cmd[0] = START_STOP; 2159 cmd[1] = 1; /* Return immediately */ 2160 memset((void *) &cmd[2], 0, 8); 2161 cmd[4] = 1; /* Start spin cycle */ 2162 if (sdkp->device->start_stop_pwr_cond) 2163 cmd[4] |= 1 << 4; 2164 scsi_execute_req(sdkp->device, cmd, DMA_NONE, 2165 NULL, 0, &sshdr, 2166 SD_TIMEOUT, sdkp->max_retries, 2167 NULL); 2168 spintime_expire = jiffies + 100 * HZ; 2169 spintime = 1; 2170 } 2171 /* Wait 1 second for next try */ 2172 msleep(1000); 2173 printk(KERN_CONT "."); 2174 2175 /* 2176 * Wait for USB flash devices with slow firmware. 2177 * Yes, this sense key/ASC combination shouldn't 2178 * occur here. It's characteristic of these devices. 2179 */ 2180 } else if (sense_valid && 2181 sshdr.sense_key == UNIT_ATTENTION && 2182 sshdr.asc == 0x28) { 2183 if (!spintime) { 2184 spintime_expire = jiffies + 5 * HZ; 2185 spintime = 1; 2186 } 2187 /* Wait 1 second for next try */ 2188 msleep(1000); 2189 } else { 2190 /* we don't understand the sense code, so it's 2191 * probably pointless to loop */ 2192 if(!spintime) { 2193 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n"); 2194 sd_print_sense_hdr(sdkp, &sshdr); 2195 } 2196 break; 2197 } 2198 2199 } while (spintime && time_before_eq(jiffies, spintime_expire)); 2200 2201 if (spintime) { 2202 if (scsi_status_is_good(the_result)) 2203 printk(KERN_CONT "ready\n"); 2204 else 2205 printk(KERN_CONT "not responding...\n"); 2206 } 2207 } 2208 2209 /* 2210 * Determine whether disk supports Data Integrity Field. 2211 */ 2212 static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer) 2213 { 2214 struct scsi_device *sdp = sdkp->device; 2215 u8 type; 2216 2217 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { 2218 sdkp->protection_type = 0; 2219 return 0; 2220 } 2221 2222 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ 2223 2224 if (type > T10_PI_TYPE3_PROTECTION) { 2225 sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \ 2226 " protection type %u. 
Disabling disk!\n", 2227 type); 2228 sdkp->protection_type = 0; 2229 return -ENODEV; 2230 } 2231 2232 sdkp->protection_type = type; 2233 2234 return 0; 2235 } 2236 2237 static void sd_config_protection(struct scsi_disk *sdkp) 2238 { 2239 struct scsi_device *sdp = sdkp->device; 2240 2241 if (!sdkp->first_scan) 2242 return; 2243 2244 sd_dif_config_host(sdkp); 2245 2246 if (!sdkp->protection_type) 2247 return; 2248 2249 if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) { 2250 sd_printk(KERN_NOTICE, sdkp, 2251 "Disabling DIF Type %u protection\n", 2252 sdkp->protection_type); 2253 sdkp->protection_type = 0; 2254 } 2255 2256 sd_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n", 2257 sdkp->protection_type); 2258 } 2259 2260 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, 2261 struct scsi_sense_hdr *sshdr, int sense_valid, 2262 int the_result) 2263 { 2264 if (sense_valid) 2265 sd_print_sense_hdr(sdkp, sshdr); 2266 else 2267 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n"); 2268 2269 /* 2270 * Set dirty bit for removable devices if not ready - 2271 * sometimes drives will not report this properly. 2272 */ 2273 if (sdp->removable && 2274 sense_valid && sshdr->sense_key == NOT_READY) 2275 set_media_not_present(sdkp); 2276 2277 /* 2278 * We used to set media_present to 0 here to indicate no media 2279 * in the drive, but some drives fail read capacity even with 2280 * media present, so we can't do that. 2281 */ 2282 sdkp->capacity = 0; /* unknown mapped to zero - as usual */ 2283 } 2284 2285 #define RC16_LEN 32 2286 #if RC16_LEN > SD_BUF_SIZE 2287 #error RC16_LEN must not be more than SD_BUF_SIZE 2288 #endif 2289 2290 #define READ_CAPACITY_RETRIES_ON_RESET 10 2291 2292 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, 2293 unsigned char *buffer) 2294 { 2295 unsigned char cmd[16]; 2296 struct scsi_sense_hdr sshdr; 2297 int sense_valid = 0; 2298 int the_result; 2299 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; 2300 unsigned int alignment; 2301 unsigned long long lba; 2302 unsigned sector_size; 2303 2304 if (sdp->no_read_capacity_16) 2305 return -EINVAL; 2306 2307 do { 2308 memset(cmd, 0, 16); 2309 cmd[0] = SERVICE_ACTION_IN_16; 2310 cmd[1] = SAI_READ_CAPACITY_16; 2311 cmd[13] = RC16_LEN; 2312 memset(buffer, 0, RC16_LEN); 2313 2314 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, 2315 buffer, RC16_LEN, &sshdr, 2316 SD_TIMEOUT, sdkp->max_retries, NULL); 2317 2318 if (media_not_present(sdkp, &sshdr)) 2319 return -ENODEV; 2320 2321 if (the_result > 0) { 2322 sense_valid = scsi_sense_valid(&sshdr); 2323 if (sense_valid && 2324 sshdr.sense_key == ILLEGAL_REQUEST && 2325 (sshdr.asc == 0x20 || sshdr.asc == 0x24) && 2326 sshdr.ascq == 0x00) 2327 /* Invalid Command Operation Code or 2328 * Invalid Field in CDB, just retry 2329 * silently with RC10 */ 2330 return -EINVAL; 2331 if (sense_valid && 2332 sshdr.sense_key == UNIT_ATTENTION && 2333 sshdr.asc == 0x29 && sshdr.ascq == 0x00) 2334 /* Device reset might occur several times, 2335 * give it one more chance */ 2336 if (--reset_retries > 0) 2337 continue; 2338 } 2339 retries--; 2340 2341 } while (the_result && retries); 2342 2343 if (the_result) { 2344 sd_print_result(sdkp, "Read Capacity(16) failed", the_result); 2345 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); 2346 return -EINVAL; 2347 } 2348 2349 sector_size = get_unaligned_be32(&buffer[8]); 2350 lba = get_unaligned_be64(&buffer[0]); 2351 2352 if 
(sd_read_protection_type(sdkp, buffer) < 0) { 2353 sdkp->capacity = 0; 2354 return -ENODEV; 2355 } 2356 2357 /* Logical blocks per physical block exponent */ 2358 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size; 2359 2360 /* RC basis */ 2361 sdkp->rc_basis = (buffer[12] >> 4) & 0x3; 2362 2363 /* Lowest aligned logical block */ 2364 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; 2365 blk_queue_alignment_offset(sdp->request_queue, alignment); 2366 if (alignment && sdkp->first_scan) 2367 sd_printk(KERN_NOTICE, sdkp, 2368 "physical block alignment offset: %u\n", alignment); 2369 2370 if (buffer[14] & 0x80) { /* LBPME */ 2371 sdkp->lbpme = 1; 2372 2373 if (buffer[14] & 0x40) /* LBPRZ */ 2374 sdkp->lbprz = 1; 2375 2376 sd_config_discard(sdkp, SD_LBP_WS16); 2377 } 2378 2379 sdkp->capacity = lba + 1; 2380 return sector_size; 2381 } 2382 2383 static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, 2384 unsigned char *buffer) 2385 { 2386 unsigned char cmd[16]; 2387 struct scsi_sense_hdr sshdr; 2388 int sense_valid = 0; 2389 int the_result; 2390 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; 2391 sector_t lba; 2392 unsigned sector_size; 2393 2394 do { 2395 cmd[0] = READ_CAPACITY; 2396 memset(&cmd[1], 0, 9); 2397 memset(buffer, 0, 8); 2398 2399 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, 2400 buffer, 8, &sshdr, 2401 SD_TIMEOUT, sdkp->max_retries, NULL); 2402 2403 if (media_not_present(sdkp, &sshdr)) 2404 return -ENODEV; 2405 2406 if (the_result > 0) { 2407 sense_valid = scsi_sense_valid(&sshdr); 2408 if (sense_valid && 2409 sshdr.sense_key == UNIT_ATTENTION && 2410 sshdr.asc == 0x29 && sshdr.ascq == 0x00) 2411 /* Device reset might occur several times, 2412 * give it one more chance */ 2413 if (--reset_retries > 0) 2414 continue; 2415 } 2416 retries--; 2417 2418 } while (the_result && retries); 2419 2420 if (the_result) { 2421 sd_print_result(sdkp, "Read Capacity(10) failed", the_result); 2422 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); 2423 return -EINVAL; 2424 } 2425 2426 sector_size = get_unaligned_be32(&buffer[4]); 2427 lba = get_unaligned_be32(&buffer[0]); 2428 2429 if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) { 2430 /* Some buggy (usb cardreader) devices return an lba of 2431 0xffffffff when they want to report a size of 0 (with 2432 which they really mean no media is present) */ 2433 sdkp->capacity = 0; 2434 sdkp->physical_block_size = sector_size; 2435 return sector_size; 2436 } 2437 2438 sdkp->capacity = lba + 1; 2439 sdkp->physical_block_size = sector_size; 2440 return sector_size; 2441 } 2442 2443 static int sd_try_rc16_first(struct scsi_device *sdp) 2444 { 2445 if (sdp->host->max_cmd_len < 16) 2446 return 0; 2447 if (sdp->try_rc_10_first) 2448 return 0; 2449 if (sdp->scsi_level > SCSI_SPC_2) 2450 return 1; 2451 if (scsi_device_protection(sdp)) 2452 return 1; 2453 return 0; 2454 } 2455 2456 /* 2457 * read disk capacity 2458 */ 2459 static void 2460 sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) 2461 { 2462 int sector_size; 2463 struct scsi_device *sdp = sdkp->device; 2464 2465 if (sd_try_rc16_first(sdp)) { 2466 sector_size = read_capacity_16(sdkp, sdp, buffer); 2467 if (sector_size == -EOVERFLOW) 2468 goto got_data; 2469 if (sector_size == -ENODEV) 2470 return; 2471 if (sector_size < 0) 2472 sector_size = read_capacity_10(sdkp, sdp, buffer); 2473 if (sector_size < 0) 2474 return; 2475 } else { 2476 sector_size = read_capacity_10(sdkp, sdp, buffer); 2477 if
(sector_size == -EOVERFLOW) 2478 goto got_data; 2479 if (sector_size < 0) 2480 return; 2481 if ((sizeof(sdkp->capacity) > 4) && 2482 (sdkp->capacity > 0xffffffffULL)) { 2483 int old_sector_size = sector_size; 2484 sd_printk(KERN_NOTICE, sdkp, "Very big device. " 2485 "Trying to use READ CAPACITY(16).\n"); 2486 sector_size = read_capacity_16(sdkp, sdp, buffer); 2487 if (sector_size < 0) { 2488 sd_printk(KERN_NOTICE, sdkp, 2489 "Using 0xffffffff as device size\n"); 2490 sdkp->capacity = 1 + (sector_t) 0xffffffff; 2491 sector_size = old_sector_size; 2492 goto got_data; 2493 } 2494 /* Remember that READ CAPACITY(16) succeeded */ 2495 sdp->try_rc_10_first = 0; 2496 } 2497 } 2498 2499 /* Some devices are known to return the total number of blocks, 2500 * not the highest block number. Some devices have versions 2501 * which do this and others which do not. Some devices we might 2502 * suspect of doing this but we don't know for certain. 2503 * 2504 * If we know the reported capacity is wrong, decrement it. If 2505 * we can only guess, then assume the number of blocks is even 2506 * (usually true but not always) and err on the side of lowering 2507 * the capacity. 2508 */ 2509 if (sdp->fix_capacity || 2510 (sdp->guess_capacity && (sdkp->capacity & 0x01))) { 2511 sd_printk(KERN_INFO, sdkp, "Adjusting the sector count " 2512 "from its reported value: %llu\n", 2513 (unsigned long long) sdkp->capacity); 2514 --sdkp->capacity; 2515 } 2516 2517 got_data: 2518 if (sector_size == 0) { 2519 sector_size = 512; 2520 sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, " 2521 "assuming 512.\n"); 2522 } 2523 2524 if (sector_size != 512 && 2525 sector_size != 1024 && 2526 sector_size != 2048 && 2527 sector_size != 4096) { 2528 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", 2529 sector_size); 2530 /* 2531 * The user might want to re-format the drive with 2532 * a supported sectorsize. Once this happens, it 2533 * would be relatively trivial to set the thing up. 2534 * For this reason, we leave the thing in the table. 
2535 */ 2536 sdkp->capacity = 0; 2537 /* 2538 set a bogus sector size so the normal read/write 2539 logic in the block layer will eventually refuse any 2540 request on this device without tripping over power 2541 of two sector size assumptions 2542 */ 2543 sector_size = 512; 2544 } 2545 blk_queue_logical_block_size(sdp->request_queue, sector_size); 2546 blk_queue_physical_block_size(sdp->request_queue, 2547 sdkp->physical_block_size); 2548 sdkp->device->sector_size = sector_size; 2549 2550 if (sdkp->capacity > 0xffffffff) 2551 sdp->use_16_for_rw = 1; 2552 2553 } 2554 2555 /* 2556 * Print disk capacity 2557 */ 2558 static void 2559 sd_print_capacity(struct scsi_disk *sdkp, 2560 sector_t old_capacity) 2561 { 2562 int sector_size = sdkp->device->sector_size; 2563 char cap_str_2[10], cap_str_10[10]; 2564 2565 if (!sdkp->first_scan && old_capacity == sdkp->capacity) 2566 return; 2567 2568 string_get_size(sdkp->capacity, sector_size, 2569 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); 2570 string_get_size(sdkp->capacity, sector_size, 2571 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); 2572 2573 sd_printk(KERN_NOTICE, sdkp, 2574 "%llu %d-byte logical blocks: (%s/%s)\n", 2575 (unsigned long long)sdkp->capacity, 2576 sector_size, cap_str_10, cap_str_2); 2577 2578 if (sdkp->physical_block_size != sector_size) 2579 sd_printk(KERN_NOTICE, sdkp, 2580 "%u-byte physical blocks\n", 2581 sdkp->physical_block_size); 2582 } 2583 2584 /* called with buffer of length 512 */ 2585 static inline int 2586 sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage, 2587 unsigned char *buffer, int len, struct scsi_mode_data *data, 2588 struct scsi_sense_hdr *sshdr) 2589 { 2590 /* 2591 * If we must use MODE SENSE(10), make sure that the buffer length 2592 * is at least 8 bytes so that the mode sense header fits. 2593 */ 2594 if (sdkp->device->use_10_for_ms && len < 8) 2595 len = 8; 2596 2597 return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len, 2598 SD_TIMEOUT, sdkp->max_retries, data, 2599 sshdr); 2600 } 2601 2602 /* 2603 * read write protect setting, if possible - called only in sd_revalidate_disk() 2604 * called with buffer of length SD_BUF_SIZE 2605 */ 2606 static void 2607 sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) 2608 { 2609 int res; 2610 struct scsi_device *sdp = sdkp->device; 2611 struct scsi_mode_data data; 2612 int old_wp = sdkp->write_prot; 2613 2614 set_disk_ro(sdkp->disk, 0); 2615 if (sdp->skip_ms_page_3f) { 2616 sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n"); 2617 return; 2618 } 2619 2620 if (sdp->use_192_bytes_for_3f) { 2621 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL); 2622 } else { 2623 /* 2624 * First attempt: ask for all pages (0x3F), but only 4 bytes. 2625 * We have to start carefully: some devices hang if we ask 2626 * for more than is available. 2627 */ 2628 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL); 2629 2630 /* 2631 * Second attempt: ask for page 0. When only page 0 is 2632 * implemented, a request for page 3F may return Sense Key 2633 * 5: Illegal Request, Sense Code 24: Invalid field in 2634 * CDB. 2635 */ 2636 if (res < 0) 2637 res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL); 2638 2639 /* 2640 * Third attempt: ask 255 bytes, as we did earlier.
2641 */ 2642 if (res < 0) 2643 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255, 2644 &data, NULL); 2645 } 2646 2647 if (res < 0) { 2648 sd_first_printk(KERN_WARNING, sdkp, 2649 "Test WP failed, assume Write Enabled\n"); 2650 } else { 2651 sdkp->write_prot = ((data.device_specific & 0x80) != 0); 2652 set_disk_ro(sdkp->disk, sdkp->write_prot); 2653 if (sdkp->first_scan || old_wp != sdkp->write_prot) { 2654 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", 2655 sdkp->write_prot ? "on" : "off"); 2656 sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer); 2657 } 2658 } 2659 } 2660 2661 /* 2662 * sd_read_cache_type - called only from sd_revalidate_disk() 2663 * called with buffer of length SD_BUF_SIZE 2664 */ 2665 static void 2666 sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) 2667 { 2668 int len = 0, res; 2669 struct scsi_device *sdp = sdkp->device; 2670 2671 int dbd; 2672 int modepage; 2673 int first_len; 2674 struct scsi_mode_data data; 2675 struct scsi_sense_hdr sshdr; 2676 int old_wce = sdkp->WCE; 2677 int old_rcd = sdkp->RCD; 2678 int old_dpofua = sdkp->DPOFUA; 2679 2680 2681 if (sdkp->cache_override) 2682 return; 2683 2684 first_len = 4; 2685 if (sdp->skip_ms_page_8) { 2686 if (sdp->type == TYPE_RBC) 2687 goto defaults; 2688 else { 2689 if (sdp->skip_ms_page_3f) 2690 goto defaults; 2691 modepage = 0x3F; 2692 if (sdp->use_192_bytes_for_3f) 2693 first_len = 192; 2694 dbd = 0; 2695 } 2696 } else if (sdp->type == TYPE_RBC) { 2697 modepage = 6; 2698 dbd = 8; 2699 } else { 2700 modepage = 8; 2701 dbd = 0; 2702 } 2703 2704 /* cautiously ask */ 2705 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len, 2706 &data, &sshdr); 2707 2708 if (res < 0) 2709 goto bad_sense; 2710 2711 if (!data.header_length) { 2712 modepage = 6; 2713 first_len = 0; 2714 sd_first_printk(KERN_ERR, sdkp, 2715 "Missing header in MODE_SENSE response\n"); 2716 } 2717 2718 /* that went OK, now ask for the proper length */ 2719 len = data.length; 2720 2721 /* 2722 * We're only interested in the first three bytes, actually. 2723 * But the data cache page is defined for the first 20. 2724 */ 2725 if (len < 3) 2726 goto bad_sense; 2727 else if (len > SD_BUF_SIZE) { 2728 sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " 2729 "data from %d to %d bytes\n", len, SD_BUF_SIZE); 2730 len = SD_BUF_SIZE; 2731 } 2732 if (modepage == 0x3F && sdp->use_192_bytes_for_3f) 2733 len = 192; 2734 2735 /* Get the data */ 2736 if (len > first_len) 2737 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len, 2738 &data, &sshdr); 2739 2740 if (!res) { 2741 int offset = data.header_length + data.block_descriptor_length; 2742 2743 while (offset < len) { 2744 u8 page_code = buffer[offset] & 0x3F; 2745 u8 spf = buffer[offset] & 0x40; 2746 2747 if (page_code == 8 || page_code == 6) { 2748 /* We're interested only in the first 3 bytes. 
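(i.e. buffer[offset + 2] must lie within the data actually read, hence the length check below.)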
2749 */ 2750 if (len - offset <= 2) { 2751 sd_first_printk(KERN_ERR, sdkp, 2752 "Incomplete mode parameter " 2753 "data\n"); 2754 goto defaults; 2755 } else { 2756 modepage = page_code; 2757 goto Page_found; 2758 } 2759 } else { 2760 /* Go to the next page */ 2761 if (spf && len - offset > 3) 2762 offset += 4 + (buffer[offset+2] << 8) + 2763 buffer[offset+3]; 2764 else if (!spf && len - offset > 1) 2765 offset += 2 + buffer[offset+1]; 2766 else { 2767 sd_first_printk(KERN_ERR, sdkp, 2768 "Incomplete mode " 2769 "parameter data\n"); 2770 goto defaults; 2771 } 2772 } 2773 } 2774 2775 sd_first_printk(KERN_WARNING, sdkp, 2776 "No Caching mode page found\n"); 2777 goto defaults; 2778 2779 Page_found: 2780 if (modepage == 8) { 2781 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); 2782 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); 2783 } else { 2784 sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0); 2785 sdkp->RCD = 0; 2786 } 2787 2788 sdkp->DPOFUA = (data.device_specific & 0x10) != 0; 2789 if (sdp->broken_fua) { 2790 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); 2791 sdkp->DPOFUA = 0; 2792 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && 2793 !sdkp->device->use_16_for_rw) { 2794 sd_first_printk(KERN_NOTICE, sdkp, 2795 "Uses READ/WRITE(6), disabling FUA\n"); 2796 sdkp->DPOFUA = 0; 2797 } 2798 2799 /* No cache flush allowed for write protected devices */ 2800 if (sdkp->WCE && sdkp->write_prot) 2801 sdkp->WCE = 0; 2802 2803 if (sdkp->first_scan || old_wce != sdkp->WCE || 2804 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA) 2805 sd_printk(KERN_NOTICE, sdkp, 2806 "Write cache: %s, read cache: %s, %s\n", 2807 sdkp->WCE ? "enabled" : "disabled", 2808 sdkp->RCD ? "disabled" : "enabled", 2809 sdkp->DPOFUA ? "supports DPO and FUA" 2810 : "doesn't support DPO or FUA"); 2811 2812 return; 2813 } 2814 2815 bad_sense: 2816 if (scsi_sense_valid(&sshdr) && 2817 sshdr.sense_key == ILLEGAL_REQUEST && 2818 sshdr.asc == 0x24 && sshdr.ascq == 0x0) 2819 /* Invalid field in CDB */ 2820 sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n"); 2821 else 2822 sd_first_printk(KERN_ERR, sdkp, 2823 "Asking for cache data failed\n"); 2824 2825 defaults: 2826 if (sdp->wce_default_on) { 2827 sd_first_printk(KERN_NOTICE, sdkp, 2828 "Assuming drive cache: write back\n"); 2829 sdkp->WCE = 1; 2830 } else { 2831 sd_first_printk(KERN_WARNING, sdkp, 2832 "Assuming drive cache: write through\n"); 2833 sdkp->WCE = 0; 2834 } 2835 sdkp->RCD = 0; 2836 sdkp->DPOFUA = 0; 2837 } 2838 2839 /* 2840 * The ATO bit indicates whether the DIF application tag is available 2841 * for use by the operating system. 
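 * The bit is reported in the Control mode page (0Ah), which is fetched below with MODE SENSE.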
2842 */ 2843 static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) 2844 { 2845 int res, offset; 2846 struct scsi_device *sdp = sdkp->device; 2847 struct scsi_mode_data data; 2848 struct scsi_sense_hdr sshdr; 2849 2850 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 2851 return; 2852 2853 if (sdkp->protection_type == 0) 2854 return; 2855 2856 res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT, 2857 sdkp->max_retries, &data, &sshdr); 2858 2859 if (res < 0 || !data.header_length || 2860 data.length < 6) { 2861 sd_first_printk(KERN_WARNING, sdkp, 2862 "getting Control mode page failed, assume no ATO\n"); 2863 2864 if (scsi_sense_valid(&sshdr)) 2865 sd_print_sense_hdr(sdkp, &sshdr); 2866 2867 return; 2868 } 2869 2870 offset = data.header_length + data.block_descriptor_length; 2871 2872 if ((buffer[offset] & 0x3f) != 0x0a) { 2873 sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n"); 2874 return; 2875 } 2876 2877 if ((buffer[offset + 5] & 0x80) == 0) 2878 return; 2879 2880 sdkp->ATO = 1; 2881 2882 return; 2883 } 2884 2885 /** 2886 * sd_read_block_limits - Query disk device for preferred I/O sizes. 2887 * @sdkp: disk to query 2888 */ 2889 static void sd_read_block_limits(struct scsi_disk *sdkp) 2890 { 2891 struct scsi_vpd *vpd; 2892 2893 rcu_read_lock(); 2894 2895 vpd = rcu_dereference(sdkp->device->vpd_pgb0); 2896 if (!vpd || vpd->len < 16) 2897 goto out; 2898 2899 sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]); 2900 sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]); 2901 sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]); 2902 2903 if (vpd->len >= 64) { 2904 unsigned int lba_count, desc_count; 2905 2906 sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]); 2907 2908 if (!sdkp->lbpme) 2909 goto out; 2910 2911 lba_count = get_unaligned_be32(&vpd->data[20]); 2912 desc_count = get_unaligned_be32(&vpd->data[24]); 2913 2914 if (lba_count && desc_count) 2915 sdkp->max_unmap_blocks = lba_count; 2916 2917 sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]); 2918 2919 if (vpd->data[32] & 0x80) 2920 sdkp->unmap_alignment = 2921 get_unaligned_be32(&vpd->data[32]) & ~(1 << 31); 2922 2923 if (!sdkp->lbpvpd) { /* LBP VPD page not provided */ 2924 2925 if (sdkp->max_unmap_blocks) 2926 sd_config_discard(sdkp, SD_LBP_UNMAP); 2927 else 2928 sd_config_discard(sdkp, SD_LBP_WS16); 2929 2930 } else { /* LBP VPD page tells us what to use */ 2931 if (sdkp->lbpu && sdkp->max_unmap_blocks) 2932 sd_config_discard(sdkp, SD_LBP_UNMAP); 2933 else if (sdkp->lbpws) 2934 sd_config_discard(sdkp, SD_LBP_WS16); 2935 else if (sdkp->lbpws10) 2936 sd_config_discard(sdkp, SD_LBP_WS10); 2937 else 2938 sd_config_discard(sdkp, SD_LBP_DISABLE); 2939 } 2940 } 2941 2942 out: 2943 rcu_read_unlock(); 2944 } 2945 2946 /** 2947 * sd_read_block_characteristics - Query block dev. 
characteristics 2948 * @sdkp: disk to query 2949 */ 2950 static void sd_read_block_characteristics(struct scsi_disk *sdkp) 2951 { 2952 struct request_queue *q = sdkp->disk->queue; 2953 struct scsi_vpd *vpd; 2954 u16 rot; 2955 u8 zoned; 2956 2957 rcu_read_lock(); 2958 vpd = rcu_dereference(sdkp->device->vpd_pgb1); 2959 2960 if (!vpd || vpd->len < 8) { 2961 rcu_read_unlock(); 2962 return; 2963 } 2964 2965 rot = get_unaligned_be16(&vpd->data[4]); 2966 zoned = (vpd->data[8] >> 4) & 3; 2967 rcu_read_unlock(); 2968 2969 if (rot == 1) { 2970 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 2971 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); 2972 } 2973 2974 if (sdkp->device->type == TYPE_ZBC) { 2975 /* Host-managed */ 2976 disk_set_zoned(sdkp->disk, BLK_ZONED_HM); 2977 } else { 2978 sdkp->zoned = zoned; 2979 if (sdkp->zoned == 1) { 2980 /* Host-aware */ 2981 disk_set_zoned(sdkp->disk, BLK_ZONED_HA); 2982 } else { 2983 /* Regular disk or drive managed disk */ 2984 disk_set_zoned(sdkp->disk, BLK_ZONED_NONE); 2985 } 2986 } 2987 2988 if (!sdkp->first_scan) 2989 return; 2990 2991 if (blk_queue_is_zoned(q)) { 2992 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", 2993 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); 2994 } else { 2995 if (sdkp->zoned == 1) 2996 sd_printk(KERN_NOTICE, sdkp, 2997 "Host-aware SMR disk used as regular disk\n"); 2998 else if (sdkp->zoned == 2) 2999 sd_printk(KERN_NOTICE, sdkp, 3000 "Drive-managed SMR disk\n"); 3001 } 3002 } 3003 3004 /** 3005 * sd_read_block_provisioning - Query provisioning VPD page 3006 * @sdkp: disk to query 3007 */ 3008 static void sd_read_block_provisioning(struct scsi_disk *sdkp) 3009 { 3010 struct scsi_vpd *vpd; 3011 3012 if (sdkp->lbpme == 0) 3013 return; 3014 3015 rcu_read_lock(); 3016 vpd = rcu_dereference(sdkp->device->vpd_pgb2); 3017 3018 if (!vpd || vpd->len < 8) { 3019 rcu_read_unlock(); 3020 return; 3021 } 3022 3023 sdkp->lbpvpd = 1; 3024 sdkp->lbpu = (vpd->data[5] >> 7) & 1; /* UNMAP */ 3025 sdkp->lbpws = (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */ 3026 sdkp->lbpws10 = (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */ 3027 rcu_read_unlock(); 3028 } 3029 3030 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) 3031 { 3032 struct scsi_device *sdev = sdkp->device; 3033 3034 if (sdev->host->no_write_same) { 3035 sdev->no_write_same = 1; 3036 3037 return; 3038 } 3039 3040 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) { 3041 struct scsi_vpd *vpd; 3042 3043 sdev->no_report_opcodes = 1; 3044 3045 /* Disable WRITE SAME if REPORT SUPPORTED OPERATION 3046 * CODES is unsupported and the device has an ATA 3047 * Information VPD page (SAT). 
3048 */ 3049 rcu_read_lock(); 3050 vpd = rcu_dereference(sdev->vpd_pg89); 3051 if (vpd) 3052 sdev->no_write_same = 1; 3053 rcu_read_unlock(); 3054 } 3055 3056 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1) 3057 sdkp->ws16 = 1; 3058 3059 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1) 3060 sdkp->ws10 = 1; 3061 } 3062 3063 static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer) 3064 { 3065 struct scsi_device *sdev = sdkp->device; 3066 3067 if (!sdev->security_supported) 3068 return; 3069 3070 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, 3071 SECURITY_PROTOCOL_IN) == 1 && 3072 scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, 3073 SECURITY_PROTOCOL_OUT) == 1) 3074 sdkp->security = 1; 3075 } 3076 3077 static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf) 3078 { 3079 return logical_to_sectors(sdkp->device, get_unaligned_be64(buf)); 3080 } 3081 3082 /** 3083 * sd_read_cpr - Query concurrent positioning ranges 3084 * @sdkp: disk to query 3085 */ 3086 static void sd_read_cpr(struct scsi_disk *sdkp) 3087 { 3088 struct blk_independent_access_ranges *iars = NULL; 3089 unsigned char *buffer = NULL; 3090 unsigned int nr_cpr = 0; 3091 int i, vpd_len, buf_len = SD_BUF_SIZE; 3092 u8 *desc; 3093 3094 /* 3095 * We need to have the capacity set first for the block layer to be 3096 * able to check the ranges. 3097 */ 3098 if (sdkp->first_scan) 3099 return; 3100 3101 if (!sdkp->capacity) 3102 goto out; 3103 3104 /* 3105 * Concurrent Positioning Ranges VPD: there can be at most 256 ranges, 3106 * leading to a maximum page size of 64 + 256*32 bytes. 3107 */ 3108 buf_len = 64 + 256*32; 3109 buffer = kmalloc(buf_len, GFP_KERNEL); 3110 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len)) 3111 goto out; 3112 3113 /* We must have at least a 64B header and one 32B range descriptor */ 3114 vpd_len = get_unaligned_be16(&buffer[2]) + 4; 3115 if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) { 3116 sd_printk(KERN_ERR, sdkp, 3117 "Invalid Concurrent Positioning Ranges VPD page\n"); 3118 goto out; 3119 } 3120 3121 nr_cpr = (vpd_len - 64) / 32; 3122 if (nr_cpr == 1) { 3123 nr_cpr = 0; 3124 goto out; 3125 } 3126 3127 iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr); 3128 if (!iars) { 3129 nr_cpr = 0; 3130 goto out; 3131 } 3132 3133 desc = &buffer[64]; 3134 for (i = 0; i < nr_cpr; i++, desc += 32) { 3135 if (desc[0] != i) { 3136 sd_printk(KERN_ERR, sdkp, 3137 "Invalid Concurrent Positioning Range number\n"); 3138 nr_cpr = 0; 3139 break; 3140 } 3141 3142 iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8); 3143 iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16); 3144 } 3145 3146 out: 3147 disk_set_independent_access_ranges(sdkp->disk, iars); 3148 if (nr_cpr && sdkp->nr_actuators != nr_cpr) { 3149 sd_printk(KERN_NOTICE, sdkp, 3150 "%u concurrent positioning ranges\n", nr_cpr); 3151 sdkp->nr_actuators = nr_cpr; 3152 } 3153 3154 kfree(buffer); 3155 } 3156 3157 static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp) 3158 { 3159 struct scsi_device *sdp = sdkp->device; 3160 unsigned int min_xfer_bytes = 3161 logical_to_bytes(sdp, sdkp->min_xfer_blocks); 3162 3163 if (sdkp->min_xfer_blocks == 0) 3164 return false; 3165 3166 if (min_xfer_bytes & (sdkp->physical_block_size - 1)) { 3167 sd_first_printk(KERN_WARNING, sdkp, 3168 "Preferred minimum I/O size %u bytes not a " \ 3169 "multiple of physical block size (%u bytes)\n", 3170 min_xfer_bytes, sdkp->physical_block_size); 3171 
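 /* Discard the unusable value; sd_revalidate_disk() will then pass 0 to blk_queue_io_min(). */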
sdkp->min_xfer_blocks = 0; 3172 return false; 3173 } 3174 3175 sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n", 3176 min_xfer_bytes); 3177 return true; 3178 } 3179 3180 /* 3181 * Determine the device's preferred I/O size for reads and writes 3182 * unless the reported value is unreasonably small, large, not a 3183 * multiple of the physical block size, or simply garbage. 3184 */ 3185 static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, 3186 unsigned int dev_max) 3187 { 3188 struct scsi_device *sdp = sdkp->device; 3189 unsigned int opt_xfer_bytes = 3190 logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 3191 unsigned int min_xfer_bytes = 3192 logical_to_bytes(sdp, sdkp->min_xfer_blocks); 3193 3194 if (sdkp->opt_xfer_blocks == 0) 3195 return false; 3196 3197 if (sdkp->opt_xfer_blocks > dev_max) { 3198 sd_first_printk(KERN_WARNING, sdkp, 3199 "Optimal transfer size %u logical blocks " \ 3200 "> dev_max (%u logical blocks)\n", 3201 sdkp->opt_xfer_blocks, dev_max); 3202 return false; 3203 } 3204 3205 if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) { 3206 sd_first_printk(KERN_WARNING, sdkp, 3207 "Optimal transfer size %u logical blocks " \ 3208 "> sd driver limit (%u logical blocks)\n", 3209 sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS); 3210 return false; 3211 } 3212 3213 if (opt_xfer_bytes < PAGE_SIZE) { 3214 sd_first_printk(KERN_WARNING, sdkp, 3215 "Optimal transfer size %u bytes < " \ 3216 "PAGE_SIZE (%u bytes)\n", 3217 opt_xfer_bytes, (unsigned int)PAGE_SIZE); 3218 return false; 3219 } 3220 3221 if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) { 3222 sd_first_printk(KERN_WARNING, sdkp, 3223 "Optimal transfer size %u bytes not a " \ 3224 "multiple of preferred minimum block " \ 3225 "size (%u bytes)\n", 3226 opt_xfer_bytes, min_xfer_bytes); 3227 return false; 3228 } 3229 3230 if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) { 3231 sd_first_printk(KERN_WARNING, sdkp, 3232 "Optimal transfer size %u bytes not a " \ 3233 "multiple of physical block size (%u bytes)\n", 3234 opt_xfer_bytes, sdkp->physical_block_size); 3235 return false; 3236 } 3237 3238 sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n", 3239 opt_xfer_bytes); 3240 return true; 3241 } 3242 3243 /** 3244 * sd_revalidate_disk - called the first time a new disk is seen, 3245 * performs disk spin up, read_capacity, etc. 3246 * @disk: struct gendisk we care about 3247 **/ 3248 static int sd_revalidate_disk(struct gendisk *disk) 3249 { 3250 struct scsi_disk *sdkp = scsi_disk(disk); 3251 struct scsi_device *sdp = sdkp->device; 3252 struct request_queue *q = sdkp->disk->queue; 3253 sector_t old_capacity = sdkp->capacity; 3254 unsigned char *buffer; 3255 unsigned int dev_max, rw_max; 3256 3257 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, 3258 "sd_revalidate_disk\n")); 3259 3260 /* 3261 * If the device is offline, don't try and read capacity or any 3262 * of the other niceties. 3263 */ 3264 if (!scsi_device_online(sdp)) 3265 goto out; 3266 3267 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL); 3268 if (!buffer) { 3269 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory " 3270 "allocation failure.\n"); 3271 goto out; 3272 } 3273 3274 sd_spinup_disk(sdkp); 3275 3276 /* 3277 * Without media there is no reason to ask; moreover, some devices 3278 * react badly if we do. 3279 */ 3280 if (sdkp->media_present) { 3281 sd_read_capacity(sdkp, buffer); 3282 3283 /* 3284 * set the default to rotational. 
All non-rotational devices 3285 * support the block characteristics VPD page, which will 3286 * cause this to be updated correctly and any device which 3287 * doesn't support it should be treated as rotational. 3288 */ 3289 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); 3290 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); 3291 3292 if (scsi_device_supports_vpd(sdp)) { 3293 sd_read_block_provisioning(sdkp); 3294 sd_read_block_limits(sdkp); 3295 sd_read_block_characteristics(sdkp); 3296 sd_zbc_read_zones(sdkp, buffer); 3297 sd_read_cpr(sdkp); 3298 } 3299 3300 sd_print_capacity(sdkp, old_capacity); 3301 3302 sd_read_write_protect_flag(sdkp, buffer); 3303 sd_read_cache_type(sdkp, buffer); 3304 sd_read_app_tag_own(sdkp, buffer); 3305 sd_read_write_same(sdkp, buffer); 3306 sd_read_security(sdkp, buffer); 3307 sd_config_protection(sdkp); 3308 } 3309 3310 /* 3311 * We now have all cache related info, determine how we deal 3312 * with flush requests. 3313 */ 3314 sd_set_flush_flag(sdkp); 3315 3316 /* Initial block count limit based on CDB TRANSFER LENGTH field size. */ 3317 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; 3318 3319 /* Some devices report a maximum block count for READ/WRITE requests. */ 3320 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); 3321 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); 3322 3323 if (sd_validate_min_xfer_size(sdkp)) 3324 blk_queue_io_min(sdkp->disk->queue, 3325 logical_to_bytes(sdp, sdkp->min_xfer_blocks)); 3326 else 3327 blk_queue_io_min(sdkp->disk->queue, 0); 3328 3329 if (sd_validate_opt_xfer_size(sdkp, dev_max)) { 3330 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 3331 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); 3332 } else { 3333 q->limits.io_opt = 0; 3334 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), 3335 (sector_t)BLK_DEF_MAX_SECTORS); 3336 } 3337 3338 /* 3339 * Limit default to SCSI host optimal sector limit if set. There may be 3340 * an impact on performance for when the size of a request exceeds this 3341 * host limit. 3342 */ 3343 rw_max = min_not_zero(rw_max, sdp->host->opt_sectors); 3344 3345 /* Do not exceed controller limit */ 3346 rw_max = min(rw_max, queue_max_hw_sectors(q)); 3347 3348 /* 3349 * Only update max_sectors if previously unset or if the current value 3350 * exceeds the capabilities of the hardware. 3351 */ 3352 if (sdkp->first_scan || 3353 q->limits.max_sectors > q->limits.max_dev_sectors || 3354 q->limits.max_sectors > q->limits.max_hw_sectors) 3355 q->limits.max_sectors = rw_max; 3356 3357 sdkp->first_scan = 0; 3358 3359 set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity)); 3360 sd_config_write_same(sdkp); 3361 kfree(buffer); 3362 3363 /* 3364 * For a zoned drive, revalidating the zones can be done only once 3365 * the gendisk capacity is set. So if this fails, set back the gendisk 3366 * capacity to 0. 3367 */ 3368 if (sd_zbc_revalidate_zones(sdkp)) 3369 set_capacity_and_notify(disk, 0); 3370 3371 out: 3372 return 0; 3373 } 3374 3375 /** 3376 * sd_unlock_native_capacity - unlock native capacity 3377 * @disk: struct gendisk to set capacity for 3378 * 3379 * Block layer calls this function if it detects that partitions 3380 * on @disk reach beyond the end of the device. If the SCSI host 3381 * implements ->unlock_native_capacity() method, it's invoked to 3382 * give it a chance to adjust the device capacity. 3383 * 3384 * CONTEXT: 3385 * Defined by block layer. Might sleep. 
3386 */ 3387 static void sd_unlock_native_capacity(struct gendisk *disk) 3388 { 3389 struct scsi_device *sdev = scsi_disk(disk)->device; 3390 3391 if (sdev->host->hostt->unlock_native_capacity) 3392 sdev->host->hostt->unlock_native_capacity(sdev); 3393 } 3394 3395 /** 3396 * sd_format_disk_name - format disk name 3397 * @prefix: name prefix - ie. "sd" for SCSI disks 3398 * @index: index of the disk to format name for 3399 * @buf: output buffer 3400 * @buflen: length of the output buffer 3401 * 3402 * SCSI disk names start at sda. The 26th device is sdz and the 3403 * 27th is sdaa. The last two-lettered name is sdzz, 3404 * which is followed by sdaaa. 3405 * 3406 * This is basically base-26 counting with one extra 'nil' entry 3407 * at the beginning from the second digit on, and can be 3408 * determined using a method similar to base-26 conversion with the 3409 * index shifted by -1 after each digit is computed. 3410 * 3411 * CONTEXT: 3412 * Don't care. 3413 * 3414 * RETURNS: 3415 * 0 on success, -errno on failure. 3416 */ 3417 static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen) 3418 { 3419 const int base = 'z' - 'a' + 1; 3420 char *begin = buf + strlen(prefix); 3421 char *end = buf + buflen; 3422 char *p; 3423 int unit; 3424 3425 p = end - 1; 3426 *p = '\0'; 3427 unit = base; 3428 do { 3429 if (p == begin) 3430 return -EINVAL; 3431 *--p = 'a' + (index % unit); 3432 index = (index / unit) - 1; 3433 } while (index >= 0); 3434 3435 memmove(begin, p, end - p); 3436 memcpy(buf, prefix, strlen(prefix)); 3437 3438 return 0; 3439 } 3440 3441 /** 3442 * sd_probe - called during driver initialization and whenever a 3443 * new scsi device is attached to the system. It is called once 3444 * for each scsi device (not just disks) present. 3445 * @dev: pointer to device object 3446 * 3447 * Returns 0 if successful (or not interested in this scsi device 3448 * (e.g. scanner)); 1 when there is an error. 3449 * 3450 * Note: this function is invoked from the scsi mid-level. 3451 * This function sets up the mapping between a given 3452 * <host,channel,id,lun> (found in sdp) and new device name 3453 * (e.g. /dev/sda). More precisely it is the block device major 3454 * and minor number that is chosen here. 3455 * 3456 * Assume sd_probe is not re-entrant (for time being) 3457 * Also think about sd_probe() and sd_remove() running coincidentally.
3458 **/ 3459 static int sd_probe(struct device *dev) 3460 { 3461 struct scsi_device *sdp = to_scsi_device(dev); 3462 struct scsi_disk *sdkp; 3463 struct gendisk *gd; 3464 int index; 3465 int error; 3466 3467 scsi_autopm_get_device(sdp); 3468 error = -ENODEV; 3469 if (sdp->type != TYPE_DISK && 3470 sdp->type != TYPE_ZBC && 3471 sdp->type != TYPE_MOD && 3472 sdp->type != TYPE_RBC) 3473 goto out; 3474 3475 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) { 3476 sdev_printk(KERN_WARNING, sdp, 3477 "Unsupported ZBC host-managed device.\n"); 3478 goto out; 3479 } 3480 3481 SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp, 3482 "sd_probe\n")); 3483 3484 error = -ENOMEM; 3485 sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL); 3486 if (!sdkp) 3487 goto out; 3488 3489 gd = blk_mq_alloc_disk_for_queue(sdp->request_queue, 3490 &sd_bio_compl_lkclass); 3491 if (!gd) 3492 goto out_free; 3493 3494 index = ida_alloc(&sd_index_ida, GFP_KERNEL); 3495 if (index < 0) { 3496 sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n"); 3497 goto out_put; 3498 } 3499 3500 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); 3501 if (error) { 3502 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); 3503 goto out_free_index; 3504 } 3505 3506 sdkp->device = sdp; 3507 sdkp->disk = gd; 3508 sdkp->index = index; 3509 sdkp->max_retries = SD_MAX_RETRIES; 3510 atomic_set(&sdkp->openers, 0); 3511 atomic_set(&sdkp->device->ioerr_cnt, 0); 3512 3513 if (!sdp->request_queue->rq_timeout) { 3514 if (sdp->type != TYPE_MOD) 3515 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); 3516 else 3517 blk_queue_rq_timeout(sdp->request_queue, 3518 SD_MOD_TIMEOUT); 3519 } 3520 3521 device_initialize(&sdkp->disk_dev); 3522 sdkp->disk_dev.parent = get_device(dev); 3523 sdkp->disk_dev.class = &sd_disk_class; 3524 dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev)); 3525 3526 error = device_add(&sdkp->disk_dev); 3527 if (error) { 3528 put_device(&sdkp->disk_dev); 3529 goto out; 3530 } 3531 3532 dev_set_drvdata(dev, sdkp); 3533 3534 gd->major = sd_major((index & 0xf0) >> 4); 3535 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 3536 gd->minors = SD_MINORS; 3537 3538 gd->fops = &sd_fops; 3539 gd->private_data = sdkp; 3540 3541 /* defaults, until the device tells us otherwise */ 3542 sdp->sector_size = 512; 3543 sdkp->capacity = 0; 3544 sdkp->media_present = 1; 3545 sdkp->write_prot = 0; 3546 sdkp->cache_override = 0; 3547 sdkp->WCE = 0; 3548 sdkp->RCD = 0; 3549 sdkp->ATO = 0; 3550 sdkp->first_scan = 1; 3551 sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS; 3552 3553 sd_revalidate_disk(gd); 3554 3555 if (sdp->removable) { 3556 gd->flags |= GENHD_FL_REMOVABLE; 3557 gd->events |= DISK_EVENT_MEDIA_CHANGE; 3558 gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT; 3559 } 3560 3561 blk_pm_runtime_init(sdp->request_queue, dev); 3562 if (sdp->rpm_autosuspend) { 3563 pm_runtime_set_autosuspend_delay(dev, 3564 sdp->host->hostt->rpm_autosuspend_delay); 3565 } 3566 3567 error = device_add_disk(dev, gd, NULL); 3568 if (error) { 3569 put_device(&sdkp->disk_dev); 3570 put_disk(gd); 3571 goto out; 3572 } 3573 3574 if (sdkp->security) { 3575 sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit); 3576 if (sdkp->opal_dev) 3577 sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n"); 3578 } 3579 3580 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 3581 sdp->removable ? 
"removable " : ""); 3582 scsi_autopm_put_device(sdp); 3583 3584 return 0; 3585 3586 out_free_index: 3587 ida_free(&sd_index_ida, index); 3588 out_put: 3589 put_disk(gd); 3590 out_free: 3591 kfree(sdkp); 3592 out: 3593 scsi_autopm_put_device(sdp); 3594 return error; 3595 } 3596 3597 /** 3598 * sd_remove - called whenever a scsi disk (previously recognized by 3599 * sd_probe) is detached from the system. It is called (potentially 3600 * multiple times) during sd module unload. 3601 * @dev: pointer to device object 3602 * 3603 * Note: this function is invoked from the scsi mid-level. 3604 * This function potentially frees up a device name (e.g. /dev/sdc) 3605 * that could be re-used by a subsequent sd_probe(). 3606 * This function is not called when the built-in sd driver is "exit-ed". 3607 **/ 3608 static int sd_remove(struct device *dev) 3609 { 3610 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3611 3612 scsi_autopm_get_device(sdkp->device); 3613 3614 device_del(&sdkp->disk_dev); 3615 del_gendisk(sdkp->disk); 3616 sd_shutdown(dev); 3617 3618 put_disk(sdkp->disk); 3619 return 0; 3620 } 3621 3622 static void scsi_disk_release(struct device *dev) 3623 { 3624 struct scsi_disk *sdkp = to_scsi_disk(dev); 3625 3626 ida_free(&sd_index_ida, sdkp->index); 3627 sd_zbc_free_zone_info(sdkp); 3628 put_device(&sdkp->device->sdev_gendev); 3629 free_opal_dev(sdkp->opal_dev); 3630 3631 kfree(sdkp); 3632 } 3633 3634 static int sd_start_stop_device(struct scsi_disk *sdkp, int start) 3635 { 3636 unsigned char cmd[6] = { START_STOP }; /* START_VALID */ 3637 struct scsi_sense_hdr sshdr; 3638 struct scsi_device *sdp = sdkp->device; 3639 int res; 3640 3641 if (start) 3642 cmd[4] |= 1; /* START */ 3643 3644 if (sdp->start_stop_pwr_cond) 3645 cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */ 3646 3647 if (!scsi_device_online(sdp)) 3648 return -ENODEV; 3649 3650 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, 3651 SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL); 3652 if (res) { 3653 sd_print_result(sdkp, "Start/Stop Unit failed", res); 3654 if (res > 0 && scsi_sense_valid(&sshdr)) { 3655 sd_print_sense_hdr(sdkp, &sshdr); 3656 /* 0x3a is medium not present */ 3657 if (sshdr.asc == 0x3a) 3658 res = 0; 3659 } 3660 } 3661 3662 /* SCSI error codes must not go to the generic layer */ 3663 if (res) 3664 return -EIO; 3665 3666 return 0; 3667 } 3668 3669 /* 3670 * Send a SYNCHRONIZE CACHE instruction down to the device through 3671 * the normal SCSI command structure. Wait for the command to 3672 * complete. 
3673 */ 3674 static void sd_shutdown(struct device *dev) 3675 { 3676 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3677 3678 if (!sdkp) 3679 return; /* this can happen */ 3680 3681 if (pm_runtime_suspended(dev)) 3682 return; 3683 3684 if (sdkp->WCE && sdkp->media_present) { 3685 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3686 sd_sync_cache(sdkp, NULL); 3687 } 3688 3689 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { 3690 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 3691 sd_start_stop_device(sdkp, 0); 3692 } 3693 } 3694 3695 static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) 3696 { 3697 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3698 struct scsi_sense_hdr sshdr; 3699 int ret = 0; 3700 3701 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ 3702 return 0; 3703 3704 if (sdkp->WCE && sdkp->media_present) { 3705 if (!sdkp->device->silence_suspend) 3706 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3707 ret = sd_sync_cache(sdkp, &sshdr); 3708 3709 if (ret) { 3710 /* ignore OFFLINE device */ 3711 if (ret == -ENODEV) 3712 return 0; 3713 3714 if (!scsi_sense_valid(&sshdr) || 3715 sshdr.sense_key != ILLEGAL_REQUEST) 3716 return ret; 3717 3718 /* 3719 * sshdr.sense_key == ILLEGAL_REQUEST means this drive 3720 * doesn't support sync. There's not much to do and 3721 * suspend shouldn't fail. 3722 */ 3723 ret = 0; 3724 } 3725 } 3726 3727 if (sdkp->device->manage_start_stop) { 3728 if (!sdkp->device->silence_suspend) 3729 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 3730 /* an error is not worth aborting a system sleep */ 3731 ret = sd_start_stop_device(sdkp, 0); 3732 if (ignore_stop_errors) 3733 ret = 0; 3734 } 3735 3736 return ret; 3737 } 3738 3739 static int sd_suspend_system(struct device *dev) 3740 { 3741 if (pm_runtime_suspended(dev)) 3742 return 0; 3743 3744 return sd_suspend_common(dev, true); 3745 } 3746 3747 static int sd_suspend_runtime(struct device *dev) 3748 { 3749 return sd_suspend_common(dev, false); 3750 } 3751 3752 static int sd_resume(struct device *dev) 3753 { 3754 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3755 int ret; 3756 3757 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ 3758 return 0; 3759 3760 if (!sdkp->device->manage_start_stop) 3761 return 0; 3762 3763 sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); 3764 ret = sd_start_stop_device(sdkp, 1); 3765 if (!ret) 3766 opal_unlock_from_suspend(sdkp->opal_dev); 3767 return ret; 3768 } 3769 3770 static int sd_resume_system(struct device *dev) 3771 { 3772 if (pm_runtime_suspended(dev)) 3773 return 0; 3774 3775 return sd_resume(dev); 3776 } 3777 3778 static int sd_resume_runtime(struct device *dev) 3779 { 3780 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3781 struct scsi_device *sdp; 3782 3783 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ 3784 return 0; 3785 3786 sdp = sdkp->device; 3787 3788 if (sdp->ignore_media_change) { 3789 /* clear the device's sense data */ 3790 static const u8 cmd[10] = { REQUEST_SENSE }; 3791 3792 if (scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, 3793 NULL, sdp->request_queue->rq_timeout, 1, 0, 3794 RQF_PM, NULL)) 3795 sd_printk(KERN_NOTICE, sdkp, 3796 "Failed to clear sense data\n"); 3797 } 3798 3799 return sd_resume(dev); 3800 } 3801 3802 /** 3803 * init_sd - entry point for this driver (both when built in or when 3804 * a module). 3805 * 3806 * Note: this function registers this driver with the scsi mid-level. 
3807 **/ 3808 static int __init init_sd(void) 3809 { 3810 int majors = 0, i, err; 3811 3812 SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n")); 3813 3814 for (i = 0; i < SD_MAJORS; i++) { 3815 if (__register_blkdev(sd_major(i), "sd", sd_default_probe)) 3816 continue; 3817 majors++; 3818 } 3819 3820 if (!majors) 3821 return -ENODEV; 3822 3823 err = class_register(&sd_disk_class); 3824 if (err) 3825 goto err_out; 3826 3827 sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE, 3828 0, 0, NULL); 3829 if (!sd_cdb_cache) { 3830 printk(KERN_ERR "sd: can't init extended cdb cache\n"); 3831 err = -ENOMEM; 3832 goto err_out_class; 3833 } 3834 3835 sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0); 3836 if (!sd_page_pool) { 3837 printk(KERN_ERR "sd: can't init discard page pool\n"); 3838 err = -ENOMEM; 3839 goto err_out_cache; 3840 } 3841 3842 err = scsi_register_driver(&sd_template.gendrv); 3843 if (err) 3844 goto err_out_driver; 3845 3846 return 0; 3847 3848 err_out_driver: 3849 mempool_destroy(sd_page_pool); 3850 3851 err_out_cache: 3852 kmem_cache_destroy(sd_cdb_cache); 3853 3854 err_out_class: 3855 class_unregister(&sd_disk_class); 3856 err_out: 3857 for (i = 0; i < SD_MAJORS; i++) 3858 unregister_blkdev(sd_major(i), "sd"); 3859 return err; 3860 } 3861 3862 /** 3863 * exit_sd - exit point for this driver (when it is a module). 3864 * 3865 * Note: this function unregisters this driver from the scsi mid-level. 3866 **/ 3867 static void __exit exit_sd(void) 3868 { 3869 int i; 3870 3871 SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n")); 3872 3873 scsi_unregister_driver(&sd_template.gendrv); 3874 mempool_destroy(sd_page_pool); 3875 kmem_cache_destroy(sd_cdb_cache); 3876 3877 class_unregister(&sd_disk_class); 3878 3879 for (i = 0; i < SD_MAJORS; i++) 3880 unregister_blkdev(sd_major(i), "sd"); 3881 } 3882 3883 module_init(init_sd); 3884 module_exit(exit_sd); 3885 3886 void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) 3887 { 3888 scsi_print_sense_hdr(sdkp->device, 3889 sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr); 3890 } 3891 3892 void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result) 3893 { 3894 const char *hb_string = scsi_hostbyte_string(result); 3895 3896 if (hb_string) 3897 sd_printk(KERN_INFO, sdkp, 3898 "%s: Result: hostbyte=%s driverbyte=%s\n", msg, 3899 hb_string ? hb_string : "invalid", 3900 "DRIVER_OK"); 3901 else 3902 sd_printk(KERN_INFO, sdkp, 3903 "%s: Result: hostbyte=0x%02x driverbyte=%s\n", 3904 msg, host_byte(result), "DRIVER_OK"); 3905 } 3906