// SPDX-License-Identifier: GPL-2.0-only
/*
 *	sd.c Copyright (C) 1992 Drew Eckhardt
 *	     Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 *	Linux scsi disk driver
 *		Initial versions: Drew Eckhardt
 *		Subsequent revisions: Eric Youngdale
 *	Modification history:
 *	- Drew Eckhardt <drew@colorado.edu> original
 *	- Eric Youngdale <eric@andante.org> add scatter-gather, multiple
 *	  outstanding request, and other enhancements.
 *	  Support loadable low-level scsi drivers.
 *	- Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
 *	  eight major numbers.
 *	- Richard Gooch <rgooch@atnf.csiro.au> support devfs.
 *	- Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
 *	  sd_init and cleanups.
 *	- Alex Davis <letmein@erols.com> Fix problem where partition info
 *	  not being read in sd_open. Fix problem where removable media
 *	  could be ejected after sd_open.
 *	- Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
 *	- Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
 *	  <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
 *	  Support 32k/1M disks.
 *
 *	Logging policy (needs CONFIG_SCSI_LOGGING defined):
 *	- setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
 *	- end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
 *	- entering sd_ioctl: SCSI_LOG_IOCTL level 1
 *	- entering other commands: SCSI_LOG_HLQUEUE level 3
 *	Note: when the logging level is set by the user, it must be greater
 *	than the level indicated above to trigger output.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
#include <linux/pr.h>
#include <linux/t10-pi.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>

#include "sd.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

MODULE_AUTHOR("Eric Youngdale");
MODULE_DESCRIPTION("SCSI disk (sd) driver");
MODULE_LICENSE("GPL");

MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
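/*
 * Illustrative note (not part of the original header): assuming the usual
 * 3-bit-per-facility layout from scsi_logging.h, seeing the SCSI_LOG_HLQUEUE
 * level 3 messages mentioned in the header above requires the HLQUEUE field
 * of the global logging word to be greater than 3, e.g. roughly
 * (4 << SCSI_LOG_HLQUEUE_SHIFT) written to the dev.scsi.logging_level sysctl.
 */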
96 MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK); 97 MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); 98 MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); 99 MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC); 100 101 #if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT) 102 #define SD_MINORS 16 103 #else 104 #define SD_MINORS 0 105 #endif 106 107 static void sd_config_discard(struct scsi_disk *, unsigned int); 108 static void sd_config_write_same(struct scsi_disk *); 109 static int sd_revalidate_disk(struct gendisk *); 110 static void sd_unlock_native_capacity(struct gendisk *disk); 111 static int sd_probe(struct device *); 112 static int sd_remove(struct device *); 113 static void sd_shutdown(struct device *); 114 static int sd_suspend_system(struct device *); 115 static int sd_suspend_runtime(struct device *); 116 static int sd_resume(struct device *); 117 static void sd_rescan(struct device *); 118 static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt); 119 static void sd_uninit_command(struct scsi_cmnd *SCpnt); 120 static int sd_done(struct scsi_cmnd *); 121 static void sd_eh_reset(struct scsi_cmnd *); 122 static int sd_eh_action(struct scsi_cmnd *, int); 123 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); 124 static void scsi_disk_release(struct device *cdev); 125 static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); 126 static void sd_print_result(const struct scsi_disk *, const char *, int); 127 128 static DEFINE_IDA(sd_index_ida); 129 130 /* This semaphore is used to mediate the 0->1 reference get in the 131 * face of object destruction (i.e. we can't allow a get on an 132 * object after last put) */ 133 static DEFINE_MUTEX(sd_ref_mutex); 134 135 static struct kmem_cache *sd_cdb_cache; 136 static mempool_t *sd_cdb_pool; 137 static mempool_t *sd_page_pool; 138 139 static const char *sd_cache_types[] = { 140 "write through", "none", "write back", 141 "write back, no read (daft)" 142 }; 143 144 static void sd_set_flush_flag(struct scsi_disk *sdkp) 145 { 146 bool wc = false, fua = false; 147 148 if (sdkp->WCE) { 149 wc = true; 150 if (sdkp->DPOFUA) 151 fua = true; 152 } 153 154 blk_queue_write_cache(sdkp->disk->queue, wc, fua); 155 } 156 157 static ssize_t 158 cache_type_store(struct device *dev, struct device_attribute *attr, 159 const char *buf, size_t count) 160 { 161 int ct, rcd, wce, sp; 162 struct scsi_disk *sdkp = to_scsi_disk(dev); 163 struct scsi_device *sdp = sdkp->device; 164 char buffer[64]; 165 char *buffer_data; 166 struct scsi_mode_data data; 167 struct scsi_sense_hdr sshdr; 168 static const char temp[] = "temporary "; 169 int len; 170 171 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 172 /* no cache control on RBC devices; theoretically they 173 * can do it, but there's probably so many exceptions 174 * it's not worth the risk */ 175 return -EINVAL; 176 177 if (strncmp(buf, temp, sizeof(temp) - 1) == 0) { 178 buf += sizeof(temp) - 1; 179 sdkp->cache_override = 1; 180 } else { 181 sdkp->cache_override = 0; 182 } 183 184 ct = sysfs_match_string(sd_cache_types, buf); 185 if (ct < 0) 186 return -EINVAL; 187 188 rcd = ct & 0x01 ? 1 : 0; 189 wce = (ct & 0x02) && !sdkp->write_prot ? 
1 : 0; 190 191 if (sdkp->cache_override) { 192 sdkp->WCE = wce; 193 sdkp->RCD = rcd; 194 sd_set_flush_flag(sdkp); 195 return count; 196 } 197 198 if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT, 199 SD_MAX_RETRIES, &data, NULL)) 200 return -EINVAL; 201 len = min_t(size_t, sizeof(buffer), data.length - data.header_length - 202 data.block_descriptor_length); 203 buffer_data = buffer + data.header_length + 204 data.block_descriptor_length; 205 buffer_data[2] &= ~0x05; 206 buffer_data[2] |= wce << 2 | rcd; 207 sp = buffer_data[0] & 0x80 ? 1 : 0; 208 buffer_data[0] &= ~0x80; 209 210 /* 211 * Ensure WP, DPOFUA, and RESERVED fields are cleared in 212 * received mode parameter buffer before doing MODE SELECT. 213 */ 214 data.device_specific = 0; 215 216 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, 217 SD_MAX_RETRIES, &data, &sshdr)) { 218 if (scsi_sense_valid(&sshdr)) 219 sd_print_sense_hdr(sdkp, &sshdr); 220 return -EINVAL; 221 } 222 revalidate_disk(sdkp->disk); 223 return count; 224 } 225 226 static ssize_t 227 manage_start_stop_show(struct device *dev, struct device_attribute *attr, 228 char *buf) 229 { 230 struct scsi_disk *sdkp = to_scsi_disk(dev); 231 struct scsi_device *sdp = sdkp->device; 232 233 return sprintf(buf, "%u\n", sdp->manage_start_stop); 234 } 235 236 static ssize_t 237 manage_start_stop_store(struct device *dev, struct device_attribute *attr, 238 const char *buf, size_t count) 239 { 240 struct scsi_disk *sdkp = to_scsi_disk(dev); 241 struct scsi_device *sdp = sdkp->device; 242 bool v; 243 244 if (!capable(CAP_SYS_ADMIN)) 245 return -EACCES; 246 247 if (kstrtobool(buf, &v)) 248 return -EINVAL; 249 250 sdp->manage_start_stop = v; 251 252 return count; 253 } 254 static DEVICE_ATTR_RW(manage_start_stop); 255 256 static ssize_t 257 allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf) 258 { 259 struct scsi_disk *sdkp = to_scsi_disk(dev); 260 261 return sprintf(buf, "%u\n", sdkp->device->allow_restart); 262 } 263 264 static ssize_t 265 allow_restart_store(struct device *dev, struct device_attribute *attr, 266 const char *buf, size_t count) 267 { 268 bool v; 269 struct scsi_disk *sdkp = to_scsi_disk(dev); 270 struct scsi_device *sdp = sdkp->device; 271 272 if (!capable(CAP_SYS_ADMIN)) 273 return -EACCES; 274 275 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 276 return -EINVAL; 277 278 if (kstrtobool(buf, &v)) 279 return -EINVAL; 280 281 sdp->allow_restart = v; 282 283 return count; 284 } 285 static DEVICE_ATTR_RW(allow_restart); 286 287 static ssize_t 288 cache_type_show(struct device *dev, struct device_attribute *attr, char *buf) 289 { 290 struct scsi_disk *sdkp = to_scsi_disk(dev); 291 int ct = sdkp->RCD + 2*sdkp->WCE; 292 293 return sprintf(buf, "%s\n", sd_cache_types[ct]); 294 } 295 static DEVICE_ATTR_RW(cache_type); 296 297 static ssize_t 298 FUA_show(struct device *dev, struct device_attribute *attr, char *buf) 299 { 300 struct scsi_disk *sdkp = to_scsi_disk(dev); 301 302 return sprintf(buf, "%u\n", sdkp->DPOFUA); 303 } 304 static DEVICE_ATTR_RO(FUA); 305 306 static ssize_t 307 protection_type_show(struct device *dev, struct device_attribute *attr, 308 char *buf) 309 { 310 struct scsi_disk *sdkp = to_scsi_disk(dev); 311 312 return sprintf(buf, "%u\n", sdkp->protection_type); 313 } 314 315 static ssize_t 316 protection_type_store(struct device *dev, struct device_attribute *attr, 317 const char *buf, size_t count) 318 { 319 struct scsi_disk *sdkp = to_scsi_disk(dev); 320 unsigned int val; 321 int err; 
322 323 if (!capable(CAP_SYS_ADMIN)) 324 return -EACCES; 325 326 err = kstrtouint(buf, 10, &val); 327 328 if (err) 329 return err; 330 331 if (val <= T10_PI_TYPE3_PROTECTION) 332 sdkp->protection_type = val; 333 334 return count; 335 } 336 static DEVICE_ATTR_RW(protection_type); 337 338 static ssize_t 339 protection_mode_show(struct device *dev, struct device_attribute *attr, 340 char *buf) 341 { 342 struct scsi_disk *sdkp = to_scsi_disk(dev); 343 struct scsi_device *sdp = sdkp->device; 344 unsigned int dif, dix; 345 346 dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); 347 dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type); 348 349 if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) { 350 dif = 0; 351 dix = 1; 352 } 353 354 if (!dif && !dix) 355 return sprintf(buf, "none\n"); 356 357 return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif); 358 } 359 static DEVICE_ATTR_RO(protection_mode); 360 361 static ssize_t 362 app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf) 363 { 364 struct scsi_disk *sdkp = to_scsi_disk(dev); 365 366 return sprintf(buf, "%u\n", sdkp->ATO); 367 } 368 static DEVICE_ATTR_RO(app_tag_own); 369 370 static ssize_t 371 thin_provisioning_show(struct device *dev, struct device_attribute *attr, 372 char *buf) 373 { 374 struct scsi_disk *sdkp = to_scsi_disk(dev); 375 376 return sprintf(buf, "%u\n", sdkp->lbpme); 377 } 378 static DEVICE_ATTR_RO(thin_provisioning); 379 380 /* sysfs_match_string() requires dense arrays */ 381 static const char *lbp_mode[] = { 382 [SD_LBP_FULL] = "full", 383 [SD_LBP_UNMAP] = "unmap", 384 [SD_LBP_WS16] = "writesame_16", 385 [SD_LBP_WS10] = "writesame_10", 386 [SD_LBP_ZERO] = "writesame_zero", 387 [SD_LBP_DISABLE] = "disabled", 388 }; 389 390 static ssize_t 391 provisioning_mode_show(struct device *dev, struct device_attribute *attr, 392 char *buf) 393 { 394 struct scsi_disk *sdkp = to_scsi_disk(dev); 395 396 return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]); 397 } 398 399 static ssize_t 400 provisioning_mode_store(struct device *dev, struct device_attribute *attr, 401 const char *buf, size_t count) 402 { 403 struct scsi_disk *sdkp = to_scsi_disk(dev); 404 struct scsi_device *sdp = sdkp->device; 405 int mode; 406 407 if (!capable(CAP_SYS_ADMIN)) 408 return -EACCES; 409 410 if (sd_is_zoned(sdkp)) { 411 sd_config_discard(sdkp, SD_LBP_DISABLE); 412 return count; 413 } 414 415 if (sdp->type != TYPE_DISK) 416 return -EINVAL; 417 418 mode = sysfs_match_string(lbp_mode, buf); 419 if (mode < 0) 420 return -EINVAL; 421 422 sd_config_discard(sdkp, mode); 423 424 return count; 425 } 426 static DEVICE_ATTR_RW(provisioning_mode); 427 428 /* sysfs_match_string() requires dense arrays */ 429 static const char *zeroing_mode[] = { 430 [SD_ZERO_WRITE] = "write", 431 [SD_ZERO_WS] = "writesame", 432 [SD_ZERO_WS16_UNMAP] = "writesame_16_unmap", 433 [SD_ZERO_WS10_UNMAP] = "writesame_10_unmap", 434 }; 435 436 static ssize_t 437 zeroing_mode_show(struct device *dev, struct device_attribute *attr, 438 char *buf) 439 { 440 struct scsi_disk *sdkp = to_scsi_disk(dev); 441 442 return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]); 443 } 444 445 static ssize_t 446 zeroing_mode_store(struct device *dev, struct device_attribute *attr, 447 const char *buf, size_t count) 448 { 449 struct scsi_disk *sdkp = to_scsi_disk(dev); 450 int mode; 451 452 if (!capable(CAP_SYS_ADMIN)) 453 return -EACCES; 454 455 mode = sysfs_match_string(zeroing_mode, buf); 456 if (mode < 0) 457 return -EINVAL; 458 
459 sdkp->zeroing_mode = mode; 460 461 return count; 462 } 463 static DEVICE_ATTR_RW(zeroing_mode); 464 465 static ssize_t 466 max_medium_access_timeouts_show(struct device *dev, 467 struct device_attribute *attr, char *buf) 468 { 469 struct scsi_disk *sdkp = to_scsi_disk(dev); 470 471 return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts); 472 } 473 474 static ssize_t 475 max_medium_access_timeouts_store(struct device *dev, 476 struct device_attribute *attr, const char *buf, 477 size_t count) 478 { 479 struct scsi_disk *sdkp = to_scsi_disk(dev); 480 int err; 481 482 if (!capable(CAP_SYS_ADMIN)) 483 return -EACCES; 484 485 err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts); 486 487 return err ? err : count; 488 } 489 static DEVICE_ATTR_RW(max_medium_access_timeouts); 490 491 static ssize_t 492 max_write_same_blocks_show(struct device *dev, struct device_attribute *attr, 493 char *buf) 494 { 495 struct scsi_disk *sdkp = to_scsi_disk(dev); 496 497 return sprintf(buf, "%u\n", sdkp->max_ws_blocks); 498 } 499 500 static ssize_t 501 max_write_same_blocks_store(struct device *dev, struct device_attribute *attr, 502 const char *buf, size_t count) 503 { 504 struct scsi_disk *sdkp = to_scsi_disk(dev); 505 struct scsi_device *sdp = sdkp->device; 506 unsigned long max; 507 int err; 508 509 if (!capable(CAP_SYS_ADMIN)) 510 return -EACCES; 511 512 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 513 return -EINVAL; 514 515 err = kstrtoul(buf, 10, &max); 516 517 if (err) 518 return err; 519 520 if (max == 0) 521 sdp->no_write_same = 1; 522 else if (max <= SD_MAX_WS16_BLOCKS) { 523 sdp->no_write_same = 0; 524 sdkp->max_ws_blocks = max; 525 } 526 527 sd_config_write_same(sdkp); 528 529 return count; 530 } 531 static DEVICE_ATTR_RW(max_write_same_blocks); 532 533 static struct attribute *sd_disk_attrs[] = { 534 &dev_attr_cache_type.attr, 535 &dev_attr_FUA.attr, 536 &dev_attr_allow_restart.attr, 537 &dev_attr_manage_start_stop.attr, 538 &dev_attr_protection_type.attr, 539 &dev_attr_protection_mode.attr, 540 &dev_attr_app_tag_own.attr, 541 &dev_attr_thin_provisioning.attr, 542 &dev_attr_provisioning_mode.attr, 543 &dev_attr_zeroing_mode.attr, 544 &dev_attr_max_write_same_blocks.attr, 545 &dev_attr_max_medium_access_timeouts.attr, 546 NULL, 547 }; 548 ATTRIBUTE_GROUPS(sd_disk); 549 550 static struct class sd_disk_class = { 551 .name = "scsi_disk", 552 .owner = THIS_MODULE, 553 .dev_release = scsi_disk_release, 554 .dev_groups = sd_disk_groups, 555 }; 556 557 static const struct dev_pm_ops sd_pm_ops = { 558 .suspend = sd_suspend_system, 559 .resume = sd_resume, 560 .poweroff = sd_suspend_system, 561 .restore = sd_resume, 562 .runtime_suspend = sd_suspend_runtime, 563 .runtime_resume = sd_resume, 564 }; 565 566 static struct scsi_driver sd_template = { 567 .gendrv = { 568 .name = "sd", 569 .owner = THIS_MODULE, 570 .probe = sd_probe, 571 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 572 .remove = sd_remove, 573 .shutdown = sd_shutdown, 574 .pm = &sd_pm_ops, 575 }, 576 .rescan = sd_rescan, 577 .init_command = sd_init_command, 578 .uninit_command = sd_uninit_command, 579 .done = sd_done, 580 .eh_action = sd_eh_action, 581 .eh_reset = sd_eh_reset, 582 }; 583 584 /* 585 * Dummy kobj_map->probe function. 586 * The default ->probe function will call modprobe, which is 587 * pointless as this module is already loaded. 
 */
static struct kobject *sd_default_probe(dev_t devt, int *partno, void *data)
{
	return NULL;
}

/*
 * Device no to disk mapping:
 *
 *       major         disc2     disc  p1
 *   |............|.............|....|....| <- dev_t
 *    31        20 19          8 7  4 3  0
 *
 * Inside a major, we have 16k disks, however mapped non-
 * contiguously. The first 16 disks are for major0, the next
 * ones with major1, ... Disk 256 is for major0 again, disk 272
 * for major1, ...
 * As we stay compatible with our numbering scheme, we can reuse
 * the well-known SCSI majors 8, 65--71, 136--143.
 */
static int sd_major(int major_idx)
{
	switch (major_idx) {
	case 0:
		return SCSI_DISK0_MAJOR;
	case 1 ... 7:
		return SCSI_DISK1_MAJOR + major_idx - 1;
	case 8 ... 15:
		return SCSI_DISK8_MAJOR + major_idx - 8;
	default:
		BUG();
		return 0;	/* shut up gcc */
	}
}

static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
{
	struct scsi_disk *sdkp = NULL;

	mutex_lock(&sd_ref_mutex);

	if (disk->private_data) {
		sdkp = scsi_disk(disk);
		if (scsi_device_get(sdkp->device) == 0)
			get_device(&sdkp->dev);
		else
			sdkp = NULL;
	}
	mutex_unlock(&sd_ref_mutex);
	return sdkp;
}

static void scsi_disk_put(struct scsi_disk *sdkp)
{
	struct scsi_device *sdev = sdkp->device;

	mutex_lock(&sd_ref_mutex);
	put_device(&sdkp->dev);
	scsi_device_put(sdev);
	mutex_unlock(&sd_ref_mutex);
}

#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
		size_t len, bool send)
{
	struct scsi_device *sdev = data;
	u8 cdb[12] = { 0, };
	int ret;

	cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
	cdb[1] = secp;
	put_unaligned_be16(spsp, &cdb[2]);
	put_unaligned_be32(len, &cdb[6]);

	ret = scsi_execute_req(sdev, cdb,
			send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
			buffer, len, NULL, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
	return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */

/*
 * Look up the DIX operation based on whether the command is read or
 * write and whether dix and dif are enabled.
 */
static unsigned int sd_prot_op(bool write, bool dix, bool dif)
{
	/* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
	static const unsigned int ops[] = {	/* wrt dix dif */
		SCSI_PROT_NORMAL,		/*  0   0   0  */
		SCSI_PROT_READ_STRIP,		/*  0   0   1  */
		SCSI_PROT_READ_INSERT,		/*  0   1   0  */
		SCSI_PROT_READ_PASS,		/*  0   1   1  */
		SCSI_PROT_NORMAL,		/*  1   0   0  */
		SCSI_PROT_WRITE_INSERT,		/*  1   0   1  */
		SCSI_PROT_WRITE_STRIP,		/*  1   1   0  */
		SCSI_PROT_WRITE_PASS,		/*  1   1   1  */
	};

	return ops[write << 2 | dix << 1 | dif];
}

/*
 * Returns a mask of the protection flags that are valid for a given DIX
 * operation.
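 *
 * Illustrative example (not part of the original comment): for
 * SCSI_PROT_WRITE_PASS the table below allows SCSI_PROT_TRANSFER_PI, the
 * guard/reference checking flags, SCSI_PROT_REF_INCREMENT and
 * SCSI_PROT_IP_CHECKSUM, so sd_setup_protect_cmnd() can mask away any
 * flag that does not apply to the operation it selected.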
694 */ 695 static unsigned int sd_prot_flag_mask(unsigned int prot_op) 696 { 697 static const unsigned int flag_mask[] = { 698 [SCSI_PROT_NORMAL] = 0, 699 700 [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI | 701 SCSI_PROT_GUARD_CHECK | 702 SCSI_PROT_REF_CHECK | 703 SCSI_PROT_REF_INCREMENT, 704 705 [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT | 706 SCSI_PROT_IP_CHECKSUM, 707 708 [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI | 709 SCSI_PROT_GUARD_CHECK | 710 SCSI_PROT_REF_CHECK | 711 SCSI_PROT_REF_INCREMENT | 712 SCSI_PROT_IP_CHECKSUM, 713 714 [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI | 715 SCSI_PROT_REF_INCREMENT, 716 717 [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK | 718 SCSI_PROT_REF_CHECK | 719 SCSI_PROT_REF_INCREMENT | 720 SCSI_PROT_IP_CHECKSUM, 721 722 [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI | 723 SCSI_PROT_GUARD_CHECK | 724 SCSI_PROT_REF_CHECK | 725 SCSI_PROT_REF_INCREMENT | 726 SCSI_PROT_IP_CHECKSUM, 727 }; 728 729 return flag_mask[prot_op]; 730 } 731 732 static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd, 733 unsigned int dix, unsigned int dif) 734 { 735 struct bio *bio = scmd->request->bio; 736 unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif); 737 unsigned int protect = 0; 738 739 if (dix) { /* DIX Type 0, 1, 2, 3 */ 740 if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM)) 741 scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM; 742 743 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) 744 scmd->prot_flags |= SCSI_PROT_GUARD_CHECK; 745 } 746 747 if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */ 748 scmd->prot_flags |= SCSI_PROT_REF_INCREMENT; 749 750 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) 751 scmd->prot_flags |= SCSI_PROT_REF_CHECK; 752 } 753 754 if (dif) { /* DIX/DIF Type 1, 2, 3 */ 755 scmd->prot_flags |= SCSI_PROT_TRANSFER_PI; 756 757 if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK)) 758 protect = 3 << 5; /* Disable target PI checking */ 759 else 760 protect = 1 << 5; /* Enable target PI checking */ 761 } 762 763 scsi_set_prot_op(scmd, prot_op); 764 scsi_set_prot_type(scmd, dif); 765 scmd->prot_flags &= sd_prot_flag_mask(prot_op); 766 767 return protect; 768 } 769 770 static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) 771 { 772 struct request_queue *q = sdkp->disk->queue; 773 unsigned int logical_block_size = sdkp->device->sector_size; 774 unsigned int max_blocks = 0; 775 776 q->limits.discard_alignment = 777 sdkp->unmap_alignment * logical_block_size; 778 q->limits.discard_granularity = 779 max(sdkp->physical_block_size, 780 sdkp->unmap_granularity * logical_block_size); 781 sdkp->provisioning_mode = mode; 782 783 switch (mode) { 784 785 case SD_LBP_FULL: 786 case SD_LBP_DISABLE: 787 blk_queue_max_discard_sectors(q, 0); 788 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); 789 return; 790 791 case SD_LBP_UNMAP: 792 max_blocks = min_not_zero(sdkp->max_unmap_blocks, 793 (u32)SD_MAX_WS16_BLOCKS); 794 break; 795 796 case SD_LBP_WS16: 797 if (sdkp->device->unmap_limit_for_ws) 798 max_blocks = sdkp->max_unmap_blocks; 799 else 800 max_blocks = sdkp->max_ws_blocks; 801 802 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS); 803 break; 804 805 case SD_LBP_WS10: 806 if (sdkp->device->unmap_limit_for_ws) 807 max_blocks = sdkp->max_unmap_blocks; 808 else 809 max_blocks = sdkp->max_ws_blocks; 810 811 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS); 812 break; 813 814 case SD_LBP_ZERO: 815 max_blocks = min_not_zero(sdkp->max_ws_blocks, 816 
(u32)SD_MAX_WS10_BLOCKS); 817 break; 818 } 819 820 blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9)); 821 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); 822 } 823 824 static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) 825 { 826 struct scsi_device *sdp = cmd->device; 827 struct request *rq = cmd->request; 828 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 829 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 830 unsigned int data_len = 24; 831 char *buf; 832 833 rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); 834 if (!rq->special_vec.bv_page) 835 return BLK_STS_RESOURCE; 836 clear_highpage(rq->special_vec.bv_page); 837 rq->special_vec.bv_offset = 0; 838 rq->special_vec.bv_len = data_len; 839 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 840 841 cmd->cmd_len = 10; 842 cmd->cmnd[0] = UNMAP; 843 cmd->cmnd[8] = 24; 844 845 buf = page_address(rq->special_vec.bv_page); 846 put_unaligned_be16(6 + 16, &buf[0]); 847 put_unaligned_be16(16, &buf[2]); 848 put_unaligned_be64(lba, &buf[8]); 849 put_unaligned_be32(nr_blocks, &buf[16]); 850 851 cmd->allowed = SD_MAX_RETRIES; 852 cmd->transfersize = data_len; 853 rq->timeout = SD_TIMEOUT; 854 855 return scsi_init_io(cmd); 856 } 857 858 static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, 859 bool unmap) 860 { 861 struct scsi_device *sdp = cmd->device; 862 struct request *rq = cmd->request; 863 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 864 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 865 u32 data_len = sdp->sector_size; 866 867 rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); 868 if (!rq->special_vec.bv_page) 869 return BLK_STS_RESOURCE; 870 clear_highpage(rq->special_vec.bv_page); 871 rq->special_vec.bv_offset = 0; 872 rq->special_vec.bv_len = data_len; 873 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 874 875 cmd->cmd_len = 16; 876 cmd->cmnd[0] = WRITE_SAME_16; 877 if (unmap) 878 cmd->cmnd[1] = 0x8; /* UNMAP */ 879 put_unaligned_be64(lba, &cmd->cmnd[2]); 880 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); 881 882 cmd->allowed = SD_MAX_RETRIES; 883 cmd->transfersize = data_len; 884 rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; 885 886 return scsi_init_io(cmd); 887 } 888 889 static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, 890 bool unmap) 891 { 892 struct scsi_device *sdp = cmd->device; 893 struct request *rq = cmd->request; 894 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 895 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 896 u32 data_len = sdp->sector_size; 897 898 rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); 899 if (!rq->special_vec.bv_page) 900 return BLK_STS_RESOURCE; 901 clear_highpage(rq->special_vec.bv_page); 902 rq->special_vec.bv_offset = 0; 903 rq->special_vec.bv_len = data_len; 904 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 905 906 cmd->cmd_len = 10; 907 cmd->cmnd[0] = WRITE_SAME; 908 if (unmap) 909 cmd->cmnd[1] = 0x8; /* UNMAP */ 910 put_unaligned_be32(lba, &cmd->cmnd[2]); 911 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); 912 913 cmd->allowed = SD_MAX_RETRIES; 914 cmd->transfersize = data_len; 915 rq->timeout = unmap ? 
SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; 916 917 return scsi_init_io(cmd); 918 } 919 920 static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) 921 { 922 struct request *rq = cmd->request; 923 struct scsi_device *sdp = cmd->device; 924 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); 925 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 926 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 927 928 if (!(rq->cmd_flags & REQ_NOUNMAP)) { 929 switch (sdkp->zeroing_mode) { 930 case SD_ZERO_WS16_UNMAP: 931 return sd_setup_write_same16_cmnd(cmd, true); 932 case SD_ZERO_WS10_UNMAP: 933 return sd_setup_write_same10_cmnd(cmd, true); 934 } 935 } 936 937 if (sdp->no_write_same) 938 return BLK_STS_TARGET; 939 940 if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) 941 return sd_setup_write_same16_cmnd(cmd, false); 942 943 return sd_setup_write_same10_cmnd(cmd, false); 944 } 945 946 static void sd_config_write_same(struct scsi_disk *sdkp) 947 { 948 struct request_queue *q = sdkp->disk->queue; 949 unsigned int logical_block_size = sdkp->device->sector_size; 950 951 if (sdkp->device->no_write_same) { 952 sdkp->max_ws_blocks = 0; 953 goto out; 954 } 955 956 /* Some devices can not handle block counts above 0xffff despite 957 * supporting WRITE SAME(16). Consequently we default to 64k 958 * blocks per I/O unless the device explicitly advertises a 959 * bigger limit. 960 */ 961 if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS) 962 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, 963 (u32)SD_MAX_WS16_BLOCKS); 964 else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes) 965 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, 966 (u32)SD_MAX_WS10_BLOCKS); 967 else { 968 sdkp->device->no_write_same = 1; 969 sdkp->max_ws_blocks = 0; 970 } 971 972 if (sdkp->lbprz && sdkp->lbpws) 973 sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP; 974 else if (sdkp->lbprz && sdkp->lbpws10) 975 sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP; 976 else if (sdkp->max_ws_blocks) 977 sdkp->zeroing_mode = SD_ZERO_WS; 978 else 979 sdkp->zeroing_mode = SD_ZERO_WRITE; 980 981 if (sdkp->max_ws_blocks && 982 sdkp->physical_block_size > logical_block_size) { 983 /* 984 * Reporting a maximum number of blocks that is not aligned 985 * on the device physical size would cause a large write same 986 * request to be split into physically unaligned chunks by 987 * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same() 988 * even if the caller of these functions took care to align the 989 * large request. So make sure the maximum reported is aligned 990 * to the device physical block size. This is only an optional 991 * optimization for regular disks, but this is mandatory to 992 * avoid failure of large write same requests directed at 993 * sequential write required zones of host-managed ZBC disks. 994 */ 995 sdkp->max_ws_blocks = 996 round_down(sdkp->max_ws_blocks, 997 bytes_to_logical(sdkp->device, 998 sdkp->physical_block_size)); 999 } 1000 1001 out: 1002 blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks * 1003 (logical_block_size >> 9)); 1004 blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks * 1005 (logical_block_size >> 9)); 1006 } 1007 1008 /** 1009 * sd_setup_write_same_cmnd - write the same data to multiple blocks 1010 * @cmd: command to prepare 1011 * 1012 * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on 1013 * the preference indicated by the target device. 
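 * WRITE SAME(16) is chosen when the device reports ws16 support, when the
 * start LBA does not fit in 32 bits, or when the block count does not fit
 * in 16 bits; otherwise WRITE SAME(10) is used.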
1014 **/ 1015 static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) 1016 { 1017 struct request *rq = cmd->request; 1018 struct scsi_device *sdp = cmd->device; 1019 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); 1020 struct bio *bio = rq->bio; 1021 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 1022 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 1023 blk_status_t ret; 1024 1025 if (sdkp->device->no_write_same) 1026 return BLK_STS_TARGET; 1027 1028 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); 1029 1030 rq->timeout = SD_WRITE_SAME_TIMEOUT; 1031 1032 if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) { 1033 cmd->cmd_len = 16; 1034 cmd->cmnd[0] = WRITE_SAME_16; 1035 put_unaligned_be64(lba, &cmd->cmnd[2]); 1036 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); 1037 } else { 1038 cmd->cmd_len = 10; 1039 cmd->cmnd[0] = WRITE_SAME; 1040 put_unaligned_be32(lba, &cmd->cmnd[2]); 1041 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); 1042 } 1043 1044 cmd->transfersize = sdp->sector_size; 1045 cmd->allowed = SD_MAX_RETRIES; 1046 1047 /* 1048 * For WRITE SAME the data transferred via the DATA OUT buffer is 1049 * different from the amount of data actually written to the target. 1050 * 1051 * We set up __data_len to the amount of data transferred via the 1052 * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list 1053 * to transfer a single sector of data first, but then reset it to 1054 * the amount of data to be written right after so that the I/O path 1055 * knows how much to actually write. 1056 */ 1057 rq->__data_len = sdp->sector_size; 1058 ret = scsi_init_io(cmd); 1059 rq->__data_len = blk_rq_bytes(rq); 1060 1061 return ret; 1062 } 1063 1064 static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) 1065 { 1066 struct request *rq = cmd->request; 1067 1068 /* flush requests don't perform I/O, zero the S/G table */ 1069 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1070 1071 cmd->cmnd[0] = SYNCHRONIZE_CACHE; 1072 cmd->cmd_len = 10; 1073 cmd->transfersize = 0; 1074 cmd->allowed = SD_MAX_RETRIES; 1075 1076 rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER; 1077 return BLK_STS_OK; 1078 } 1079 1080 static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write, 1081 sector_t lba, unsigned int nr_blocks, 1082 unsigned char flags) 1083 { 1084 cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC); 1085 if (unlikely(cmd->cmnd == NULL)) 1086 return BLK_STS_RESOURCE; 1087 1088 cmd->cmd_len = SD_EXT_CDB_SIZE; 1089 memset(cmd->cmnd, 0, cmd->cmd_len); 1090 1091 cmd->cmnd[0] = VARIABLE_LENGTH_CMD; 1092 cmd->cmnd[7] = 0x18; /* Additional CDB len */ 1093 cmd->cmnd[9] = write ? WRITE_32 : READ_32; 1094 cmd->cmnd[10] = flags; 1095 put_unaligned_be64(lba, &cmd->cmnd[12]); 1096 put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */ 1097 put_unaligned_be32(nr_blocks, &cmd->cmnd[28]); 1098 1099 return BLK_STS_OK; 1100 } 1101 1102 static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write, 1103 sector_t lba, unsigned int nr_blocks, 1104 unsigned char flags) 1105 { 1106 cmd->cmd_len = 16; 1107 cmd->cmnd[0] = write ? 
WRITE_16 : READ_16; 1108 cmd->cmnd[1] = flags; 1109 cmd->cmnd[14] = 0; 1110 cmd->cmnd[15] = 0; 1111 put_unaligned_be64(lba, &cmd->cmnd[2]); 1112 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); 1113 1114 return BLK_STS_OK; 1115 } 1116 1117 static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write, 1118 sector_t lba, unsigned int nr_blocks, 1119 unsigned char flags) 1120 { 1121 cmd->cmd_len = 10; 1122 cmd->cmnd[0] = write ? WRITE_10 : READ_10; 1123 cmd->cmnd[1] = flags; 1124 cmd->cmnd[6] = 0; 1125 cmd->cmnd[9] = 0; 1126 put_unaligned_be32(lba, &cmd->cmnd[2]); 1127 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); 1128 1129 return BLK_STS_OK; 1130 } 1131 1132 static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write, 1133 sector_t lba, unsigned int nr_blocks, 1134 unsigned char flags) 1135 { 1136 /* Avoid that 0 blocks gets translated into 256 blocks. */ 1137 if (WARN_ON_ONCE(nr_blocks == 0)) 1138 return BLK_STS_IOERR; 1139 1140 if (unlikely(flags & 0x8)) { 1141 /* 1142 * This happens only if this drive failed 10byte rw 1143 * command with ILLEGAL_REQUEST during operation and 1144 * thus turned off use_10_for_rw. 1145 */ 1146 scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n"); 1147 return BLK_STS_IOERR; 1148 } 1149 1150 cmd->cmd_len = 6; 1151 cmd->cmnd[0] = write ? WRITE_6 : READ_6; 1152 cmd->cmnd[1] = (lba >> 16) & 0x1f; 1153 cmd->cmnd[2] = (lba >> 8) & 0xff; 1154 cmd->cmnd[3] = lba & 0xff; 1155 cmd->cmnd[4] = nr_blocks; 1156 cmd->cmnd[5] = 0; 1157 1158 return BLK_STS_OK; 1159 } 1160 1161 static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) 1162 { 1163 struct request *rq = cmd->request; 1164 struct scsi_device *sdp = cmd->device; 1165 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); 1166 sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq)); 1167 sector_t threshold; 1168 unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); 1169 unsigned int mask = logical_to_sectors(sdp, 1) - 1; 1170 bool write = rq_data_dir(rq) == WRITE; 1171 unsigned char protect, fua; 1172 blk_status_t ret; 1173 unsigned int dif; 1174 bool dix; 1175 1176 ret = scsi_init_io(cmd); 1177 if (ret != BLK_STS_OK) 1178 return ret; 1179 1180 if (!scsi_device_online(sdp) || sdp->changed) { 1181 scmd_printk(KERN_ERR, cmd, "device offline or changed\n"); 1182 return BLK_STS_IOERR; 1183 } 1184 1185 if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) { 1186 scmd_printk(KERN_ERR, cmd, "access beyond end of device\n"); 1187 return BLK_STS_IOERR; 1188 } 1189 1190 if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) { 1191 scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n"); 1192 return BLK_STS_IOERR; 1193 } 1194 1195 /* 1196 * Some SD card readers can't handle accesses which touch the 1197 * last one or two logical blocks. Split accesses as needed. 1198 */ 1199 threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS; 1200 1201 if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) { 1202 if (lba < threshold) { 1203 /* Access up to the threshold but not beyond */ 1204 nr_blocks = threshold - lba; 1205 } else { 1206 /* Access only a single logical block */ 1207 nr_blocks = 1; 1208 } 1209 } 1210 1211 fua = rq->cmd_flags & REQ_FUA ? 
0x8 : 0; 1212 dix = scsi_prot_sg_count(cmd); 1213 dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type); 1214 1215 if (dif || dix) 1216 protect = sd_setup_protect_cmnd(cmd, dix, dif); 1217 else 1218 protect = 0; 1219 1220 if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { 1221 ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks, 1222 protect | fua); 1223 } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) { 1224 ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks, 1225 protect | fua); 1226 } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) || 1227 sdp->use_10_for_rw || protect) { 1228 ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks, 1229 protect | fua); 1230 } else { 1231 ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks, 1232 protect | fua); 1233 } 1234 1235 if (unlikely(ret != BLK_STS_OK)) 1236 return ret; 1237 1238 /* 1239 * We shouldn't disconnect in the middle of a sector, so with a dumb 1240 * host adapter, it's safe to assume that we can at least transfer 1241 * this many bytes between each connect / disconnect. 1242 */ 1243 cmd->transfersize = sdp->sector_size; 1244 cmd->underflow = nr_blocks << 9; 1245 cmd->allowed = SD_MAX_RETRIES; 1246 cmd->sdb.length = nr_blocks * sdp->sector_size; 1247 1248 SCSI_LOG_HLQUEUE(1, 1249 scmd_printk(KERN_INFO, cmd, 1250 "%s: block=%llu, count=%d\n", __func__, 1251 (unsigned long long)blk_rq_pos(rq), 1252 blk_rq_sectors(rq))); 1253 SCSI_LOG_HLQUEUE(2, 1254 scmd_printk(KERN_INFO, cmd, 1255 "%s %d/%u 512 byte blocks.\n", 1256 write ? "writing" : "reading", nr_blocks, 1257 blk_rq_sectors(rq))); 1258 1259 /* 1260 * This indicates that the command is ready from our end to be 1261 * queued. 1262 */ 1263 return BLK_STS_OK; 1264 } 1265 1266 static blk_status_t sd_init_command(struct scsi_cmnd *cmd) 1267 { 1268 struct request *rq = cmd->request; 1269 1270 switch (req_op(rq)) { 1271 case REQ_OP_DISCARD: 1272 switch (scsi_disk(rq->rq_disk)->provisioning_mode) { 1273 case SD_LBP_UNMAP: 1274 return sd_setup_unmap_cmnd(cmd); 1275 case SD_LBP_WS16: 1276 return sd_setup_write_same16_cmnd(cmd, true); 1277 case SD_LBP_WS10: 1278 return sd_setup_write_same10_cmnd(cmd, true); 1279 case SD_LBP_ZERO: 1280 return sd_setup_write_same10_cmnd(cmd, false); 1281 default: 1282 return BLK_STS_TARGET; 1283 } 1284 case REQ_OP_WRITE_ZEROES: 1285 return sd_setup_write_zeroes_cmnd(cmd); 1286 case REQ_OP_WRITE_SAME: 1287 return sd_setup_write_same_cmnd(cmd); 1288 case REQ_OP_FLUSH: 1289 return sd_setup_flush_cmnd(cmd); 1290 case REQ_OP_READ: 1291 case REQ_OP_WRITE: 1292 return sd_setup_read_write_cmnd(cmd); 1293 case REQ_OP_ZONE_RESET: 1294 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER, 1295 false); 1296 case REQ_OP_ZONE_RESET_ALL: 1297 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER, 1298 true); 1299 case REQ_OP_ZONE_OPEN: 1300 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false); 1301 case REQ_OP_ZONE_CLOSE: 1302 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false); 1303 case REQ_OP_ZONE_FINISH: 1304 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false); 1305 default: 1306 WARN_ON_ONCE(1); 1307 return BLK_STS_NOTSUPP; 1308 } 1309 } 1310 1311 static void sd_uninit_command(struct scsi_cmnd *SCpnt) 1312 { 1313 struct request *rq = SCpnt->request; 1314 u8 *cmnd; 1315 1316 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) 1317 mempool_free(rq->special_vec.bv_page, sd_page_pool); 1318 1319 if (SCpnt->cmnd != scsi_req(rq)->cmd) { 1320 cmnd = SCpnt->cmnd; 1321 SCpnt->cmnd = NULL; 1322 SCpnt->cmd_len = 0; 
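		/* cmnd points at the extended CDB that sd_setup_rw32_cmnd()
		 * allocated from sd_cdb_pool; return it to the pool.
		 */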
		mempool_free(cmnd, sd_cdb_pool);
	}
}

/**
 *	sd_open - open a scsi disk device
 *	@bdev: Block device of the scsi disk to open
 *	@mode: FMODE_* mask
 *
 *	Returns 0 if successful. Returns a negated errno value in case
 *	of error.
 *
 *	Note: This can be called from a user context (e.g. fsck(1) )
 *	or from within the kernel (e.g. as a result of a mount(1) ).
 *	In the latter case @bdev and @mode carry an abridged amount
 *	of information.
 *
 *	Locking: called with bdev->bd_mutex held.
 **/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
	struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
	struct scsi_device *sdev;
	int retval;

	if (!sdkp)
		return -ENXIO;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));

	sdev = sdkp->device;

	/*
	 * If the device is in error recovery, wait until it is done.
	 * If the device is offline, then disallow any access to it.
	 */
	retval = -ENXIO;
	if (!scsi_block_when_processing_errors(sdev))
		goto error_out;

	if (sdev->removable || sdkp->write_prot)
		check_disk_change(bdev);

	/*
	 * If the drive is empty, just let the open fail.
	 */
	retval = -ENOMEDIUM;
	if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
		goto error_out;

	/*
	 * If the device has the write protect tab set, have the open fail
	 * if the user expects to be able to write to the thing.
	 */
	retval = -EROFS;
	if (sdkp->write_prot && (mode & FMODE_WRITE))
		goto error_out;

	/*
	 * It is possible that the disk changing stuff resulted in
	 * the device being taken offline. If this is the case,
	 * report this to the user, and don't pretend that the
	 * open actually succeeded.
	 */
	retval = -ENXIO;
	if (!scsi_device_online(sdev))
		goto error_out;

	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
	}

	return 0;

error_out:
	scsi_disk_put(sdkp);
	return retval;
}

/**
 *	sd_release - invoked when the (last) close(2) is called on this
 *	scsi disk.
 *	@disk: disk to release
 *	@mode: FMODE_* mask
 *
 *	Returns 0.
 *
 *	Note: may block (uninterruptible) if error recovery is underway
 *	on this disk.
 *
 *	Locking: called with bdev->bd_mutex held.
 **/
static void sd_release(struct gendisk *disk, fmode_t mode)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdev = sdkp->device;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));

	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
	}

	scsi_disk_put(sdkp);
}

static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdp = sdkp->device;
	struct Scsi_Host *host = sdp->host;
	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
	int diskinfo[4];

	/* default to most commonly used values */
	diskinfo[0] = 0x40;	/* 1 << 6 */
	diskinfo[1] = 0x20;	/* 1 << 5 */
	diskinfo[2] = capacity >> 11;

	/* override with calculated, extended default, or driver values */
	if (host->hostt->bios_param)
		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
	else
		scsicam_bios_param(bdev, capacity, diskinfo);

	geo->heads = diskinfo[0];
	geo->sectors = diskinfo[1];
	geo->cylinders = diskinfo[2];
	return 0;
}

/**
 *	sd_ioctl - process an ioctl
 *	@bdev: target block device
 *	@mode: FMODE_* mask
 *	@cmd: ioctl command number
 *	@arg: this is the third argument given to the ioctl(2) system call.
 *	Often contains a pointer.
 *
 *	Returns 0 if successful (some ioctls return positive numbers on
 *	success as well). Returns a negated errno value in case of error.
 *
 *	Note: most ioctls are forwarded to the block subsystem or further
 *	down in the scsi subsystem.
 **/
static int sd_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	void __user *p = (void __user *)arg;
	int error;

	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
				    "cmd=0x%x\n", disk->disk_name, cmd));

	error = scsi_verify_blk_ioctl(bdev, cmd);
	if (error < 0)
		return error;

	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device. Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
	error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
			(mode & FMODE_NDELAY) != 0);
	if (error)
		goto out;

	if (is_sed_ioctl(cmd))
		return sed_ioctl(sdkp->opal_dev, cmd, p);

	/*
	 * Send SCSI addressing ioctls directly to mid level, send other
	 * ioctls to block level and then onto mid level if they can't be
	 * resolved.
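	 * (The addressing ioctls handled directly here are
	 * SCSI_IOCTL_GET_IDLUN and SCSI_IOCTL_GET_BUS_NUMBER; see the
	 * switch statement below.)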
1504 */ 1505 switch (cmd) { 1506 case SCSI_IOCTL_GET_IDLUN: 1507 case SCSI_IOCTL_GET_BUS_NUMBER: 1508 error = scsi_ioctl(sdp, cmd, p); 1509 break; 1510 default: 1511 error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p); 1512 if (error != -ENOTTY) 1513 break; 1514 error = scsi_ioctl(sdp, cmd, p); 1515 break; 1516 } 1517 out: 1518 return error; 1519 } 1520 1521 static void set_media_not_present(struct scsi_disk *sdkp) 1522 { 1523 if (sdkp->media_present) 1524 sdkp->device->changed = 1; 1525 1526 if (sdkp->device->removable) { 1527 sdkp->media_present = 0; 1528 sdkp->capacity = 0; 1529 } 1530 } 1531 1532 static int media_not_present(struct scsi_disk *sdkp, 1533 struct scsi_sense_hdr *sshdr) 1534 { 1535 if (!scsi_sense_valid(sshdr)) 1536 return 0; 1537 1538 /* not invoked for commands that could return deferred errors */ 1539 switch (sshdr->sense_key) { 1540 case UNIT_ATTENTION: 1541 case NOT_READY: 1542 /* medium not present */ 1543 if (sshdr->asc == 0x3A) { 1544 set_media_not_present(sdkp); 1545 return 1; 1546 } 1547 } 1548 return 0; 1549 } 1550 1551 /** 1552 * sd_check_events - check media events 1553 * @disk: kernel device descriptor 1554 * @clearing: disk events currently being cleared 1555 * 1556 * Returns mask of DISK_EVENT_*. 1557 * 1558 * Note: this function is invoked from the block subsystem. 1559 **/ 1560 static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) 1561 { 1562 struct scsi_disk *sdkp = scsi_disk_get(disk); 1563 struct scsi_device *sdp; 1564 int retval; 1565 1566 if (!sdkp) 1567 return 0; 1568 1569 sdp = sdkp->device; 1570 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); 1571 1572 /* 1573 * If the device is offline, don't send any commands - just pretend as 1574 * if the command failed. If the device ever comes back online, we 1575 * can deal with it then. It is only because of unrecoverable errors 1576 * that we would ever take a device offline in the first place. 1577 */ 1578 if (!scsi_device_online(sdp)) { 1579 set_media_not_present(sdkp); 1580 goto out; 1581 } 1582 1583 /* 1584 * Using TEST_UNIT_READY enables differentiation between drive with 1585 * no cartridge loaded - NOT READY, drive with changed cartridge - 1586 * UNIT ATTENTION, or with same cartridge - GOOD STATUS. 1587 * 1588 * Drives that auto spin down. eg iomega jaz 1G, will be started 1589 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever 1590 * sd_revalidate() is called. 1591 */ 1592 if (scsi_block_when_processing_errors(sdp)) { 1593 struct scsi_sense_hdr sshdr = { 0, }; 1594 1595 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, 1596 &sshdr); 1597 1598 /* failed to execute TUR, assume media not present */ 1599 if (host_byte(retval)) { 1600 set_media_not_present(sdkp); 1601 goto out; 1602 } 1603 1604 if (media_not_present(sdkp, &sshdr)) 1605 goto out; 1606 } 1607 1608 /* 1609 * For removable scsi disk we have to recognise the presence 1610 * of a disk in the drive. 1611 */ 1612 if (!sdkp->media_present) 1613 sdp->changed = 1; 1614 sdkp->media_present = 1; 1615 out: 1616 /* 1617 * sdp->changed is set under the following conditions: 1618 * 1619 * Medium present state has changed in either direction. 1620 * Device has indicated UNIT_ATTENTION. 1621 */ 1622 retval = sdp->changed ? 
DISK_EVENT_MEDIA_CHANGE : 0; 1623 sdp->changed = 0; 1624 scsi_disk_put(sdkp); 1625 return retval; 1626 } 1627 1628 static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) 1629 { 1630 int retries, res; 1631 struct scsi_device *sdp = sdkp->device; 1632 const int timeout = sdp->request_queue->rq_timeout 1633 * SD_FLUSH_TIMEOUT_MULTIPLIER; 1634 struct scsi_sense_hdr my_sshdr; 1635 1636 if (!scsi_device_online(sdp)) 1637 return -ENODEV; 1638 1639 /* caller might not be interested in sense, but we need it */ 1640 if (!sshdr) 1641 sshdr = &my_sshdr; 1642 1643 for (retries = 3; retries > 0; --retries) { 1644 unsigned char cmd[10] = { 0 }; 1645 1646 cmd[0] = SYNCHRONIZE_CACHE; 1647 /* 1648 * Leave the rest of the command zero to indicate 1649 * flush everything. 1650 */ 1651 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr, 1652 timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL); 1653 if (res == 0) 1654 break; 1655 } 1656 1657 if (res) { 1658 sd_print_result(sdkp, "Synchronize Cache(10) failed", res); 1659 1660 if (driver_byte(res) == DRIVER_SENSE) 1661 sd_print_sense_hdr(sdkp, sshdr); 1662 1663 /* we need to evaluate the error return */ 1664 if (scsi_sense_valid(sshdr) && 1665 (sshdr->asc == 0x3a || /* medium not present */ 1666 sshdr->asc == 0x20 || /* invalid command */ 1667 (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */ 1668 /* this is no error here */ 1669 return 0; 1670 1671 switch (host_byte(res)) { 1672 /* ignore errors due to racing a disconnection */ 1673 case DID_BAD_TARGET: 1674 case DID_NO_CONNECT: 1675 return 0; 1676 /* signal the upper layer it might try again */ 1677 case DID_BUS_BUSY: 1678 case DID_IMM_RETRY: 1679 case DID_REQUEUE: 1680 case DID_SOFT_ERROR: 1681 return -EBUSY; 1682 default: 1683 return -EIO; 1684 } 1685 } 1686 return 0; 1687 } 1688 1689 static void sd_rescan(struct device *dev) 1690 { 1691 struct scsi_disk *sdkp = dev_get_drvdata(dev); 1692 1693 revalidate_disk(sdkp->disk); 1694 } 1695 1696 1697 #ifdef CONFIG_COMPAT 1698 /* 1699 * This gets directly called from VFS. When the ioctl 1700 * is not recognized we go back to the other translation paths. 1701 */ 1702 static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode, 1703 unsigned int cmd, unsigned long arg) 1704 { 1705 struct gendisk *disk = bdev->bd_disk; 1706 struct scsi_disk *sdkp = scsi_disk(disk); 1707 struct scsi_device *sdev = sdkp->device; 1708 void __user *p = compat_ptr(arg); 1709 int error; 1710 1711 error = scsi_verify_blk_ioctl(bdev, cmd); 1712 if (error < 0) 1713 return error; 1714 1715 error = scsi_ioctl_block_when_processing_errors(sdev, cmd, 1716 (mode & FMODE_NDELAY) != 0); 1717 if (error) 1718 return error; 1719 1720 if (is_sed_ioctl(cmd)) 1721 return sed_ioctl(sdkp->opal_dev, cmd, p); 1722 1723 /* 1724 * Let the static ioctl translation table take care of it. 
1725 */ 1726 if (!sdev->host->hostt->compat_ioctl) 1727 return -ENOIOCTLCMD; 1728 return sdev->host->hostt->compat_ioctl(sdev, cmd, p); 1729 } 1730 #endif 1731 1732 static char sd_pr_type(enum pr_type type) 1733 { 1734 switch (type) { 1735 case PR_WRITE_EXCLUSIVE: 1736 return 0x01; 1737 case PR_EXCLUSIVE_ACCESS: 1738 return 0x03; 1739 case PR_WRITE_EXCLUSIVE_REG_ONLY: 1740 return 0x05; 1741 case PR_EXCLUSIVE_ACCESS_REG_ONLY: 1742 return 0x06; 1743 case PR_WRITE_EXCLUSIVE_ALL_REGS: 1744 return 0x07; 1745 case PR_EXCLUSIVE_ACCESS_ALL_REGS: 1746 return 0x08; 1747 default: 1748 return 0; 1749 } 1750 }; 1751 1752 static int sd_pr_command(struct block_device *bdev, u8 sa, 1753 u64 key, u64 sa_key, u8 type, u8 flags) 1754 { 1755 struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device; 1756 struct scsi_sense_hdr sshdr; 1757 int result; 1758 u8 cmd[16] = { 0, }; 1759 u8 data[24] = { 0, }; 1760 1761 cmd[0] = PERSISTENT_RESERVE_OUT; 1762 cmd[1] = sa; 1763 cmd[2] = type; 1764 put_unaligned_be32(sizeof(data), &cmd[5]); 1765 1766 put_unaligned_be64(key, &data[0]); 1767 put_unaligned_be64(sa_key, &data[8]); 1768 data[20] = flags; 1769 1770 result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data), 1771 &sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL); 1772 1773 if (driver_byte(result) == DRIVER_SENSE && 1774 scsi_sense_valid(&sshdr)) { 1775 sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result); 1776 scsi_print_sense_hdr(sdev, NULL, &sshdr); 1777 } 1778 1779 return result; 1780 } 1781 1782 static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 1783 u32 flags) 1784 { 1785 if (flags & ~PR_FL_IGNORE_KEY) 1786 return -EOPNOTSUPP; 1787 return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00, 1788 old_key, new_key, 0, 1789 (1 << 0) /* APTPL */); 1790 } 1791 1792 static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 1793 u32 flags) 1794 { 1795 if (flags) 1796 return -EOPNOTSUPP; 1797 return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0); 1798 } 1799 1800 static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 1801 { 1802 return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0); 1803 } 1804 1805 static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 1806 enum pr_type type, bool abort) 1807 { 1808 return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key, 1809 sd_pr_type(type), 0); 1810 } 1811 1812 static int sd_pr_clear(struct block_device *bdev, u64 key) 1813 { 1814 return sd_pr_command(bdev, 0x03, key, 0, 0, 0); 1815 } 1816 1817 static const struct pr_ops sd_pr_ops = { 1818 .pr_register = sd_pr_register, 1819 .pr_reserve = sd_pr_reserve, 1820 .pr_release = sd_pr_release, 1821 .pr_preempt = sd_pr_preempt, 1822 .pr_clear = sd_pr_clear, 1823 }; 1824 1825 static const struct block_device_operations sd_fops = { 1826 .owner = THIS_MODULE, 1827 .open = sd_open, 1828 .release = sd_release, 1829 .ioctl = sd_ioctl, 1830 .getgeo = sd_getgeo, 1831 #ifdef CONFIG_COMPAT 1832 .compat_ioctl = sd_compat_ioctl, 1833 #endif 1834 .check_events = sd_check_events, 1835 .revalidate_disk = sd_revalidate_disk, 1836 .unlock_native_capacity = sd_unlock_native_capacity, 1837 .report_zones = sd_zbc_report_zones, 1838 .pr_ops = &sd_pr_ops, 1839 }; 1840 1841 /** 1842 * sd_eh_reset - reset error handling callback 1843 * @scmd: sd-issued command that has failed 1844 * 1845 * This function is called by the SCSI midlayer before starting 1846 * SCSI EH. 
When counting medium access failures we have to be
 * careful to register it only once per device and SCSI EH run;
 * there might be several timed out commands which will cause the
 * 'max_medium_access_timeouts' counter to trigger after the first
 * SCSI EH run already and set the device to offline.
 * So this function resets the internal counter before starting SCSI EH.
 **/
static void sd_eh_reset(struct scsi_cmnd *scmd)
{
	struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);

	/* New SCSI EH run, reset gate variable */
	sdkp->ignore_medium_access_errors = false;
}

/**
 *	sd_eh_action - error handling callback
 *	@scmd: sd-issued command that has failed
 *	@eh_disp: The recovery disposition suggested by the midlayer
 *
 *	This function is called by the SCSI midlayer upon completion of an
 *	error test command (currently TEST UNIT READY). The result of sending
 *	the eh command is passed in eh_disp. We're looking for devices that
 *	fail medium access commands but are OK with non access commands like
 *	test unit ready (so error handling would wrongly see the device as
 *	having recovered successfully).
 **/
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
	struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
	struct scsi_device *sdev = scmd->device;

	if (!scsi_device_online(sdev) ||
	    !scsi_medium_access_command(scmd) ||
	    host_byte(scmd->result) != DID_TIME_OUT ||
	    eh_disp != SUCCESS)
		return eh_disp;

	/*
	 * The device has timed out executing a medium access command.
	 * However, the TEST UNIT READY command sent during error
	 * handling completed successfully. Either the device is in the
	 * process of recovering or it has suffered an internal failure
	 * that prevents access to the storage medium.
	 */
	if (!sdkp->ignore_medium_access_errors) {
		sdkp->medium_access_timed_out++;
		sdkp->ignore_medium_access_errors = true;
	}

	/*
	 * If the device keeps failing read/write commands but TEST UNIT
	 * READY always completes successfully we assume that medium
	 * access is no longer possible and take the device offline.
	 */
	if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
		scmd_printk(KERN_ERR, scmd,
			    "Medium access timeout failure. Offlining disk!\n");
		mutex_lock(&sdev->state_mutex);
		scsi_device_set_state(sdev, SDEV_OFFLINE);
		mutex_unlock(&sdev->state_mutex);

		return SUCCESS;
	}

	return eh_disp;
}

static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
{
	struct request *req = scmd->request;
	struct scsi_device *sdev = scmd->device;
	unsigned int transferred, good_bytes;
	u64 start_lba, end_lba, bad_lba;

	/*
	 * Some commands have a payload smaller than the device logical
	 * block size (e.g. INQUIRY on a 4K disk).
	 */
	if (scsi_bufflen(scmd) <= sdev->sector_size)
		return 0;

	/* Check if we have 'bad_lba' information */
	if (!scsi_get_sense_info_fld(scmd->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE,
				     &bad_lba))
		return 0;

	/*
	 * If the bad lba was reported incorrectly, we have no idea where
	 * the error is.
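	 * The reported bad_lba is used only when it falls inside the LBA
	 * range spanned by this request; otherwise no good bytes are
	 * counted.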
1937 */ 1938 start_lba = sectors_to_logical(sdev, blk_rq_pos(req)); 1939 end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd)); 1940 if (bad_lba < start_lba || bad_lba >= end_lba) 1941 return 0; 1942 1943 /* 1944 * resid is optional but mostly filled in. When it's unused, 1945 * its value is zero, so we assume the whole buffer transferred 1946 */ 1947 transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); 1948 1949 /* This computation should always be done in terms of the 1950 * resolution of the device's medium. 1951 */ 1952 good_bytes = logical_to_bytes(sdev, bad_lba - start_lba); 1953 1954 return min(good_bytes, transferred); 1955 } 1956 1957 /** 1958 * sd_done - bottom half handler: called when the lower level 1959 * driver has completed (successfully or otherwise) a scsi command. 1960 * @SCpnt: mid-level's per command structure. 1961 * 1962 * Note: potentially run from within an ISR. Must not block. 1963 **/ 1964 static int sd_done(struct scsi_cmnd *SCpnt) 1965 { 1966 int result = SCpnt->result; 1967 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); 1968 unsigned int sector_size = SCpnt->device->sector_size; 1969 unsigned int resid; 1970 struct scsi_sense_hdr sshdr; 1971 struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); 1972 struct request *req = SCpnt->request; 1973 int sense_valid = 0; 1974 int sense_deferred = 0; 1975 1976 switch (req_op(req)) { 1977 case REQ_OP_DISCARD: 1978 case REQ_OP_WRITE_ZEROES: 1979 case REQ_OP_WRITE_SAME: 1980 case REQ_OP_ZONE_RESET: 1981 case REQ_OP_ZONE_RESET_ALL: 1982 case REQ_OP_ZONE_OPEN: 1983 case REQ_OP_ZONE_CLOSE: 1984 case REQ_OP_ZONE_FINISH: 1985 if (!result) { 1986 good_bytes = blk_rq_bytes(req); 1987 scsi_set_resid(SCpnt, 0); 1988 } else { 1989 good_bytes = 0; 1990 scsi_set_resid(SCpnt, blk_rq_bytes(req)); 1991 } 1992 break; 1993 default: 1994 /* 1995 * In case of bogus fw or device, we could end up having 1996 * an unaligned partial completion. Check this here and force 1997 * alignment. 1998 */ 1999 resid = scsi_get_resid(SCpnt); 2000 if (resid & (sector_size - 1)) { 2001 sd_printk(KERN_INFO, sdkp, 2002 "Unaligned partial completion (resid=%u, sector_sz=%u)\n", 2003 resid, sector_size); 2004 scsi_print_command(SCpnt); 2005 resid = min(scsi_bufflen(SCpnt), 2006 round_up(resid, sector_size)); 2007 scsi_set_resid(SCpnt, resid); 2008 } 2009 } 2010 2011 if (result) { 2012 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); 2013 if (sense_valid) 2014 sense_deferred = scsi_sense_is_deferred(&sshdr); 2015 } 2016 sdkp->medium_access_timed_out = 0; 2017 2018 if (driver_byte(result) != DRIVER_SENSE && 2019 (!sense_valid || sense_deferred)) 2020 goto out; 2021 2022 switch (sshdr.sense_key) { 2023 case HARDWARE_ERROR: 2024 case MEDIUM_ERROR: 2025 good_bytes = sd_completed_bytes(SCpnt); 2026 break; 2027 case RECOVERED_ERROR: 2028 good_bytes = scsi_bufflen(SCpnt); 2029 break; 2030 case NO_SENSE: 2031 /* This indicates a false check condition, so ignore it. An 2032 * unknown amount of data was transferred so treat it as an 2033 * error. 
2034 */ 2035 SCpnt->result = 0; 2036 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2037 break; 2038 case ABORTED_COMMAND: 2039 if (sshdr.asc == 0x10) /* DIF: Target detected corruption */ 2040 good_bytes = sd_completed_bytes(SCpnt); 2041 break; 2042 case ILLEGAL_REQUEST: 2043 switch (sshdr.asc) { 2044 case 0x10: /* DIX: Host detected corruption */ 2045 good_bytes = sd_completed_bytes(SCpnt); 2046 break; 2047 case 0x20: /* INVALID COMMAND OPCODE */ 2048 case 0x24: /* INVALID FIELD IN CDB */ 2049 switch (SCpnt->cmnd[0]) { 2050 case UNMAP: 2051 sd_config_discard(sdkp, SD_LBP_DISABLE); 2052 break; 2053 case WRITE_SAME_16: 2054 case WRITE_SAME: 2055 if (SCpnt->cmnd[1] & 8) { /* UNMAP */ 2056 sd_config_discard(sdkp, SD_LBP_DISABLE); 2057 } else { 2058 sdkp->device->no_write_same = 1; 2059 sd_config_write_same(sdkp); 2060 req->rq_flags |= RQF_QUIET; 2061 } 2062 break; 2063 } 2064 } 2065 break; 2066 default: 2067 break; 2068 } 2069 2070 out: 2071 if (sd_is_zoned(sdkp)) 2072 sd_zbc_complete(SCpnt, good_bytes, &sshdr); 2073 2074 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt, 2075 "sd_done: completed %d of %d bytes\n", 2076 good_bytes, scsi_bufflen(SCpnt))); 2077 2078 return good_bytes; 2079 } 2080 2081 /* 2082 * spinup disk - called only in sd_revalidate_disk() 2083 */ 2084 static void 2085 sd_spinup_disk(struct scsi_disk *sdkp) 2086 { 2087 unsigned char cmd[10]; 2088 unsigned long spintime_expire = 0; 2089 int retries, spintime; 2090 unsigned int the_result; 2091 struct scsi_sense_hdr sshdr; 2092 int sense_valid = 0; 2093 2094 spintime = 0; 2095 2096 /* Spin up drives, as required. Only do this at boot time */ 2097 /* Spinup needs to be done for module loads too. */ 2098 do { 2099 retries = 0; 2100 2101 do { 2102 cmd[0] = TEST_UNIT_READY; 2103 memset((void *) &cmd[1], 0, 9); 2104 2105 the_result = scsi_execute_req(sdkp->device, cmd, 2106 DMA_NONE, NULL, 0, 2107 &sshdr, SD_TIMEOUT, 2108 SD_MAX_RETRIES, NULL); 2109 2110 /* 2111 * If the drive has indicated to us that it 2112 * doesn't have any media in it, don't bother 2113 * with any more polling. 2114 */ 2115 if (media_not_present(sdkp, &sshdr)) 2116 return; 2117 2118 if (the_result) 2119 sense_valid = scsi_sense_valid(&sshdr); 2120 retries++; 2121 } while (retries < 3 && 2122 (!scsi_status_is_good(the_result) || 2123 ((driver_byte(the_result) == DRIVER_SENSE) && 2124 sense_valid && sshdr.sense_key == UNIT_ATTENTION))); 2125 2126 if (driver_byte(the_result) != DRIVER_SENSE) { 2127 /* no sense, TUR either succeeded or failed 2128 * with a status error */ 2129 if(!spintime && !scsi_status_is_good(the_result)) { 2130 sd_print_result(sdkp, "Test Unit Ready failed", 2131 the_result); 2132 } 2133 break; 2134 } 2135 2136 /* 2137 * The device does not want the automatic start to be issued. 
2138 */ 2139 if (sdkp->device->no_start_on_add) 2140 break; 2141 2142 if (sense_valid && sshdr.sense_key == NOT_READY) { 2143 if (sshdr.asc == 4 && sshdr.ascq == 3) 2144 break; /* manual intervention required */ 2145 if (sshdr.asc == 4 && sshdr.ascq == 0xb) 2146 break; /* standby */ 2147 if (sshdr.asc == 4 && sshdr.ascq == 0xc) 2148 break; /* unavailable */ 2149 if (sshdr.asc == 4 && sshdr.ascq == 0x1b) 2150 break; /* sanitize in progress */ 2151 /* 2152 * Issue command to spin up drive when not ready 2153 */ 2154 if (!spintime) { 2155 sd_printk(KERN_NOTICE, sdkp, "Spinning up disk..."); 2156 cmd[0] = START_STOP; 2157 cmd[1] = 1; /* Return immediately */ 2158 memset((void *) &cmd[2], 0, 8); 2159 cmd[4] = 1; /* Start spin cycle */ 2160 if (sdkp->device->start_stop_pwr_cond) 2161 cmd[4] |= 1 << 4; 2162 scsi_execute_req(sdkp->device, cmd, DMA_NONE, 2163 NULL, 0, &sshdr, 2164 SD_TIMEOUT, SD_MAX_RETRIES, 2165 NULL); 2166 spintime_expire = jiffies + 100 * HZ; 2167 spintime = 1; 2168 } 2169 /* Wait 1 second for next try */ 2170 msleep(1000); 2171 printk(KERN_CONT "."); 2172 2173 /* 2174 * Wait for USB flash devices with slow firmware. 2175 * Yes, this sense key/ASC combination shouldn't 2176 * occur here. It's characteristic of these devices. 2177 */ 2178 } else if (sense_valid && 2179 sshdr.sense_key == UNIT_ATTENTION && 2180 sshdr.asc == 0x28) { 2181 if (!spintime) { 2182 spintime_expire = jiffies + 5 * HZ; 2183 spintime = 1; 2184 } 2185 /* Wait 1 second for next try */ 2186 msleep(1000); 2187 } else { 2188 /* we don't understand the sense code, so it's 2189 * probably pointless to loop */ 2190 if(!spintime) { 2191 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n"); 2192 sd_print_sense_hdr(sdkp, &sshdr); 2193 } 2194 break; 2195 } 2196 2197 } while (spintime && time_before_eq(jiffies, spintime_expire)); 2198 2199 if (spintime) { 2200 if (scsi_status_is_good(the_result)) 2201 printk(KERN_CONT "ready\n"); 2202 else 2203 printk(KERN_CONT "not responding...\n"); 2204 } 2205 } 2206 2207 /* 2208 * Determine whether disk supports Data Integrity Field. 2209 */ 2210 static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer) 2211 { 2212 struct scsi_device *sdp = sdkp->device; 2213 u8 type; 2214 int ret = 0; 2215 2216 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) 2217 return ret; 2218 2219 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ 2220 2221 if (type > T10_PI_TYPE3_PROTECTION) 2222 ret = -ENODEV; 2223 else if (scsi_host_dif_capable(sdp->host, type)) 2224 ret = 1; 2225 2226 if (sdkp->first_scan || type != sdkp->protection_type) 2227 switch (ret) { 2228 case -ENODEV: 2229 sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \ 2230 " protection type %u. Disabling disk!\n", 2231 type); 2232 break; 2233 case 1: 2234 sd_printk(KERN_NOTICE, sdkp, 2235 "Enabling DIF Type %u protection\n", type); 2236 break; 2237 case 0: 2238 sd_printk(KERN_NOTICE, sdkp, 2239 "Disabling DIF Type %u protection\n", type); 2240 break; 2241 } 2242 2243 sdkp->protection_type = type; 2244 2245 return ret; 2246 } 2247 2248 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, 2249 struct scsi_sense_hdr *sshdr, int sense_valid, 2250 int the_result) 2251 { 2252 if (driver_byte(the_result) == DRIVER_SENSE) 2253 sd_print_sense_hdr(sdkp, sshdr); 2254 else 2255 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n"); 2256 2257 /* 2258 * Set dirty bit for removable devices if not ready - 2259 * sometimes drives will not report this properly. 
2260 */ 2261 if (sdp->removable && 2262 sense_valid && sshdr->sense_key == NOT_READY) 2263 set_media_not_present(sdkp); 2264 2265 /* 2266 * We used to set media_present to 0 here to indicate no media 2267 * in the drive, but some drives fail read capacity even with 2268 * media present, so we can't do that. 2269 */ 2270 sdkp->capacity = 0; /* unknown mapped to zero - as usual */ 2271 } 2272 2273 #define RC16_LEN 32 2274 #if RC16_LEN > SD_BUF_SIZE 2275 #error RC16_LEN must not be more than SD_BUF_SIZE 2276 #endif 2277 2278 #define READ_CAPACITY_RETRIES_ON_RESET 10 2279 2280 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, 2281 unsigned char *buffer) 2282 { 2283 unsigned char cmd[16]; 2284 struct scsi_sense_hdr sshdr; 2285 int sense_valid = 0; 2286 int the_result; 2287 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; 2288 unsigned int alignment; 2289 unsigned long long lba; 2290 unsigned sector_size; 2291 2292 if (sdp->no_read_capacity_16) 2293 return -EINVAL; 2294 2295 do { 2296 memset(cmd, 0, 16); 2297 cmd[0] = SERVICE_ACTION_IN_16; 2298 cmd[1] = SAI_READ_CAPACITY_16; 2299 cmd[13] = RC16_LEN; 2300 memset(buffer, 0, RC16_LEN); 2301 2302 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, 2303 buffer, RC16_LEN, &sshdr, 2304 SD_TIMEOUT, SD_MAX_RETRIES, NULL); 2305 2306 if (media_not_present(sdkp, &sshdr)) 2307 return -ENODEV; 2308 2309 if (the_result) { 2310 sense_valid = scsi_sense_valid(&sshdr); 2311 if (sense_valid && 2312 sshdr.sense_key == ILLEGAL_REQUEST && 2313 (sshdr.asc == 0x20 || sshdr.asc == 0x24) && 2314 sshdr.ascq == 0x00) 2315 /* Invalid Command Operation Code or 2316 * Invalid Field in CDB, just retry 2317 * silently with RC10 */ 2318 return -EINVAL; 2319 if (sense_valid && 2320 sshdr.sense_key == UNIT_ATTENTION && 2321 sshdr.asc == 0x29 && sshdr.ascq == 0x00) 2322 /* Device reset might occur several times, 2323 * give it one more chance */ 2324 if (--reset_retries > 0) 2325 continue; 2326 } 2327 retries--; 2328 2329 } while (the_result && retries); 2330 2331 if (the_result) { 2332 sd_print_result(sdkp, "Read Capacity(16) failed", the_result); 2333 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); 2334 return -EINVAL; 2335 } 2336 2337 sector_size = get_unaligned_be32(&buffer[8]); 2338 lba = get_unaligned_be64(&buffer[0]); 2339 2340 if (sd_read_protection_type(sdkp, buffer) < 0) { 2341 sdkp->capacity = 0; 2342 return -ENODEV; 2343 } 2344 2345 /* Logical blocks per physical block exponent */ 2346 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size; 2347 2348 /* RC basis */ 2349 sdkp->rc_basis = (buffer[12] >> 4) & 0x3; 2350 2351 /* Lowest aligned logical block */ 2352 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; 2353 blk_queue_alignment_offset(sdp->request_queue, alignment); 2354 if (alignment && sdkp->first_scan) 2355 sd_printk(KERN_NOTICE, sdkp, 2356 "physical block alignment offset: %u\n", alignment); 2357 2358 if (buffer[14] & 0x80) { /* LBPME */ 2359 sdkp->lbpme = 1; 2360 2361 if (buffer[14] & 0x40) /* LBPRZ */ 2362 sdkp->lbprz = 1; 2363 2364 sd_config_discard(sdkp, SD_LBP_WS16); 2365 } 2366 2367 sdkp->capacity = lba + 1; 2368 return sector_size; 2369 } 2370 2371 static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, 2372 unsigned char *buffer) 2373 { 2374 unsigned char cmd[16]; 2375 struct scsi_sense_hdr sshdr; 2376 int sense_valid = 0; 2377 int the_result; 2378 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; 2379 sector_t lba; 2380 
unsigned sector_size;
2381
2382 do {
2383 cmd[0] = READ_CAPACITY;
2384 memset(&cmd[1], 0, 9);
2385 memset(buffer, 0, 8);
2386
2387 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
2388 buffer, 8, &sshdr,
2389 SD_TIMEOUT, SD_MAX_RETRIES, NULL);
2390
2391 if (media_not_present(sdkp, &sshdr))
2392 return -ENODEV;
2393
2394 if (the_result) {
2395 sense_valid = scsi_sense_valid(&sshdr);
2396 if (sense_valid &&
2397 sshdr.sense_key == UNIT_ATTENTION &&
2398 sshdr.asc == 0x29 && sshdr.ascq == 0x00)
2399 /* Device reset might occur several times,
2400 * give it one more chance */
2401 if (--reset_retries > 0)
2402 continue;
2403 }
2404 retries--;
2405
2406 } while (the_result && retries);
2407
2408 if (the_result) {
2409 sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
2410 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2411 return -EINVAL;
2412 }
2413
2414 sector_size = get_unaligned_be32(&buffer[4]);
2415 lba = get_unaligned_be32(&buffer[0]);
2416
2417 if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
2418 /* Some buggy (usb cardreader) devices return an lba of
2419 0xffffffff when they want to report a size of 0 (with
2420 which they really mean no media is present) */
2421 sdkp->capacity = 0;
2422 sdkp->physical_block_size = sector_size;
2423 return sector_size;
2424 }
2425
2426 sdkp->capacity = lba + 1;
2427 sdkp->physical_block_size = sector_size;
2428 return sector_size;
2429 }
2430
2431 static int sd_try_rc16_first(struct scsi_device *sdp)
2432 {
2433 if (sdp->host->max_cmd_len < 16)
2434 return 0;
2435 if (sdp->try_rc_10_first)
2436 return 0;
2437 if (sdp->scsi_level > SCSI_SPC_2)
2438 return 1;
2439 if (scsi_device_protection(sdp))
2440 return 1;
2441 return 0;
2442 }
2443
2444 /*
2445 * read disk capacity
2446 */
2447 static void
2448 sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
2449 {
2450 int sector_size;
2451 struct scsi_device *sdp = sdkp->device;
2452
2453 if (sd_try_rc16_first(sdp)) {
2454 sector_size = read_capacity_16(sdkp, sdp, buffer);
2455 if (sector_size == -EOVERFLOW)
2456 goto got_data;
2457 if (sector_size == -ENODEV)
2458 return;
2459 if (sector_size < 0)
2460 sector_size = read_capacity_10(sdkp, sdp, buffer);
2461 if (sector_size < 0)
2462 return;
2463 } else {
2464 sector_size = read_capacity_10(sdkp, sdp, buffer);
2465 if (sector_size == -EOVERFLOW)
2466 goto got_data;
2467 if (sector_size < 0)
2468 return;
2469 if ((sizeof(sdkp->capacity) > 4) &&
2470 (sdkp->capacity > 0xffffffffULL)) {
2471 int old_sector_size = sector_size;
2472 sd_printk(KERN_NOTICE, sdkp, "Very big device. "
2473 "Trying to use READ CAPACITY(16).\n");
2474 sector_size = read_capacity_16(sdkp, sdp, buffer);
2475 if (sector_size < 0) {
2476 sd_printk(KERN_NOTICE, sdkp,
2477 "Using 0xffffffff as device size\n");
2478 sdkp->capacity = 1 + (sector_t) 0xffffffff;
2479 sector_size = old_sector_size;
2480 goto got_data;
2481 }
2482 /* Remember that READ CAPACITY(16) succeeded */
2483 sdp->try_rc_10_first = 0;
2484 }
2485 }
2486
2487 /* Some devices are known to return the total number of blocks,
2488 * not the highest block number. Some devices have versions
2489 * which do this and others which do not. Some devices we might
2490 * suspect of doing this but we don't know for certain.
2491 *
2492 * If we know the reported capacity is wrong, decrement it. If
2493 * we can only guess, then assume the number of blocks is even
2494 * (usually true but not always) and err on the side of lowering
2495 * the capacity.
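 *
 * Worked example (illustrative only): a drive with 1000000 blocks
 * should report 999999 as its last LBA, making the computed capacity
 * (lba + 1) equal to 1000000. A quirky drive that instead reports the
 * block count 1000000 yields a capacity of 1000001, one too high,
 * which the decrement below corrects when fix_capacity is set.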
2496 */ 2497 if (sdp->fix_capacity || 2498 (sdp->guess_capacity && (sdkp->capacity & 0x01))) { 2499 sd_printk(KERN_INFO, sdkp, "Adjusting the sector count " 2500 "from its reported value: %llu\n", 2501 (unsigned long long) sdkp->capacity); 2502 --sdkp->capacity; 2503 } 2504 2505 got_data: 2506 if (sector_size == 0) { 2507 sector_size = 512; 2508 sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, " 2509 "assuming 512.\n"); 2510 } 2511 2512 if (sector_size != 512 && 2513 sector_size != 1024 && 2514 sector_size != 2048 && 2515 sector_size != 4096) { 2516 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", 2517 sector_size); 2518 /* 2519 * The user might want to re-format the drive with 2520 * a supported sectorsize. Once this happens, it 2521 * would be relatively trivial to set the thing up. 2522 * For this reason, we leave the thing in the table. 2523 */ 2524 sdkp->capacity = 0; 2525 /* 2526 * set a bogus sector size so the normal read/write 2527 * logic in the block layer will eventually refuse any 2528 * request on this device without tripping over power 2529 * of two sector size assumptions 2530 */ 2531 sector_size = 512; 2532 } 2533 blk_queue_logical_block_size(sdp->request_queue, sector_size); 2534 blk_queue_physical_block_size(sdp->request_queue, 2535 sdkp->physical_block_size); 2536 sdkp->device->sector_size = sector_size; 2537 2538 if (sdkp->capacity > 0xffffffff) 2539 sdp->use_16_for_rw = 1; 2540 2541 } 2542 2543 /* 2544 * Print disk capacity 2545 */ 2546 static void 2547 sd_print_capacity(struct scsi_disk *sdkp, 2548 sector_t old_capacity) 2549 { 2550 int sector_size = sdkp->device->sector_size; 2551 char cap_str_2[10], cap_str_10[10]; 2552 2553 if (!sdkp->first_scan && old_capacity == sdkp->capacity) 2554 return; 2555 2556 string_get_size(sdkp->capacity, sector_size, 2557 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); 2558 string_get_size(sdkp->capacity, sector_size, 2559 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); 2560 2561 sd_printk(KERN_NOTICE, sdkp, 2562 "%llu %d-byte logical blocks: (%s/%s)\n", 2563 (unsigned long long)sdkp->capacity, 2564 sector_size, cap_str_10, cap_str_2); 2565 2566 if (sdkp->physical_block_size != sector_size) 2567 sd_printk(KERN_NOTICE, sdkp, 2568 "%u-byte physical blocks\n", 2569 sdkp->physical_block_size); 2570 2571 sd_zbc_print_zones(sdkp); 2572 } 2573 2574 /* called with buffer of length 512 */ 2575 static inline int 2576 sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage, 2577 unsigned char *buffer, int len, struct scsi_mode_data *data, 2578 struct scsi_sense_hdr *sshdr) 2579 { 2580 return scsi_mode_sense(sdp, dbd, modepage, buffer, len, 2581 SD_TIMEOUT, SD_MAX_RETRIES, data, 2582 sshdr); 2583 } 2584 2585 /* 2586 * read write protect setting, if possible - called only in sd_revalidate_disk() 2587 * called with buffer of length SD_BUF_SIZE 2588 */ 2589 static void 2590 sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) 2591 { 2592 int res; 2593 struct scsi_device *sdp = sdkp->device; 2594 struct scsi_mode_data data; 2595 int old_wp = sdkp->write_prot; 2596 2597 set_disk_ro(sdkp->disk, 0); 2598 if (sdp->skip_ms_page_3f) { 2599 sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n"); 2600 return; 2601 } 2602 2603 if (sdp->use_192_bytes_for_3f) { 2604 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL); 2605 } else { 2606 /* 2607 * First attempt: ask for all pages (0x3F), but only 4 bytes. 
2608 * We have to start carefully: some devices hang if we ask 2609 * for more than is available. 2610 */ 2611 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL); 2612 2613 /* 2614 * Second attempt: ask for page 0 When only page 0 is 2615 * implemented, a request for page 3F may return Sense Key 2616 * 5: Illegal Request, Sense Code 24: Invalid field in 2617 * CDB. 2618 */ 2619 if (!scsi_status_is_good(res)) 2620 res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL); 2621 2622 /* 2623 * Third attempt: ask 255 bytes, as we did earlier. 2624 */ 2625 if (!scsi_status_is_good(res)) 2626 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255, 2627 &data, NULL); 2628 } 2629 2630 if (!scsi_status_is_good(res)) { 2631 sd_first_printk(KERN_WARNING, sdkp, 2632 "Test WP failed, assume Write Enabled\n"); 2633 } else { 2634 sdkp->write_prot = ((data.device_specific & 0x80) != 0); 2635 set_disk_ro(sdkp->disk, sdkp->write_prot); 2636 if (sdkp->first_scan || old_wp != sdkp->write_prot) { 2637 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", 2638 sdkp->write_prot ? "on" : "off"); 2639 sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer); 2640 } 2641 } 2642 } 2643 2644 /* 2645 * sd_read_cache_type - called only from sd_revalidate_disk() 2646 * called with buffer of length SD_BUF_SIZE 2647 */ 2648 static void 2649 sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) 2650 { 2651 int len = 0, res; 2652 struct scsi_device *sdp = sdkp->device; 2653 2654 int dbd; 2655 int modepage; 2656 int first_len; 2657 struct scsi_mode_data data; 2658 struct scsi_sense_hdr sshdr; 2659 int old_wce = sdkp->WCE; 2660 int old_rcd = sdkp->RCD; 2661 int old_dpofua = sdkp->DPOFUA; 2662 2663 2664 if (sdkp->cache_override) 2665 return; 2666 2667 first_len = 4; 2668 if (sdp->skip_ms_page_8) { 2669 if (sdp->type == TYPE_RBC) 2670 goto defaults; 2671 else { 2672 if (sdp->skip_ms_page_3f) 2673 goto defaults; 2674 modepage = 0x3F; 2675 if (sdp->use_192_bytes_for_3f) 2676 first_len = 192; 2677 dbd = 0; 2678 } 2679 } else if (sdp->type == TYPE_RBC) { 2680 modepage = 6; 2681 dbd = 8; 2682 } else { 2683 modepage = 8; 2684 dbd = 0; 2685 } 2686 2687 /* cautiously ask */ 2688 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len, 2689 &data, &sshdr); 2690 2691 if (!scsi_status_is_good(res)) 2692 goto bad_sense; 2693 2694 if (!data.header_length) { 2695 modepage = 6; 2696 first_len = 0; 2697 sd_first_printk(KERN_ERR, sdkp, 2698 "Missing header in MODE_SENSE response\n"); 2699 } 2700 2701 /* that went OK, now ask for the proper length */ 2702 len = data.length; 2703 2704 /* 2705 * We're only interested in the first three bytes, actually. 2706 * But the data cache page is defined for the first 20. 2707 */ 2708 if (len < 3) 2709 goto bad_sense; 2710 else if (len > SD_BUF_SIZE) { 2711 sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " 2712 "data from %d to %d bytes\n", len, SD_BUF_SIZE); 2713 len = SD_BUF_SIZE; 2714 } 2715 if (modepage == 0x3F && sdp->use_192_bytes_for_3f) 2716 len = 192; 2717 2718 /* Get the data */ 2719 if (len > first_len) 2720 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, 2721 &data, &sshdr); 2722 2723 if (scsi_status_is_good(res)) { 2724 int offset = data.header_length + data.block_descriptor_length; 2725 2726 while (offset < len) { 2727 u8 page_code = buffer[offset] & 0x3F; 2728 u8 spf = buffer[offset] & 0x40; 2729 2730 if (page_code == 8 || page_code == 6) { 2731 /* We're interested only in the first 3 bytes. 
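 * For the Caching mode page (08h), byte 2 of the page carries the
 * WCE (bit 2) and RCD (bit 0) flags that are decoded at Page_found
 * below.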
2732 */ 2733 if (len - offset <= 2) { 2734 sd_first_printk(KERN_ERR, sdkp, 2735 "Incomplete mode parameter " 2736 "data\n"); 2737 goto defaults; 2738 } else { 2739 modepage = page_code; 2740 goto Page_found; 2741 } 2742 } else { 2743 /* Go to the next page */ 2744 if (spf && len - offset > 3) 2745 offset += 4 + (buffer[offset+2] << 8) + 2746 buffer[offset+3]; 2747 else if (!spf && len - offset > 1) 2748 offset += 2 + buffer[offset+1]; 2749 else { 2750 sd_first_printk(KERN_ERR, sdkp, 2751 "Incomplete mode " 2752 "parameter data\n"); 2753 goto defaults; 2754 } 2755 } 2756 } 2757 2758 sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n"); 2759 goto defaults; 2760 2761 Page_found: 2762 if (modepage == 8) { 2763 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); 2764 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); 2765 } else { 2766 sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0); 2767 sdkp->RCD = 0; 2768 } 2769 2770 sdkp->DPOFUA = (data.device_specific & 0x10) != 0; 2771 if (sdp->broken_fua) { 2772 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); 2773 sdkp->DPOFUA = 0; 2774 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && 2775 !sdkp->device->use_16_for_rw) { 2776 sd_first_printk(KERN_NOTICE, sdkp, 2777 "Uses READ/WRITE(6), disabling FUA\n"); 2778 sdkp->DPOFUA = 0; 2779 } 2780 2781 /* No cache flush allowed for write protected devices */ 2782 if (sdkp->WCE && sdkp->write_prot) 2783 sdkp->WCE = 0; 2784 2785 if (sdkp->first_scan || old_wce != sdkp->WCE || 2786 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA) 2787 sd_printk(KERN_NOTICE, sdkp, 2788 "Write cache: %s, read cache: %s, %s\n", 2789 sdkp->WCE ? "enabled" : "disabled", 2790 sdkp->RCD ? "disabled" : "enabled", 2791 sdkp->DPOFUA ? "supports DPO and FUA" 2792 : "doesn't support DPO or FUA"); 2793 2794 return; 2795 } 2796 2797 bad_sense: 2798 if (scsi_sense_valid(&sshdr) && 2799 sshdr.sense_key == ILLEGAL_REQUEST && 2800 sshdr.asc == 0x24 && sshdr.ascq == 0x0) 2801 /* Invalid field in CDB */ 2802 sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n"); 2803 else 2804 sd_first_printk(KERN_ERR, sdkp, 2805 "Asking for cache data failed\n"); 2806 2807 defaults: 2808 if (sdp->wce_default_on) { 2809 sd_first_printk(KERN_NOTICE, sdkp, 2810 "Assuming drive cache: write back\n"); 2811 sdkp->WCE = 1; 2812 } else { 2813 sd_first_printk(KERN_ERR, sdkp, 2814 "Assuming drive cache: write through\n"); 2815 sdkp->WCE = 0; 2816 } 2817 sdkp->RCD = 0; 2818 sdkp->DPOFUA = 0; 2819 } 2820 2821 /* 2822 * The ATO bit indicates whether the DIF application tag is available 2823 * for use by the operating system. 
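 * The bit is reported in byte 5, bit 7 of the Control mode page (0Ah),
 * which is why the MODE SENSE below requests page 0x0a and tests
 * buffer[offset + 5] & 0x80.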
2824 */ 2825 static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) 2826 { 2827 int res, offset; 2828 struct scsi_device *sdp = sdkp->device; 2829 struct scsi_mode_data data; 2830 struct scsi_sense_hdr sshdr; 2831 2832 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 2833 return; 2834 2835 if (sdkp->protection_type == 0) 2836 return; 2837 2838 res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT, 2839 SD_MAX_RETRIES, &data, &sshdr); 2840 2841 if (!scsi_status_is_good(res) || !data.header_length || 2842 data.length < 6) { 2843 sd_first_printk(KERN_WARNING, sdkp, 2844 "getting Control mode page failed, assume no ATO\n"); 2845 2846 if (scsi_sense_valid(&sshdr)) 2847 sd_print_sense_hdr(sdkp, &sshdr); 2848 2849 return; 2850 } 2851 2852 offset = data.header_length + data.block_descriptor_length; 2853 2854 if ((buffer[offset] & 0x3f) != 0x0a) { 2855 sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n"); 2856 return; 2857 } 2858 2859 if ((buffer[offset + 5] & 0x80) == 0) 2860 return; 2861 2862 sdkp->ATO = 1; 2863 2864 return; 2865 } 2866 2867 /** 2868 * sd_read_block_limits - Query disk device for preferred I/O sizes. 2869 * @sdkp: disk to query 2870 */ 2871 static void sd_read_block_limits(struct scsi_disk *sdkp) 2872 { 2873 unsigned int sector_sz = sdkp->device->sector_size; 2874 const int vpd_len = 64; 2875 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); 2876 2877 if (!buffer || 2878 /* Block Limits VPD */ 2879 scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len)) 2880 goto out; 2881 2882 blk_queue_io_min(sdkp->disk->queue, 2883 get_unaligned_be16(&buffer[6]) * sector_sz); 2884 2885 sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]); 2886 sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]); 2887 2888 if (buffer[3] == 0x3c) { 2889 unsigned int lba_count, desc_count; 2890 2891 sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]); 2892 2893 if (!sdkp->lbpme) 2894 goto out; 2895 2896 lba_count = get_unaligned_be32(&buffer[20]); 2897 desc_count = get_unaligned_be32(&buffer[24]); 2898 2899 if (lba_count && desc_count) 2900 sdkp->max_unmap_blocks = lba_count; 2901 2902 sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]); 2903 2904 if (buffer[32] & 0x80) 2905 sdkp->unmap_alignment = 2906 get_unaligned_be32(&buffer[32]) & ~(1 << 31); 2907 2908 if (!sdkp->lbpvpd) { /* LBP VPD page not provided */ 2909 2910 if (sdkp->max_unmap_blocks) 2911 sd_config_discard(sdkp, SD_LBP_UNMAP); 2912 else 2913 sd_config_discard(sdkp, SD_LBP_WS16); 2914 2915 } else { /* LBP VPD page tells us what to use */ 2916 if (sdkp->lbpu && sdkp->max_unmap_blocks) 2917 sd_config_discard(sdkp, SD_LBP_UNMAP); 2918 else if (sdkp->lbpws) 2919 sd_config_discard(sdkp, SD_LBP_WS16); 2920 else if (sdkp->lbpws10) 2921 sd_config_discard(sdkp, SD_LBP_WS10); 2922 else 2923 sd_config_discard(sdkp, SD_LBP_DISABLE); 2924 } 2925 } 2926 2927 out: 2928 kfree(buffer); 2929 } 2930 2931 /** 2932 * sd_read_block_characteristics - Query block dev. 
characteristics 2933 * @sdkp: disk to query 2934 */ 2935 static void sd_read_block_characteristics(struct scsi_disk *sdkp) 2936 { 2937 struct request_queue *q = sdkp->disk->queue; 2938 unsigned char *buffer; 2939 u16 rot; 2940 const int vpd_len = 64; 2941 2942 buffer = kmalloc(vpd_len, GFP_KERNEL); 2943 2944 if (!buffer || 2945 /* Block Device Characteristics VPD */ 2946 scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len)) 2947 goto out; 2948 2949 rot = get_unaligned_be16(&buffer[4]); 2950 2951 if (rot == 1) { 2952 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 2953 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); 2954 } 2955 2956 if (sdkp->device->type == TYPE_ZBC) { 2957 /* Host-managed */ 2958 q->limits.zoned = BLK_ZONED_HM; 2959 } else { 2960 sdkp->zoned = (buffer[8] >> 4) & 3; 2961 if (sdkp->zoned == 1) 2962 /* Host-aware */ 2963 q->limits.zoned = BLK_ZONED_HA; 2964 else 2965 /* 2966 * Treat drive-managed devices as 2967 * regular block devices. 2968 */ 2969 q->limits.zoned = BLK_ZONED_NONE; 2970 } 2971 if (blk_queue_is_zoned(q) && sdkp->first_scan) 2972 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", 2973 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); 2974 2975 out: 2976 kfree(buffer); 2977 } 2978 2979 /** 2980 * sd_read_block_provisioning - Query provisioning VPD page 2981 * @sdkp: disk to query 2982 */ 2983 static void sd_read_block_provisioning(struct scsi_disk *sdkp) 2984 { 2985 unsigned char *buffer; 2986 const int vpd_len = 8; 2987 2988 if (sdkp->lbpme == 0) 2989 return; 2990 2991 buffer = kmalloc(vpd_len, GFP_KERNEL); 2992 2993 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len)) 2994 goto out; 2995 2996 sdkp->lbpvpd = 1; 2997 sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */ 2998 sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */ 2999 sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */ 3000 3001 out: 3002 kfree(buffer); 3003 } 3004 3005 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) 3006 { 3007 struct scsi_device *sdev = sdkp->device; 3008 3009 if (sdev->host->no_write_same) { 3010 sdev->no_write_same = 1; 3011 3012 return; 3013 } 3014 3015 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) { 3016 /* too large values might cause issues with arcmsr */ 3017 int vpd_buf_len = 64; 3018 3019 sdev->no_report_opcodes = 1; 3020 3021 /* Disable WRITE SAME if REPORT SUPPORTED OPERATION 3022 * CODES is unsupported and the device has an ATA 3023 * Information VPD page (SAT). 3024 */ 3025 if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len)) 3026 sdev->no_write_same = 1; 3027 } 3028 3029 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1) 3030 sdkp->ws16 = 1; 3031 3032 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1) 3033 sdkp->ws10 = 1; 3034 } 3035 3036 static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer) 3037 { 3038 struct scsi_device *sdev = sdkp->device; 3039 3040 if (!sdev->security_supported) 3041 return; 3042 3043 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, 3044 SECURITY_PROTOCOL_IN) == 1 && 3045 scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, 3046 SECURITY_PROTOCOL_OUT) == 1) 3047 sdkp->security = 1; 3048 } 3049 3050 /* 3051 * Determine the device's preferred I/O size for reads and writes 3052 * unless the reported value is unreasonably small, large, not a 3053 * multiple of the physical block size, or simply garbage. 
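 * Concretely, sd_validate_opt_xfer_size() below accepts the reported
 * OPTIMAL TRANSFER LENGTH only if it is non-zero, no larger than both
 * dev_max and SD_DEF_XFER_BLOCKS, at least PAGE_SIZE bytes, and a
 * multiple of the physical block size.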
3054 */ 3055 static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, 3056 unsigned int dev_max) 3057 { 3058 struct scsi_device *sdp = sdkp->device; 3059 unsigned int opt_xfer_bytes = 3060 logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 3061 3062 if (sdkp->opt_xfer_blocks == 0) 3063 return false; 3064 3065 if (sdkp->opt_xfer_blocks > dev_max) { 3066 sd_first_printk(KERN_WARNING, sdkp, 3067 "Optimal transfer size %u logical blocks " \ 3068 "> dev_max (%u logical blocks)\n", 3069 sdkp->opt_xfer_blocks, dev_max); 3070 return false; 3071 } 3072 3073 if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) { 3074 sd_first_printk(KERN_WARNING, sdkp, 3075 "Optimal transfer size %u logical blocks " \ 3076 "> sd driver limit (%u logical blocks)\n", 3077 sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS); 3078 return false; 3079 } 3080 3081 if (opt_xfer_bytes < PAGE_SIZE) { 3082 sd_first_printk(KERN_WARNING, sdkp, 3083 "Optimal transfer size %u bytes < " \ 3084 "PAGE_SIZE (%u bytes)\n", 3085 opt_xfer_bytes, (unsigned int)PAGE_SIZE); 3086 return false; 3087 } 3088 3089 if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) { 3090 sd_first_printk(KERN_WARNING, sdkp, 3091 "Optimal transfer size %u bytes not a " \ 3092 "multiple of physical block size (%u bytes)\n", 3093 opt_xfer_bytes, sdkp->physical_block_size); 3094 return false; 3095 } 3096 3097 sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n", 3098 opt_xfer_bytes); 3099 return true; 3100 } 3101 3102 /** 3103 * sd_revalidate_disk - called the first time a new disk is seen, 3104 * performs disk spin up, read_capacity, etc. 3105 * @disk: struct gendisk we care about 3106 **/ 3107 static int sd_revalidate_disk(struct gendisk *disk) 3108 { 3109 struct scsi_disk *sdkp = scsi_disk(disk); 3110 struct scsi_device *sdp = sdkp->device; 3111 struct request_queue *q = sdkp->disk->queue; 3112 sector_t old_capacity = sdkp->capacity; 3113 unsigned char *buffer; 3114 unsigned int dev_max, rw_max; 3115 3116 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, 3117 "sd_revalidate_disk\n")); 3118 3119 /* 3120 * If the device is offline, don't try and read capacity or any 3121 * of the other niceties. 3122 */ 3123 if (!scsi_device_online(sdp)) 3124 goto out; 3125 3126 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL); 3127 if (!buffer) { 3128 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory " 3129 "allocation failure.\n"); 3130 goto out; 3131 } 3132 3133 sd_spinup_disk(sdkp); 3134 3135 /* 3136 * Without media there is no reason to ask; moreover, some devices 3137 * react badly if we do. 3138 */ 3139 if (sdkp->media_present) { 3140 sd_read_capacity(sdkp, buffer); 3141 3142 /* 3143 * set the default to rotational. All non-rotational devices 3144 * support the block characteristics VPD page, which will 3145 * cause this to be updated correctly and any device which 3146 * doesn't support it should be treated as rotational. 
3147 */
3148 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
3149 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
3150
3151 if (scsi_device_supports_vpd(sdp)) {
3152 sd_read_block_provisioning(sdkp);
3153 sd_read_block_limits(sdkp);
3154 sd_read_block_characteristics(sdkp);
3155 sd_zbc_read_zones(sdkp, buffer);
3156 }
3157
3158 sd_print_capacity(sdkp, old_capacity);
3159
3160 sd_read_write_protect_flag(sdkp, buffer);
3161 sd_read_cache_type(sdkp, buffer);
3162 sd_read_app_tag_own(sdkp, buffer);
3163 sd_read_write_same(sdkp, buffer);
3164 sd_read_security(sdkp, buffer);
3165 }
3166
3167 /*
3168 * We now have all cache related info, determine how we deal
3169 * with flush requests.
3170 */
3171 sd_set_flush_flag(sdkp);
3172
3173 /* Initial block count limit based on CDB TRANSFER LENGTH field size. */
3174 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
3175
3176 /* Some devices report a maximum block count for READ/WRITE requests. */
3177 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
3178 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
3179
3180 if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
3181 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3182 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
3183 } else
3184 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
3185 (sector_t)BLK_DEF_MAX_SECTORS);
3186
3187 /* Do not exceed controller limit */
3188 rw_max = min(rw_max, queue_max_hw_sectors(q));
3189
3190 /*
3191 * Only update max_sectors if previously unset or if the current value
3192 * exceeds the capabilities of the hardware.
3193 */
3194 if (sdkp->first_scan ||
3195 q->limits.max_sectors > q->limits.max_dev_sectors ||
3196 q->limits.max_sectors > q->limits.max_hw_sectors)
3197 q->limits.max_sectors = rw_max;
3198
3199 sdkp->first_scan = 0;
3200
3201 set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
3202 sd_config_write_same(sdkp);
3203 kfree(buffer);
3204
3205 out:
3206 return 0;
3207 }
3208
3209 /**
3210 * sd_unlock_native_capacity - unlock native capacity
3211 * @disk: struct gendisk to set capacity for
3212 *
3213 * Block layer calls this function if it detects that partitions
3214 * on @disk reach beyond the end of the device. If the SCSI host
3215 * implements the ->unlock_native_capacity() method, it's invoked to
3216 * give it a chance to adjust the device capacity.
3217 *
3218 * CONTEXT:
3219 * Defined by block layer. Might sleep.
3220 */
3221 static void sd_unlock_native_capacity(struct gendisk *disk)
3222 {
3223 struct scsi_device *sdev = scsi_disk(disk)->device;
3224
3225 if (sdev->host->hostt->unlock_native_capacity)
3226 sdev->host->hostt->unlock_native_capacity(sdev);
3227 }
3228
3229 /**
3230 * sd_format_disk_name - format disk name
3231 * @prefix: name prefix - i.e. "sd" for SCSI disks
3232 * @index: index of the disk to format name for
3233 * @buf: output buffer
3234 * @buflen: length of the output buffer
3235 *
3236 * SCSI disk names start at sda. The 26th device is sdz and the
3237 * 27th is sdaa. The last two-lettered name is sdzz,
3238 * which is followed by sdaaa.
3239 *
3240 * This is basically base-26 counting with one extra 'nil' entry
3241 * at the beginning from the second digit on, and the name can be
3242 * determined using a method similar to base-26 conversion with the
3243 * index shifted by -1 after each digit is computed.
3244 *
3245 * CONTEXT:
3246 * Don't care.
3247 *
3248 * RETURNS:
3249 * 0 on success, -errno on failure.
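 *
 * EXAMPLE (illustrative):
 * index 0 yields "sda", index 25 "sdz", index 26 "sdaa",
 * index 701 "sdzz" and index 702 "sdaaa".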
3250 */ 3251 static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen) 3252 { 3253 const int base = 'z' - 'a' + 1; 3254 char *begin = buf + strlen(prefix); 3255 char *end = buf + buflen; 3256 char *p; 3257 int unit; 3258 3259 p = end - 1; 3260 *p = '\0'; 3261 unit = base; 3262 do { 3263 if (p == begin) 3264 return -EINVAL; 3265 *--p = 'a' + (index % unit); 3266 index = (index / unit) - 1; 3267 } while (index >= 0); 3268 3269 memmove(begin, p, end - p); 3270 memcpy(buf, prefix, strlen(prefix)); 3271 3272 return 0; 3273 } 3274 3275 /** 3276 * sd_probe - called during driver initialization and whenever a 3277 * new scsi device is attached to the system. It is called once 3278 * for each scsi device (not just disks) present. 3279 * @dev: pointer to device object 3280 * 3281 * Returns 0 if successful (or not interested in this scsi device 3282 * (e.g. scanner)); 1 when there is an error. 3283 * 3284 * Note: this function is invoked from the scsi mid-level. 3285 * This function sets up the mapping between a given 3286 * <host,channel,id,lun> (found in sdp) and new device name 3287 * (e.g. /dev/sda). More precisely it is the block device major 3288 * and minor number that is chosen here. 3289 * 3290 * Assume sd_probe is not re-entrant (for time being) 3291 * Also think about sd_probe() and sd_remove() running coincidentally. 3292 **/ 3293 static int sd_probe(struct device *dev) 3294 { 3295 struct scsi_device *sdp = to_scsi_device(dev); 3296 struct scsi_disk *sdkp; 3297 struct gendisk *gd; 3298 int index; 3299 int error; 3300 3301 scsi_autopm_get_device(sdp); 3302 error = -ENODEV; 3303 if (sdp->type != TYPE_DISK && 3304 sdp->type != TYPE_ZBC && 3305 sdp->type != TYPE_MOD && 3306 sdp->type != TYPE_RBC) 3307 goto out; 3308 3309 #ifndef CONFIG_BLK_DEV_ZONED 3310 if (sdp->type == TYPE_ZBC) 3311 goto out; 3312 #endif 3313 SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp, 3314 "sd_probe\n")); 3315 3316 error = -ENOMEM; 3317 sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL); 3318 if (!sdkp) 3319 goto out; 3320 3321 gd = alloc_disk(SD_MINORS); 3322 if (!gd) 3323 goto out_free; 3324 3325 index = ida_alloc(&sd_index_ida, GFP_KERNEL); 3326 if (index < 0) { 3327 sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n"); 3328 goto out_put; 3329 } 3330 3331 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); 3332 if (error) { 3333 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); 3334 goto out_free_index; 3335 } 3336 3337 sdkp->device = sdp; 3338 sdkp->driver = &sd_template; 3339 sdkp->disk = gd; 3340 sdkp->index = index; 3341 atomic_set(&sdkp->openers, 0); 3342 atomic_set(&sdkp->device->ioerr_cnt, 0); 3343 3344 if (!sdp->request_queue->rq_timeout) { 3345 if (sdp->type != TYPE_MOD) 3346 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); 3347 else 3348 blk_queue_rq_timeout(sdp->request_queue, 3349 SD_MOD_TIMEOUT); 3350 } 3351 3352 device_initialize(&sdkp->dev); 3353 sdkp->dev.parent = dev; 3354 sdkp->dev.class = &sd_disk_class; 3355 dev_set_name(&sdkp->dev, "%s", dev_name(dev)); 3356 3357 error = device_add(&sdkp->dev); 3358 if (error) 3359 goto out_free_index; 3360 3361 get_device(dev); 3362 dev_set_drvdata(dev, sdkp); 3363 3364 gd->major = sd_major((index & 0xf0) >> 4); 3365 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 3366 3367 gd->fops = &sd_fops; 3368 gd->private_data = &sdkp->driver; 3369 gd->queue = sdkp->device->request_queue; 3370 3371 /* defaults, until the device tells us otherwise */ 3372 sdp->sector_size = 512; 3373 
sdkp->capacity = 0; 3374 sdkp->media_present = 1; 3375 sdkp->write_prot = 0; 3376 sdkp->cache_override = 0; 3377 sdkp->WCE = 0; 3378 sdkp->RCD = 0; 3379 sdkp->ATO = 0; 3380 sdkp->first_scan = 1; 3381 sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS; 3382 3383 sd_revalidate_disk(gd); 3384 3385 gd->flags = GENHD_FL_EXT_DEVT; 3386 if (sdp->removable) { 3387 gd->flags |= GENHD_FL_REMOVABLE; 3388 gd->events |= DISK_EVENT_MEDIA_CHANGE; 3389 gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT; 3390 } 3391 3392 blk_pm_runtime_init(sdp->request_queue, dev); 3393 if (sdp->rpm_autosuspend) { 3394 pm_runtime_set_autosuspend_delay(dev, 3395 sdp->host->hostt->rpm_autosuspend_delay); 3396 } 3397 device_add_disk(dev, gd, NULL); 3398 if (sdkp->capacity) 3399 sd_dif_config_host(sdkp); 3400 3401 sd_revalidate_disk(gd); 3402 3403 if (sdkp->security) { 3404 sdkp->opal_dev = init_opal_dev(sdp, &sd_sec_submit); 3405 if (sdkp->opal_dev) 3406 sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n"); 3407 } 3408 3409 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 3410 sdp->removable ? "removable " : ""); 3411 scsi_autopm_put_device(sdp); 3412 3413 return 0; 3414 3415 out_free_index: 3416 ida_free(&sd_index_ida, index); 3417 out_put: 3418 put_disk(gd); 3419 out_free: 3420 kfree(sdkp); 3421 out: 3422 scsi_autopm_put_device(sdp); 3423 return error; 3424 } 3425 3426 /** 3427 * sd_remove - called whenever a scsi disk (previously recognized by 3428 * sd_probe) is detached from the system. It is called (potentially 3429 * multiple times) during sd module unload. 3430 * @dev: pointer to device object 3431 * 3432 * Note: this function is invoked from the scsi mid-level. 3433 * This function potentially frees up a device name (e.g. /dev/sdc) 3434 * that could be re-used by a subsequent sd_probe(). 3435 * This function is not called when the built-in sd driver is "exit-ed". 3436 **/ 3437 static int sd_remove(struct device *dev) 3438 { 3439 struct scsi_disk *sdkp; 3440 dev_t devt; 3441 3442 sdkp = dev_get_drvdata(dev); 3443 devt = disk_devt(sdkp->disk); 3444 scsi_autopm_get_device(sdkp->device); 3445 3446 async_synchronize_full_domain(&scsi_sd_pm_domain); 3447 device_del(&sdkp->dev); 3448 del_gendisk(sdkp->disk); 3449 sd_shutdown(dev); 3450 3451 free_opal_dev(sdkp->opal_dev); 3452 3453 blk_register_region(devt, SD_MINORS, NULL, 3454 sd_default_probe, NULL, NULL); 3455 3456 mutex_lock(&sd_ref_mutex); 3457 dev_set_drvdata(dev, NULL); 3458 put_device(&sdkp->dev); 3459 mutex_unlock(&sd_ref_mutex); 3460 3461 return 0; 3462 } 3463 3464 /** 3465 * scsi_disk_release - Called to free the scsi_disk structure 3466 * @dev: pointer to embedded class device 3467 * 3468 * sd_ref_mutex must be held entering this routine. Because it is 3469 * called on last put, you should always use the scsi_disk_get() 3470 * scsi_disk_put() helpers which manipulate the semaphore directly 3471 * and never do a direct put_device. 3472 **/ 3473 static void scsi_disk_release(struct device *dev) 3474 { 3475 struct scsi_disk *sdkp = to_scsi_disk(dev); 3476 struct gendisk *disk = sdkp->disk; 3477 struct request_queue *q = disk->queue; 3478 3479 ida_free(&sd_index_ida, sdkp->index); 3480 3481 /* 3482 * Wait until all requests that are in progress have completed. 3483 * This is necessary to avoid that e.g. scsi_end_request() crashes 3484 * due to clearing the disk->private_data pointer. 
Wait from inside 3485 * scsi_disk_release() instead of from sd_release() to avoid that 3486 * freezing and unfreezing the request queue affects user space I/O 3487 * in case multiple processes open a /dev/sd... node concurrently. 3488 */ 3489 blk_mq_freeze_queue(q); 3490 blk_mq_unfreeze_queue(q); 3491 3492 disk->private_data = NULL; 3493 put_disk(disk); 3494 put_device(&sdkp->device->sdev_gendev); 3495 3496 kfree(sdkp); 3497 } 3498 3499 static int sd_start_stop_device(struct scsi_disk *sdkp, int start) 3500 { 3501 unsigned char cmd[6] = { START_STOP }; /* START_VALID */ 3502 struct scsi_sense_hdr sshdr; 3503 struct scsi_device *sdp = sdkp->device; 3504 int res; 3505 3506 if (start) 3507 cmd[4] |= 1; /* START */ 3508 3509 if (sdp->start_stop_pwr_cond) 3510 cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */ 3511 3512 if (!scsi_device_online(sdp)) 3513 return -ENODEV; 3514 3515 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, 3516 SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL); 3517 if (res) { 3518 sd_print_result(sdkp, "Start/Stop Unit failed", res); 3519 if (driver_byte(res) == DRIVER_SENSE) 3520 sd_print_sense_hdr(sdkp, &sshdr); 3521 if (scsi_sense_valid(&sshdr) && 3522 /* 0x3a is medium not present */ 3523 sshdr.asc == 0x3a) 3524 res = 0; 3525 } 3526 3527 /* SCSI error codes must not go to the generic layer */ 3528 if (res) 3529 return -EIO; 3530 3531 return 0; 3532 } 3533 3534 /* 3535 * Send a SYNCHRONIZE CACHE instruction down to the device through 3536 * the normal SCSI command structure. Wait for the command to 3537 * complete. 3538 */ 3539 static void sd_shutdown(struct device *dev) 3540 { 3541 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3542 3543 if (!sdkp) 3544 return; /* this can happen */ 3545 3546 if (pm_runtime_suspended(dev)) 3547 return; 3548 3549 if (sdkp->WCE && sdkp->media_present) { 3550 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3551 sd_sync_cache(sdkp, NULL); 3552 } 3553 3554 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { 3555 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 3556 sd_start_stop_device(sdkp, 0); 3557 } 3558 } 3559 3560 static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) 3561 { 3562 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3563 struct scsi_sense_hdr sshdr; 3564 int ret = 0; 3565 3566 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ 3567 return 0; 3568 3569 if (sdkp->WCE && sdkp->media_present) { 3570 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3571 ret = sd_sync_cache(sdkp, &sshdr); 3572 3573 if (ret) { 3574 /* ignore OFFLINE device */ 3575 if (ret == -ENODEV) 3576 return 0; 3577 3578 if (!scsi_sense_valid(&sshdr) || 3579 sshdr.sense_key != ILLEGAL_REQUEST) 3580 return ret; 3581 3582 /* 3583 * sshdr.sense_key == ILLEGAL_REQUEST means this drive 3584 * doesn't support sync. There's not much to do and 3585 * suspend shouldn't fail. 
3586 */ 3587 ret = 0; 3588 } 3589 } 3590 3591 if (sdkp->device->manage_start_stop) { 3592 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 3593 /* an error is not worth aborting a system sleep */ 3594 ret = sd_start_stop_device(sdkp, 0); 3595 if (ignore_stop_errors) 3596 ret = 0; 3597 } 3598 3599 return ret; 3600 } 3601 3602 static int sd_suspend_system(struct device *dev) 3603 { 3604 return sd_suspend_common(dev, true); 3605 } 3606 3607 static int sd_suspend_runtime(struct device *dev) 3608 { 3609 return sd_suspend_common(dev, false); 3610 } 3611 3612 static int sd_resume(struct device *dev) 3613 { 3614 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3615 int ret; 3616 3617 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ 3618 return 0; 3619 3620 if (!sdkp->device->manage_start_stop) 3621 return 0; 3622 3623 sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); 3624 ret = sd_start_stop_device(sdkp, 1); 3625 if (!ret) 3626 opal_unlock_from_suspend(sdkp->opal_dev); 3627 return ret; 3628 } 3629 3630 /** 3631 * init_sd - entry point for this driver (both when built in or when 3632 * a module). 3633 * 3634 * Note: this function registers this driver with the scsi mid-level. 3635 **/ 3636 static int __init init_sd(void) 3637 { 3638 int majors = 0, i, err; 3639 3640 SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n")); 3641 3642 for (i = 0; i < SD_MAJORS; i++) { 3643 if (register_blkdev(sd_major(i), "sd") != 0) 3644 continue; 3645 majors++; 3646 blk_register_region(sd_major(i), SD_MINORS, NULL, 3647 sd_default_probe, NULL, NULL); 3648 } 3649 3650 if (!majors) 3651 return -ENODEV; 3652 3653 err = class_register(&sd_disk_class); 3654 if (err) 3655 goto err_out; 3656 3657 sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE, 3658 0, 0, NULL); 3659 if (!sd_cdb_cache) { 3660 printk(KERN_ERR "sd: can't init extended cdb cache\n"); 3661 err = -ENOMEM; 3662 goto err_out_class; 3663 } 3664 3665 sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache); 3666 if (!sd_cdb_pool) { 3667 printk(KERN_ERR "sd: can't init extended cdb pool\n"); 3668 err = -ENOMEM; 3669 goto err_out_cache; 3670 } 3671 3672 sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0); 3673 if (!sd_page_pool) { 3674 printk(KERN_ERR "sd: can't init discard page pool\n"); 3675 err = -ENOMEM; 3676 goto err_out_ppool; 3677 } 3678 3679 err = scsi_register_driver(&sd_template.gendrv); 3680 if (err) 3681 goto err_out_driver; 3682 3683 return 0; 3684 3685 err_out_driver: 3686 mempool_destroy(sd_page_pool); 3687 3688 err_out_ppool: 3689 mempool_destroy(sd_cdb_pool); 3690 3691 err_out_cache: 3692 kmem_cache_destroy(sd_cdb_cache); 3693 3694 err_out_class: 3695 class_unregister(&sd_disk_class); 3696 err_out: 3697 for (i = 0; i < SD_MAJORS; i++) 3698 unregister_blkdev(sd_major(i), "sd"); 3699 return err; 3700 } 3701 3702 /** 3703 * exit_sd - exit point for this driver (when it is a module). 3704 * 3705 * Note: this function unregisters this driver from the scsi mid-level. 
3706 **/ 3707 static void __exit exit_sd(void) 3708 { 3709 int i; 3710 3711 SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n")); 3712 3713 scsi_unregister_driver(&sd_template.gendrv); 3714 mempool_destroy(sd_cdb_pool); 3715 mempool_destroy(sd_page_pool); 3716 kmem_cache_destroy(sd_cdb_cache); 3717 3718 class_unregister(&sd_disk_class); 3719 3720 for (i = 0; i < SD_MAJORS; i++) { 3721 blk_unregister_region(sd_major(i), SD_MINORS); 3722 unregister_blkdev(sd_major(i), "sd"); 3723 } 3724 } 3725 3726 module_init(init_sd); 3727 module_exit(exit_sd); 3728 3729 static void sd_print_sense_hdr(struct scsi_disk *sdkp, 3730 struct scsi_sense_hdr *sshdr) 3731 { 3732 scsi_print_sense_hdr(sdkp->device, 3733 sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr); 3734 } 3735 3736 static void sd_print_result(const struct scsi_disk *sdkp, const char *msg, 3737 int result) 3738 { 3739 const char *hb_string = scsi_hostbyte_string(result); 3740 const char *db_string = scsi_driverbyte_string(result); 3741 3742 if (hb_string || db_string) 3743 sd_printk(KERN_INFO, sdkp, 3744 "%s: Result: hostbyte=%s driverbyte=%s\n", msg, 3745 hb_string ? hb_string : "invalid", 3746 db_string ? db_string : "invalid"); 3747 else 3748 sd_printk(KERN_INFO, sdkp, 3749 "%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n", 3750 msg, host_byte(result), driver_byte(result)); 3751 } 3752 3753