// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  libata-scsi.c - helper library for ATA
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available from
 *  - http://www.t10.org/
 *  - http://www.t13.org/
 */

#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <asm/unaligned.h>
#include <linux/ioprio.h>
#include <linux/of.h>

#include "libata.h"
#include "libata-transport.h"

#define ATA_SCSI_RBUF_SIZE	576

static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];

typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);

static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
					const struct scsi_device *scsidev);

#define RW_RECOVERY_MPAGE		0x1
#define RW_RECOVERY_MPAGE_LEN		12
#define CACHE_MPAGE			0x8
#define CACHE_MPAGE_LEN			20
#define CONTROL_MPAGE			0xa
#define CONTROL_MPAGE_LEN		12
#define ALL_MPAGES			0x3f
#define ALL_SUB_MPAGES			0xff


static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
	RW_RECOVERY_MPAGE,
	RW_RECOVERY_MPAGE_LEN - 2,
	(1 << 7),	/* AWRE */
	0,		/* read retry count */
	0, 0, 0, 0,
	0,		/* write retry count */
	0, 0, 0
};

static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
	CACHE_MPAGE,
	CACHE_MPAGE_LEN - 2,
	0,		/* contains WCE, needs to be 0 for logic */
	0, 0, 0, 0, 0, 0, 0, 0, 0,
	0,		/* contains DRA, needs to be 0 for logic */
	0, 0, 0, 0, 0, 0, 0
};

static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
	CONTROL_MPAGE,
	CONTROL_MPAGE_LEN - 2,
	2,	/* DSENSE=0, GLTSD=1 */
	0,	/* [QAM+QERR may be 1, see 05-359r1] */
	0, 0, 0, 0, 0xff, 0xff,
	0, 30	/* extended self test time, see 05-359r1 */
};

static ssize_t ata_scsi_park_show(struct device *device,
				  struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct ata_port *ap;
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long now;
	unsigned int msecs;
	int rc = 0;

	ap = ata_shost_to_port(sdev->host);

	spin_lock_irq(ap->lock);
	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev) {
		rc = -ENODEV;
		goto unlock;
	}
	if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
		rc = -EOPNOTSUPP;
		goto unlock;
	}

	link = dev->link;
	now = jiffies;
	if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
	    link->eh_context.unloaded_mask & (1 << dev->devno) &&
	    time_after(dev->unpark_deadline, now))
		msecs = jiffies_to_msecs(dev->unpark_deadline - now);
	else
		msecs = 0;

unlock:
	spin_unlock_irq(ap->lock);

	return rc ? rc : sysfs_emit(buf, "%u\n", msecs);
}

static ssize_t ata_scsi_park_store(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct ata_port *ap;
	struct ata_device *dev;
	long int input;
	unsigned long flags;
	int rc;

	rc = kstrtol(buf, 10, &input);
	if (rc)
		return rc;
	if (input < -2)
		return -EINVAL;
	if (input > ATA_TMOUT_MAX_PARK) {
		rc = -EOVERFLOW;
		input = ATA_TMOUT_MAX_PARK;
	}

	ap = ata_shost_to_port(sdev->host);

	spin_lock_irqsave(ap->lock, flags);
	dev = ata_scsi_find_dev(ap, sdev);
	if (unlikely(!dev)) {
		rc = -ENODEV;
		goto unlock;
	}
	if (dev->class != ATA_DEV_ATA &&
	    dev->class != ATA_DEV_ZAC) {
		rc = -EOPNOTSUPP;
		goto unlock;
	}

	if (input >= 0) {
		if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
			rc = -EOPNOTSUPP;
			goto unlock;
		}

		dev->unpark_deadline = ata_deadline(jiffies, input);
		dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
		ata_port_schedule_eh(ap);
		complete(&ap->park_req_pending);
	} else {
		switch (input) {
		case -1:
			dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
			break;
		case -2:
			dev->flags |= ATA_DFLAG_NO_UNLOAD;
			break;
		}
	}
unlock:
	spin_unlock_irqrestore(ap->lock, flags);

	return rc ? rc : len;
}
DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
	    ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
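
/*
 * The unload_heads attribute defined above takes a timeout in milliseconds
 * (0..ATA_TMOUT_MAX_PARK) to request head unloading via EH, plus two
 * special values: -1 clears ATA_DFLAG_NO_UNLOAD and -2 sets it, forbidding
 * head unloads on the device altogether.
 */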

bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq)
{
	/*
	 * If sk == NO_SENSE, and asc + ascq == NO ADDITIONAL SENSE INFORMATION,
	 * then there is no sense data to add.
	 */
	if (sk == 0 && asc == 0 && ascq == 0)
		return false;

	/* If sk > COMPLETED, sense data is bogus. */
	if (sk > COMPLETED)
		return false;

	return true;
}

void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
			u8 sk, u8 asc, u8 ascq)
{
	bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);

	if (!cmd)
		return;

	scsi_build_sense(cmd, d_sense, sk, asc, ascq);
}

void ata_scsi_set_sense_information(struct ata_device *dev,
				    struct scsi_cmnd *cmd,
				    const struct ata_taskfile *tf)
{
	u64 information;

	if (!cmd)
		return;

	information = ata_tf_read_block(tf, dev);
	if (information == U64_MAX)
		return;

	scsi_set_sense_information(cmd->sense_buffer,
				   SCSI_SENSE_BUFFERSIZE, information);
}

static void ata_scsi_set_invalid_field(struct ata_device *dev,
				       struct scsi_cmnd *cmd, u16 field, u8 bit)
{
	ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in CDB" */
	scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
				     field, bit, 1);
}

static void ata_scsi_set_invalid_parameter(struct ata_device *dev,
					   struct scsi_cmnd *cmd, u16 field)
{
	/* "Invalid field in parameter list" */
	ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x26, 0x0);
	scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
				     field, 0xff, 0);
}

static struct attribute *ata_common_sdev_attrs[] = {
	&dev_attr_unload_heads.attr,
	NULL
};

static const struct attribute_group ata_common_sdev_attr_group = {
	.attrs = ata_common_sdev_attrs
};

const struct attribute_group *ata_common_sdev_groups[] = {
	&ata_common_sdev_attr_group,
	NULL
};
EXPORT_SYMBOL_GPL(ata_common_sdev_groups);

/**
 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
 * @sdev: SCSI device for which BIOS geometry is to be determined
 * @bdev: block device associated with @sdev
 * @capacity: capacity of SCSI device
 * @geom: location to which geometry will be output
 *
 * Generic bios head/sector/cylinder calculator
 * used by sd. Most BIOSes nowadays expect a XXX/255/16 (CHS)
 * mapping. Some situations may arise where the disk is not
 * bootable if this is not used.
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero.
 */
int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
		       sector_t capacity, int geom[])
{
	geom[0] = 255;
	geom[1] = 63;
	sector_div(capacity, 255*63);
	geom[2] = capacity;

	return 0;
}
EXPORT_SYMBOL_GPL(ata_std_bios_param);
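
/*
 * Illustrative example: a 160 GB disk with 312581808 512-byte sectors
 * yields geom[] = { 255, 63, 312581808 / (255 * 63) } = { 255, 63, 19457 }.
 */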

/**
 * ata_scsi_unlock_native_capacity - unlock native capacity
 * @sdev: SCSI device to adjust device capacity for
 *
 * This function is called if a partition on @sdev extends beyond
 * the end of the device.  It requests EH to unlock HPA.
 *
 * LOCKING:
 * Defined by the SCSI layer.  Might sleep.
 */
void ata_scsi_unlock_native_capacity(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	dev = ata_scsi_find_dev(ap, sdev);
	if (dev && dev->n_sectors < dev->n_native_sectors) {
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		dev->link->eh_info.action |= ATA_EH_RESET;
		ata_port_schedule_eh(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);
	ata_port_wait_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);

/**
 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
 * @ap: target port
 * @sdev: SCSI device to get identify data for
 * @arg: User buffer area for identify data
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero on success, negative errno on error.
 */
static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
			    void __user *arg)
{
	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
	u16 __user *dst = arg;
	char buf[40];

	if (!dev)
		return -ENOMSG;

	if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
	if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
	if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
	if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
		return -EFAULT;

	return 0;
}

/**
 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
 * @scsidev: Device to which we are issuing command
 * @arg: User provided data for issuing command
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero on success, negative errno on error.
 */
int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
{
	int rc = 0;
	u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
	u8 scsi_cmd[MAX_COMMAND_SIZE];
	u8 args[4], *argbuf = NULL;
	int argsize = 0;
	enum dma_data_direction data_dir;
	struct scsi_sense_hdr sshdr;
	int cmd_result;

	if (arg == NULL)
		return -EINVAL;

	if (copy_from_user(args, arg, sizeof(args)))
		return -EFAULT;

	memset(sensebuf, 0, sizeof(sensebuf));
	memset(scsi_cmd, 0, sizeof(scsi_cmd));

	if (args[3]) {
		argsize = ATA_SECT_SIZE * args[3];
		argbuf = kmalloc(argsize, GFP_KERNEL);
		if (argbuf == NULL) {
			rc = -ENOMEM;
			goto error;
		}

		scsi_cmd[1]  = (4 << 1);	/* PIO Data-in */
		scsi_cmd[2]  = 0x0e;		/* no off.line or cc, read from dev,
						   block count in sector count field */
		data_dir = DMA_FROM_DEVICE;
	} else {
		scsi_cmd[1]  = (3 << 1);	/* Non-data */
		scsi_cmd[2]  = 0x20;		/* cc but no off.line or data xfer */
		data_dir = DMA_NONE;
	}

	scsi_cmd[0] = ATA_16;

	scsi_cmd[4] = args[2];
	if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */
		scsi_cmd[6]  = args[3];
		scsi_cmd[8]  = args[1];
		scsi_cmd[10] = ATA_SMART_LBAM_PASS;
		scsi_cmd[12] = ATA_SMART_LBAH_PASS;
	} else {
		scsi_cmd[6]  = args[1];
	}
	scsi_cmd[14] = args[0];
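
	/*
	 * Resulting ATA PASS-THROUGH (16) CDB layout, per SAT:
	 * byte 4 = features, byte 6 = sector count, byte 8 = LBA low,
	 * byte 10 = LBA mid, byte 12 = LBA high, byte 14 = ATA command
	 * (byte 13, the device register, is left at zero here).
	 */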

	/* Good values for timeout and retries?  Values below
	   from scsi_ioctl_send_command() for default case... */
	cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
				  sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);

	if (cmd_result < 0) {
		rc = cmd_result;
		goto error;
	}
	if (scsi_sense_valid(&sshdr)) {/* sense data available */
		u8 *desc = sensebuf + 8;

		/* If we set cc then ATA pass-through will cause a
		 * check condition even if no error. Filter that. */
		if (scsi_status_is_check_condition(cmd_result)) {
			if (sshdr.sense_key == RECOVERED_ERROR &&
			    sshdr.asc == 0 && sshdr.ascq == 0x1d)
				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
		}

		/* Send userspace a few ATA registers (same as drivers/ide) */
		if (sensebuf[0] == 0x72 &&	/* format is "descriptor" */
		    desc[0] == 0x09) {		/* code is "ATA Descriptor" */
			args[0] = desc[13];	/* status */
			args[1] = desc[3];	/* error */
			args[2] = desc[5];	/* sector count (0:7) */
			if (copy_to_user(arg, args, sizeof(args)))
				rc = -EFAULT;
		}
	}


	if (cmd_result) {
		rc = -EIO;
		goto error;
	}

	if ((argbuf)
	 && copy_to_user(arg + sizeof(args), argbuf, argsize))
		rc = -EFAULT;
error:
	kfree(argbuf);
	return rc;
}

/**
 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
 * @scsidev: Device to which we are issuing command
 * @arg: User provided data for issuing command
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero on success, negative errno on error.
 */
int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
{
	int rc = 0;
	u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
	u8 scsi_cmd[MAX_COMMAND_SIZE];
	u8 args[7];
	struct scsi_sense_hdr sshdr;
	int cmd_result;

	if (arg == NULL)
		return -EINVAL;

	if (copy_from_user(args, arg, sizeof(args)))
		return -EFAULT;

	memset(sensebuf, 0, sizeof(sensebuf));
	memset(scsi_cmd, 0, sizeof(scsi_cmd));
	scsi_cmd[0]  = ATA_16;
	scsi_cmd[1]  = (3 << 1);	/* Non-data */
	scsi_cmd[2]  = 0x20;		/* cc but no off.line or data xfer */
	scsi_cmd[4]  = args[1];
	scsi_cmd[6]  = args[2];
	scsi_cmd[8]  = args[3];
	scsi_cmd[10] = args[4];
	scsi_cmd[12] = args[5];
	scsi_cmd[13] = args[6] & 0x4f;
	scsi_cmd[14] = args[0];
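
	/*
	 * args[] follows the HDIO_DRIVE_TASK register convention: command,
	 * feature, sector count, LBA low/mid/high, device select.  Masking
	 * the device select with 0x4f keeps only the LBA flag and the
	 * device/head bits.
	 */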

	/* Good values for timeout and retries?  Values below
	   from scsi_ioctl_send_command() for default case... */
	cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
				  sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);

	if (cmd_result < 0) {
		rc = cmd_result;
		goto error;
	}
	if (scsi_sense_valid(&sshdr)) {/* sense data available */
		u8 *desc = sensebuf + 8;

		/* If we set cc then ATA pass-through will cause a
		 * check condition even if no error. Filter that.
		 */
		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
			if (sshdr.sense_key == RECOVERED_ERROR &&
			    sshdr.asc == 0 && sshdr.ascq == 0x1d)
				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
		}

		/* Send userspace ATA registers */
		if (sensebuf[0] == 0x72 &&	/* format is "descriptor" */
		    desc[0] == 0x09) {		/* code is "ATA Descriptor" */
			args[0] = desc[13];	/* status */
			args[1] = desc[3];	/* error */
			args[2] = desc[5];	/* sector count (0:7) */
			args[3] = desc[7];	/* lbal */
			args[4] = desc[9];	/* lbam */
			args[5] = desc[11];	/* lbah */
			args[6] = desc[12];	/* select */
			if (copy_to_user(arg, args, sizeof(args)))
				rc = -EFAULT;
		}
	}

	if (cmd_result) {
		rc = -EIO;
		goto error;
	}

error:
	return rc;
}

static bool ata_ioc32(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_PIO_DMA)
		return true;
	if (ap->pflags & ATA_PFLAG_PIO32)
		return true;
	return false;
}

/*
 * This handles both native and compat commands, so anything added
 * here must have a compatible argument, or check in_compat_syscall()
 */
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
		       unsigned int cmd, void __user *arg)
{
	unsigned long val;
	int rc = -EINVAL;
	unsigned long flags;

	switch (cmd) {
	case HDIO_GET_32BIT:
		spin_lock_irqsave(ap->lock, flags);
		val = ata_ioc32(ap);
		spin_unlock_irqrestore(ap->lock, flags);
#ifdef CONFIG_COMPAT
		if (in_compat_syscall())
			return put_user(val, (compat_ulong_t __user *)arg);
#endif
		return put_user(val, (unsigned long __user *)arg);

	case HDIO_SET_32BIT:
		val = (unsigned long) arg;
		rc = 0;
		spin_lock_irqsave(ap->lock, flags);
		if (ap->pflags & ATA_PFLAG_PIO32CHANGE) {
			if (val)
				ap->pflags |= ATA_PFLAG_PIO32;
			else
				ap->pflags &= ~ATA_PFLAG_PIO32;
		} else {
			if (val != ata_ioc32(ap))
				rc = -EINVAL;
		}
		spin_unlock_irqrestore(ap->lock, flags);
		return rc;

	case HDIO_GET_IDENTITY:
		return ata_get_identity(ap, scsidev, arg);

	case HDIO_DRIVE_CMD:
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return ata_cmd_ioctl(scsidev, arg);

	case HDIO_DRIVE_TASK:
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return ata_task_ioctl(scsidev, arg);

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl);

int ata_scsi_ioctl(struct scsi_device *scsidev, unsigned int cmd,
		   void __user *arg)
{
	return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host),
				  scsidev, cmd, arg);
}
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);

/**
 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
 * @dev: ATA device to which the new command is attached
 * @cmd: SCSI command that originated this ATA command
 *
 * Obtain a reference to an unused ata_queued_cmd structure,
 * which is the basic libata structure representing a single
 * ATA command sent to the hardware.
 *
 * If a command was available, fill in the SCSI-specific
 * portions of the structure with information on the
 * current command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Command allocated, or %NULL if none available.
 */
static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
					      struct scsi_cmnd *cmd)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;
	int tag;

	if (unlikely(ata_port_is_frozen(ap)))
		goto fail;

	if (ap->flags & ATA_FLAG_SAS_HOST) {
		/*
		 * SAS hosts may queue > ATA_MAX_QUEUE commands so use
		 * unique per-device budget token as a tag.
		 */
		if (WARN_ON_ONCE(cmd->budget_token >= ATA_MAX_QUEUE))
			goto fail;
		tag = cmd->budget_token;
	} else {
		tag = scsi_cmd_to_rq(cmd)->tag;
	}

	qc = __ata_qc_from_tag(ap, tag);
	qc->tag = qc->hw_tag = tag;
	qc->ap = ap;
	qc->dev = dev;

	ata_qc_reinit(qc);

	qc->scsicmd = cmd;
	qc->scsidone = scsi_done;

	qc->sg = scsi_sglist(cmd);
	qc->n_elem = scsi_sg_count(cmd);

	if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET)
		qc->flags |= ATA_QCFLAG_QUIET;

	return qc;

fail:
	set_host_byte(cmd, DID_OK);
	set_status_byte(cmd, SAM_STAT_TASK_SET_FULL);
	scsi_done(cmd);
	return NULL;
}

static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	qc->extrabytes = scmd->extra_len;
	qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes;
}

/**
 * ata_dump_status - user friendly display of error info
 * @ap: the port in question
 * @tf: ptr to filled out taskfile
 *
 * Decode and dump the ATA error/status registers for the user so
 * that they have some idea what really happened at the non
 * make-believe layer.
 *
 * LOCKING:
 * inherited from caller
 */
static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf)
{
	u8 stat = tf->status, err = tf->error;

	if (stat & ATA_BUSY) {
		ata_port_warn(ap, "status=0x%02x {Busy} ", stat);
	} else {
		ata_port_warn(ap, "status=0x%02x { %s%s%s%s%s%s%s} ", stat,
			      stat & ATA_DRDY ? "DriveReady " : "",
			      stat & ATA_DF ? "DeviceFault " : "",
			      stat & ATA_DSC ? "SeekComplete " : "",
			      stat & ATA_DRQ ? "DataRequest " : "",
			      stat & ATA_CORR ? "CorrectedError " : "",
			      stat & ATA_SENSE ? "Sense " : "",
			      stat & ATA_ERR ? "Error " : "");
		if (err)
			ata_port_warn(ap, "error=0x%02x {%s%s%s%s%s%s", err,
				      err & ATA_ABORTED ?
				      "DriveStatusError " : "",
				      err & ATA_ICRC ?
				      (err & ATA_ABORTED ?
				       "BadCRC " : "Sector ") : "",
				      err & ATA_UNC ? "UncorrectableError " : "",
				      err & ATA_IDNF ? "SectorIdNotFound " : "",
				      err & ATA_TRK0NF ? "TrackZeroNotFound " : "",
				      err & ATA_AMNF ? "AddrMarkNotFound " : "");
	}
}

/**
 * ata_to_sense_error - convert ATA error to SCSI error
 * @id: ATA device number
 * @drv_stat: value contained in ATA status register
 * @drv_err: value contained in ATA error register
 * @sk: the sense key we'll fill out
 * @asc: the additional sense code we'll fill out
 * @ascq: the additional sense code qualifier we'll fill out
 * @verbose: be verbose
 *
 * Converts an ATA error into a SCSI error.  Fill out pointers to
 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
 * format sense blocks.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
			       u8 *asc, u8 *ascq, int verbose)
{
	int i;

	/* Based on the 3ware driver translation table */
	static const unsigned char sense_table[][4] = {
		/* BBD|ECC|ID|MAR */
		{0xd1,		ABORTED_COMMAND, 0x00, 0x00},
			// Device busy                  Aborted command
		/* BBD|ECC|ID */
		{0xd0,		ABORTED_COMMAND, 0x00, 0x00},
			// Device busy                  Aborted command
		/* ECC|MC|MARK */
		{0x61,		HARDWARE_ERROR, 0x00, 0x00},
			// Device fault                 Hardware error
		/* ICRC|ABRT */		/* NB: ICRC & !ABRT is BBD */
		{0x84,		ABORTED_COMMAND, 0x47, 0x00},
			// Data CRC error               SCSI parity error
		/* MC|ID|ABRT|TRK0|MARK */
		{0x37,		NOT_READY, 0x04, 0x00},
			// Unit offline                 Not ready
		/* MCR|MARK */
		{0x09,		NOT_READY, 0x04, 0x00},
			// Unrecovered disk error       Not ready
		/* Bad address mark */
		{0x01,		MEDIUM_ERROR, 0x13, 0x00},
			// Address mark not found for data field
		/* TRK0 - Track 0 not found */
		{0x02,		HARDWARE_ERROR, 0x00, 0x00},
			// Hardware error
		/* Abort: 0x04 is not translated here, see below */
		/* Media change request */
		{0x08,		NOT_READY, 0x04, 0x00},
			// FIXME: faking offline
		/* SRV/IDNF - ID not found */
		{0x10,		ILLEGAL_REQUEST, 0x21, 0x00},
			// Logical address out of range
		/* MC - Media Changed */
		{0x20,		UNIT_ATTENTION, 0x28, 0x00},
			// Not ready to ready change, medium may have changed
		/* ECC - Uncorrectable ECC error */
		{0x40,		MEDIUM_ERROR, 0x11, 0x04},
			// Unrecovered read error
		/* BBD - block marked bad */
		{0x80,		MEDIUM_ERROR, 0x11, 0x04},
			// Block marked bad	Medium error, unrecovered read error
		{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
	};
	static const unsigned char stat_table[][4] = {
		/* Must be first because BUSY means no other bits valid */
		{0x80,		ABORTED_COMMAND, 0x47, 0x00},
		// Busy, fake parity for now
		{0x40,		ILLEGAL_REQUEST, 0x21, 0x04},
		// Device ready, unaligned write command
		{0x20,		HARDWARE_ERROR,  0x44, 0x00},
		// Device fault, internal target failure
		{0x08,		ABORTED_COMMAND, 0x47, 0x00},
		// Timed out in xfer, fake parity for now
		{0x04,		RECOVERED_ERROR, 0x11, 0x00},
		// Recovered ECC error	  Medium error, recovered
		{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
	};

	/*
	 * Is this an error we can process/parse
	 */
	if (drv_stat & ATA_BUSY) {
		drv_err = 0;	/* Ignore the err bits, they're invalid */
	}

	if (drv_err) {
		/* Look for drv_err */
		for (i = 0; sense_table[i][0] != 0xFF; i++) {
			/* Look for best matches first */
			if ((sense_table[i][0] & drv_err) ==
			    sense_table[i][0]) {
				*sk = sense_table[i][1];
				*asc = sense_table[i][2];
				*ascq = sense_table[i][3];
				goto translate_done;
			}
		}
	}

	/*
	 * Fall back to interpreting status bits.  Note that if the drv_err
	 * has only the ABRT bit set, we decode drv_stat.  ABRT by itself
	 * is not descriptive enough.
	 */
	for (i = 0; stat_table[i][0] != 0xFF; i++) {
		if (stat_table[i][0] & drv_stat) {
			*sk = stat_table[i][1];
			*asc = stat_table[i][2];
			*ascq = stat_table[i][3];
			goto translate_done;
		}
	}

	/*
	 * We need a sensible error return here, which is tricky, and one
	 * that won't cause people to do things like return a disk wrongly.
	 */
	*sk = ABORTED_COMMAND;
	*asc = 0x00;
	*ascq = 0x00;

translate_done:
	if (verbose)
		pr_err("ata%u: translated ATA stat/err 0x%02x/%02x to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
		       id, drv_stat, drv_err, *sk, *asc, *ascq);
	return;
}

/*
 * ata_gen_passthru_sense - Generate check condition sense block.
 * @qc: Command that completed.
 *
 * This function is specific to the ATA descriptor format sense
 * block specified for the ATA pass through commands.  Regardless
 * of whether the command errored or not, return a sense
 * block. Copy all controller registers into the sense
 * block. If there was no error, we get the request from an ATA
 * passthrough command, so we use the following sense data:
 * sk = RECOVERED ERROR
 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
 *
 *
 * LOCKING:
 * None.
 */
static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->result_tf;
	unsigned char *sb = cmd->sense_buffer;
	unsigned char *desc = sb + 8;
	int verbose = qc->ap->ops->error_handler == NULL;
	u8 sense_key, asc, ascq;

	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

	/*
	 * Use ata_to_sense_error() to map status register bits
	 * onto sense key, asc & ascq.
	 */
	if (qc->err_mask ||
	    tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
		ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
				   &sense_key, &asc, &ascq, verbose);
		ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
	} else {
		/*
		 * ATA PASS-THROUGH INFORMATION AVAILABLE
		 * Always in descriptor format sense.
		 */
		scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
	}

	if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
		u8 len;

		/* descriptor format */
		len = sb[7];
		desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
		if (!desc) {
			if (SCSI_SENSE_BUFFERSIZE < len + 14)
				return;
			sb[7] = len + 14;
			desc = sb + 8 + len;
		}
		desc[0] = 9;
		desc[1] = 12;
		/*
		 * Copy registers into sense buffer.
		 */
		desc[2] = 0x00;
		desc[3] = tf->error;
		desc[5] = tf->nsect;
		desc[7] = tf->lbal;
		desc[9] = tf->lbam;
		desc[11] = tf->lbah;
		desc[12] = tf->device;
		desc[13] = tf->status;

		/*
		 * Fill in Extend bit, and the high order bytes
		 * if applicable.
		 */
		if (tf->flags & ATA_TFLAG_LBA48) {
			desc[2] |= 0x01;
			desc[4] = tf->hob_nsect;
			desc[6] = tf->hob_lbal;
			desc[8] = tf->hob_lbam;
			desc[10] = tf->hob_lbah;
		}
	} else {
		/* Fixed sense format */
		desc[0] = tf->error;
		desc[1] = tf->status;
		desc[2] = tf->device;
		desc[3] = tf->nsect;
		desc[7] = 0;
		if (tf->flags & ATA_TFLAG_LBA48) {
			desc[8] |= 0x80;
			if (tf->hob_nsect)
				desc[8] |= 0x40;
			if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
				desc[8] |= 0x20;
		}
		desc[9] = tf->lbal;
		desc[10] = tf->lbam;
		desc[11] = tf->lbah;
	}
}
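
/*
 * The "ATA Status Return" descriptor built above follows SAT: descriptor
 * code 09h, additional length 0Ch, the EXTEND bit in byte 2, the
 * error/count/LBA registers in bytes 3-11 (HOB values in the even bytes
 * for LBA48 commands), the device register in byte 12 and status in
 * byte 13.
 */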

/**
 * ata_gen_ata_sense - generate a SCSI fixed sense block
 * @qc: Command that we are erroring out
 *
 * Generate sense block for a failed ATA command @qc.  Descriptor
 * format is used to accommodate LBA48 block address.
 *
 * LOCKING:
 * None.
 */
static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->result_tf;
	unsigned char *sb = cmd->sense_buffer;
	int verbose = qc->ap->ops->error_handler == NULL;
	u64 block;
	u8 sense_key, asc, ascq;

	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

	if (ata_dev_disabled(dev)) {
		/* Device disabled after error recovery */
		/* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */
		ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21);
		return;
	}
	/* Use ata_to_sense_error() to map status register bits
	 * onto sense key, asc & ascq.
	 */
	if (qc->err_mask ||
	    tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
		ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
				   &sense_key, &asc, &ascq, verbose);
		ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
	} else {
		/* Could not decode error */
		ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
			     tf->status, qc->err_mask);
		ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
		return;
	}

	block = ata_tf_read_block(&qc->result_tf, dev);
	if (block == U64_MAX)
		return;

	scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
}

void ata_scsi_sdev_config(struct scsi_device *sdev)
{
	sdev->use_10_for_rw = 1;
	sdev->use_10_for_ms = 1;
	sdev->no_write_same = 1;

	/* Schedule policy is determined by ->qc_defer() callback and
	 * it needs to see every deferred qc.  Set dev_blocked to 1 to
	 * prevent SCSI midlayer from automatically deferring
	 * requests.
	 */
	sdev->max_device_blocked = 1;
}

/**
 * ata_scsi_dma_need_drain - Check whether data transfer may overflow
 * @rq: request to be checked
 *
 * ATAPI commands which transfer variable length data to host
 * might overflow due to application error or hardware bug.  This
 * function checks whether overflow should be drained and ignored
 * for @rq.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 1 if the transfer overflow needs to be drained; otherwise, 0.
 */
bool ata_scsi_dma_need_drain(struct request *rq)
{
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);

	return atapi_cmd_type(scmd->cmnd[0]) == ATAPI_MISC;
}
EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);

int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
{
	struct request_queue *q = sdev->request_queue;
	int depth = 1;

	if (!ata_id_has_unload(dev->id))
		dev->flags |= ATA_DFLAG_NO_UNLOAD;

	/* configure max sectors */
	dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
	blk_queue_max_hw_sectors(q, dev->max_sectors);

	if (dev->class == ATA_DEV_ATAPI) {
		sdev->sector_size = ATA_SECT_SIZE;

		/* set DMA padding */
		blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);

		/* make room for appending the drain */
		blk_queue_max_segments(q, queue_max_segments(q) - 1);

		sdev->dma_drain_len = ATAPI_MAX_DRAIN;
		sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO);
		if (!sdev->dma_drain_buf) {
			ata_dev_err(dev, "drain buffer allocation failed\n");
			return -ENOMEM;
		}
	} else {
		sdev->sector_size = ata_id_logical_sector_size(dev->id);
		sdev->manage_start_stop = 1;
	}

	/*
	 * ata_pio_sectors() expects buffer for each sector to not cross
	 * page boundary.  Enforce it by requiring buffers to be sector
	 * aligned, which works iff sector_size is not larger than
	 * PAGE_SIZE.  ATAPI devices also need the alignment as
	 * IDENTIFY_PACKET is executed as ATA_PROT_PIO.
	 */
	if (sdev->sector_size > PAGE_SIZE)
		ata_dev_warn(dev,
			"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
			sdev->sector_size);

	blk_queue_update_dma_alignment(q, sdev->sector_size - 1);

	if (dev->flags & ATA_DFLAG_AN)
		set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);

	if (dev->flags & ATA_DFLAG_NCQ)
		depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
	depth = min(ATA_MAX_QUEUE, depth);
	scsi_change_queue_depth(sdev, depth);

	if (dev->flags & ATA_DFLAG_TRUSTED)
		sdev->security_supported = 1;

	dev->sdev = sdev;
	return 0;
}

/**
 * ata_scsi_slave_config - Set SCSI device attributes
 * @sdev: SCSI device to examine
 *
 * This is called before we actually start reading
 * and writing to the device, to configure certain
 * SCSI mid-layer behaviors.
 *
 * LOCKING:
 * Defined by SCSI layer.  We don't really care.
 */

int ata_scsi_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
	int rc = 0;

	ata_scsi_sdev_config(sdev);

	if (dev)
		rc = ata_scsi_dev_config(sdev, dev);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);

/**
 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
 * @sdev: SCSI device to be destroyed
 *
 * @sdev is about to be destroyed for hot/warm unplugging.  If
 * this unplugging was initiated by libata as indicated by NULL
 * dev->sdev, this function doesn't have to do anything.
 * Otherwise, SCSI layer initiated warm-unplug is in progress.
 * Clear dev->sdev, schedule the device for ATA detach and invoke
 * EH.
 *
 * LOCKING:
 * Defined by SCSI layer.  We don't really care.
 */
void ata_scsi_slave_destroy(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	unsigned long flags;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	dev = __ata_scsi_find_dev(ap, sdev);
	if (dev && dev->sdev) {
		/* SCSI device already in CANCEL state, no need to offline it */
		dev->sdev = NULL;
		dev->flags |= ATA_DFLAG_DETACH;
		ata_port_schedule_eh(ap);
	}
	spin_unlock_irqrestore(ap->lock, flags);

	kfree(sdev->dma_drain_buf);
}
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);

/**
 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
 * @qc: Storage for translated ATA taskfile
 *
 * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
 * (to start). Perhaps these commands should be preceded by
 * CHECK POWER MODE to see what power mode the device is already in.
 * [See SAT revision 5 at www.t10.org]
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->tf;
	const u8 *cdb = scmd->cmnd;
	u16 fp;
	u8 bp = 0xff;

	if (scmd->cmd_len < 5) {
		fp = 4;
		goto invalid_fld;
	}

	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf->protocol = ATA_PROT_NODATA;
	if (cdb[1] & 0x1) {
		;	/* ignore IMMED bit, violates sat-r05 */
	}
	if (cdb[4] & 0x2) {
		fp = 4;
		bp = 1;
		goto invalid_fld;	/* LOEJ bit set not supported */
	}
	if (((cdb[4] >> 4) & 0xf) != 0) {
		fp = 4;
		bp = 3;
		goto invalid_fld;	/* power conditions not supported */
	}

	if (cdb[4] & 0x1) {
		tf->nsect = 1;	/* 1 sector, lba=0 */

		if (qc->dev->flags & ATA_DFLAG_LBA) {
			tf->flags |= ATA_TFLAG_LBA;

			tf->lbah = 0x0;
			tf->lbam = 0x0;
			tf->lbal = 0x0;
			tf->device |= ATA_LBA;
		} else {
			/* CHS */
			tf->lbal = 0x1; /* sect */
			tf->lbam = 0x0; /* cyl low */
			tf->lbah = 0x0; /* cyl high */
		}

		tf->command = ATA_CMD_VERIFY;	/* READ VERIFY */
	} else {
		/* Some odd clown BIOSen issue spindown on power off (ACPI S4
		 * or S5) causing some drives to spin up and down again.
		 */
		if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
		    system_state == SYSTEM_POWER_OFF)
			goto skip;

		if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
		    system_entering_hibernation())
			goto skip;

		/* Issue ATA STANDBY IMMEDIATE command */
		tf->command = ATA_CMD_STANDBYNOW1;
	}

	/*
	 * Standby and Idle condition timers could be implemented but that
	 * would require libata to implement the Power condition mode page
	 * and allow the user to change it. Changing mode pages requires
	 * MODE SELECT to be implemented.
	 */

	return 0;

invalid_fld:
	ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
	return 1;
skip:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}


/**
 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
 * @qc: Storage for translated ATA taskfile
 *
 * Sets up an ATA taskfile to issue FLUSH CACHE or
 * FLUSH CACHE EXT.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;

	tf->flags |= ATA_TFLAG_DEVICE;
	tf->protocol = ATA_PROT_NODATA;

	if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
		tf->command = ATA_CMD_FLUSH_EXT;
	else
		tf->command = ATA_CMD_FLUSH;

	/* flush is critical for IO integrity, consider it an IO command */
	qc->flags |= ATA_QCFLAG_IO;

	return 0;
}

/**
 * scsi_6_lba_len - Get LBA and transfer length
 * @cdb: SCSI command to translate
 *
 * Calculate LBA and transfer length for 6-byte commands.
 *
 * RETURNS:
 * @plba: the LBA
 * @plen: the transfer length
 */
static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
	u64 lba = 0;
	u32 len;

	lba |= ((u64)(cdb[1] & 0x1f)) << 16;
	lba |= ((u64)cdb[2]) << 8;
	lba |= ((u64)cdb[3]);

	len = cdb[4];

	*plba = lba;
	*plen = len;
}

/**
 * scsi_10_lba_len - Get LBA and transfer length
 * @cdb: SCSI command to translate
 *
 * Calculate LBA and transfer length for 10-byte commands.
 *
 * RETURNS:
 * @plba: the LBA
 * @plen: the transfer length
 */
static inline void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
	*plba = get_unaligned_be32(&cdb[2]);
	*plen = get_unaligned_be16(&cdb[7]);
}

/**
 * scsi_16_lba_len - Get LBA and transfer length
 * @cdb: SCSI command to translate
 *
 * Calculate LBA and transfer length for 16-byte commands.
 *
 * RETURNS:
 * @plba: the LBA
 * @plen: the transfer length
 */
static inline void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
	*plba = get_unaligned_be64(&cdb[2]);
	*plen = get_unaligned_be32(&cdb[10]);
}

/**
 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
 * @qc: Storage for translated ATA taskfile
 *
 * Converts SCSI VERIFY command to an ATA READ VERIFY command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u64 dev_sectors = qc->dev->n_sectors;
	const u8 *cdb = scmd->cmnd;
	u64 block;
	u32 n_block;
	u16 fp;

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->protocol = ATA_PROT_NODATA;

	switch (cdb[0]) {
	case VERIFY:
		if (scmd->cmd_len < 10) {
			fp = 9;
			goto invalid_fld;
		}
		scsi_10_lba_len(cdb, &block, &n_block);
		break;
	case VERIFY_16:
		if (scmd->cmd_len < 16) {
			fp = 15;
			goto invalid_fld;
		}
		scsi_16_lba_len(cdb, &block, &n_block);
		break;
	default:
		fp = 0;
		goto invalid_fld;
	}

	if (!n_block)
		goto nothing_to_do;
	if (block >= dev_sectors)
		goto out_of_range;
	if ((block + n_block) > dev_sectors)
		goto out_of_range;

	if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->command = ATA_CMD_VERIFY;
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				goto out_of_range;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;
			tf->command = ATA_CMD_VERIFY_EXT;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			goto out_of_range;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		if (!lba_28_ok(block, n_block))
			goto out_of_range;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

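		/*
		 * E.g. with dev->heads == 16 and dev->sectors == 63,
		 * block 1028160 gives track 16320, i.e. cyl 1020, head 0,
		 * sect 1; the mapping inverts
		 * block = (cyl * heads + head) * sectors + sect - 1.
		 */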
		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			goto out_of_range;

		tf->command = ATA_CMD_VERIFY;
		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;

invalid_fld:
	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
	return 1;

out_of_range:
	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	/* "Logical Block Address out of range" */
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}

static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	u32 req_blocks;

	if (!blk_rq_is_passthrough(rq))
		return true;

	req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
	if (n_blocks > req_blocks)
		return false;

	return true;
}

/**
 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
 * @qc: Storage for translated ATA taskfile
 *
 * Converts any of six SCSI read/write commands into the
 * ATA counterpart, including starting sector (LBA),
 * sector count, and taking into account the device's LBA48
 * support.
 *
 * Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
 * %WRITE_16 are currently supported.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
	unsigned int tf_flags = 0;
	u64 block;
	u32 n_block;
	int rc;
	u16 fp = 0;

	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_10:
	case WRITE_16:
		tf_flags |= ATA_TFLAG_WRITE;
		break;
	}

	/* Calculate the SCSI LBA, transfer length and FUA. */
	switch (cdb[0]) {
	case READ_10:
	case WRITE_10:
		if (unlikely(scmd->cmd_len < 10)) {
			fp = 9;
			goto invalid_fld;
		}
		scsi_10_lba_len(cdb, &block, &n_block);
		if (cdb[1] & (1 << 3))
			tf_flags |= ATA_TFLAG_FUA;
		if (!ata_check_nblocks(scmd, n_block))
			goto invalid_fld;
		break;
	case READ_6:
	case WRITE_6:
		if (unlikely(scmd->cmd_len < 6)) {
			fp = 5;
			goto invalid_fld;
		}
		scsi_6_lba_len(cdb, &block, &n_block);

		/* for 6-byte r/w commands, transfer length 0
		 * means 256 blocks of data, not 0 block.
		 */
		if (!n_block)
			n_block = 256;
		if (!ata_check_nblocks(scmd, n_block))
			goto invalid_fld;
		break;
	case READ_16:
	case WRITE_16:
		if (unlikely(scmd->cmd_len < 16)) {
			fp = 15;
			goto invalid_fld;
		}
		scsi_16_lba_len(cdb, &block, &n_block);
		if (cdb[1] & (1 << 3))
			tf_flags |= ATA_TFLAG_FUA;
		if (!ata_check_nblocks(scmd, n_block))
			goto invalid_fld;
		break;
	default:
		fp = 0;
		goto invalid_fld;
	}

	/* Check and compose ATA command */
	if (!n_block)
		/* For 10-byte and 16-byte SCSI R/W commands, transfer
		 * length 0 means transfer 0 block of data.
		 * However, for ATA R/W commands, sector count 0 means
		 * 256 or 65536 sectors, not 0 sectors as in SCSI.
		 *
		 * WARNING: one or two older ATA drives treat 0 as 0...
		 */
		goto nothing_to_do;

	qc->flags |= ATA_QCFLAG_IO;
	qc->nbytes = n_block * scmd->device->sector_size;

	rc = ata_build_rw_tf(qc, block, n_block, tf_flags, class);
	if (likely(rc == 0))
		return 0;

	if (rc == -ERANGE)
		goto out_of_range;
	/* treat all other errors as -EINVAL, fall through */
invalid_fld:
	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
	return 1;

out_of_range:
	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	/* "Logical Block Address out of range" */
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}

static void ata_qc_done(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	void (*done)(struct scsi_cmnd *) = qc->scsidone;

	ata_qc_free(qc);
	done(cmd);
}

static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *cmd = qc->scsicmd;
	u8 *cdb = cmd->cmnd;
	int need_sense = (qc->err_mask != 0);

	/* For ATA pass thru (SAT) commands, generate a sense block if
	 * user mandated it or if there's an error.  Note that if we
	 * generate because the user forced us to [CK_COND =1], a check
	 * condition is generated and the ATA register values are returned
	 * whether the command completed successfully or not. If there
	 * was no error, we use the following sense data:
	 * sk = RECOVERED ERROR
	 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
	 */
	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
	    ((cdb[2] & 0x20) || need_sense))
		ata_gen_passthru_sense(qc);
	else if (qc->flags & ATA_QCFLAG_SENSE_VALID)
		cmd->result = SAM_STAT_CHECK_CONDITION;
	else if (need_sense)
		ata_gen_ata_sense(qc);
	else
		cmd->result = SAM_STAT_GOOD;

	if (need_sense && !ap->ops->error_handler)
		ata_dump_status(ap, &qc->result_tf);

	ata_qc_done(qc);
}

/**
 * ata_scsi_translate - Translate then issue SCSI command to ATA device
 * @dev: ATA device to which the command is addressed
 * @cmd: SCSI command to execute
 * @xlat_func: Actor which translates @cmd to an ATA taskfile
 *
 * Our ->queuecommand() function has decided that the SCSI
 * command issued can be directly translated into an ATA
 * command, rather than handled internally.
 *
 * This function sets up an ata_queued_cmd structure for the
 * SCSI command, and sends that ata_queued_cmd to the hardware.
 *
 * The xlat_func argument (actor) returns 0 if ready to execute
 * ATA command, else 1 to finish translation. If 1 is returned
 * then cmd->result (and possibly cmd->sense_buffer) are assumed
 * to be set reflecting an error condition or clean (early)
 * termination.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 0 on success, SCSI_MLQUEUE_DEVICE_BUSY if the command
 * needs to be deferred.
 */
static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
			      ata_xlat_func_t xlat_func)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;
	int rc;

	qc = ata_scsi_qc_new(dev, cmd);
	if (!qc)
		goto err_mem;

	/* data is present; dma-map it */
	if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
	    cmd->sc_data_direction == DMA_TO_DEVICE) {
		if (unlikely(scsi_bufflen(cmd) < 1)) {
			ata_dev_warn(dev, "WARNING: zero len r/w req\n");
			goto err_did;
		}

		ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));

		qc->dma_dir = cmd->sc_data_direction;
	}

	qc->complete_fn = ata_scsi_qc_complete;

	if (xlat_func(qc))
		goto early_finish;

	if (ap->ops->qc_defer) {
		if ((rc = ap->ops->qc_defer(qc)))
			goto defer;
	}

	/* select device, send command to hardware */
	ata_qc_issue(qc);

	return 0;

early_finish:
	ata_qc_free(qc);
	scsi_done(cmd);
	return 0;

err_did:
	ata_qc_free(qc);
	cmd->result = (DID_ERROR << 16);
	scsi_done(cmd);
err_mem:
	return 0;

defer:
	ata_qc_free(qc);
	if (rc == ATA_DEFER_LINK)
		return SCSI_MLQUEUE_DEVICE_BUSY;
	else
		return SCSI_MLQUEUE_HOST_BUSY;
}

struct ata_scsi_args {
	struct ata_device	*dev;
	u16			*id;
	struct scsi_cmnd	*cmd;
};

/**
 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
 * @args: device IDENTIFY data / SCSI command of interest.
 * @actor: Callback hook for desired SCSI command simulator
 *
 * Takes care of the hard work of simulating a SCSI command...
 * Mapping the response buffer, calling the command's handler,
 * and handling the handler's return value.  This return value
 * indicates whether the handler wishes the SCSI command to be
 * completed successfully (0), or not (in which case cmd->result
 * and sense buffer are assumed to be set).
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
		unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
{
	unsigned int rc;
	struct scsi_cmnd *cmd = args->cmd;
	unsigned long flags;

	spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);

	memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
	rc = actor(args, ata_scsi_rbuf);
	if (rc == 0)
		sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
				    ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);

	spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);

	if (rc == 0)
		cmd->result = SAM_STAT_GOOD;
}

/**
 * ata_scsiop_inq_std - Simulate INQUIRY command
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Returns standard device identification data associated
 * with non-VPD INQUIRY command output.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
{
	static const u8 versions[] = {
		0x00,
		0x60,	/* SAM-3 (no version claimed) */

		0x03,
		0x20,	/* SBC-2 (no version claimed) */

		0x03,
		0x00	/* SPC-3 (no version claimed) */
	};
	static const u8 versions_zbc[] = {
		0x00,
		0xA0,	/* SAM-5 (no version claimed) */

		0x06,
		0x00,	/* SBC-4 (no version claimed) */

		0x05,
		0xC0,	/* SPC-5 (no version claimed) */

		0x60,
		0x24,	/* ZBC r05 */
	};

	u8 hdr[] = {
		TYPE_DISK,
		0,
		0x5,	/* claim SPC-3 version compatibility */
		2,
		95 - 4,
		0,
		0,
		2
	};

	/* set scsi removable (RMB) bit per ata bit, or if the
	 * AHCI port says it's external (Hotplug-capable, eSATA).
	 */
	if (ata_id_removable(args->id) ||
	    (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
		hdr[1] |= (1 << 7);

	if (args->dev->class == ATA_DEV_ZAC) {
		hdr[0] = TYPE_ZBC;
		hdr[2] = 0x7; /* claim SPC-5 version compatibility */
	}

	memcpy(rbuf, hdr, sizeof(hdr));
	memcpy(&rbuf[8], "ATA     ", 8);
	ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);

	/* From SAT, use last 2 words from fw rev unless they are spaces */
	ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
	if (strncmp(&rbuf[32], "    ", 4) == 0)
		ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);

	if (rbuf[32] == 0 || rbuf[32] == ' ')
		memcpy(&rbuf[32], "n/a ", 4);

	if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC)
		memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
	else
		memcpy(rbuf + 58, versions, sizeof(versions));

	return 0;
}

/**
 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Returns list of inquiry VPD pages available.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
{
	int i, num_pages = 0;
	static const u8 pages[] = {
		0x00,	/* page 0x00, this page */
		0x80,	/* page 0x80, unit serial no page */
		0x83,	/* page 0x83, device ident page */
		0x89,	/* page 0x89, ata info page */
		0xb0,	/* page 0xb0, block limits page */
		0xb1,	/* page 0xb1, block device characteristics page */
		0xb2,	/* page 0xb2, thin provisioning page */
		0xb6,	/* page 0xb6, zoned block device characteristics */
		0xb9,	/* page 0xb9, concurrent positioning ranges */
	};

	for (i = 0; i < sizeof(pages); i++) {
		if (pages[i] == 0xb6 &&
		    !(args->dev->flags & ATA_DFLAG_ZAC))
			continue;
		rbuf[num_pages + 4] = pages[i];
		num_pages++;
	}
	rbuf[3] = num_pages;	/* number of supported VPD pages */
	return 0;
}

/**
 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Returns ATA device serial number.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
{
	static const u8 hdr[] = {
		0,
		0x80,			/* this page code */
		0,
		ATA_ID_SERNO_LEN,	/* page len */
	};

	memcpy(rbuf, hdr, sizeof(hdr));
	ata_id_string(args->id, (unsigned char *) &rbuf[4],
		      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
	return 0;
}

/**
 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Yields two logical unit device identification designators:
 *  - vendor specific ASCII containing the ATA serial number
 *  - SAT defined "t10 vendor id based" containing ASCII vendor
 *    name ("ATA     "), model and serial numbers.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
{
	const int sat_model_serial_desc_len = 68;
	int num;

	rbuf[1] = 0x83;			/* this page code */
	num = 4;

	/* piv=0, assoc=lu, code_set=ASCII, designator=vendor */
	rbuf[num + 0] = 2;
	rbuf[num + 3] = ATA_ID_SERNO_LEN;
	num += 4;
	ata_id_string(args->id, (unsigned char *) rbuf + num,
		      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
	num += ATA_ID_SERNO_LEN;

	/* SAT defined lu model and serial numbers descriptor */
	/* piv=0, assoc=lu, code_set=ASCII, designator=t10 vendor id */
	rbuf[num + 0] = 2;
	rbuf[num + 1] = 1;
	rbuf[num + 3] = sat_model_serial_desc_len;
	num += 4;
	memcpy(rbuf + num, "ATA     ", 8);
	num += 8;
	ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
		      ATA_ID_PROD_LEN);
	num += ATA_ID_PROD_LEN;
	ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
		      ATA_ID_SERNO_LEN);
	num += ATA_ID_SERNO_LEN;

	if (ata_id_has_wwn(args->id)) {
		/* SAT defined lu world wide name */
		/* piv=0, assoc=lu, code_set=binary, designator=NAA */
		rbuf[num + 0] = 1;
		rbuf[num + 1] = 3;
		rbuf[num + 3] = ATA_ID_WWN_LEN;
		num += 4;
		ata_id_string(args->id, (unsigned char *) rbuf + num,
			      ATA_ID_WWN, ATA_ID_WWN_LEN);
		num += ATA_ID_WWN_LEN;
	}
	rbuf[3] = num - 4;	/* page len (assume less than 256 bytes) */
	return 0;
}

/**
 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Yields SAT-specified ATA VPD page.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
{
	rbuf[1] = 0x89;			/* our page code */
	rbuf[2] = (0x238 >> 8);		/* page size fixed at 238h */
	rbuf[3] = (0x238 & 0xff);

	memcpy(&rbuf[8], "linux   ", 8);
	memcpy(&rbuf[16], "libata          ", 16);
	memcpy(&rbuf[32], DRV_VERSION, 4);

	rbuf[36] = 0x34;		/* force D2H Reg FIS (34h) */
	rbuf[37] = (1 << 7);		/* bit 7 indicates Command FIS */
					/* TODO: PMP? */
/**
 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Yields SAT-specified ATA VPD page.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
{
	rbuf[1] = 0x89;			/* our page code */
	rbuf[2] = (0x238 >> 8);		/* page size fixed at 238h */
	rbuf[3] = (0x238 & 0xff);

	memcpy(&rbuf[8], "linux   ", 8);
	memcpy(&rbuf[16], "libata          ", 16);
	memcpy(&rbuf[32], DRV_VERSION, 4);

	rbuf[36] = 0x34;		/* force D2H Reg FIS (34h) */
	rbuf[37] = (1 << 7);		/* bit 7 indicates Command FIS */
					/* TODO: PMP? */

	/* we don't store the ATA device signature, so we fake it */
	rbuf[38] = ATA_DRDY;		/* really, this is Status reg */
	rbuf[40] = 0x1;
	rbuf[48] = 0x1;

	rbuf[56] = ATA_CMD_ID_ATA;

	memcpy(&rbuf[60], &args->id[0], 512);
	return 0;
}

static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
{
	struct ata_device *dev = args->dev;
	u16 min_io_sectors;

	rbuf[1] = 0xb0;
	rbuf[3] = 0x3c;		/* required VPD size with unmap support */

	/*
	 * Optimal transfer length granularity.
	 *
	 * This is always one physical block, but for disks with a smaller
	 * logical than physical sector size we need to figure out what the
	 * latter is.
	 */
	min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id);
	put_unaligned_be16(min_io_sectors, &rbuf[6]);

	/*
	 * Optimal unmap granularity.
	 *
	 * The ATA spec doesn't even know about a granularity or alignment
	 * for the TRIM command.  We can leave out most of the unmap related
	 * VPD page entries, but we have to specify a granularity to signal
	 * that we support some form of unmap - in this case via WRITE SAME
	 * with the unmap bit set.
	 */
	if (ata_id_has_trim(args->id)) {
		u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;

		if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M)
			max_blocks = 128 << (20 - SECTOR_SHIFT);

		put_unaligned_be64(max_blocks, &rbuf[36]);
		put_unaligned_be32(1, &rbuf[28]);
	}

	return 0;
}
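/*
 * Illustrative arithmetic (not part of the driver): with
 * ATA_MAX_TRIM_RNUM being 64 range entries (one 512-byte payload block
 * divided by 8 bytes per entry), the maximum unmap LBA count advertised
 * above is
 *
 *	65535 * 64 = 4194240 logical blocks
 *
 * roughly 2 GiB with 512-byte sectors, while drives with the
 * ATA_HORKAGE_MAX_TRIM_128M quirk are clamped to 128 MiB:
 *
 *	128 << (20 - 9) = 262144 blocks
 */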
static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
{
	int form_factor = ata_id_form_factor(args->id);
	int media_rotation_rate = ata_id_rotation_rate(args->id);
	u8 zoned = ata_id_zoned_cap(args->id);

	rbuf[1] = 0xb1;
	rbuf[3] = 0x3c;
	rbuf[4] = media_rotation_rate >> 8;
	rbuf[5] = media_rotation_rate;
	rbuf[7] = form_factor;
	if (zoned)
		rbuf[8] = (zoned << 4);

	return 0;
}

static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
{
	/* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */
	rbuf[1] = 0xb2;
	rbuf[3] = 0x4;
	rbuf[5] = 1 << 6;	/* TPWS */

	return 0;
}

static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
{
	/*
	 * zbc-r05 SCSI Zoned Block device characteristics VPD page
	 */
	rbuf[1] = 0xb6;
	rbuf[3] = 0x3C;

	/*
	 * URSWRZ bit is only meaningful for host-managed ZAC drives
	 */
	if (args->dev->zac_zoned_cap & 1)
		rbuf[4] |= 1;
	put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]);
	put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]);
	put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]);

	return 0;
}

static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
{
	struct ata_cpr_log *cpr_log = args->dev->cpr_log;
	u8 *desc = &rbuf[64];
	int i;

	/* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
	rbuf[1] = 0xb9;
	put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);

	for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
		desc[0] = cpr_log->cpr[i].num;
		desc[1] = cpr_log->cpr[i].num_storage_elements;
		put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]);
		put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
	}

	return 0;
}

/**
 * modecpy - Prepare response for MODE SENSE
 * @dest: output buffer
 * @src: data being copied
 * @n: length of mode page
 * @changeable: whether changeable parameters are requested
 *
 * Generate a generic MODE SENSE page for either current or changeable
 * parameters.
 *
 * LOCKING:
 *	None.
 */
static void modecpy(u8 *dest, const u8 *src, int n, bool changeable)
{
	if (changeable) {
		memcpy(dest, src, 2);
		memset(dest + 2, 0, n - 2);
	} else {
		memcpy(dest, src, n);
	}
}

/**
 * ata_msense_caching - Simulate MODE SENSE caching info page
 * @id: device IDENTIFY data
 * @buf: output buffer
 * @changeable: whether changeable parameters are requested
 *
 * Generate a caching info page, which conditionally indicates
 * write caching to the SCSI layer, depending on device
 * capabilities.
 *
 * LOCKING:
 *	None.
 */
static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
{
	modecpy(buf, def_cache_mpage, sizeof(def_cache_mpage), changeable);
	if (changeable) {
		buf[2] |= (1 << 2);	/* ata_mselect_caching() */
	} else {
		buf[2] |= (ata_id_wcache_enabled(id) << 2);	/* write cache enable */
		buf[12] |= (!ata_id_rahead_enabled(id) << 5);	/* disable read ahead */
	}
	return sizeof(def_cache_mpage);
}

/**
 * ata_msense_control - Simulate MODE SENSE control mode page
 * @dev: ATA device of interest
 * @buf: output buffer
 * @changeable: whether changeable parameters are requested
 *
 * Generate a generic MODE SENSE control mode page.
 *
 * LOCKING:
 *	None.
 */
static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
					bool changeable)
{
	modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable);
	if (changeable) {
		buf[2] |= (1 << 2);	/* ata_mselect_control() */
	} else {
		bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);

		buf[2] |= (d_sense << 2);	/* descriptor format sense data */
	}
	return sizeof(def_control_mpage);
}

/**
 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
 * @buf: output buffer
 * @changeable: whether changeable parameters are requested
 *
 * Generate a generic MODE SENSE r/w error recovery page.
 *
 * LOCKING:
 *	None.
 */
static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
{
	modecpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage),
		changeable);
	return sizeof(def_rw_recovery_mpage);
}

/*
 * We can turn this into a real blacklist if it's needed, for now just
 * blacklist any Maxtor BANC1G10 revision firmware
 */
static int ata_dev_supports_fua(u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];

	if (!libata_fua)
		return 0;
	if (!ata_id_has_fua(id))
		return 0;

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));

	if (strcmp(model, "Maxtor"))
		return 1;
	if (strcmp(fw, "BANC1G10"))
		return 1;

	return 0; /* blacklisted */
}

/**
 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Simulate MODE SENSE commands. Assume this is invoked for direct
 * access devices (e.g. disks) only. There should be no block
 * descriptor for other device types.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
{
	struct ata_device *dev = args->dev;
	u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
	static const u8 sat_blk_desc[] = {
		0, 0, 0, 0,	/* number of blocks: sat unspecified */
		0,
		0, 0x2, 0x0	/* block length: 512 bytes */
	};
	u8 pg, spg;
	unsigned int ebd, page_control, six_byte;
	u8 dpofua, bp = 0xff;
	u16 fp;

	six_byte = (scsicmd[0] == MODE_SENSE);
	ebd = !(scsicmd[1] & 0x8);	/* dbd bit inverted == ebd */
	/*
	 * LLBA bit in msense(10) ignored (compliant)
	 */

	page_control = scsicmd[2] >> 6;
	switch (page_control) {
	case 0: /* current */
	case 1: /* changeable */
	case 2: /* defaults */
		break;	/* supported */
	case 3: /* saved */
		goto saving_not_supp;
	default:
		fp = 2;
		bp = 6;
		goto invalid_fld;
	}

	if (six_byte)
		p += 4 + (ebd ? 8 : 0);
	else
		p += 8 + (ebd ? 8 : 0);

	pg = scsicmd[2] & 0x3f;
	spg = scsicmd[3];
	/*
	 * No mode subpages supported (yet) but asking for _all_
	 * subpages may be valid
	 */
	if (spg && (spg != ALL_SUB_MPAGES)) {
		fp = 3;
		goto invalid_fld;
	}

	switch(pg) {
	case RW_RECOVERY_MPAGE:
		p += ata_msense_rw_recovery(p, page_control == 1);
		break;

	case CACHE_MPAGE:
		p += ata_msense_caching(args->id, p, page_control == 1);
		break;

	case CONTROL_MPAGE:
		p += ata_msense_control(args->dev, p, page_control == 1);
		break;

	case ALL_MPAGES:
		p += ata_msense_rw_recovery(p, page_control == 1);
		p += ata_msense_caching(args->id, p, page_control == 1);
		p += ata_msense_control(args->dev, p, page_control == 1);
		break;

	default:		/* invalid page code */
		fp = 2;
		goto invalid_fld;
	}

	dpofua = 0;
	if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
	    (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
		dpofua = 1 << 4;

	if (six_byte) {
		rbuf[0] = p - rbuf - 1;
		rbuf[2] |= dpofua;
		if (ebd) {
			rbuf[3] = sizeof(sat_blk_desc);
			memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
		}
	} else {
		unsigned int output_len = p - rbuf - 2;

		rbuf[0] = output_len >> 8;
		rbuf[1] = output_len;
		rbuf[3] |= dpofua;
		if (ebd) {
			rbuf[7] = sizeof(sat_blk_desc);
			memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
		}
	}
	return 0;

invalid_fld:
	ata_scsi_set_invalid_field(dev, args->cmd, fp, bp);
	return 1;

saving_not_supp:
	ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
	 /* "Saving parameters not supported" */
	return 1;
}
2397 */ 2398 static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) 2399 { 2400 struct ata_device *dev = args->dev; 2401 u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */ 2402 u32 sector_size; /* physical sector size in bytes */ 2403 u8 log2_per_phys; 2404 u16 lowest_aligned; 2405 2406 sector_size = ata_id_logical_sector_size(dev->id); 2407 log2_per_phys = ata_id_log2_per_physical_sector(dev->id); 2408 lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys); 2409 2410 if (args->cmd->cmnd[0] == READ_CAPACITY) { 2411 if (last_lba >= 0xffffffffULL) 2412 last_lba = 0xffffffff; 2413 2414 /* sector count, 32-bit */ 2415 rbuf[0] = last_lba >> (8 * 3); 2416 rbuf[1] = last_lba >> (8 * 2); 2417 rbuf[2] = last_lba >> (8 * 1); 2418 rbuf[3] = last_lba; 2419 2420 /* sector size */ 2421 rbuf[4] = sector_size >> (8 * 3); 2422 rbuf[5] = sector_size >> (8 * 2); 2423 rbuf[6] = sector_size >> (8 * 1); 2424 rbuf[7] = sector_size; 2425 } else { 2426 /* sector count, 64-bit */ 2427 rbuf[0] = last_lba >> (8 * 7); 2428 rbuf[1] = last_lba >> (8 * 6); 2429 rbuf[2] = last_lba >> (8 * 5); 2430 rbuf[3] = last_lba >> (8 * 4); 2431 rbuf[4] = last_lba >> (8 * 3); 2432 rbuf[5] = last_lba >> (8 * 2); 2433 rbuf[6] = last_lba >> (8 * 1); 2434 rbuf[7] = last_lba; 2435 2436 /* sector size */ 2437 rbuf[ 8] = sector_size >> (8 * 3); 2438 rbuf[ 9] = sector_size >> (8 * 2); 2439 rbuf[10] = sector_size >> (8 * 1); 2440 rbuf[11] = sector_size; 2441 2442 rbuf[12] = 0; 2443 rbuf[13] = log2_per_phys; 2444 rbuf[14] = (lowest_aligned >> 8) & 0x3f; 2445 rbuf[15] = lowest_aligned; 2446 2447 if (ata_id_has_trim(args->id) && 2448 !(dev->horkage & ATA_HORKAGE_NOTRIM)) { 2449 rbuf[14] |= 0x80; /* LBPME */ 2450 2451 if (ata_id_has_zero_after_trim(args->id) && 2452 dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) { 2453 ata_dev_info(dev, "Enabling discard_zeroes_data\n"); 2454 rbuf[14] |= 0x40; /* LBPRZ */ 2455 } 2456 } 2457 if (ata_id_zoned_cap(args->id) || 2458 args->dev->class == ATA_DEV_ZAC) 2459 rbuf[12] = (1 << 4); /* RC_BASIS */ 2460 } 2461 return 0; 2462 } 2463 2464 /** 2465 * ata_scsiop_report_luns - Simulate REPORT LUNS command 2466 * @args: device IDENTIFY data / SCSI command of interest. 2467 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 2468 * 2469 * Simulate REPORT LUNS command. 2470 * 2471 * LOCKING: 2472 * spin_lock_irqsave(host lock) 2473 */ 2474 static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf) 2475 { 2476 rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */ 2477 2478 return 0; 2479 } 2480 2481 static void atapi_sense_complete(struct ata_queued_cmd *qc) 2482 { 2483 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) { 2484 /* FIXME: not quite right; we don't want the 2485 * translation of taskfile registers into 2486 * a sense descriptors, since that's only 2487 * correct for ATA, not ATAPI 2488 */ 2489 ata_gen_passthru_sense(qc); 2490 } 2491 2492 ata_qc_done(qc); 2493 } 2494 2495 /* is it pointless to prefer PIO for "safety reasons"? 
/**
 * ata_scsiop_report_luns - Simulate REPORT LUNS command
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Simulate REPORT LUNS command.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
{
	rbuf[3] = 8;	/* just one lun, LUN 0, size 8 bytes */

	return 0;
}

static void atapi_sense_complete(struct ata_queued_cmd *qc)
{
	if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
		/* FIXME: not quite right; we don't want the
		 * translation of taskfile registers into
		 * sense descriptors, since that's only
		 * correct for ATA, not ATAPI
		 */
		ata_gen_passthru_sense(qc);
	}

	ata_qc_done(qc);
}

/* is it pointless to prefer PIO for "safety reasons"? */
static inline int ata_pio_use_silly(struct ata_port *ap)
{
	return (ap->flags & ATA_FLAG_PIO_DMA);
}

static void atapi_request_sense(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *cmd = qc->scsicmd;

	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

#ifdef CONFIG_ATA_SFF
	if (ap->ops->sff_tf_read)
		ap->ops->sff_tf_read(ap, &qc->tf);
#endif

	/* fill these in, for the case where they are -not- overwritten */
	cmd->sense_buffer[0] = 0x70;
	cmd->sense_buffer[2] = qc->tf.error >> 4;

	ata_qc_reinit(qc);

	/* setup sg table and init transfer direction */
	sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
	ata_sg_init(qc, &qc->sgent, 1);
	qc->dma_dir = DMA_FROM_DEVICE;

	memset(&qc->cdb, 0, qc->dev->cdb_len);
	qc->cdb[0] = REQUEST_SENSE;
	qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;

	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.command = ATA_CMD_PACKET;

	if (ata_pio_use_silly(ap)) {
		qc->tf.protocol = ATAPI_PROT_DMA;
		qc->tf.feature |= ATAPI_PKT_DMA;
	} else {
		qc->tf.protocol = ATAPI_PROT_PIO;
		qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
		qc->tf.lbah = 0;
	}
	qc->nbytes = SCSI_SENSE_BUFFERSIZE;

	qc->complete_fn = atapi_sense_complete;

	ata_qc_issue(qc);
}

/*
 * ATAPI devices typically report zero for their SCSI version, and sometimes
 * deviate from the spec WRT response data format.  If SCSI version is
 * reported as zero like normal, then we make the following fixups:
 *   1) Fake MMC-5 version, to indicate to the Linux scsi midlayer this is a
 *	modern device.
 *   2) Ensure response data format / ATAPI information are always correct.
 */
static void atapi_fixup_inquiry(struct scsi_cmnd *cmd)
{
	u8 buf[4];

	sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4);
	if (buf[2] == 0) {
		buf[2] = 0x5;
		buf[3] = 0x32;
	}
	sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4);
}

static void atapi_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	unsigned int err_mask = qc->err_mask;

	/* handle completion from new EH */
	if (unlikely(qc->ap->ops->error_handler &&
		     (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {

		if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
			/* FIXME: not quite right; we don't want the
			 * translation of taskfile registers into
			 * sense descriptors, since that's only
			 * correct for ATA, not ATAPI
			 */
			ata_gen_passthru_sense(qc);
		}

		/* SCSI EH automatically locks door if sdev->locked is
		 * set.  Sometimes door lock request continues to
		 * fail, for example, when no media is present.  This
		 * creates a loop - SCSI EH issues door lock which
		 * fails and gets invoked again to acquire sense data
		 * for the failed command.
		 *
		 * If door lock fails, always clear sdev->locked to
		 * avoid this infinite loop.
		 *
		 * This may happen before SCSI scan is complete.  Make
		 * sure qc->dev->sdev isn't NULL before dereferencing.
		 */
		if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
			qc->dev->sdev->locked = 0;

		qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
		ata_qc_done(qc);
		return;
	}

	/* successful completion or old EH failure path */
	if (unlikely(err_mask & AC_ERR_DEV)) {
		cmd->result = SAM_STAT_CHECK_CONDITION;
		atapi_request_sense(qc);
		return;
	} else if (unlikely(err_mask)) {
		/* FIXME: not quite right; we don't want the
		 * translation of taskfile registers into
		 * sense descriptors, since that's only
		 * correct for ATA, not ATAPI
		 */
		ata_gen_passthru_sense(qc);
	} else {
		if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0)
			atapi_fixup_inquiry(cmd);
		cmd->result = SAM_STAT_GOOD;
	}

	ata_qc_done(qc);
}

/**
 * atapi_xlat - Initialize PACKET taskfile
 * @qc: command structure to be initialized
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 * RETURNS:
 *	Zero on success, non-zero on failure.
 */
static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_device *dev = qc->dev;
	int nodata = (scmd->sc_data_direction == DMA_NONE);
	int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO);
	unsigned int nbytes;

	memset(qc->cdb, 0, dev->cdb_len);
	memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);

	qc->complete_fn = atapi_qc_complete;

	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
		qc->tf.flags |= ATA_TFLAG_WRITE;
	}

	qc->tf.command = ATA_CMD_PACKET;
	ata_qc_set_pc_nbytes(qc);

	/* check whether ATAPI DMA is safe */
	if (!nodata && !using_pio && atapi_check_dma(qc))
		using_pio = 1;

	/* Some controller variants snoop this value for Packet
	 * transfers to do state machine and FIFO management.  Thus we
	 * want to set it properly even for DMA, where it is effectively
	 * meaningless.
	 */
	nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024);

	/* Most ATAPI devices which honor transfer chunk size don't
	 * behave according to the spec when an odd chunk size which
	 * matches the transfer length is specified, i.e. when the
	 * number of bytes to transfer is 2n+1.  According to the spec,
	 * what should happen is to indicate that 2n+1 is going to be
	 * transferred and transfer 2n+2 bytes where the last byte is
	 * padding.
	 *
	 * In practice, this doesn't happen.  ATAPI devices first
	 * indicate and transfer 2n bytes and then indicate and
	 * transfer 2 bytes where the last byte is padding.
	 *
	 * This inconsistency confuses several controllers which
	 * perform PIO using DMA such as Intel AHCIs and sil3124/32.
	 * These controllers use the actual number of transferred bytes
	 * to update the DMA pointer, and a transfer of 4n+2 bytes makes
	 * those controllers push the DMA pointer by 4n+4 bytes because
	 * SATA data FISes are aligned to 4 bytes.  This causes data
	 * corruption and buffer overrun.
	 *
	 * Always setting nbytes to an even number solves this problem
	 * because then ATAPI devices don't have to split data at 2n
	 * boundaries.
	 */
	if (nbytes & 0x1)
		nbytes++;

	qc->tf.lbam = (nbytes & 0xFF);
	qc->tf.lbah = (nbytes >> 8);

	if (nodata)
		qc->tf.protocol = ATAPI_PROT_NODATA;
	else if (using_pio)
		qc->tf.protocol = ATAPI_PROT_PIO;
	else {
		/* DMA data xfer */
		qc->tf.protocol = ATAPI_PROT_DMA;
		qc->tf.feature |= ATAPI_PKT_DMA;

		if ((dev->flags & ATA_DFLAG_DMADIR) &&
		    (scmd->sc_data_direction != DMA_TO_DEVICE))
			/* some SATA bridges need us to indicate data xfer direction */
			qc->tf.feature |= ATAPI_DMADIR;
	}

	/* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE
	   as ATAPI tape drives don't get this right otherwise */
	return 0;
}
2690 */ 2691 if (nbytes & 0x1) 2692 nbytes++; 2693 2694 qc->tf.lbam = (nbytes & 0xFF); 2695 qc->tf.lbah = (nbytes >> 8); 2696 2697 if (nodata) 2698 qc->tf.protocol = ATAPI_PROT_NODATA; 2699 else if (using_pio) 2700 qc->tf.protocol = ATAPI_PROT_PIO; 2701 else { 2702 /* DMA data xfer */ 2703 qc->tf.protocol = ATAPI_PROT_DMA; 2704 qc->tf.feature |= ATAPI_PKT_DMA; 2705 2706 if ((dev->flags & ATA_DFLAG_DMADIR) && 2707 (scmd->sc_data_direction != DMA_TO_DEVICE)) 2708 /* some SATA bridges need us to indicate data xfer direction */ 2709 qc->tf.feature |= ATAPI_DMADIR; 2710 } 2711 2712 2713 /* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE 2714 as ATAPI tape drives don't get this right otherwise */ 2715 return 0; 2716 } 2717 2718 static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) 2719 { 2720 if (!sata_pmp_attached(ap)) { 2721 if (likely(devno >= 0 && 2722 devno < ata_link_max_devices(&ap->link))) 2723 return &ap->link.device[devno]; 2724 } else { 2725 if (likely(devno >= 0 && 2726 devno < ap->nr_pmp_links)) 2727 return &ap->pmp_link[devno].device[0]; 2728 } 2729 2730 return NULL; 2731 } 2732 2733 static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, 2734 const struct scsi_device *scsidev) 2735 { 2736 int devno; 2737 2738 /* skip commands not addressed to targets we simulate */ 2739 if (!sata_pmp_attached(ap)) { 2740 if (unlikely(scsidev->channel || scsidev->lun)) 2741 return NULL; 2742 devno = scsidev->id; 2743 } else { 2744 if (unlikely(scsidev->id || scsidev->lun)) 2745 return NULL; 2746 devno = scsidev->channel; 2747 } 2748 2749 return ata_find_dev(ap, devno); 2750 } 2751 2752 /** 2753 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd 2754 * @ap: ATA port to which the device is attached 2755 * @scsidev: SCSI device from which we derive the ATA device 2756 * 2757 * Given various information provided in struct scsi_cmnd, 2758 * map that onto an ATA bus, and using that mapping 2759 * determine which ata_device is associated with the 2760 * SCSI command to be sent. 2761 * 2762 * LOCKING: 2763 * spin_lock_irqsave(host lock) 2764 * 2765 * RETURNS: 2766 * Associated ATA device, or %NULL if not found. 2767 */ 2768 struct ata_device * 2769 ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev) 2770 { 2771 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev); 2772 2773 if (unlikely(!dev || !ata_dev_enabled(dev))) 2774 return NULL; 2775 2776 return dev; 2777 } 2778 2779 /* 2780 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value. 2781 * @byte1: Byte 1 from pass-thru CDB. 2782 * 2783 * RETURNS: 2784 * ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise. 
2785 */ 2786 static u8 2787 ata_scsi_map_proto(u8 byte1) 2788 { 2789 switch((byte1 & 0x1e) >> 1) { 2790 case 3: /* Non-data */ 2791 return ATA_PROT_NODATA; 2792 2793 case 6: /* DMA */ 2794 case 10: /* UDMA Data-in */ 2795 case 11: /* UDMA Data-Out */ 2796 return ATA_PROT_DMA; 2797 2798 case 4: /* PIO Data-in */ 2799 case 5: /* PIO Data-out */ 2800 return ATA_PROT_PIO; 2801 2802 case 12: /* FPDMA */ 2803 return ATA_PROT_NCQ; 2804 2805 case 0: /* Hard Reset */ 2806 case 1: /* SRST */ 2807 case 8: /* Device Diagnostic */ 2808 case 9: /* Device Reset */ 2809 case 7: /* DMA Queued */ 2810 case 15: /* Return Response Info */ 2811 default: /* Reserved */ 2812 break; 2813 } 2814 2815 return ATA_PROT_UNKNOWN; 2816 } 2817 2818 /** 2819 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile 2820 * @qc: command structure to be initialized 2821 * 2822 * Handles either 12, 16, or 32-byte versions of the CDB. 2823 * 2824 * RETURNS: 2825 * Zero on success, non-zero on failure. 2826 */ 2827 static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) 2828 { 2829 struct ata_taskfile *tf = &(qc->tf); 2830 struct scsi_cmnd *scmd = qc->scsicmd; 2831 struct ata_device *dev = qc->dev; 2832 const u8 *cdb = scmd->cmnd; 2833 u16 fp; 2834 u16 cdb_offset = 0; 2835 2836 /* 7Fh variable length cmd means a ata pass-thru(32) */ 2837 if (cdb[0] == VARIABLE_LENGTH_CMD) 2838 cdb_offset = 9; 2839 2840 tf->protocol = ata_scsi_map_proto(cdb[1 + cdb_offset]); 2841 if (tf->protocol == ATA_PROT_UNKNOWN) { 2842 fp = 1; 2843 goto invalid_fld; 2844 } 2845 2846 if ((cdb[2 + cdb_offset] & 0x3) == 0) { 2847 /* 2848 * When T_LENGTH is zero (No data is transferred), dir should 2849 * be DMA_NONE. 2850 */ 2851 if (scmd->sc_data_direction != DMA_NONE) { 2852 fp = 2 + cdb_offset; 2853 goto invalid_fld; 2854 } 2855 2856 if (ata_is_ncq(tf->protocol)) 2857 tf->protocol = ATA_PROT_NCQ_NODATA; 2858 } 2859 2860 /* enable LBA */ 2861 tf->flags |= ATA_TFLAG_LBA; 2862 2863 /* 2864 * 12 and 16 byte CDBs use different offsets to 2865 * provide the various register values. 2866 */ 2867 switch (cdb[0]) { 2868 case ATA_16: 2869 /* 2870 * 16-byte CDB - may contain extended commands. 2871 * 2872 * If that is the case, copy the upper byte register values. 2873 */ 2874 if (cdb[1] & 0x01) { 2875 tf->hob_feature = cdb[3]; 2876 tf->hob_nsect = cdb[5]; 2877 tf->hob_lbal = cdb[7]; 2878 tf->hob_lbam = cdb[9]; 2879 tf->hob_lbah = cdb[11]; 2880 tf->flags |= ATA_TFLAG_LBA48; 2881 } else 2882 tf->flags &= ~ATA_TFLAG_LBA48; 2883 2884 /* 2885 * Always copy low byte, device and command registers. 2886 */ 2887 tf->feature = cdb[4]; 2888 tf->nsect = cdb[6]; 2889 tf->lbal = cdb[8]; 2890 tf->lbam = cdb[10]; 2891 tf->lbah = cdb[12]; 2892 tf->device = cdb[13]; 2893 tf->command = cdb[14]; 2894 break; 2895 case ATA_12: 2896 /* 2897 * 12-byte CDB - incapable of extended commands. 2898 */ 2899 tf->flags &= ~ATA_TFLAG_LBA48; 2900 2901 tf->feature = cdb[3]; 2902 tf->nsect = cdb[4]; 2903 tf->lbal = cdb[5]; 2904 tf->lbam = cdb[6]; 2905 tf->lbah = cdb[7]; 2906 tf->device = cdb[8]; 2907 tf->command = cdb[9]; 2908 break; 2909 default: 2910 /* 2911 * 32-byte CDB - may contain extended command fields. 2912 * 2913 * If that is the case, copy the upper byte register values. 
2914 */ 2915 if (cdb[10] & 0x01) { 2916 tf->hob_feature = cdb[20]; 2917 tf->hob_nsect = cdb[22]; 2918 tf->hob_lbal = cdb[16]; 2919 tf->hob_lbam = cdb[15]; 2920 tf->hob_lbah = cdb[14]; 2921 tf->flags |= ATA_TFLAG_LBA48; 2922 } else 2923 tf->flags &= ~ATA_TFLAG_LBA48; 2924 2925 tf->feature = cdb[21]; 2926 tf->nsect = cdb[23]; 2927 tf->lbal = cdb[19]; 2928 tf->lbam = cdb[18]; 2929 tf->lbah = cdb[17]; 2930 tf->device = cdb[24]; 2931 tf->command = cdb[25]; 2932 tf->auxiliary = get_unaligned_be32(&cdb[28]); 2933 break; 2934 } 2935 2936 /* For NCQ commands copy the tag value */ 2937 if (ata_is_ncq(tf->protocol)) 2938 tf->nsect = qc->hw_tag << 3; 2939 2940 /* enforce correct master/slave bit */ 2941 tf->device = dev->devno ? 2942 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; 2943 2944 switch (tf->command) { 2945 /* READ/WRITE LONG use a non-standard sect_size */ 2946 case ATA_CMD_READ_LONG: 2947 case ATA_CMD_READ_LONG_ONCE: 2948 case ATA_CMD_WRITE_LONG: 2949 case ATA_CMD_WRITE_LONG_ONCE: 2950 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) { 2951 fp = 1; 2952 goto invalid_fld; 2953 } 2954 qc->sect_size = scsi_bufflen(scmd); 2955 break; 2956 2957 /* commands using reported Logical Block size (e.g. 512 or 4K) */ 2958 case ATA_CMD_CFA_WRITE_NE: 2959 case ATA_CMD_CFA_TRANS_SECT: 2960 case ATA_CMD_CFA_WRITE_MULT_NE: 2961 /* XXX: case ATA_CMD_CFA_WRITE_SECTORS_WITHOUT_ERASE: */ 2962 case ATA_CMD_READ: 2963 case ATA_CMD_READ_EXT: 2964 case ATA_CMD_READ_QUEUED: 2965 /* XXX: case ATA_CMD_READ_QUEUED_EXT: */ 2966 case ATA_CMD_FPDMA_READ: 2967 case ATA_CMD_READ_MULTI: 2968 case ATA_CMD_READ_MULTI_EXT: 2969 case ATA_CMD_PIO_READ: 2970 case ATA_CMD_PIO_READ_EXT: 2971 case ATA_CMD_READ_STREAM_DMA_EXT: 2972 case ATA_CMD_READ_STREAM_EXT: 2973 case ATA_CMD_VERIFY: 2974 case ATA_CMD_VERIFY_EXT: 2975 case ATA_CMD_WRITE: 2976 case ATA_CMD_WRITE_EXT: 2977 case ATA_CMD_WRITE_FUA_EXT: 2978 case ATA_CMD_WRITE_QUEUED: 2979 case ATA_CMD_WRITE_QUEUED_FUA_EXT: 2980 case ATA_CMD_FPDMA_WRITE: 2981 case ATA_CMD_WRITE_MULTI: 2982 case ATA_CMD_WRITE_MULTI_EXT: 2983 case ATA_CMD_WRITE_MULTI_FUA_EXT: 2984 case ATA_CMD_PIO_WRITE: 2985 case ATA_CMD_PIO_WRITE_EXT: 2986 case ATA_CMD_WRITE_STREAM_DMA_EXT: 2987 case ATA_CMD_WRITE_STREAM_EXT: 2988 qc->sect_size = scmd->device->sector_size; 2989 break; 2990 2991 /* Everything else uses 512 byte "sectors" */ 2992 default: 2993 qc->sect_size = ATA_SECT_SIZE; 2994 } 2995 2996 /* 2997 * Set flags so that all registers will be written, pass on 2998 * write indication (used for PIO/DMA setup), result TF is 2999 * copied back and we don't whine too much about its failure. 3000 */ 3001 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3002 if (scmd->sc_data_direction == DMA_TO_DEVICE) 3003 tf->flags |= ATA_TFLAG_WRITE; 3004 3005 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; 3006 3007 /* 3008 * Set transfer length. 3009 * 3010 * TODO: find out if we need to do more here to 3011 * cover scatter/gather case. 
3012 */ 3013 ata_qc_set_pc_nbytes(qc); 3014 3015 /* We may not issue DMA commands if no DMA mode is set */ 3016 if (tf->protocol == ATA_PROT_DMA && !ata_dma_enabled(dev)) { 3017 fp = 1; 3018 goto invalid_fld; 3019 } 3020 3021 /* We may not issue NCQ commands to devices not supporting NCQ */ 3022 if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) { 3023 fp = 1; 3024 goto invalid_fld; 3025 } 3026 3027 /* sanity check for pio multi commands */ 3028 if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) { 3029 fp = 1; 3030 goto invalid_fld; 3031 } 3032 3033 if (is_multi_taskfile(tf)) { 3034 unsigned int multi_count = 1 << (cdb[1] >> 5); 3035 3036 /* compare the passed through multi_count 3037 * with the cached multi_count of libata 3038 */ 3039 if (multi_count != dev->multi_count) 3040 ata_dev_warn(dev, "invalid multi_count %u ignored\n", 3041 multi_count); 3042 } 3043 3044 /* 3045 * Filter SET_FEATURES - XFER MODE command -- otherwise, 3046 * SET_FEATURES - XFER MODE must be preceded/succeeded 3047 * by an update to hardware-specific registers for each 3048 * controller (i.e. the reason for ->set_piomode(), 3049 * ->set_dmamode(), and ->post_set_mode() hooks). 3050 */ 3051 if (tf->command == ATA_CMD_SET_FEATURES && 3052 tf->feature == SETFEATURES_XFER) { 3053 fp = (cdb[0] == ATA_16) ? 4 : 3; 3054 goto invalid_fld; 3055 } 3056 3057 /* 3058 * Filter TPM commands by default. These provide an 3059 * essentially uncontrolled encrypted "back door" between 3060 * applications and the disk. Set libata.allow_tpm=1 if you 3061 * have a real reason for wanting to use them. This ensures 3062 * that installed software cannot easily mess stuff up without 3063 * user intent. DVR type users will probably ship with this enabled 3064 * for movie content management. 3065 * 3066 * Note that for ATA8 we can issue a DCS change and DCS freeze lock 3067 * for this and should do in future but that it is not sufficient as 3068 * DCS is an optional feature set. Thus we also do the software filter 3069 * so that we comply with the TC consortium stated goal that the user 3070 * can turn off TC features of their system. 3071 */ 3072 if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) { 3073 fp = (cdb[0] == ATA_16) ? 14 : 9; 3074 goto invalid_fld; 3075 } 3076 3077 return 0; 3078 3079 invalid_fld: 3080 ata_scsi_set_invalid_field(dev, scmd, fp, 0xff); 3081 return 1; 3082 } 3083 3084 /** 3085 * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim 3086 * @cmd: SCSI command being translated 3087 * @trmax: Maximum number of entries that will fit in sector_size bytes. 3088 * @sector: Starting sector 3089 * @count: Total Range of request in logical sectors 3090 * 3091 * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted 3092 * descriptor. 3093 * 3094 * Upto 64 entries of the format: 3095 * 63:48 Range Length 3096 * 47:0 LBA 3097 * 3098 * Range Length of 0 is ignored. 3099 * LBA's should be sorted order and not overlap. 3100 * 3101 * NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET 3102 * 3103 * Return: Number of bytes copied into sglist. 
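/*
 * Illustrative sketch (not part of the driver): an ATA PASS-THROUGH(16)
 * CDB that the translation above turns into a 1-sector READ DMA EXT of
 * LBA 0, using field values from the SAT/ATA specs:
 *
 *	u8 cdb[16] = { 0 };
 *	cdb[0]  = 0x85;			ATA_16
 *	cdb[1]  = (6 << 1) | 1;		DMA protocol, extend bit set
 *	cdb[2]  = 0x0e;			T_DIR=in, BYT_BLOK=1, T_LENGTH=2
 *	cdb[6]  = 1;			sector count 7:0 = 1
 *	cdb[14] = 0x25;			READ DMA EXT
 *
 * All remaining LBA and device register fields stay zero in this
 * example.
 */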
3104 */ 3105 static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax, 3106 u64 sector, u32 count) 3107 { 3108 struct scsi_device *sdp = cmd->device; 3109 size_t len = sdp->sector_size; 3110 size_t r; 3111 __le64 *buf; 3112 u32 i = 0; 3113 unsigned long flags; 3114 3115 WARN_ON(len > ATA_SCSI_RBUF_SIZE); 3116 3117 if (len > ATA_SCSI_RBUF_SIZE) 3118 len = ATA_SCSI_RBUF_SIZE; 3119 3120 spin_lock_irqsave(&ata_scsi_rbuf_lock, flags); 3121 buf = ((void *)ata_scsi_rbuf); 3122 memset(buf, 0, len); 3123 while (i < trmax) { 3124 u64 entry = sector | 3125 ((u64)(count > 0xffff ? 0xffff : count) << 48); 3126 buf[i++] = __cpu_to_le64(entry); 3127 if (count <= 0xffff) 3128 break; 3129 count -= 0xffff; 3130 sector += 0xffff; 3131 } 3132 r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len); 3133 spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags); 3134 3135 return r; 3136 } 3137 3138 /** 3139 * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same 3140 * @qc: Command to be translated 3141 * 3142 * Translate a SCSI WRITE SAME command to be either a DSM TRIM command or 3143 * an SCT Write Same command. 3144 * Based on WRITE SAME has the UNMAP flag: 3145 * 3146 * - When set translate to DSM TRIM 3147 * - When clear translate to SCT Write Same 3148 */ 3149 static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) 3150 { 3151 struct ata_taskfile *tf = &qc->tf; 3152 struct scsi_cmnd *scmd = qc->scsicmd; 3153 struct scsi_device *sdp = scmd->device; 3154 size_t len = sdp->sector_size; 3155 struct ata_device *dev = qc->dev; 3156 const u8 *cdb = scmd->cmnd; 3157 u64 block; 3158 u32 n_block; 3159 const u32 trmax = len >> 3; 3160 u32 size; 3161 u16 fp; 3162 u8 bp = 0xff; 3163 u8 unmap = cdb[1] & 0x8; 3164 3165 /* we may not issue DMA commands if no DMA mode is set */ 3166 if (unlikely(!ata_dma_enabled(dev))) 3167 goto invalid_opcode; 3168 3169 /* 3170 * We only allow sending this command through the block layer, 3171 * as it modifies the DATA OUT buffer, which would corrupt user 3172 * memory for SG_IO commands. 3173 */ 3174 if (unlikely(blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))) 3175 goto invalid_opcode; 3176 3177 if (unlikely(scmd->cmd_len < 16)) { 3178 fp = 15; 3179 goto invalid_fld; 3180 } 3181 scsi_16_lba_len(cdb, &block, &n_block); 3182 3183 if (!unmap || 3184 (dev->horkage & ATA_HORKAGE_NOTRIM) || 3185 !ata_id_has_trim(dev->id)) { 3186 fp = 1; 3187 bp = 3; 3188 goto invalid_fld; 3189 } 3190 /* If the request is too large the cmd is invalid */ 3191 if (n_block > 0xffff * trmax) { 3192 fp = 2; 3193 goto invalid_fld; 3194 } 3195 3196 /* 3197 * WRITE SAME always has a sector sized buffer as payload, this 3198 * should never be a multiple entry S/G list. 3199 */ 3200 if (!scsi_sg_count(scmd)) 3201 goto invalid_param_len; 3202 3203 /* 3204 * size must match sector size in bytes 3205 * For DATA SET MANAGEMENT TRIM in ACS-2 nsect (aka count) 3206 * is defined as number of 512 byte blocks to be transferred. 
3207 */ 3208 3209 size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block); 3210 if (size != len) 3211 goto invalid_param_len; 3212 3213 if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) { 3214 /* Newer devices support queued TRIM commands */ 3215 tf->protocol = ATA_PROT_NCQ; 3216 tf->command = ATA_CMD_FPDMA_SEND; 3217 tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; 3218 tf->nsect = qc->hw_tag << 3; 3219 tf->hob_feature = (size / 512) >> 8; 3220 tf->feature = size / 512; 3221 3222 tf->auxiliary = 1; 3223 } else { 3224 tf->protocol = ATA_PROT_DMA; 3225 tf->hob_feature = 0; 3226 tf->feature = ATA_DSM_TRIM; 3227 tf->hob_nsect = (size / 512) >> 8; 3228 tf->nsect = size / 512; 3229 tf->command = ATA_CMD_DSM; 3230 } 3231 3232 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | 3233 ATA_TFLAG_WRITE; 3234 3235 ata_qc_set_pc_nbytes(qc); 3236 3237 return 0; 3238 3239 invalid_fld: 3240 ata_scsi_set_invalid_field(dev, scmd, fp, bp); 3241 return 1; 3242 invalid_param_len: 3243 /* "Parameter list length error" */ 3244 ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); 3245 return 1; 3246 invalid_opcode: 3247 /* "Invalid command operation code" */ 3248 ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x20, 0x0); 3249 return 1; 3250 } 3251 3252 /** 3253 * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN 3254 * @args: device MAINTENANCE_IN data / SCSI command of interest. 3255 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 3256 * 3257 * Yields a subset to satisfy scsi_report_opcode() 3258 * 3259 * LOCKING: 3260 * spin_lock_irqsave(host lock) 3261 */ 3262 static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf) 3263 { 3264 struct ata_device *dev = args->dev; 3265 u8 *cdb = args->cmd->cmnd; 3266 u8 supported = 0; 3267 unsigned int err = 0; 3268 3269 if (cdb[2] != 1) { 3270 ata_dev_warn(dev, "invalid command format %d\n", cdb[2]); 3271 err = 2; 3272 goto out; 3273 } 3274 switch (cdb[3]) { 3275 case INQUIRY: 3276 case MODE_SENSE: 3277 case MODE_SENSE_10: 3278 case READ_CAPACITY: 3279 case SERVICE_ACTION_IN_16: 3280 case REPORT_LUNS: 3281 case REQUEST_SENSE: 3282 case SYNCHRONIZE_CACHE: 3283 case SYNCHRONIZE_CACHE_16: 3284 case REZERO_UNIT: 3285 case SEEK_6: 3286 case SEEK_10: 3287 case TEST_UNIT_READY: 3288 case SEND_DIAGNOSTIC: 3289 case MAINTENANCE_IN: 3290 case READ_6: 3291 case READ_10: 3292 case READ_16: 3293 case WRITE_6: 3294 case WRITE_10: 3295 case WRITE_16: 3296 case ATA_12: 3297 case ATA_16: 3298 case VERIFY: 3299 case VERIFY_16: 3300 case MODE_SELECT: 3301 case MODE_SELECT_10: 3302 case START_STOP: 3303 supported = 3; 3304 break; 3305 case ZBC_IN: 3306 case ZBC_OUT: 3307 if (ata_id_zoned_cap(dev->id) || 3308 dev->class == ATA_DEV_ZAC) 3309 supported = 3; 3310 break; 3311 case SECURITY_PROTOCOL_IN: 3312 case SECURITY_PROTOCOL_OUT: 3313 if (dev->flags & ATA_DFLAG_TRUSTED) 3314 supported = 3; 3315 break; 3316 default: 3317 break; 3318 } 3319 out: 3320 rbuf[1] = supported; /* supported */ 3321 return err; 3322 } 3323 3324 /** 3325 * ata_scsi_report_zones_complete - convert ATA output 3326 * @qc: command structure returning the data 3327 * 3328 * Convert T-13 little-endian field representation into 3329 * T-10 big-endian field representation. 3330 * What a mess. 
3331 */ 3332 static void ata_scsi_report_zones_complete(struct ata_queued_cmd *qc) 3333 { 3334 struct scsi_cmnd *scmd = qc->scsicmd; 3335 struct sg_mapping_iter miter; 3336 unsigned long flags; 3337 unsigned int bytes = 0; 3338 3339 sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd), 3340 SG_MITER_TO_SG | SG_MITER_ATOMIC); 3341 3342 local_irq_save(flags); 3343 while (sg_miter_next(&miter)) { 3344 unsigned int offset = 0; 3345 3346 if (bytes == 0) { 3347 char *hdr; 3348 u32 list_length; 3349 u64 max_lba, opt_lba; 3350 u16 same; 3351 3352 /* Swizzle header */ 3353 hdr = miter.addr; 3354 list_length = get_unaligned_le32(&hdr[0]); 3355 same = get_unaligned_le16(&hdr[4]); 3356 max_lba = get_unaligned_le64(&hdr[8]); 3357 opt_lba = get_unaligned_le64(&hdr[16]); 3358 put_unaligned_be32(list_length, &hdr[0]); 3359 hdr[4] = same & 0xf; 3360 put_unaligned_be64(max_lba, &hdr[8]); 3361 put_unaligned_be64(opt_lba, &hdr[16]); 3362 offset += 64; 3363 bytes += 64; 3364 } 3365 while (offset < miter.length) { 3366 char *rec; 3367 u8 cond, type, non_seq, reset; 3368 u64 size, start, wp; 3369 3370 /* Swizzle zone descriptor */ 3371 rec = miter.addr + offset; 3372 type = rec[0] & 0xf; 3373 cond = (rec[1] >> 4) & 0xf; 3374 non_seq = (rec[1] & 2); 3375 reset = (rec[1] & 1); 3376 size = get_unaligned_le64(&rec[8]); 3377 start = get_unaligned_le64(&rec[16]); 3378 wp = get_unaligned_le64(&rec[24]); 3379 rec[0] = type; 3380 rec[1] = (cond << 4) | non_seq | reset; 3381 put_unaligned_be64(size, &rec[8]); 3382 put_unaligned_be64(start, &rec[16]); 3383 put_unaligned_be64(wp, &rec[24]); 3384 WARN_ON(offset + 64 > miter.length); 3385 offset += 64; 3386 bytes += 64; 3387 } 3388 } 3389 sg_miter_stop(&miter); 3390 local_irq_restore(flags); 3391 3392 ata_scsi_qc_complete(qc); 3393 } 3394 3395 static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc) 3396 { 3397 struct ata_taskfile *tf = &qc->tf; 3398 struct scsi_cmnd *scmd = qc->scsicmd; 3399 const u8 *cdb = scmd->cmnd; 3400 u16 sect, fp = (u16)-1; 3401 u8 sa, options, bp = 0xff; 3402 u64 block; 3403 u32 n_block; 3404 3405 if (unlikely(scmd->cmd_len < 16)) { 3406 ata_dev_warn(qc->dev, "invalid cdb length %d\n", 3407 scmd->cmd_len); 3408 fp = 15; 3409 goto invalid_fld; 3410 } 3411 scsi_16_lba_len(cdb, &block, &n_block); 3412 if (n_block != scsi_bufflen(scmd)) { 3413 ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n", 3414 n_block, scsi_bufflen(scmd)); 3415 goto invalid_param_len; 3416 } 3417 sa = cdb[1] & 0x1f; 3418 if (sa != ZI_REPORT_ZONES) { 3419 ata_dev_warn(qc->dev, "invalid service action %d\n", sa); 3420 fp = 1; 3421 goto invalid_fld; 3422 } 3423 /* 3424 * ZAC allows only for transfers in 512 byte blocks, 3425 * and uses a 16 bit value for the transfer count. 
3426 */ 3427 if ((n_block / 512) > 0xffff || n_block < 512 || (n_block % 512)) { 3428 ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block); 3429 goto invalid_param_len; 3430 } 3431 sect = n_block / 512; 3432 options = cdb[14] & 0xbf; 3433 3434 if (ata_ncq_enabled(qc->dev) && 3435 ata_fpdma_zac_mgmt_in_supported(qc->dev)) { 3436 tf->protocol = ATA_PROT_NCQ; 3437 tf->command = ATA_CMD_FPDMA_RECV; 3438 tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f; 3439 tf->nsect = qc->hw_tag << 3; 3440 tf->feature = sect & 0xff; 3441 tf->hob_feature = (sect >> 8) & 0xff; 3442 tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8); 3443 } else { 3444 tf->command = ATA_CMD_ZAC_MGMT_IN; 3445 tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES; 3446 tf->protocol = ATA_PROT_DMA; 3447 tf->hob_feature = options; 3448 tf->hob_nsect = (sect >> 8) & 0xff; 3449 tf->nsect = sect & 0xff; 3450 } 3451 tf->device = ATA_LBA; 3452 tf->lbah = (block >> 16) & 0xff; 3453 tf->lbam = (block >> 8) & 0xff; 3454 tf->lbal = block & 0xff; 3455 tf->hob_lbah = (block >> 40) & 0xff; 3456 tf->hob_lbam = (block >> 32) & 0xff; 3457 tf->hob_lbal = (block >> 24) & 0xff; 3458 3459 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; 3460 qc->flags |= ATA_QCFLAG_RESULT_TF; 3461 3462 ata_qc_set_pc_nbytes(qc); 3463 3464 qc->complete_fn = ata_scsi_report_zones_complete; 3465 3466 return 0; 3467 3468 invalid_fld: 3469 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); 3470 return 1; 3471 3472 invalid_param_len: 3473 /* "Parameter list length error" */ 3474 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); 3475 return 1; 3476 } 3477 3478 static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) 3479 { 3480 struct ata_taskfile *tf = &qc->tf; 3481 struct scsi_cmnd *scmd = qc->scsicmd; 3482 struct ata_device *dev = qc->dev; 3483 const u8 *cdb = scmd->cmnd; 3484 u8 all, sa; 3485 u64 block; 3486 u32 n_block; 3487 u16 fp = (u16)-1; 3488 3489 if (unlikely(scmd->cmd_len < 16)) { 3490 fp = 15; 3491 goto invalid_fld; 3492 } 3493 3494 sa = cdb[1] & 0x1f; 3495 if ((sa != ZO_CLOSE_ZONE) && (sa != ZO_FINISH_ZONE) && 3496 (sa != ZO_OPEN_ZONE) && (sa != ZO_RESET_WRITE_POINTER)) { 3497 fp = 1; 3498 goto invalid_fld; 3499 } 3500 3501 scsi_16_lba_len(cdb, &block, &n_block); 3502 if (n_block) { 3503 /* 3504 * ZAC MANAGEMENT OUT doesn't define any length 3505 */ 3506 goto invalid_param_len; 3507 } 3508 3509 all = cdb[14] & 0x1; 3510 if (all) { 3511 /* 3512 * Ignore the block address (zone ID) as defined by ZBC. 3513 */ 3514 block = 0; 3515 } else if (block >= dev->n_sectors) { 3516 /* 3517 * Block must be a valid zone ID (a zone start LBA). 
3518 */ 3519 fp = 2; 3520 goto invalid_fld; 3521 } 3522 3523 if (ata_ncq_enabled(qc->dev) && 3524 ata_fpdma_zac_mgmt_out_supported(qc->dev)) { 3525 tf->protocol = ATA_PROT_NCQ_NODATA; 3526 tf->command = ATA_CMD_NCQ_NON_DATA; 3527 tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT; 3528 tf->nsect = qc->hw_tag << 3; 3529 tf->auxiliary = sa | ((u16)all << 8); 3530 } else { 3531 tf->protocol = ATA_PROT_NODATA; 3532 tf->command = ATA_CMD_ZAC_MGMT_OUT; 3533 tf->feature = sa; 3534 tf->hob_feature = all; 3535 } 3536 tf->lbah = (block >> 16) & 0xff; 3537 tf->lbam = (block >> 8) & 0xff; 3538 tf->lbal = block & 0xff; 3539 tf->hob_lbah = (block >> 40) & 0xff; 3540 tf->hob_lbam = (block >> 32) & 0xff; 3541 tf->hob_lbal = (block >> 24) & 0xff; 3542 tf->device = ATA_LBA; 3543 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; 3544 3545 return 0; 3546 3547 invalid_fld: 3548 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); 3549 return 1; 3550 invalid_param_len: 3551 /* "Parameter list length error" */ 3552 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); 3553 return 1; 3554 } 3555 3556 /** 3557 * ata_mselect_caching - Simulate MODE SELECT for caching info page 3558 * @qc: Storage for translated ATA taskfile 3559 * @buf: input buffer 3560 * @len: number of valid bytes in the input buffer 3561 * @fp: out parameter for the failed field on error 3562 * 3563 * Prepare a taskfile to modify caching information for the device. 3564 * 3565 * LOCKING: 3566 * None. 3567 */ 3568 static int ata_mselect_caching(struct ata_queued_cmd *qc, 3569 const u8 *buf, int len, u16 *fp) 3570 { 3571 struct ata_taskfile *tf = &qc->tf; 3572 struct ata_device *dev = qc->dev; 3573 u8 mpage[CACHE_MPAGE_LEN]; 3574 u8 wce; 3575 int i; 3576 3577 /* 3578 * The first two bytes of def_cache_mpage are a header, so offsets 3579 * in mpage are off by 2 compared to buf. Same for len. 3580 */ 3581 3582 if (len != CACHE_MPAGE_LEN - 2) { 3583 *fp = min(len, CACHE_MPAGE_LEN - 2); 3584 return -EINVAL; 3585 } 3586 3587 wce = buf[0] & (1 << 2); 3588 3589 /* 3590 * Check that read-only bits are not modified. 3591 */ 3592 ata_msense_caching(dev->id, mpage, false); 3593 for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) { 3594 if (i == 0) 3595 continue; 3596 if (mpage[i + 2] != buf[i]) { 3597 *fp = i; 3598 return -EINVAL; 3599 } 3600 } 3601 3602 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 3603 tf->protocol = ATA_PROT_NODATA; 3604 tf->nsect = 0; 3605 tf->command = ATA_CMD_SET_FEATURES; 3606 tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF; 3607 return 0; 3608 } 3609 3610 /** 3611 * ata_mselect_control - Simulate MODE SELECT for control page 3612 * @qc: Storage for translated ATA taskfile 3613 * @buf: input buffer 3614 * @len: number of valid bytes in the input buffer 3615 * @fp: out parameter for the failed field on error 3616 * 3617 * Prepare a taskfile to modify caching information for the device. 3618 * 3619 * LOCKING: 3620 * None. 3621 */ 3622 static int ata_mselect_control(struct ata_queued_cmd *qc, 3623 const u8 *buf, int len, u16 *fp) 3624 { 3625 struct ata_device *dev = qc->dev; 3626 u8 mpage[CONTROL_MPAGE_LEN]; 3627 u8 d_sense; 3628 int i; 3629 3630 /* 3631 * The first two bytes of def_control_mpage are a header, so offsets 3632 * in mpage are off by 2 compared to buf. Same for len. 3633 */ 3634 3635 if (len != CONTROL_MPAGE_LEN - 2) { 3636 *fp = min(len, CONTROL_MPAGE_LEN - 2); 3637 return -EINVAL; 3638 } 3639 3640 d_sense = buf[0] & (1 << 2); 3641 3642 /* 3643 * Check that read-only bits are not modified. 
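/*
 * Illustrative sketch (not part of the driver): the page payload user
 * space sends with MODE SELECT to enable write caching is the 18
 * parameter bytes of the caching page (page header stripped), e.g.
 *
 *	buf[0] = 0x04		WCE bit (bit 2) set
 *	buf[1..17]		must echo the current values reported by
 *				MODE SENSE, since read-only bits are checked
 *
 * which the function above translates into SET FEATURES with
 * SETFEATURES_WC_ON.
 */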
3644 */ 3645 ata_msense_control(dev, mpage, false); 3646 for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) { 3647 if (i == 0) 3648 continue; 3649 if (mpage[2 + i] != buf[i]) { 3650 *fp = i; 3651 return -EINVAL; 3652 } 3653 } 3654 if (d_sense & (1 << 2)) 3655 dev->flags |= ATA_DFLAG_D_SENSE; 3656 else 3657 dev->flags &= ~ATA_DFLAG_D_SENSE; 3658 return 0; 3659 } 3660 3661 /** 3662 * ata_scsi_mode_select_xlat - Simulate MODE SELECT 6, 10 commands 3663 * @qc: Storage for translated ATA taskfile 3664 * 3665 * Converts a MODE SELECT command to an ATA SET FEATURES taskfile. 3666 * Assume this is invoked for direct access devices (e.g. disks) only. 3667 * There should be no block descriptor for other device types. 3668 * 3669 * LOCKING: 3670 * spin_lock_irqsave(host lock) 3671 */ 3672 static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) 3673 { 3674 struct scsi_cmnd *scmd = qc->scsicmd; 3675 const u8 *cdb = scmd->cmnd; 3676 u8 pg, spg; 3677 unsigned six_byte, pg_len, hdr_len, bd_len; 3678 int len; 3679 u16 fp = (u16)-1; 3680 u8 bp = 0xff; 3681 u8 buffer[64]; 3682 const u8 *p = buffer; 3683 3684 six_byte = (cdb[0] == MODE_SELECT); 3685 if (six_byte) { 3686 if (scmd->cmd_len < 5) { 3687 fp = 4; 3688 goto invalid_fld; 3689 } 3690 3691 len = cdb[4]; 3692 hdr_len = 4; 3693 } else { 3694 if (scmd->cmd_len < 9) { 3695 fp = 8; 3696 goto invalid_fld; 3697 } 3698 3699 len = get_unaligned_be16(&cdb[7]); 3700 hdr_len = 8; 3701 } 3702 3703 /* We only support PF=1, SP=0. */ 3704 if ((cdb[1] & 0x11) != 0x10) { 3705 fp = 1; 3706 bp = (cdb[1] & 0x01) ? 1 : 5; 3707 goto invalid_fld; 3708 } 3709 3710 /* Test early for possible overrun. */ 3711 if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) 3712 goto invalid_param_len; 3713 3714 /* Move past header and block descriptors. */ 3715 if (len < hdr_len) 3716 goto invalid_param_len; 3717 3718 if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd), 3719 buffer, sizeof(buffer))) 3720 goto invalid_param_len; 3721 3722 if (six_byte) 3723 bd_len = p[3]; 3724 else 3725 bd_len = get_unaligned_be16(&p[6]); 3726 3727 len -= hdr_len; 3728 p += hdr_len; 3729 if (len < bd_len) 3730 goto invalid_param_len; 3731 if (bd_len != 0 && bd_len != 8) { 3732 fp = (six_byte) ? 3 : 6; 3733 fp += bd_len + hdr_len; 3734 goto invalid_param; 3735 } 3736 3737 len -= bd_len; 3738 p += bd_len; 3739 if (len == 0) 3740 goto skip; 3741 3742 /* Parse both possible formats for the mode page headers. */ 3743 pg = p[0] & 0x3f; 3744 if (p[0] & 0x40) { 3745 if (len < 4) 3746 goto invalid_param_len; 3747 3748 spg = p[1]; 3749 pg_len = get_unaligned_be16(&p[2]); 3750 p += 4; 3751 len -= 4; 3752 } else { 3753 if (len < 2) 3754 goto invalid_param_len; 3755 3756 spg = 0; 3757 pg_len = p[1]; 3758 p += 2; 3759 len -= 2; 3760 } 3761 3762 /* 3763 * No mode subpages supported (yet) but asking for _all_ 3764 * subpages may be valid 3765 */ 3766 if (spg && (spg != ALL_SUB_MPAGES)) { 3767 fp = (p[0] & 0x40) ? 
1 : 0; 3768 fp += hdr_len + bd_len; 3769 goto invalid_param; 3770 } 3771 if (pg_len > len) 3772 goto invalid_param_len; 3773 3774 switch (pg) { 3775 case CACHE_MPAGE: 3776 if (ata_mselect_caching(qc, p, pg_len, &fp) < 0) { 3777 fp += hdr_len + bd_len; 3778 goto invalid_param; 3779 } 3780 break; 3781 case CONTROL_MPAGE: 3782 if (ata_mselect_control(qc, p, pg_len, &fp) < 0) { 3783 fp += hdr_len + bd_len; 3784 goto invalid_param; 3785 } else { 3786 goto skip; /* No ATA command to send */ 3787 } 3788 break; 3789 default: /* invalid page code */ 3790 fp = bd_len + hdr_len; 3791 goto invalid_param; 3792 } 3793 3794 /* 3795 * Only one page has changeable data, so we only support setting one 3796 * page at a time. 3797 */ 3798 if (len > pg_len) 3799 goto invalid_param; 3800 3801 return 0; 3802 3803 invalid_fld: 3804 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); 3805 return 1; 3806 3807 invalid_param: 3808 ata_scsi_set_invalid_parameter(qc->dev, scmd, fp); 3809 return 1; 3810 3811 invalid_param_len: 3812 /* "Parameter list length error" */ 3813 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); 3814 return 1; 3815 3816 skip: 3817 scmd->result = SAM_STAT_GOOD; 3818 return 1; 3819 } 3820 3821 static u8 ata_scsi_trusted_op(u32 len, bool send, bool dma) 3822 { 3823 if (len == 0) 3824 return ATA_CMD_TRUSTED_NONDATA; 3825 else if (send) 3826 return dma ? ATA_CMD_TRUSTED_SND_DMA : ATA_CMD_TRUSTED_SND; 3827 else 3828 return dma ? ATA_CMD_TRUSTED_RCV_DMA : ATA_CMD_TRUSTED_RCV; 3829 } 3830 3831 static unsigned int ata_scsi_security_inout_xlat(struct ata_queued_cmd *qc) 3832 { 3833 struct scsi_cmnd *scmd = qc->scsicmd; 3834 const u8 *cdb = scmd->cmnd; 3835 struct ata_taskfile *tf = &qc->tf; 3836 u8 secp = cdb[1]; 3837 bool send = (cdb[0] == SECURITY_PROTOCOL_OUT); 3838 u16 spsp = get_unaligned_be16(&cdb[2]); 3839 u32 len = get_unaligned_be32(&cdb[6]); 3840 bool dma = !(qc->dev->flags & ATA_DFLAG_PIO); 3841 3842 /* 3843 * We don't support the ATA "security" protocol. 3844 */ 3845 if (secp == 0xef) { 3846 ata_scsi_set_invalid_field(qc->dev, scmd, 1, 0); 3847 return 1; 3848 } 3849 3850 if (cdb[4] & 7) { /* INC_512 */ 3851 if (len > 0xffff) { 3852 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); 3853 return 1; 3854 } 3855 } else { 3856 if (len > 0x01fffe00) { 3857 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); 3858 return 1; 3859 } 3860 3861 /* convert to the sector-based ATA addressing */ 3862 len = (len + 511) / 512; 3863 } 3864 3865 tf->protocol = dma ? ATA_PROT_DMA : ATA_PROT_PIO; 3866 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR | ATA_TFLAG_LBA; 3867 if (send) 3868 tf->flags |= ATA_TFLAG_WRITE; 3869 tf->command = ata_scsi_trusted_op(len, send, dma); 3870 tf->feature = secp; 3871 tf->lbam = spsp & 0xff; 3872 tf->lbah = spsp >> 8; 3873 3874 if (len) { 3875 tf->nsect = len & 0xff; 3876 tf->lbal = len >> 8; 3877 } else { 3878 if (!send) 3879 tf->lbah = (1 << 7); 3880 } 3881 3882 ata_qc_set_pc_nbytes(qc); 3883 return 0; 3884 } 3885 3886 /** 3887 * ata_scsi_var_len_cdb_xlat - SATL variable length CDB to Handler 3888 * @qc: Command to be translated 3889 * 3890 * Translate a SCSI variable length CDB to specified commands. 3891 * It checks a service action value in CDB to call corresponding handler. 
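/*
 * Illustrative arithmetic (not part of the driver): with INC_512 clear,
 * a SECURITY PROTOCOL IN transfer length of 1025 bytes is rounded up to
 * whole 512-byte units above:
 *
 *	len = (1025 + 511) / 512 = 3 sectors
 *
 * whereas with INC_512 set the CDB length field already counts 512-byte
 * blocks and is only range-checked against 0xffff.  A length of zero
 * selects the TRUSTED NON-DATA command in ata_scsi_trusted_op().
 */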
3892 * 3893 * RETURNS: 3894 * Zero on success, non-zero on failure 3895 * 3896 */ 3897 static unsigned int ata_scsi_var_len_cdb_xlat(struct ata_queued_cmd *qc) 3898 { 3899 struct scsi_cmnd *scmd = qc->scsicmd; 3900 const u8 *cdb = scmd->cmnd; 3901 const u16 sa = get_unaligned_be16(&cdb[8]); 3902 3903 /* 3904 * if service action represents a ata pass-thru(32) command, 3905 * then pass it to ata_scsi_pass_thru handler. 3906 */ 3907 if (sa == ATA_32) 3908 return ata_scsi_pass_thru(qc); 3909 3910 /* unsupported service action */ 3911 return 1; 3912 } 3913 3914 /** 3915 * ata_get_xlat_func - check if SCSI to ATA translation is possible 3916 * @dev: ATA device 3917 * @cmd: SCSI command opcode to consider 3918 * 3919 * Look up the SCSI command given, and determine whether the 3920 * SCSI command is to be translated or simulated. 3921 * 3922 * RETURNS: 3923 * Pointer to translation function if possible, %NULL if not. 3924 */ 3925 3926 static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) 3927 { 3928 switch (cmd) { 3929 case READ_6: 3930 case READ_10: 3931 case READ_16: 3932 3933 case WRITE_6: 3934 case WRITE_10: 3935 case WRITE_16: 3936 return ata_scsi_rw_xlat; 3937 3938 case WRITE_SAME_16: 3939 return ata_scsi_write_same_xlat; 3940 3941 case SYNCHRONIZE_CACHE: 3942 case SYNCHRONIZE_CACHE_16: 3943 if (ata_try_flush_cache(dev)) 3944 return ata_scsi_flush_xlat; 3945 break; 3946 3947 case VERIFY: 3948 case VERIFY_16: 3949 return ata_scsi_verify_xlat; 3950 3951 case ATA_12: 3952 case ATA_16: 3953 return ata_scsi_pass_thru; 3954 3955 case VARIABLE_LENGTH_CMD: 3956 return ata_scsi_var_len_cdb_xlat; 3957 3958 case MODE_SELECT: 3959 case MODE_SELECT_10: 3960 return ata_scsi_mode_select_xlat; 3961 3962 case ZBC_IN: 3963 return ata_scsi_zbc_in_xlat; 3964 3965 case ZBC_OUT: 3966 return ata_scsi_zbc_out_xlat; 3967 3968 case SECURITY_PROTOCOL_IN: 3969 case SECURITY_PROTOCOL_OUT: 3970 if (!(dev->flags & ATA_DFLAG_TRUSTED)) 3971 break; 3972 return ata_scsi_security_inout_xlat; 3973 3974 case START_STOP: 3975 return ata_scsi_start_stop_xlat; 3976 } 3977 3978 return NULL; 3979 } 3980 3981 int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev) 3982 { 3983 struct ata_port *ap = dev->link->ap; 3984 u8 scsi_op = scmd->cmnd[0]; 3985 ata_xlat_func_t xlat_func; 3986 3987 /* 3988 * scsi_queue_rq() will defer commands if scsi_host_in_recovery(). 3989 * However, this check is done without holding the ap->lock (a libata 3990 * specific lock), so we can have received an error irq since then, 3991 * therefore we must check if EH is pending, while holding ap->lock. 
3992 */ 3993 if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) 3994 return SCSI_MLQUEUE_DEVICE_BUSY; 3995 3996 if (unlikely(!scmd->cmd_len)) 3997 goto bad_cdb_len; 3998 3999 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { 4000 if (unlikely(scmd->cmd_len > dev->cdb_len)) 4001 goto bad_cdb_len; 4002 4003 xlat_func = ata_get_xlat_func(dev, scsi_op); 4004 } else if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { 4005 /* relay SCSI command to ATAPI device */ 4006 int len = COMMAND_SIZE(scsi_op); 4007 4008 if (unlikely(len > scmd->cmd_len || 4009 len > dev->cdb_len || 4010 scmd->cmd_len > ATAPI_CDB_LEN)) 4011 goto bad_cdb_len; 4012 4013 xlat_func = atapi_xlat; 4014 } else { 4015 /* ATA_16 passthru, treat as an ATA command */ 4016 if (unlikely(scmd->cmd_len > 16)) 4017 goto bad_cdb_len; 4018 4019 xlat_func = ata_get_xlat_func(dev, scsi_op); 4020 } 4021 4022 if (xlat_func) 4023 return ata_scsi_translate(dev, scmd, xlat_func); 4024 4025 ata_scsi_simulate(dev, scmd); 4026 4027 return 0; 4028 4029 bad_cdb_len: 4030 scmd->result = DID_ERROR << 16; 4031 scsi_done(scmd); 4032 return 0; 4033 } 4034 4035 /** 4036 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device 4037 * @shost: SCSI host of command to be sent 4038 * @cmd: SCSI command to be sent 4039 * 4040 * In some cases, this function translates SCSI commands into 4041 * ATA taskfiles, and queues the taskfiles to be sent to 4042 * hardware. In other cases, this function simulates a 4043 * SCSI device by evaluating and responding to certain 4044 * SCSI commands. This creates the overall effect of 4045 * ATA and ATAPI devices appearing as SCSI devices. 4046 * 4047 * LOCKING: 4048 * ATA host lock 4049 * 4050 * RETURNS: 4051 * Return value from __ata_scsi_queuecmd() if @cmd can be queued, 4052 * 0 otherwise. 4053 */ 4054 int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) 4055 { 4056 struct ata_port *ap; 4057 struct ata_device *dev; 4058 struct scsi_device *scsidev = cmd->device; 4059 int rc = 0; 4060 unsigned long irq_flags; 4061 4062 ap = ata_shost_to_port(shost); 4063 4064 spin_lock_irqsave(ap->lock, irq_flags); 4065 4066 dev = ata_scsi_find_dev(ap, scsidev); 4067 if (likely(dev)) 4068 rc = __ata_scsi_queuecmd(cmd, dev); 4069 else { 4070 cmd->result = (DID_BAD_TARGET << 16); 4071 scsi_done(cmd); 4072 } 4073 4074 spin_unlock_irqrestore(ap->lock, irq_flags); 4075 4076 return rc; 4077 } 4078 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 4079 4080 /** 4081 * ata_scsi_simulate - simulate SCSI command on ATA device 4082 * @dev: the target device 4083 * @cmd: SCSI command being sent to device. 4084 * 4085 * Interprets and directly executes a select list of SCSI commands 4086 * that can be handled internally. 4087 * 4088 * LOCKING: 4089 * spin_lock_irqsave(host lock) 4090 */ 4091 4092 void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) 4093 { 4094 struct ata_scsi_args args; 4095 const u8 *scsicmd = cmd->cmnd; 4096 u8 tmp8; 4097 4098 args.dev = dev; 4099 args.id = dev->id; 4100 args.cmd = cmd; 4101 4102 switch(scsicmd[0]) { 4103 case INQUIRY: 4104 if (scsicmd[1] & 2) /* is CmdDt set? */ 4105 ata_scsi_set_invalid_field(dev, cmd, 1, 0xff); 4106 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? 
                else switch (scsicmd[2]) {
                case 0x00:
                        ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
                        break;
                case 0x80:
                        ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
                        break;
                case 0x83:
                        ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
                        break;
                case 0x89:
                        ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
                        break;
                case 0xb0:
                        ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0);
                        break;
                case 0xb1:
                        ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
                        break;
                case 0xb2:
                        ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
                        break;
                case 0xb6:
                        if (dev->flags & ATA_DFLAG_ZAC)
                                ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
                        else
                                ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
                        break;
                case 0xb9:
                        if (dev->cpr_log)
                                ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9);
                        else
                                ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
                        break;
                default:
                        ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
                        break;
                }
                break;

        case MODE_SENSE:
        case MODE_SENSE_10:
                ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
                break;

        case READ_CAPACITY:
                ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
                break;

        case SERVICE_ACTION_IN_16:
                if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
                        ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
                else
                        ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
                break;

        case REPORT_LUNS:
                ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
                break;

        case REQUEST_SENSE:
                ata_scsi_set_sense(dev, cmd, 0, 0, 0);
                break;

        /* If we reach this, then writeback caching is disabled,
         * turning this into a no-op.
         */
        case SYNCHRONIZE_CACHE:
        case SYNCHRONIZE_CACHE_16:
                fallthrough;

        /* no-ops, complete with success */
        case REZERO_UNIT:
        case SEEK_6:
        case SEEK_10:
        case TEST_UNIT_READY:
                break;

        case SEND_DIAGNOSTIC:
                tmp8 = scsicmd[1] & ~(1 << 3);
                if (tmp8 != 0x4 || scsicmd[3] || scsicmd[4])
                        ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
                break;

        case MAINTENANCE_IN:
                if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
                        ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
                else
                        ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
                break;

        /* all other commands */
        default:
                ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);
                /* "Invalid command operation code" */
                break;
        }

        scsi_done(cmd);
}
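
/**
 * ata_scsi_add_hosts - allocate and register a SCSI host per ATA port
 * @host: ATA host to attach SCSI hosts to
 * @sht: SCSI host template to use for the new hosts
 *
 * Allocate one Scsi_Host for each port of @host and register it
 * with the SCSI midlayer.  On failure, any hosts registered so far
 * are removed again.
 *
 * RETURNS:
 * Zero on success, negative errno otherwise.
 */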
int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
{
        int i, rc;

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                struct Scsi_Host *shost;

                rc = -ENOMEM;
                shost = scsi_host_alloc(sht, sizeof(struct ata_port *));
                if (!shost)
                        goto err_alloc;

                shost->eh_noresume = 1;
                *(struct ata_port **)&shost->hostdata[0] = ap;
                ap->scsi_host = shost;

                shost->transportt = ata_scsi_transport_template;
                shost->unique_id = ap->print_id;
                shost->max_id = 16;
                shost->max_lun = 1;
                shost->max_channel = 1;
                shost->max_cmd_len = 32;

                /* Schedule policy is determined by ->qc_defer()
                 * callback and it needs to see every deferred qc.
                 * Set host_blocked to 1 to prevent SCSI midlayer from
                 * automatically deferring requests.
                 */
                shost->max_host_blocked = 1;

                rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev);
                if (rc)
                        goto err_alloc;
        }

        return 0;

err_alloc:
        while (--i >= 0) {
                struct Scsi_Host *shost = host->ports[i]->scsi_host;

                /* scsi_host_put() is in ata_devres_release() */
                scsi_remove_host(shost);
        }
        return rc;
}

#ifdef CONFIG_OF
static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap)
{
        struct scsi_device *sdev = dev->sdev;
        struct device *d = ap->host->dev;
        struct device_node *np = d->of_node;
        struct device_node *child;

        for_each_available_child_of_node(np, child) {
                int ret;
                u32 val;

                ret = of_property_read_u32(child, "reg", &val);
                if (ret)
                        continue;
                if (val == dev->devno) {
                        dev_dbg(d, "found matching device node\n");
                        sdev->sdev_gendev.of_node = child;
                        return;
                }
        }
}
#else
static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap)
{
}
#endif
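
/**
 * ata_scsi_scan_host - probe SCSI devices for enabled ATA devices
 * @ap: ATA port to scan
 * @sync: if set, retry synchronously until the scan makes progress
 *
 * Attach a SCSI device to each enabled ATA device on @ap that does
 * not have one yet.  If some devices could not be attached (for
 * example because EH was in progress or an allocation failed),
 * retry a few times when @sync is set, and otherwise reschedule
 * the scan via the hotplug work.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */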
void ata_scsi_scan_host(struct ata_port *ap, int sync)
{
        int tries = 5;
        struct ata_device *last_failed_dev = NULL;
        struct ata_link *link;
        struct ata_device *dev;

repeat:
        ata_for_each_link(link, ap, EDGE) {
                ata_for_each_dev(dev, link, ENABLED) {
                        struct scsi_device *sdev;
                        int channel = 0, id = 0;

                        if (dev->sdev)
                                continue;

                        if (ata_is_host_link(link))
                                id = dev->devno;
                        else
                                channel = link->pmp;

                        sdev = __scsi_add_device(ap->scsi_host, channel, id, 0,
                                                 NULL);
                        if (!IS_ERR(sdev)) {
                                dev->sdev = sdev;
                                ata_scsi_assign_ofnode(dev, ap);
                                scsi_device_put(sdev);
                        } else {
                                dev->sdev = NULL;
                        }
                }
        }

        /* If we scanned while EH was in progress or allocation
         * failure occurred, scan would have failed silently.  Check
         * whether all devices are attached.
         */
        ata_for_each_link(link, ap, EDGE) {
                ata_for_each_dev(dev, link, ENABLED) {
                        if (!dev->sdev)
                                goto exit_loop;
                }
        }
exit_loop:
        if (!link)
                return;

        /* we're missing some SCSI devices */
        if (sync) {
                /* If the caller requested a synchronous scan and we've
                 * made any progress, sleep briefly and repeat.
                 */
                if (dev != last_failed_dev) {
                        msleep(100);
                        last_failed_dev = dev;
                        goto repeat;
                }

                /* We might be failing to detect the boot device, give
                 * it a few more chances.
                 */
                if (--tries) {
                        msleep(100);
                        goto repeat;
                }

                ata_port_err(ap,
                             "WARNING: synchronous SCSI scan failed without making any progress, switching to async\n");
        }

        queue_delayed_work(system_long_wq, &ap->hotplug_task,
                           round_jiffies_relative(HZ));
}

/**
 * ata_scsi_offline_dev - offline attached SCSI device
 * @dev: ATA device to offline attached SCSI device for
 *
 * This function is called from ata_eh_hotplug() and responsible
 * for taking the SCSI device attached to @dev offline.  This
 * function is called with host lock which protects dev->sdev
 * against clearing.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 1 if attached SCSI device exists, 0 otherwise.
 */
int ata_scsi_offline_dev(struct ata_device *dev)
{
        if (dev->sdev) {
                scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
                return 1;
        }
        return 0;
}

/**
 * ata_scsi_remove_dev - remove attached SCSI device
 * @dev: ATA device to remove attached SCSI device for
 *
 * This function is called from ata_eh_scsi_hotplug() and
 * responsible for removing the SCSI device attached to @dev.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_scsi_remove_dev(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;
        struct scsi_device *sdev;
        unsigned long flags;

        /* Alas, we need to grab scan_mutex to ensure SCSI device
         * state doesn't change underneath us and thus
         * scsi_device_get() always succeeds.  The mutex locking could
         * be removed if there were a __scsi_device_get() interface
         * which incremented reference counts regardless of device
         * state.
         */
        mutex_lock(&ap->scsi_host->scan_mutex);
        spin_lock_irqsave(ap->lock, flags);

        /* clearing dev->sdev is protected by host lock */
        sdev = dev->sdev;
        dev->sdev = NULL;

        if (sdev) {
                /* If a user-initiated unplug races with us, sdev can go
                 * away underneath us after the host lock and
                 * scan_mutex are released.  Hold onto it.
                 */
                if (scsi_device_get(sdev) == 0) {
                        /* The following ensures the attached sdev is
                         * offline on return from ata_scsi_offline_dev()
                         * regardless of whether it wins or loses the
                         * race against this function.
                         */
                        scsi_device_set_state(sdev, SDEV_OFFLINE);
                } else {
                        WARN_ON(1);
                        sdev = NULL;
                }
        }

        spin_unlock_irqrestore(ap->lock, flags);
        mutex_unlock(&ap->scsi_host->scan_mutex);

        if (sdev) {
                ata_dev_info(dev, "detaching (SCSI %s)\n",
                             dev_name(&sdev->sdev_gendev));

                scsi_remove_device(sdev);
                scsi_device_put(sdev);
        }
}
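
/*
 * ata_scsi_handle_link_detach - remove SCSI devices of detached ATA devices
 * @link: ATA link to process
 *
 * For each device on @link that is marked ATA_DFLAG_DETACHED, clear
 * the flag under the host lock and remove the attached SCSI device.
 */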
static void ata_scsi_handle_link_detach(struct ata_link *link)
{
        struct ata_port *ap = link->ap;
        struct ata_device *dev;

        ata_for_each_dev(dev, link, ALL) {
                unsigned long flags;

                if (!(dev->flags & ATA_DFLAG_DETACHED))
                        continue;

                spin_lock_irqsave(ap->lock, flags);
                dev->flags &= ~ATA_DFLAG_DETACHED;
                spin_unlock_irqrestore(ap->lock, flags);

                if (zpodd_dev_enabled(dev))
                        zpodd_exit(dev);

                ata_scsi_remove_dev(dev);
        }
}

/**
 * ata_scsi_media_change_notify - send media change event
 * @dev: Pointer to the disk device with media change event
 *
 * Tell the block layer to send a media change notification
 * event.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_scsi_media_change_notify(struct ata_device *dev)
{
        if (dev->sdev)
                sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
                                     GFP_ATOMIC);
}

/**
 * ata_scsi_hotplug - SCSI part of hotplug
 * @work: Pointer to ATA port to perform SCSI hotplug on
 *
 * Perform the SCSI part of hotplug.  It's executed from a separate
 * workqueue after EH completes.  This is necessary because SCSI
 * hot plugging requires working EH, and hot unplugging is
 * synchronized with hot plugging with a mutex.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_scsi_hotplug(struct work_struct *work)
{
        struct ata_port *ap =
                container_of(work, struct ata_port, hotplug_task.work);
        int i;

        if (ap->pflags & ATA_PFLAG_UNLOADING)
                return;

        mutex_lock(&ap->scsi_scan_mutex);

        /* Unplug detached devices.  We cannot use the link iterator
         * here because PMP links have to be scanned even if the PMP is
         * currently not attached.  Iterate manually.
         */
        ata_scsi_handle_link_detach(&ap->link);
        if (ap->pmp_link)
                for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
                        ata_scsi_handle_link_detach(&ap->pmp_link[i]);

        /* scan for new ones */
        ata_scsi_scan_host(ap, 0);

        mutex_unlock(&ap->scsi_scan_mutex);
}

/**
 * ata_scsi_user_scan - indication for user-initiated bus scan
 * @shost: SCSI host to scan
 * @channel: Channel to scan
 * @id: ID to scan
 * @lun: LUN to scan
 *
 * This function is called when the user explicitly requests a bus
 * scan.  Set the probe pending flag and invoke EH.
 *
 * LOCKING:
 * SCSI layer (we don't care)
 *
 * RETURNS:
 * Zero on success, negative errno otherwise.
 */
int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
                       unsigned int id, u64 lun)
{
        struct ata_port *ap = ata_shost_to_port(shost);
        unsigned long flags;
        int devno, rc = 0;

        if (!ap->ops->error_handler)
                return -EOPNOTSUPP;

        if (lun != SCAN_WILD_CARD && lun)
                return -EINVAL;

        if (!sata_pmp_attached(ap)) {
                if (channel != SCAN_WILD_CARD && channel)
                        return -EINVAL;
                devno = id;
        } else {
                if (id != SCAN_WILD_CARD && id)
                        return -EINVAL;
                devno = channel;
        }

        spin_lock_irqsave(ap->lock, flags);

        if (devno == SCAN_WILD_CARD) {
                struct ata_link *link;

                ata_for_each_link(link, ap, EDGE) {
                        struct ata_eh_info *ehi = &link->eh_info;

                        ehi->probe_mask |= ATA_ALL_DEVICES;
                        ehi->action |= ATA_EH_RESET;
                }
        } else {
                struct ata_device *dev = ata_find_dev(ap, devno);

                if (dev) {
                        struct ata_eh_info *ehi = &dev->link->eh_info;

                        ehi->probe_mask |= 1 << dev->devno;
                        ehi->action |= ATA_EH_RESET;
                } else {
                        rc = -EINVAL;
                }
        }

        if (rc == 0) {
                ata_port_schedule_eh(ap);
                spin_unlock_irqrestore(ap->lock, flags);
                ata_port_wait_eh(ap);
        } else {
                spin_unlock_irqrestore(ap->lock, flags);
        }

        return rc;
}

/**
 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
 * @work: Pointer to ATA port to perform scsi_rescan_device()
 *
 * After ATA pass thru (SAT) commands are executed successfully,
 * libata needs to propagate the changes to the SCSI layer.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_scsi_dev_rescan(struct work_struct *work)
{
        struct ata_port *ap =
                container_of(work, struct ata_port, scsi_rescan_task);
        struct ata_link *link;
        struct ata_device *dev;
        unsigned long flags;

        mutex_lock(&ap->scsi_scan_mutex);
        spin_lock_irqsave(ap->lock, flags);

        ata_for_each_link(link, ap, EDGE) {
                ata_for_each_dev(dev, link, ENABLED) {
                        struct scsi_device *sdev = dev->sdev;

                        if (!sdev)
                                continue;
                        if (scsi_device_get(sdev))
                                continue;

                        spin_unlock_irqrestore(ap->lock, flags);
                        scsi_rescan_device(&sdev->sdev_gendev);
                        scsi_device_put(sdev);
                        spin_lock_irqsave(ap->lock, flags);
                }
        }

        spin_unlock_irqrestore(ap->lock, flags);
        mutex_unlock(&ap->scsi_scan_mutex);
}