1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * libata-scsi.c - helper library for ATA 4 * 5 * Copyright 2003-2004 Red Hat, Inc. All rights reserved. 6 * Copyright 2003-2004 Jeff Garzik 7 * 8 * libata documentation is available via 'make {ps|pdf}docs', 9 * as Documentation/driver-api/libata.rst 10 * 11 * Hardware documentation available from 12 * - http://www.t10.org/ 13 * - http://www.t13.org/ 14 */ 15 16 #include <linux/compat.h> 17 #include <linux/slab.h> 18 #include <linux/kernel.h> 19 #include <linux/blkdev.h> 20 #include <linux/spinlock.h> 21 #include <linux/export.h> 22 #include <scsi/scsi.h> 23 #include <scsi/scsi_host.h> 24 #include <scsi/scsi_cmnd.h> 25 #include <scsi/scsi_eh.h> 26 #include <scsi/scsi_device.h> 27 #include <scsi/scsi_tcq.h> 28 #include <scsi/scsi_transport.h> 29 #include <linux/libata.h> 30 #include <linux/hdreg.h> 31 #include <linux/uaccess.h> 32 #include <linux/suspend.h> 33 #include <asm/unaligned.h> 34 #include <linux/ioprio.h> 35 #include <linux/of.h> 36 37 #include "libata.h" 38 #include "libata-transport.h" 39 40 #define ATA_SCSI_RBUF_SIZE 2048 41 42 static DEFINE_SPINLOCK(ata_scsi_rbuf_lock); 43 static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE]; 44 45 typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc); 46 47 static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, 48 const struct scsi_device *scsidev); 49 50 #define RW_RECOVERY_MPAGE 0x1 51 #define RW_RECOVERY_MPAGE_LEN 12 52 #define CACHE_MPAGE 0x8 53 #define CACHE_MPAGE_LEN 20 54 #define CONTROL_MPAGE 0xa 55 #define CONTROL_MPAGE_LEN 12 56 #define ALL_MPAGES 0x3f 57 #define ALL_SUB_MPAGES 0xff 58 #define CDL_T2A_SUB_MPAGE 0x07 59 #define CDL_T2B_SUB_MPAGE 0x08 60 #define CDL_T2_SUB_MPAGE_LEN 232 61 #define ATA_FEATURE_SUB_MPAGE 0xf2 62 #define ATA_FEATURE_SUB_MPAGE_LEN 16 63 64 static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = { 65 RW_RECOVERY_MPAGE, 66 RW_RECOVERY_MPAGE_LEN - 2, 67 (1 << 7), /* AWRE */ 68 0, /* read retry count */ 69 0, 0, 0, 0, 70 0, /* write retry count */ 71 0, 0, 0 72 }; 73 74 static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = { 75 CACHE_MPAGE, 76 CACHE_MPAGE_LEN - 2, 77 0, /* contains WCE, needs to be 0 for logic */ 78 0, 0, 0, 0, 0, 0, 0, 0, 0, 79 0, /* contains DRA, needs to be 0 for logic */ 80 0, 0, 0, 0, 0, 0, 0 81 }; 82 83 static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = { 84 CONTROL_MPAGE, 85 CONTROL_MPAGE_LEN - 2, 86 2, /* DSENSE=0, GLTSD=1 */ 87 0, /* [QAM+QERR may be 1, see 05-359r1] */ 88 0, 0, 0, 0, 0xff, 0xff, 89 0, 30 /* extended self test time, see 05-359r1 */ 90 }; 91 92 static ssize_t ata_scsi_park_show(struct device *device, 93 struct device_attribute *attr, char *buf) 94 { 95 struct scsi_device *sdev = to_scsi_device(device); 96 struct ata_port *ap; 97 struct ata_link *link; 98 struct ata_device *dev; 99 unsigned long now; 100 unsigned int msecs; 101 int rc = 0; 102 103 ap = ata_shost_to_port(sdev->host); 104 105 spin_lock_irq(ap->lock); 106 dev = ata_scsi_find_dev(ap, sdev); 107 if (!dev) { 108 rc = -ENODEV; 109 goto unlock; 110 } 111 if (dev->flags & ATA_DFLAG_NO_UNLOAD) { 112 rc = -EOPNOTSUPP; 113 goto unlock; 114 } 115 116 link = dev->link; 117 now = jiffies; 118 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS && 119 link->eh_context.unloaded_mask & (1 << dev->devno) && 120 time_after(dev->unpark_deadline, now)) 121 msecs = jiffies_to_msecs(dev->unpark_deadline - now); 122 else 123 msecs = 0; 124 125 unlock: 126 spin_unlock_irq(ap->lock); 127 128 return rc ? rc : sysfs_emit(buf, "%u\n", msecs); 129 } 130 131 static ssize_t ata_scsi_park_store(struct device *device, 132 struct device_attribute *attr, 133 const char *buf, size_t len) 134 { 135 struct scsi_device *sdev = to_scsi_device(device); 136 struct ata_port *ap; 137 struct ata_device *dev; 138 long int input; 139 unsigned long flags; 140 int rc; 141 142 rc = kstrtol(buf, 10, &input); 143 if (rc) 144 return rc; 145 if (input < -2) 146 return -EINVAL; 147 if (input > ATA_TMOUT_MAX_PARK) { 148 rc = -EOVERFLOW; 149 input = ATA_TMOUT_MAX_PARK; 150 } 151 152 ap = ata_shost_to_port(sdev->host); 153 154 spin_lock_irqsave(ap->lock, flags); 155 dev = ata_scsi_find_dev(ap, sdev); 156 if (unlikely(!dev)) { 157 rc = -ENODEV; 158 goto unlock; 159 } 160 if (dev->class != ATA_DEV_ATA && 161 dev->class != ATA_DEV_ZAC) { 162 rc = -EOPNOTSUPP; 163 goto unlock; 164 } 165 166 if (input >= 0) { 167 if (dev->flags & ATA_DFLAG_NO_UNLOAD) { 168 rc = -EOPNOTSUPP; 169 goto unlock; 170 } 171 172 dev->unpark_deadline = ata_deadline(jiffies, input); 173 dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK; 174 ata_port_schedule_eh(ap); 175 complete(&ap->park_req_pending); 176 } else { 177 switch (input) { 178 case -1: 179 dev->flags &= ~ATA_DFLAG_NO_UNLOAD; 180 break; 181 case -2: 182 dev->flags |= ATA_DFLAG_NO_UNLOAD; 183 break; 184 } 185 } 186 unlock: 187 spin_unlock_irqrestore(ap->lock, flags); 188 189 return rc ? rc : len; 190 } 191 DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR, 192 ata_scsi_park_show, ata_scsi_park_store); 193 EXPORT_SYMBOL_GPL(dev_attr_unload_heads); 194 195 bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq) 196 { 197 /* 198 * If sk == NO_SENSE, and asc + ascq == NO ADDITIONAL SENSE INFORMATION, 199 * then there is no sense data to add. 200 */ 201 if (sk == 0 && asc == 0 && ascq == 0) 202 return false; 203 204 /* If sk > COMPLETED, sense data is bogus. */ 205 if (sk > COMPLETED) 206 return false; 207 208 return true; 209 } 210 211 void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd, 212 u8 sk, u8 asc, u8 ascq) 213 { 214 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); 215 216 scsi_build_sense(cmd, d_sense, sk, asc, ascq); 217 } 218 219 void ata_scsi_set_sense_information(struct ata_device *dev, 220 struct scsi_cmnd *cmd, 221 const struct ata_taskfile *tf) 222 { 223 u64 information; 224 225 information = ata_tf_read_block(tf, dev); 226 if (information == U64_MAX) 227 return; 228 229 scsi_set_sense_information(cmd->sense_buffer, 230 SCSI_SENSE_BUFFERSIZE, information); 231 } 232 233 static void ata_scsi_set_invalid_field(struct ata_device *dev, 234 struct scsi_cmnd *cmd, u16 field, u8 bit) 235 { 236 ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x24, 0x0); 237 /* "Invalid field in CDB" */ 238 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, 239 field, bit, 1); 240 } 241 242 static void ata_scsi_set_invalid_parameter(struct ata_device *dev, 243 struct scsi_cmnd *cmd, u16 field) 244 { 245 /* "Invalid field in parameter list" */ 246 ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x26, 0x0); 247 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, 248 field, 0xff, 0); 249 } 250 251 static struct attribute *ata_common_sdev_attrs[] = { 252 &dev_attr_unload_heads.attr, 253 NULL 254 }; 255 256 static const struct attribute_group ata_common_sdev_attr_group = { 257 .attrs = ata_common_sdev_attrs 258 }; 259 260 const struct attribute_group *ata_common_sdev_groups[] = { 261 &ata_common_sdev_attr_group, 262 NULL 263 }; 264 EXPORT_SYMBOL_GPL(ata_common_sdev_groups); 265 266 /** 267 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd. 268 * @sdev: SCSI device for which BIOS geometry is to be determined 269 * @bdev: block device associated with @sdev 270 * @capacity: capacity of SCSI device 271 * @geom: location to which geometry will be output 272 * 273 * Generic bios head/sector/cylinder calculator 274 * used by sd. Most BIOSes nowadays expect a XXX/255/16 (CHS) 275 * mapping. Some situations may arise where the disk is not 276 * bootable if this is not used. 277 * 278 * LOCKING: 279 * Defined by the SCSI layer. We don't really care. 280 * 281 * RETURNS: 282 * Zero. 283 */ 284 int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, 285 sector_t capacity, int geom[]) 286 { 287 geom[0] = 255; 288 geom[1] = 63; 289 sector_div(capacity, 255*63); 290 geom[2] = capacity; 291 292 return 0; 293 } 294 EXPORT_SYMBOL_GPL(ata_std_bios_param); 295 296 /** 297 * ata_scsi_unlock_native_capacity - unlock native capacity 298 * @sdev: SCSI device to adjust device capacity for 299 * 300 * This function is called if a partition on @sdev extends beyond 301 * the end of the device. It requests EH to unlock HPA. 302 * 303 * LOCKING: 304 * Defined by the SCSI layer. Might sleep. 305 */ 306 void ata_scsi_unlock_native_capacity(struct scsi_device *sdev) 307 { 308 struct ata_port *ap = ata_shost_to_port(sdev->host); 309 struct ata_device *dev; 310 unsigned long flags; 311 312 spin_lock_irqsave(ap->lock, flags); 313 314 dev = ata_scsi_find_dev(ap, sdev); 315 if (dev && dev->n_sectors < dev->n_native_sectors) { 316 dev->flags |= ATA_DFLAG_UNLOCK_HPA; 317 dev->link->eh_info.action |= ATA_EH_RESET; 318 ata_port_schedule_eh(ap); 319 } 320 321 spin_unlock_irqrestore(ap->lock, flags); 322 ata_port_wait_eh(ap); 323 } 324 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); 325 326 /** 327 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl 328 * @ap: target port 329 * @sdev: SCSI device to get identify data for 330 * @arg: User buffer area for identify data 331 * 332 * LOCKING: 333 * Defined by the SCSI layer. We don't really care. 334 * 335 * RETURNS: 336 * Zero on success, negative errno on error. 337 */ 338 static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev, 339 void __user *arg) 340 { 341 struct ata_device *dev = ata_scsi_find_dev(ap, sdev); 342 u16 __user *dst = arg; 343 char buf[40]; 344 345 if (!dev) 346 return -ENOMSG; 347 348 if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16))) 349 return -EFAULT; 350 351 ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN); 352 if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN)) 353 return -EFAULT; 354 355 ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN); 356 if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN)) 357 return -EFAULT; 358 359 ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN); 360 if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN)) 361 return -EFAULT; 362 363 return 0; 364 } 365 366 /** 367 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl 368 * @scsidev: Device to which we are issuing command 369 * @arg: User provided data for issuing command 370 * 371 * LOCKING: 372 * Defined by the SCSI layer. We don't really care. 373 * 374 * RETURNS: 375 * Zero on success, negative errno on error. 376 */ 377 int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) 378 { 379 int rc = 0; 380 u8 sensebuf[SCSI_SENSE_BUFFERSIZE]; 381 u8 scsi_cmd[MAX_COMMAND_SIZE]; 382 u8 args[4], *argbuf = NULL; 383 int argsize = 0; 384 struct scsi_sense_hdr sshdr; 385 const struct scsi_exec_args exec_args = { 386 .sshdr = &sshdr, 387 .sense = sensebuf, 388 .sense_len = sizeof(sensebuf), 389 }; 390 int cmd_result; 391 392 if (arg == NULL) 393 return -EINVAL; 394 395 if (copy_from_user(args, arg, sizeof(args))) 396 return -EFAULT; 397 398 memset(sensebuf, 0, sizeof(sensebuf)); 399 memset(scsi_cmd, 0, sizeof(scsi_cmd)); 400 401 if (args[3]) { 402 argsize = ATA_SECT_SIZE * args[3]; 403 argbuf = kmalloc(argsize, GFP_KERNEL); 404 if (argbuf == NULL) { 405 rc = -ENOMEM; 406 goto error; 407 } 408 409 scsi_cmd[1] = (4 << 1); /* PIO Data-in */ 410 scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev, 411 block count in sector count field */ 412 } else { 413 scsi_cmd[1] = (3 << 1); /* Non-data */ 414 scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */ 415 } 416 417 scsi_cmd[0] = ATA_16; 418 419 scsi_cmd[4] = args[2]; 420 if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */ 421 scsi_cmd[6] = args[3]; 422 scsi_cmd[8] = args[1]; 423 scsi_cmd[10] = ATA_SMART_LBAM_PASS; 424 scsi_cmd[12] = ATA_SMART_LBAH_PASS; 425 } else { 426 scsi_cmd[6] = args[1]; 427 } 428 scsi_cmd[14] = args[0]; 429 430 /* Good values for timeout and retries? Values below 431 from scsi_ioctl_send_command() for default case... */ 432 cmd_result = scsi_execute_cmd(scsidev, scsi_cmd, REQ_OP_DRV_IN, argbuf, 433 argsize, 10 * HZ, 5, &exec_args); 434 if (cmd_result < 0) { 435 rc = cmd_result; 436 goto error; 437 } 438 if (scsi_sense_valid(&sshdr)) {/* sense data available */ 439 u8 *desc = sensebuf + 8; 440 441 /* If we set cc then ATA pass-through will cause a 442 * check condition even if no error. Filter that. */ 443 if (scsi_status_is_check_condition(cmd_result)) { 444 if (sshdr.sense_key == RECOVERED_ERROR && 445 sshdr.asc == 0 && sshdr.ascq == 0x1d) 446 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 447 } 448 449 /* Send userspace a few ATA registers (same as drivers/ide) */ 450 if (sensebuf[0] == 0x72 && /* format is "descriptor" */ 451 desc[0] == 0x09) { /* code is "ATA Descriptor" */ 452 args[0] = desc[13]; /* status */ 453 args[1] = desc[3]; /* error */ 454 args[2] = desc[5]; /* sector count (0:7) */ 455 if (copy_to_user(arg, args, sizeof(args))) 456 rc = -EFAULT; 457 } 458 } 459 460 461 if (cmd_result) { 462 rc = -EIO; 463 goto error; 464 } 465 466 if ((argbuf) 467 && copy_to_user(arg + sizeof(args), argbuf, argsize)) 468 rc = -EFAULT; 469 error: 470 kfree(argbuf); 471 return rc; 472 } 473 474 /** 475 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl 476 * @scsidev: Device to which we are issuing command 477 * @arg: User provided data for issuing command 478 * 479 * LOCKING: 480 * Defined by the SCSI layer. We don't really care. 481 * 482 * RETURNS: 483 * Zero on success, negative errno on error. 484 */ 485 int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) 486 { 487 int rc = 0; 488 u8 sensebuf[SCSI_SENSE_BUFFERSIZE]; 489 u8 scsi_cmd[MAX_COMMAND_SIZE]; 490 u8 args[7]; 491 struct scsi_sense_hdr sshdr; 492 int cmd_result; 493 const struct scsi_exec_args exec_args = { 494 .sshdr = &sshdr, 495 .sense = sensebuf, 496 .sense_len = sizeof(sensebuf), 497 }; 498 499 if (arg == NULL) 500 return -EINVAL; 501 502 if (copy_from_user(args, arg, sizeof(args))) 503 return -EFAULT; 504 505 memset(sensebuf, 0, sizeof(sensebuf)); 506 memset(scsi_cmd, 0, sizeof(scsi_cmd)); 507 scsi_cmd[0] = ATA_16; 508 scsi_cmd[1] = (3 << 1); /* Non-data */ 509 scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */ 510 scsi_cmd[4] = args[1]; 511 scsi_cmd[6] = args[2]; 512 scsi_cmd[8] = args[3]; 513 scsi_cmd[10] = args[4]; 514 scsi_cmd[12] = args[5]; 515 scsi_cmd[13] = args[6] & 0x4f; 516 scsi_cmd[14] = args[0]; 517 518 /* Good values for timeout and retries? Values below 519 from scsi_ioctl_send_command() for default case... */ 520 cmd_result = scsi_execute_cmd(scsidev, scsi_cmd, REQ_OP_DRV_IN, NULL, 521 0, 10 * HZ, 5, &exec_args); 522 if (cmd_result < 0) { 523 rc = cmd_result; 524 goto error; 525 } 526 if (scsi_sense_valid(&sshdr)) {/* sense data available */ 527 u8 *desc = sensebuf + 8; 528 529 /* If we set cc then ATA pass-through will cause a 530 * check condition even if no error. Filter that. */ 531 if (cmd_result & SAM_STAT_CHECK_CONDITION) { 532 if (sshdr.sense_key == RECOVERED_ERROR && 533 sshdr.asc == 0 && sshdr.ascq == 0x1d) 534 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 535 } 536 537 /* Send userspace ATA registers */ 538 if (sensebuf[0] == 0x72 && /* format is "descriptor" */ 539 desc[0] == 0x09) {/* code is "ATA Descriptor" */ 540 args[0] = desc[13]; /* status */ 541 args[1] = desc[3]; /* error */ 542 args[2] = desc[5]; /* sector count (0:7) */ 543 args[3] = desc[7]; /* lbal */ 544 args[4] = desc[9]; /* lbam */ 545 args[5] = desc[11]; /* lbah */ 546 args[6] = desc[12]; /* select */ 547 if (copy_to_user(arg, args, sizeof(args))) 548 rc = -EFAULT; 549 } 550 } 551 552 if (cmd_result) { 553 rc = -EIO; 554 goto error; 555 } 556 557 error: 558 return rc; 559 } 560 561 static bool ata_ioc32(struct ata_port *ap) 562 { 563 if (ap->flags & ATA_FLAG_PIO_DMA) 564 return true; 565 if (ap->pflags & ATA_PFLAG_PIO32) 566 return true; 567 return false; 568 } 569 570 /* 571 * This handles both native and compat commands, so anything added 572 * here must have a compatible argument, or check in_compat_syscall() 573 */ 574 int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, 575 unsigned int cmd, void __user *arg) 576 { 577 unsigned long val; 578 int rc = -EINVAL; 579 unsigned long flags; 580 581 switch (cmd) { 582 case HDIO_GET_32BIT: 583 spin_lock_irqsave(ap->lock, flags); 584 val = ata_ioc32(ap); 585 spin_unlock_irqrestore(ap->lock, flags); 586 #ifdef CONFIG_COMPAT 587 if (in_compat_syscall()) 588 return put_user(val, (compat_ulong_t __user *)arg); 589 #endif 590 return put_user(val, (unsigned long __user *)arg); 591 592 case HDIO_SET_32BIT: 593 val = (unsigned long) arg; 594 rc = 0; 595 spin_lock_irqsave(ap->lock, flags); 596 if (ap->pflags & ATA_PFLAG_PIO32CHANGE) { 597 if (val) 598 ap->pflags |= ATA_PFLAG_PIO32; 599 else 600 ap->pflags &= ~ATA_PFLAG_PIO32; 601 } else { 602 if (val != ata_ioc32(ap)) 603 rc = -EINVAL; 604 } 605 spin_unlock_irqrestore(ap->lock, flags); 606 return rc; 607 608 case HDIO_GET_IDENTITY: 609 return ata_get_identity(ap, scsidev, arg); 610 611 case HDIO_DRIVE_CMD: 612 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 613 return -EACCES; 614 return ata_cmd_ioctl(scsidev, arg); 615 616 case HDIO_DRIVE_TASK: 617 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 618 return -EACCES; 619 return ata_task_ioctl(scsidev, arg); 620 621 default: 622 rc = -ENOTTY; 623 break; 624 } 625 626 return rc; 627 } 628 EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl); 629 630 int ata_scsi_ioctl(struct scsi_device *scsidev, unsigned int cmd, 631 void __user *arg) 632 { 633 return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host), 634 scsidev, cmd, arg); 635 } 636 EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 637 638 /** 639 * ata_scsi_qc_new - acquire new ata_queued_cmd reference 640 * @dev: ATA device to which the new command is attached 641 * @cmd: SCSI command that originated this ATA command 642 * 643 * Obtain a reference to an unused ata_queued_cmd structure, 644 * which is the basic libata structure representing a single 645 * ATA command sent to the hardware. 646 * 647 * If a command was available, fill in the SCSI-specific 648 * portions of the structure with information on the 649 * current command. 650 * 651 * LOCKING: 652 * spin_lock_irqsave(host lock) 653 * 654 * RETURNS: 655 * Command allocated, or %NULL if none available. 656 */ 657 static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev, 658 struct scsi_cmnd *cmd) 659 { 660 struct ata_port *ap = dev->link->ap; 661 struct ata_queued_cmd *qc; 662 int tag; 663 664 if (unlikely(ata_port_is_frozen(ap))) 665 goto fail; 666 667 if (ap->flags & ATA_FLAG_SAS_HOST) { 668 /* 669 * SAS hosts may queue > ATA_MAX_QUEUE commands so use 670 * unique per-device budget token as a tag. 671 */ 672 if (WARN_ON_ONCE(cmd->budget_token >= ATA_MAX_QUEUE)) 673 goto fail; 674 tag = cmd->budget_token; 675 } else { 676 tag = scsi_cmd_to_rq(cmd)->tag; 677 } 678 679 qc = __ata_qc_from_tag(ap, tag); 680 qc->tag = qc->hw_tag = tag; 681 qc->ap = ap; 682 qc->dev = dev; 683 684 ata_qc_reinit(qc); 685 686 qc->scsicmd = cmd; 687 qc->scsidone = scsi_done; 688 689 qc->sg = scsi_sglist(cmd); 690 qc->n_elem = scsi_sg_count(cmd); 691 692 if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET) 693 qc->flags |= ATA_QCFLAG_QUIET; 694 695 return qc; 696 697 fail: 698 set_host_byte(cmd, DID_OK); 699 set_status_byte(cmd, SAM_STAT_TASK_SET_FULL); 700 scsi_done(cmd); 701 return NULL; 702 } 703 704 static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc) 705 { 706 struct scsi_cmnd *scmd = qc->scsicmd; 707 708 qc->extrabytes = scmd->extra_len; 709 qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes; 710 } 711 712 /** 713 * ata_dump_status - user friendly display of error info 714 * @ap: the port in question 715 * @tf: ptr to filled out taskfile 716 * 717 * Decode and dump the ATA error/status registers for the user so 718 * that they have some idea what really happened at the non 719 * make-believe layer. 720 * 721 * LOCKING: 722 * inherited from caller 723 */ 724 static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf) 725 { 726 u8 stat = tf->status, err = tf->error; 727 728 if (stat & ATA_BUSY) { 729 ata_port_warn(ap, "status=0x%02x {Busy} ", stat); 730 } else { 731 ata_port_warn(ap, "status=0x%02x { %s%s%s%s%s%s%s} ", stat, 732 stat & ATA_DRDY ? "DriveReady " : "", 733 stat & ATA_DF ? "DeviceFault " : "", 734 stat & ATA_DSC ? "SeekComplete " : "", 735 stat & ATA_DRQ ? "DataRequest " : "", 736 stat & ATA_CORR ? "CorrectedError " : "", 737 stat & ATA_SENSE ? "Sense " : "", 738 stat & ATA_ERR ? "Error " : ""); 739 if (err) 740 ata_port_warn(ap, "error=0x%02x {%s%s%s%s%s%s", err, 741 err & ATA_ABORTED ? 742 "DriveStatusError " : "", 743 err & ATA_ICRC ? 744 (err & ATA_ABORTED ? 745 "BadCRC " : "Sector ") : "", 746 err & ATA_UNC ? "UncorrectableError " : "", 747 err & ATA_IDNF ? "SectorIdNotFound " : "", 748 err & ATA_TRK0NF ? "TrackZeroNotFound " : "", 749 err & ATA_AMNF ? "AddrMarkNotFound " : ""); 750 } 751 } 752 753 /** 754 * ata_to_sense_error - convert ATA error to SCSI error 755 * @id: ATA device number 756 * @drv_stat: value contained in ATA status register 757 * @drv_err: value contained in ATA error register 758 * @sk: the sense key we'll fill out 759 * @asc: the additional sense code we'll fill out 760 * @ascq: the additional sense code qualifier we'll fill out 761 * @verbose: be verbose 762 * 763 * Converts an ATA error into a SCSI error. Fill out pointers to 764 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor 765 * format sense blocks. 766 * 767 * LOCKING: 768 * spin_lock_irqsave(host lock) 769 */ 770 static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, 771 u8 *asc, u8 *ascq, int verbose) 772 { 773 int i; 774 775 /* Based on the 3ware driver translation table */ 776 static const unsigned char sense_table[][4] = { 777 /* BBD|ECC|ID|MAR */ 778 {0xd1, ABORTED_COMMAND, 0x00, 0x00}, 779 // Device busy Aborted command 780 /* BBD|ECC|ID */ 781 {0xd0, ABORTED_COMMAND, 0x00, 0x00}, 782 // Device busy Aborted command 783 /* ECC|MC|MARK */ 784 {0x61, HARDWARE_ERROR, 0x00, 0x00}, 785 // Device fault Hardware error 786 /* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */ 787 {0x84, ABORTED_COMMAND, 0x47, 0x00}, 788 // Data CRC error SCSI parity error 789 /* MC|ID|ABRT|TRK0|MARK */ 790 {0x37, NOT_READY, 0x04, 0x00}, 791 // Unit offline Not ready 792 /* MCR|MARK */ 793 {0x09, NOT_READY, 0x04, 0x00}, 794 // Unrecovered disk error Not ready 795 /* Bad address mark */ 796 {0x01, MEDIUM_ERROR, 0x13, 0x00}, 797 // Address mark not found for data field 798 /* TRK0 - Track 0 not found */ 799 {0x02, HARDWARE_ERROR, 0x00, 0x00}, 800 // Hardware error 801 /* Abort: 0x04 is not translated here, see below */ 802 /* Media change request */ 803 {0x08, NOT_READY, 0x04, 0x00}, 804 // FIXME: faking offline 805 /* SRV/IDNF - ID not found */ 806 {0x10, ILLEGAL_REQUEST, 0x21, 0x00}, 807 // Logical address out of range 808 /* MC - Media Changed */ 809 {0x20, UNIT_ATTENTION, 0x28, 0x00}, 810 // Not ready to ready change, medium may have changed 811 /* ECC - Uncorrectable ECC error */ 812 {0x40, MEDIUM_ERROR, 0x11, 0x04}, 813 // Unrecovered read error 814 /* BBD - block marked bad */ 815 {0x80, MEDIUM_ERROR, 0x11, 0x04}, 816 // Block marked bad Medium error, unrecovered read error 817 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark 818 }; 819 static const unsigned char stat_table[][4] = { 820 /* Must be first because BUSY means no other bits valid */ 821 {0x80, ABORTED_COMMAND, 0x47, 0x00}, 822 // Busy, fake parity for now 823 {0x40, ILLEGAL_REQUEST, 0x21, 0x04}, 824 // Device ready, unaligned write command 825 {0x20, HARDWARE_ERROR, 0x44, 0x00}, 826 // Device fault, internal target failure 827 {0x08, ABORTED_COMMAND, 0x47, 0x00}, 828 // Timed out in xfer, fake parity for now 829 {0x04, RECOVERED_ERROR, 0x11, 0x00}, 830 // Recovered ECC error Medium error, recovered 831 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark 832 }; 833 834 /* 835 * Is this an error we can process/parse 836 */ 837 if (drv_stat & ATA_BUSY) { 838 drv_err = 0; /* Ignore the err bits, they're invalid */ 839 } 840 841 if (drv_err) { 842 /* Look for drv_err */ 843 for (i = 0; sense_table[i][0] != 0xFF; i++) { 844 /* Look for best matches first */ 845 if ((sense_table[i][0] & drv_err) == 846 sense_table[i][0]) { 847 *sk = sense_table[i][1]; 848 *asc = sense_table[i][2]; 849 *ascq = sense_table[i][3]; 850 goto translate_done; 851 } 852 } 853 } 854 855 /* 856 * Fall back to interpreting status bits. Note that if the drv_err 857 * has only the ABRT bit set, we decode drv_stat. ABRT by itself 858 * is not descriptive enough. 859 */ 860 for (i = 0; stat_table[i][0] != 0xFF; i++) { 861 if (stat_table[i][0] & drv_stat) { 862 *sk = stat_table[i][1]; 863 *asc = stat_table[i][2]; 864 *ascq = stat_table[i][3]; 865 goto translate_done; 866 } 867 } 868 869 /* 870 * We need a sensible error return here, which is tricky, and one 871 * that won't cause people to do things like return a disk wrongly. 872 */ 873 *sk = ABORTED_COMMAND; 874 *asc = 0x00; 875 *ascq = 0x00; 876 877 translate_done: 878 if (verbose) 879 pr_err("ata%u: translated ATA stat/err 0x%02x/%02x to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n", 880 id, drv_stat, drv_err, *sk, *asc, *ascq); 881 return; 882 } 883 884 /* 885 * ata_gen_passthru_sense - Generate check condition sense block. 886 * @qc: Command that completed. 887 * 888 * This function is specific to the ATA descriptor format sense 889 * block specified for the ATA pass through commands. Regardless 890 * of whether the command errored or not, return a sense 891 * block. Copy all controller registers into the sense 892 * block. If there was no error, we get the request from an ATA 893 * passthrough command, so we use the following sense data: 894 * sk = RECOVERED ERROR 895 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE 896 * 897 * 898 * LOCKING: 899 * None. 900 */ 901 static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) 902 { 903 struct scsi_cmnd *cmd = qc->scsicmd; 904 struct ata_taskfile *tf = &qc->result_tf; 905 unsigned char *sb = cmd->sense_buffer; 906 unsigned char *desc = sb + 8; 907 int verbose = qc->ap->ops->error_handler == NULL; 908 u8 sense_key, asc, ascq; 909 910 memset(sb, 0, SCSI_SENSE_BUFFERSIZE); 911 912 /* 913 * Use ata_to_sense_error() to map status register bits 914 * onto sense key, asc & ascq. 915 */ 916 if (qc->err_mask || 917 tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 918 ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, 919 &sense_key, &asc, &ascq, verbose); 920 ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); 921 } else { 922 /* 923 * ATA PASS-THROUGH INFORMATION AVAILABLE 924 * Always in descriptor format sense. 925 */ 926 scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D); 927 } 928 929 if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) { 930 u8 len; 931 932 /* descriptor format */ 933 len = sb[7]; 934 desc = (char *)scsi_sense_desc_find(sb, len + 8, 9); 935 if (!desc) { 936 if (SCSI_SENSE_BUFFERSIZE < len + 14) 937 return; 938 sb[7] = len + 14; 939 desc = sb + 8 + len; 940 } 941 desc[0] = 9; 942 desc[1] = 12; 943 /* 944 * Copy registers into sense buffer. 945 */ 946 desc[2] = 0x00; 947 desc[3] = tf->error; 948 desc[5] = tf->nsect; 949 desc[7] = tf->lbal; 950 desc[9] = tf->lbam; 951 desc[11] = tf->lbah; 952 desc[12] = tf->device; 953 desc[13] = tf->status; 954 955 /* 956 * Fill in Extend bit, and the high order bytes 957 * if applicable. 958 */ 959 if (tf->flags & ATA_TFLAG_LBA48) { 960 desc[2] |= 0x01; 961 desc[4] = tf->hob_nsect; 962 desc[6] = tf->hob_lbal; 963 desc[8] = tf->hob_lbam; 964 desc[10] = tf->hob_lbah; 965 } 966 } else { 967 /* Fixed sense format */ 968 desc[0] = tf->error; 969 desc[1] = tf->status; 970 desc[2] = tf->device; 971 desc[3] = tf->nsect; 972 desc[7] = 0; 973 if (tf->flags & ATA_TFLAG_LBA48) { 974 desc[8] |= 0x80; 975 if (tf->hob_nsect) 976 desc[8] |= 0x40; 977 if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah) 978 desc[8] |= 0x20; 979 } 980 desc[9] = tf->lbal; 981 desc[10] = tf->lbam; 982 desc[11] = tf->lbah; 983 } 984 } 985 986 /** 987 * ata_gen_ata_sense - generate a SCSI fixed sense block 988 * @qc: Command that we are erroring out 989 * 990 * Generate sense block for a failed ATA command @qc. Descriptor 991 * format is used to accommodate LBA48 block address. 992 * 993 * LOCKING: 994 * None. 995 */ 996 static void ata_gen_ata_sense(struct ata_queued_cmd *qc) 997 { 998 struct ata_device *dev = qc->dev; 999 struct scsi_cmnd *cmd = qc->scsicmd; 1000 struct ata_taskfile *tf = &qc->result_tf; 1001 unsigned char *sb = cmd->sense_buffer; 1002 int verbose = qc->ap->ops->error_handler == NULL; 1003 u64 block; 1004 u8 sense_key, asc, ascq; 1005 1006 memset(sb, 0, SCSI_SENSE_BUFFERSIZE); 1007 1008 if (ata_dev_disabled(dev)) { 1009 /* Device disabled after error recovery */ 1010 /* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */ 1011 ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21); 1012 return; 1013 } 1014 /* Use ata_to_sense_error() to map status register bits 1015 * onto sense key, asc & ascq. 1016 */ 1017 if (qc->err_mask || 1018 tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 1019 ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, 1020 &sense_key, &asc, &ascq, verbose); 1021 ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq); 1022 } else { 1023 /* Could not decode error */ 1024 ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n", 1025 tf->status, qc->err_mask); 1026 ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0); 1027 return; 1028 } 1029 1030 block = ata_tf_read_block(&qc->result_tf, dev); 1031 if (block == U64_MAX) 1032 return; 1033 1034 scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block); 1035 } 1036 1037 void ata_scsi_sdev_config(struct scsi_device *sdev) 1038 { 1039 sdev->use_10_for_rw = 1; 1040 sdev->use_10_for_ms = 1; 1041 sdev->no_write_same = 1; 1042 1043 /* Schedule policy is determined by ->qc_defer() callback and 1044 * it needs to see every deferred qc. Set dev_blocked to 1 to 1045 * prevent SCSI midlayer from automatically deferring 1046 * requests. 1047 */ 1048 sdev->max_device_blocked = 1; 1049 } 1050 1051 /** 1052 * ata_scsi_dma_need_drain - Check whether data transfer may overflow 1053 * @rq: request to be checked 1054 * 1055 * ATAPI commands which transfer variable length data to host 1056 * might overflow due to application error or hardware bug. This 1057 * function checks whether overflow should be drained and ignored 1058 * for @request. 1059 * 1060 * LOCKING: 1061 * None. 1062 * 1063 * RETURNS: 1064 * 1 if ; otherwise, 0. 1065 */ 1066 bool ata_scsi_dma_need_drain(struct request *rq) 1067 { 1068 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 1069 1070 return atapi_cmd_type(scmd->cmnd[0]) == ATAPI_MISC; 1071 } 1072 EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain); 1073 1074 int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) 1075 { 1076 struct request_queue *q = sdev->request_queue; 1077 int depth = 1; 1078 1079 if (!ata_id_has_unload(dev->id)) 1080 dev->flags |= ATA_DFLAG_NO_UNLOAD; 1081 1082 /* configure max sectors */ 1083 dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors); 1084 blk_queue_max_hw_sectors(q, dev->max_sectors); 1085 1086 if (dev->class == ATA_DEV_ATAPI) { 1087 sdev->sector_size = ATA_SECT_SIZE; 1088 1089 /* set DMA padding */ 1090 blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1); 1091 1092 /* make room for appending the drain */ 1093 blk_queue_max_segments(q, queue_max_segments(q) - 1); 1094 1095 sdev->dma_drain_len = ATAPI_MAX_DRAIN; 1096 sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO); 1097 if (!sdev->dma_drain_buf) { 1098 ata_dev_err(dev, "drain buffer allocation failed\n"); 1099 return -ENOMEM; 1100 } 1101 } else { 1102 sdev->sector_size = ata_id_logical_sector_size(dev->id); 1103 /* 1104 * Stop the drive on suspend but do not issue START STOP UNIT 1105 * on resume as this is not necessary and may fail: the device 1106 * will be woken up by ata_port_pm_resume() with a port reset 1107 * and device revalidation. 1108 */ 1109 sdev->manage_start_stop = 1; 1110 sdev->no_start_on_resume = 1; 1111 } 1112 1113 /* 1114 * ata_pio_sectors() expects buffer for each sector to not cross 1115 * page boundary. Enforce it by requiring buffers to be sector 1116 * aligned, which works iff sector_size is not larger than 1117 * PAGE_SIZE. ATAPI devices also need the alignment as 1118 * IDENTIFY_PACKET is executed as ATA_PROT_PIO. 1119 */ 1120 if (sdev->sector_size > PAGE_SIZE) 1121 ata_dev_warn(dev, 1122 "sector_size=%u > PAGE_SIZE, PIO may malfunction\n", 1123 sdev->sector_size); 1124 1125 blk_queue_update_dma_alignment(q, sdev->sector_size - 1); 1126 1127 if (dev->flags & ATA_DFLAG_AN) 1128 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); 1129 1130 if (ata_ncq_supported(dev)) 1131 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id)); 1132 depth = min(ATA_MAX_QUEUE, depth); 1133 scsi_change_queue_depth(sdev, depth); 1134 1135 if (dev->flags & ATA_DFLAG_TRUSTED) 1136 sdev->security_supported = 1; 1137 1138 dev->sdev = sdev; 1139 return 0; 1140 } 1141 1142 /** 1143 * ata_scsi_slave_config - Set SCSI device attributes 1144 * @sdev: SCSI device to examine 1145 * 1146 * This is called before we actually start reading 1147 * and writing to the device, to configure certain 1148 * SCSI mid-layer behaviors. 1149 * 1150 * LOCKING: 1151 * Defined by SCSI layer. We don't really care. 1152 */ 1153 1154 int ata_scsi_slave_config(struct scsi_device *sdev) 1155 { 1156 struct ata_port *ap = ata_shost_to_port(sdev->host); 1157 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); 1158 int rc = 0; 1159 1160 ata_scsi_sdev_config(sdev); 1161 1162 if (dev) 1163 rc = ata_scsi_dev_config(sdev, dev); 1164 1165 return rc; 1166 } 1167 EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 1168 1169 /** 1170 * ata_scsi_slave_destroy - SCSI device is about to be destroyed 1171 * @sdev: SCSI device to be destroyed 1172 * 1173 * @sdev is about to be destroyed for hot/warm unplugging. If 1174 * this unplugging was initiated by libata as indicated by NULL 1175 * dev->sdev, this function doesn't have to do anything. 1176 * Otherwise, SCSI layer initiated warm-unplug is in progress. 1177 * Clear dev->sdev, schedule the device for ATA detach and invoke 1178 * EH. 1179 * 1180 * LOCKING: 1181 * Defined by SCSI layer. We don't really care. 1182 */ 1183 void ata_scsi_slave_destroy(struct scsi_device *sdev) 1184 { 1185 struct ata_port *ap = ata_shost_to_port(sdev->host); 1186 unsigned long flags; 1187 struct ata_device *dev; 1188 1189 if (!ap->ops->error_handler) 1190 return; 1191 1192 spin_lock_irqsave(ap->lock, flags); 1193 dev = __ata_scsi_find_dev(ap, sdev); 1194 if (dev && dev->sdev) { 1195 /* SCSI device already in CANCEL state, no need to offline it */ 1196 dev->sdev = NULL; 1197 dev->flags |= ATA_DFLAG_DETACH; 1198 ata_port_schedule_eh(ap); 1199 } 1200 spin_unlock_irqrestore(ap->lock, flags); 1201 1202 kfree(sdev->dma_drain_buf); 1203 } 1204 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 1205 1206 /** 1207 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command 1208 * @qc: Storage for translated ATA taskfile 1209 * 1210 * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY 1211 * (to start). Perhaps these commands should be preceded by 1212 * CHECK POWER MODE to see what power mode the device is already in. 1213 * [See SAT revision 5 at www.t10.org] 1214 * 1215 * LOCKING: 1216 * spin_lock_irqsave(host lock) 1217 * 1218 * RETURNS: 1219 * Zero on success, non-zero on error. 1220 */ 1221 static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) 1222 { 1223 struct scsi_cmnd *scmd = qc->scsicmd; 1224 struct ata_taskfile *tf = &qc->tf; 1225 const u8 *cdb = scmd->cmnd; 1226 u16 fp; 1227 u8 bp = 0xff; 1228 1229 if (scmd->cmd_len < 5) { 1230 fp = 4; 1231 goto invalid_fld; 1232 } 1233 1234 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 1235 tf->protocol = ATA_PROT_NODATA; 1236 if (cdb[1] & 0x1) { 1237 ; /* ignore IMMED bit, violates sat-r05 */ 1238 } 1239 if (cdb[4] & 0x2) { 1240 fp = 4; 1241 bp = 1; 1242 goto invalid_fld; /* LOEJ bit set not supported */ 1243 } 1244 if (((cdb[4] >> 4) & 0xf) != 0) { 1245 fp = 4; 1246 bp = 3; 1247 goto invalid_fld; /* power conditions not supported */ 1248 } 1249 1250 if (cdb[4] & 0x1) { 1251 tf->nsect = 1; /* 1 sector, lba=0 */ 1252 1253 if (qc->dev->flags & ATA_DFLAG_LBA) { 1254 tf->flags |= ATA_TFLAG_LBA; 1255 1256 tf->lbah = 0x0; 1257 tf->lbam = 0x0; 1258 tf->lbal = 0x0; 1259 tf->device |= ATA_LBA; 1260 } else { 1261 /* CHS */ 1262 tf->lbal = 0x1; /* sect */ 1263 tf->lbam = 0x0; /* cyl low */ 1264 tf->lbah = 0x0; /* cyl high */ 1265 } 1266 1267 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ 1268 } else { 1269 /* Some odd clown BIOSen issue spindown on power off (ACPI S4 1270 * or S5) causing some drives to spin up and down again. 1271 */ 1272 if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) && 1273 system_state == SYSTEM_POWER_OFF) 1274 goto skip; 1275 1276 if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) && 1277 system_entering_hibernation()) 1278 goto skip; 1279 1280 /* Issue ATA STANDBY IMMEDIATE command */ 1281 tf->command = ATA_CMD_STANDBYNOW1; 1282 } 1283 1284 /* 1285 * Standby and Idle condition timers could be implemented but that 1286 * would require libata to implement the Power condition mode page 1287 * and allow the user to change it. Changing mode pages requires 1288 * MODE SELECT to be implemented. 1289 */ 1290 1291 return 0; 1292 1293 invalid_fld: 1294 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); 1295 return 1; 1296 skip: 1297 scmd->result = SAM_STAT_GOOD; 1298 return 1; 1299 } 1300 1301 1302 /** 1303 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command 1304 * @qc: Storage for translated ATA taskfile 1305 * 1306 * Sets up an ATA taskfile to issue FLUSH CACHE or 1307 * FLUSH CACHE EXT. 1308 * 1309 * LOCKING: 1310 * spin_lock_irqsave(host lock) 1311 * 1312 * RETURNS: 1313 * Zero on success, non-zero on error. 1314 */ 1315 static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc) 1316 { 1317 struct ata_taskfile *tf = &qc->tf; 1318 1319 tf->flags |= ATA_TFLAG_DEVICE; 1320 tf->protocol = ATA_PROT_NODATA; 1321 1322 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT) 1323 tf->command = ATA_CMD_FLUSH_EXT; 1324 else 1325 tf->command = ATA_CMD_FLUSH; 1326 1327 /* flush is critical for IO integrity, consider it an IO command */ 1328 qc->flags |= ATA_QCFLAG_IO; 1329 1330 return 0; 1331 } 1332 1333 /** 1334 * scsi_6_lba_len - Get LBA and transfer length 1335 * @cdb: SCSI command to translate 1336 * 1337 * Calculate LBA and transfer length for 6-byte commands. 1338 * 1339 * RETURNS: 1340 * @plba: the LBA 1341 * @plen: the transfer length 1342 */ 1343 static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen) 1344 { 1345 u64 lba = 0; 1346 u32 len; 1347 1348 lba |= ((u64)(cdb[1] & 0x1f)) << 16; 1349 lba |= ((u64)cdb[2]) << 8; 1350 lba |= ((u64)cdb[3]); 1351 1352 len = cdb[4]; 1353 1354 *plba = lba; 1355 *plen = len; 1356 } 1357 1358 /** 1359 * scsi_10_lba_len - Get LBA and transfer length 1360 * @cdb: SCSI command to translate 1361 * 1362 * Calculate LBA and transfer length for 10-byte commands. 1363 * 1364 * RETURNS: 1365 * @plba: the LBA 1366 * @plen: the transfer length 1367 */ 1368 static inline void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen) 1369 { 1370 *plba = get_unaligned_be32(&cdb[2]); 1371 *plen = get_unaligned_be16(&cdb[7]); 1372 } 1373 1374 /** 1375 * scsi_16_lba_len - Get LBA and transfer length 1376 * @cdb: SCSI command to translate 1377 * 1378 * Calculate LBA and transfer length for 16-byte commands. 1379 * 1380 * RETURNS: 1381 * @plba: the LBA 1382 * @plen: the transfer length 1383 */ 1384 static inline void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen) 1385 { 1386 *plba = get_unaligned_be64(&cdb[2]); 1387 *plen = get_unaligned_be32(&cdb[10]); 1388 } 1389 1390 /** 1391 * scsi_dld - Get duration limit descriptor index 1392 * @cdb: SCSI command to translate 1393 * 1394 * Returns the dld bits indicating the index of a command duration limit 1395 * descriptor. 1396 */ 1397 static inline int scsi_dld(const u8 *cdb) 1398 { 1399 return ((cdb[1] & 0x01) << 2) | ((cdb[14] >> 6) & 0x03); 1400 } 1401 1402 /** 1403 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one 1404 * @qc: Storage for translated ATA taskfile 1405 * 1406 * Converts SCSI VERIFY command to an ATA READ VERIFY command. 1407 * 1408 * LOCKING: 1409 * spin_lock_irqsave(host lock) 1410 * 1411 * RETURNS: 1412 * Zero on success, non-zero on error. 1413 */ 1414 static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc) 1415 { 1416 struct scsi_cmnd *scmd = qc->scsicmd; 1417 struct ata_taskfile *tf = &qc->tf; 1418 struct ata_device *dev = qc->dev; 1419 u64 dev_sectors = qc->dev->n_sectors; 1420 const u8 *cdb = scmd->cmnd; 1421 u64 block; 1422 u32 n_block; 1423 u16 fp; 1424 1425 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1426 tf->protocol = ATA_PROT_NODATA; 1427 1428 switch (cdb[0]) { 1429 case VERIFY: 1430 if (scmd->cmd_len < 10) { 1431 fp = 9; 1432 goto invalid_fld; 1433 } 1434 scsi_10_lba_len(cdb, &block, &n_block); 1435 break; 1436 case VERIFY_16: 1437 if (scmd->cmd_len < 16) { 1438 fp = 15; 1439 goto invalid_fld; 1440 } 1441 scsi_16_lba_len(cdb, &block, &n_block); 1442 break; 1443 default: 1444 fp = 0; 1445 goto invalid_fld; 1446 } 1447 1448 if (!n_block) 1449 goto nothing_to_do; 1450 if (block >= dev_sectors) 1451 goto out_of_range; 1452 if ((block + n_block) > dev_sectors) 1453 goto out_of_range; 1454 1455 if (dev->flags & ATA_DFLAG_LBA) { 1456 tf->flags |= ATA_TFLAG_LBA; 1457 1458 if (lba_28_ok(block, n_block)) { 1459 /* use LBA28 */ 1460 tf->command = ATA_CMD_VERIFY; 1461 tf->device |= (block >> 24) & 0xf; 1462 } else if (lba_48_ok(block, n_block)) { 1463 if (!(dev->flags & ATA_DFLAG_LBA48)) 1464 goto out_of_range; 1465 1466 /* use LBA48 */ 1467 tf->flags |= ATA_TFLAG_LBA48; 1468 tf->command = ATA_CMD_VERIFY_EXT; 1469 1470 tf->hob_nsect = (n_block >> 8) & 0xff; 1471 1472 tf->hob_lbah = (block >> 40) & 0xff; 1473 tf->hob_lbam = (block >> 32) & 0xff; 1474 tf->hob_lbal = (block >> 24) & 0xff; 1475 } else 1476 /* request too large even for LBA48 */ 1477 goto out_of_range; 1478 1479 tf->nsect = n_block & 0xff; 1480 1481 tf->lbah = (block >> 16) & 0xff; 1482 tf->lbam = (block >> 8) & 0xff; 1483 tf->lbal = block & 0xff; 1484 1485 tf->device |= ATA_LBA; 1486 } else { 1487 /* CHS */ 1488 u32 sect, head, cyl, track; 1489 1490 if (!lba_28_ok(block, n_block)) 1491 goto out_of_range; 1492 1493 /* Convert LBA to CHS */ 1494 track = (u32)block / dev->sectors; 1495 cyl = track / dev->heads; 1496 head = track % dev->heads; 1497 sect = (u32)block % dev->sectors + 1; 1498 1499 /* Check whether the converted CHS can fit. 1500 Cylinder: 0-65535 1501 Head: 0-15 1502 Sector: 1-255*/ 1503 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) 1504 goto out_of_range; 1505 1506 tf->command = ATA_CMD_VERIFY; 1507 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ 1508 tf->lbal = sect; 1509 tf->lbam = cyl; 1510 tf->lbah = cyl >> 8; 1511 tf->device |= head; 1512 } 1513 1514 return 0; 1515 1516 invalid_fld: 1517 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); 1518 return 1; 1519 1520 out_of_range: 1521 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); 1522 /* "Logical Block Address out of range" */ 1523 return 1; 1524 1525 nothing_to_do: 1526 scmd->result = SAM_STAT_GOOD; 1527 return 1; 1528 } 1529 1530 static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks) 1531 { 1532 struct request *rq = scsi_cmd_to_rq(scmd); 1533 u32 req_blocks; 1534 1535 if (!blk_rq_is_passthrough(rq)) 1536 return true; 1537 1538 req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size; 1539 if (n_blocks > req_blocks) 1540 return false; 1541 1542 return true; 1543 } 1544 1545 /** 1546 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one 1547 * @qc: Storage for translated ATA taskfile 1548 * 1549 * Converts any of six SCSI read/write commands into the 1550 * ATA counterpart, including starting sector (LBA), 1551 * sector count, and taking into account the device's LBA48 1552 * support. 1553 * 1554 * Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and 1555 * %WRITE_16 are currently supported. 1556 * 1557 * LOCKING: 1558 * spin_lock_irqsave(host lock) 1559 * 1560 * RETURNS: 1561 * Zero on success, non-zero on error. 1562 */ 1563 static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) 1564 { 1565 struct scsi_cmnd *scmd = qc->scsicmd; 1566 const u8 *cdb = scmd->cmnd; 1567 struct request *rq = scsi_cmd_to_rq(scmd); 1568 int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 1569 unsigned int tf_flags = 0; 1570 int dld = 0; 1571 u64 block; 1572 u32 n_block; 1573 int rc; 1574 u16 fp = 0; 1575 1576 switch (cdb[0]) { 1577 case WRITE_6: 1578 case WRITE_10: 1579 case WRITE_16: 1580 tf_flags |= ATA_TFLAG_WRITE; 1581 break; 1582 } 1583 1584 /* Calculate the SCSI LBA, transfer length and FUA. */ 1585 switch (cdb[0]) { 1586 case READ_10: 1587 case WRITE_10: 1588 if (unlikely(scmd->cmd_len < 10)) { 1589 fp = 9; 1590 goto invalid_fld; 1591 } 1592 scsi_10_lba_len(cdb, &block, &n_block); 1593 if (cdb[1] & (1 << 3)) 1594 tf_flags |= ATA_TFLAG_FUA; 1595 if (!ata_check_nblocks(scmd, n_block)) 1596 goto invalid_fld; 1597 break; 1598 case READ_6: 1599 case WRITE_6: 1600 if (unlikely(scmd->cmd_len < 6)) { 1601 fp = 5; 1602 goto invalid_fld; 1603 } 1604 scsi_6_lba_len(cdb, &block, &n_block); 1605 1606 /* for 6-byte r/w commands, transfer length 0 1607 * means 256 blocks of data, not 0 block. 1608 */ 1609 if (!n_block) 1610 n_block = 256; 1611 if (!ata_check_nblocks(scmd, n_block)) 1612 goto invalid_fld; 1613 break; 1614 case READ_16: 1615 case WRITE_16: 1616 if (unlikely(scmd->cmd_len < 16)) { 1617 fp = 15; 1618 goto invalid_fld; 1619 } 1620 scsi_16_lba_len(cdb, &block, &n_block); 1621 dld = scsi_dld(cdb); 1622 if (cdb[1] & (1 << 3)) 1623 tf_flags |= ATA_TFLAG_FUA; 1624 if (!ata_check_nblocks(scmd, n_block)) 1625 goto invalid_fld; 1626 break; 1627 default: 1628 fp = 0; 1629 goto invalid_fld; 1630 } 1631 1632 /* Check and compose ATA command */ 1633 if (!n_block) 1634 /* For 10-byte and 16-byte SCSI R/W commands, transfer 1635 * length 0 means transfer 0 block of data. 1636 * However, for ATA R/W commands, sector count 0 means 1637 * 256 or 65536 sectors, not 0 sectors as in SCSI. 1638 * 1639 * WARNING: one or two older ATA drives treat 0 as 0... 1640 */ 1641 goto nothing_to_do; 1642 1643 qc->flags |= ATA_QCFLAG_IO; 1644 qc->nbytes = n_block * scmd->device->sector_size; 1645 1646 rc = ata_build_rw_tf(qc, block, n_block, tf_flags, dld, class); 1647 if (likely(rc == 0)) 1648 return 0; 1649 1650 if (rc == -ERANGE) 1651 goto out_of_range; 1652 /* treat all other errors as -EINVAL, fall through */ 1653 invalid_fld: 1654 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); 1655 return 1; 1656 1657 out_of_range: 1658 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); 1659 /* "Logical Block Address out of range" */ 1660 return 1; 1661 1662 nothing_to_do: 1663 scmd->result = SAM_STAT_GOOD; 1664 return 1; 1665 } 1666 1667 static void ata_qc_done(struct ata_queued_cmd *qc) 1668 { 1669 struct scsi_cmnd *cmd = qc->scsicmd; 1670 void (*done)(struct scsi_cmnd *) = qc->scsidone; 1671 1672 ata_qc_free(qc); 1673 done(cmd); 1674 } 1675 1676 static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) 1677 { 1678 struct ata_port *ap = qc->ap; 1679 struct scsi_cmnd *cmd = qc->scsicmd; 1680 u8 *cdb = cmd->cmnd; 1681 int need_sense = (qc->err_mask != 0) && 1682 !(qc->flags & ATA_QCFLAG_SENSE_VALID); 1683 1684 /* For ATA pass thru (SAT) commands, generate a sense block if 1685 * user mandated it or if there's an error. Note that if we 1686 * generate because the user forced us to [CK_COND =1], a check 1687 * condition is generated and the ATA register values are returned 1688 * whether the command completed successfully or not. If there 1689 * was no error, we use the following sense data: 1690 * sk = RECOVERED ERROR 1691 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE 1692 */ 1693 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && 1694 ((cdb[2] & 0x20) || need_sense)) 1695 ata_gen_passthru_sense(qc); 1696 else if (need_sense) 1697 ata_gen_ata_sense(qc); 1698 else 1699 /* Keep the SCSI ML and status byte, clear host byte. */ 1700 cmd->result &= 0x0000ffff; 1701 1702 if (need_sense && !ap->ops->error_handler) 1703 ata_dump_status(ap, &qc->result_tf); 1704 1705 ata_qc_done(qc); 1706 } 1707 1708 /** 1709 * ata_scsi_translate - Translate then issue SCSI command to ATA device 1710 * @dev: ATA device to which the command is addressed 1711 * @cmd: SCSI command to execute 1712 * @xlat_func: Actor which translates @cmd to an ATA taskfile 1713 * 1714 * Our ->queuecommand() function has decided that the SCSI 1715 * command issued can be directly translated into an ATA 1716 * command, rather than handled internally. 1717 * 1718 * This function sets up an ata_queued_cmd structure for the 1719 * SCSI command, and sends that ata_queued_cmd to the hardware. 1720 * 1721 * The xlat_func argument (actor) returns 0 if ready to execute 1722 * ATA command, else 1 to finish translation. If 1 is returned 1723 * then cmd->result (and possibly cmd->sense_buffer) are assumed 1724 * to be set reflecting an error condition or clean (early) 1725 * termination. 1726 * 1727 * LOCKING: 1728 * spin_lock_irqsave(host lock) 1729 * 1730 * RETURNS: 1731 * 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command 1732 * needs to be deferred. 1733 */ 1734 static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, 1735 ata_xlat_func_t xlat_func) 1736 { 1737 struct ata_port *ap = dev->link->ap; 1738 struct ata_queued_cmd *qc; 1739 int rc; 1740 1741 qc = ata_scsi_qc_new(dev, cmd); 1742 if (!qc) 1743 goto err_mem; 1744 1745 /* data is present; dma-map it */ 1746 if (cmd->sc_data_direction == DMA_FROM_DEVICE || 1747 cmd->sc_data_direction == DMA_TO_DEVICE) { 1748 if (unlikely(scsi_bufflen(cmd) < 1)) { 1749 ata_dev_warn(dev, "WARNING: zero len r/w req\n"); 1750 goto err_did; 1751 } 1752 1753 ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd)); 1754 1755 qc->dma_dir = cmd->sc_data_direction; 1756 } 1757 1758 qc->complete_fn = ata_scsi_qc_complete; 1759 1760 if (xlat_func(qc)) 1761 goto early_finish; 1762 1763 if (ap->ops->qc_defer) { 1764 if ((rc = ap->ops->qc_defer(qc))) 1765 goto defer; 1766 } 1767 1768 /* select device, send command to hardware */ 1769 ata_qc_issue(qc); 1770 1771 return 0; 1772 1773 early_finish: 1774 ata_qc_free(qc); 1775 scsi_done(cmd); 1776 return 0; 1777 1778 err_did: 1779 ata_qc_free(qc); 1780 cmd->result = (DID_ERROR << 16); 1781 scsi_done(cmd); 1782 err_mem: 1783 return 0; 1784 1785 defer: 1786 ata_qc_free(qc); 1787 if (rc == ATA_DEFER_LINK) 1788 return SCSI_MLQUEUE_DEVICE_BUSY; 1789 else 1790 return SCSI_MLQUEUE_HOST_BUSY; 1791 } 1792 1793 struct ata_scsi_args { 1794 struct ata_device *dev; 1795 u16 *id; 1796 struct scsi_cmnd *cmd; 1797 }; 1798 1799 /** 1800 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators 1801 * @args: device IDENTIFY data / SCSI command of interest. 1802 * @actor: Callback hook for desired SCSI command simulator 1803 * 1804 * Takes care of the hard work of simulating a SCSI command... 1805 * Mapping the response buffer, calling the command's handler, 1806 * and handling the handler's return value. This return value 1807 * indicates whether the handler wishes the SCSI command to be 1808 * completed successfully (0), or not (in which case cmd->result 1809 * and sense buffer are assumed to be set). 1810 * 1811 * LOCKING: 1812 * spin_lock_irqsave(host lock) 1813 */ 1814 static void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 1815 unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf)) 1816 { 1817 unsigned int rc; 1818 struct scsi_cmnd *cmd = args->cmd; 1819 unsigned long flags; 1820 1821 spin_lock_irqsave(&ata_scsi_rbuf_lock, flags); 1822 1823 memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE); 1824 rc = actor(args, ata_scsi_rbuf); 1825 if (rc == 0) 1826 sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), 1827 ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE); 1828 1829 spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags); 1830 1831 if (rc == 0) 1832 cmd->result = SAM_STAT_GOOD; 1833 } 1834 1835 /** 1836 * ata_scsiop_inq_std - Simulate INQUIRY command 1837 * @args: device IDENTIFY data / SCSI command of interest. 1838 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1839 * 1840 * Returns standard device identification data associated 1841 * with non-VPD INQUIRY command output. 1842 * 1843 * LOCKING: 1844 * spin_lock_irqsave(host lock) 1845 */ 1846 static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) 1847 { 1848 static const u8 versions[] = { 1849 0x00, 1850 0x60, /* SAM-3 (no version claimed) */ 1851 1852 0x03, 1853 0x20, /* SBC-2 (no version claimed) */ 1854 1855 0x03, 1856 0x00 /* SPC-3 (no version claimed) */ 1857 }; 1858 static const u8 versions_zbc[] = { 1859 0x00, 1860 0xA0, /* SAM-5 (no version claimed) */ 1861 1862 0x06, 1863 0x00, /* SBC-4 (no version claimed) */ 1864 1865 0x05, 1866 0xC0, /* SPC-5 (no version claimed) */ 1867 1868 0x60, 1869 0x24, /* ZBC r05 */ 1870 }; 1871 1872 u8 hdr[] = { 1873 TYPE_DISK, 1874 0, 1875 0x5, /* claim SPC-3 version compatibility */ 1876 2, 1877 95 - 4, 1878 0, 1879 0, 1880 2 1881 }; 1882 1883 /* set scsi removable (RMB) bit per ata bit, or if the 1884 * AHCI port says it's external (Hotplug-capable, eSATA). 1885 */ 1886 if (ata_id_removable(args->id) || 1887 (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL)) 1888 hdr[1] |= (1 << 7); 1889 1890 if (args->dev->class == ATA_DEV_ZAC) { 1891 hdr[0] = TYPE_ZBC; 1892 hdr[2] = 0x7; /* claim SPC-5 version compatibility */ 1893 } 1894 1895 memcpy(rbuf, hdr, sizeof(hdr)); 1896 memcpy(&rbuf[8], "ATA ", 8); 1897 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16); 1898 1899 /* From SAT, use last 2 words from fw rev unless they are spaces */ 1900 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4); 1901 if (strncmp(&rbuf[32], " ", 4) == 0) 1902 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4); 1903 1904 if (rbuf[32] == 0 || rbuf[32] == ' ') 1905 memcpy(&rbuf[32], "n/a ", 4); 1906 1907 if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC) 1908 memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc)); 1909 else 1910 memcpy(rbuf + 58, versions, sizeof(versions)); 1911 1912 return 0; 1913 } 1914 1915 /** 1916 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages 1917 * @args: device IDENTIFY data / SCSI command of interest. 1918 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1919 * 1920 * Returns list of inquiry VPD pages available. 1921 * 1922 * LOCKING: 1923 * spin_lock_irqsave(host lock) 1924 */ 1925 static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf) 1926 { 1927 int i, num_pages = 0; 1928 static const u8 pages[] = { 1929 0x00, /* page 0x00, this page */ 1930 0x80, /* page 0x80, unit serial no page */ 1931 0x83, /* page 0x83, device ident page */ 1932 0x89, /* page 0x89, ata info page */ 1933 0xb0, /* page 0xb0, block limits page */ 1934 0xb1, /* page 0xb1, block device characteristics page */ 1935 0xb2, /* page 0xb2, thin provisioning page */ 1936 0xb6, /* page 0xb6, zoned block device characteristics */ 1937 0xb9, /* page 0xb9, concurrent positioning ranges */ 1938 }; 1939 1940 for (i = 0; i < sizeof(pages); i++) { 1941 if (pages[i] == 0xb6 && 1942 !(args->dev->flags & ATA_DFLAG_ZAC)) 1943 continue; 1944 rbuf[num_pages + 4] = pages[i]; 1945 num_pages++; 1946 } 1947 rbuf[3] = num_pages; /* number of supported VPD pages */ 1948 return 0; 1949 } 1950 1951 /** 1952 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number 1953 * @args: device IDENTIFY data / SCSI command of interest. 1954 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1955 * 1956 * Returns ATA device serial number. 1957 * 1958 * LOCKING: 1959 * spin_lock_irqsave(host lock) 1960 */ 1961 static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf) 1962 { 1963 static const u8 hdr[] = { 1964 0, 1965 0x80, /* this page code */ 1966 0, 1967 ATA_ID_SERNO_LEN, /* page len */ 1968 }; 1969 1970 memcpy(rbuf, hdr, sizeof(hdr)); 1971 ata_id_string(args->id, (unsigned char *) &rbuf[4], 1972 ATA_ID_SERNO, ATA_ID_SERNO_LEN); 1973 return 0; 1974 } 1975 1976 /** 1977 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity 1978 * @args: device IDENTIFY data / SCSI command of interest. 1979 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1980 * 1981 * Yields two logical unit device identification designators: 1982 * - vendor specific ASCII containing the ATA serial number 1983 * - SAT defined "t10 vendor id based" containing ASCII vendor 1984 * name ("ATA "), model and serial numbers. 1985 * 1986 * LOCKING: 1987 * spin_lock_irqsave(host lock) 1988 */ 1989 static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf) 1990 { 1991 const int sat_model_serial_desc_len = 68; 1992 int num; 1993 1994 rbuf[1] = 0x83; /* this page code */ 1995 num = 4; 1996 1997 /* piv=0, assoc=lu, code_set=ACSII, designator=vendor */ 1998 rbuf[num + 0] = 2; 1999 rbuf[num + 3] = ATA_ID_SERNO_LEN; 2000 num += 4; 2001 ata_id_string(args->id, (unsigned char *) rbuf + num, 2002 ATA_ID_SERNO, ATA_ID_SERNO_LEN); 2003 num += ATA_ID_SERNO_LEN; 2004 2005 /* SAT defined lu model and serial numbers descriptor */ 2006 /* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */ 2007 rbuf[num + 0] = 2; 2008 rbuf[num + 1] = 1; 2009 rbuf[num + 3] = sat_model_serial_desc_len; 2010 num += 4; 2011 memcpy(rbuf + num, "ATA ", 8); 2012 num += 8; 2013 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD, 2014 ATA_ID_PROD_LEN); 2015 num += ATA_ID_PROD_LEN; 2016 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO, 2017 ATA_ID_SERNO_LEN); 2018 num += ATA_ID_SERNO_LEN; 2019 2020 if (ata_id_has_wwn(args->id)) { 2021 /* SAT defined lu world wide name */ 2022 /* piv=0, assoc=lu, code_set=binary, designator=NAA */ 2023 rbuf[num + 0] = 1; 2024 rbuf[num + 1] = 3; 2025 rbuf[num + 3] = ATA_ID_WWN_LEN; 2026 num += 4; 2027 ata_id_string(args->id, (unsigned char *) rbuf + num, 2028 ATA_ID_WWN, ATA_ID_WWN_LEN); 2029 num += ATA_ID_WWN_LEN; 2030 } 2031 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */ 2032 return 0; 2033 } 2034 2035 /** 2036 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info 2037 * @args: device IDENTIFY data / SCSI command of interest. 2038 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 2039 * 2040 * Yields SAT-specified ATA VPD page. 2041 * 2042 * LOCKING: 2043 * spin_lock_irqsave(host lock) 2044 */ 2045 static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) 2046 { 2047 rbuf[1] = 0x89; /* our page code */ 2048 rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */ 2049 rbuf[3] = (0x238 & 0xff); 2050 2051 memcpy(&rbuf[8], "linux ", 8); 2052 memcpy(&rbuf[16], "libata ", 16); 2053 memcpy(&rbuf[32], DRV_VERSION, 4); 2054 2055 rbuf[36] = 0x34; /* force D2H Reg FIS (34h) */ 2056 rbuf[37] = (1 << 7); /* bit 7 indicates Command FIS */ 2057 /* TODO: PMP? */ 2058 2059 /* we don't store the ATA device signature, so we fake it */ 2060 rbuf[38] = ATA_DRDY; /* really, this is Status reg */ 2061 rbuf[40] = 0x1; 2062 rbuf[48] = 0x1; 2063 2064 rbuf[56] = ATA_CMD_ID_ATA; 2065 2066 memcpy(&rbuf[60], &args->id[0], 512); 2067 return 0; 2068 } 2069 2070 static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) 2071 { 2072 struct ata_device *dev = args->dev; 2073 u16 min_io_sectors; 2074 2075 rbuf[1] = 0xb0; 2076 rbuf[3] = 0x3c; /* required VPD size with unmap support */ 2077 2078 /* 2079 * Optimal transfer length granularity. 2080 * 2081 * This is always one physical block, but for disks with a smaller 2082 * logical than physical sector size we need to figure out what the 2083 * latter is. 2084 */ 2085 min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id); 2086 put_unaligned_be16(min_io_sectors, &rbuf[6]); 2087 2088 /* 2089 * Optimal unmap granularity. 2090 * 2091 * The ATA spec doesn't even know about a granularity or alignment 2092 * for the TRIM command. We can leave away most of the unmap related 2093 * VPD page entries, but we have specifify a granularity to signal 2094 * that we support some form of unmap - in thise case via WRITE SAME 2095 * with the unmap bit set. 2096 */ 2097 if (ata_id_has_trim(args->id)) { 2098 u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM; 2099 2100 if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) 2101 max_blocks = 128 << (20 - SECTOR_SHIFT); 2102 2103 put_unaligned_be64(max_blocks, &rbuf[36]); 2104 put_unaligned_be32(1, &rbuf[28]); 2105 } 2106 2107 return 0; 2108 } 2109 2110 static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf) 2111 { 2112 int form_factor = ata_id_form_factor(args->id); 2113 int media_rotation_rate = ata_id_rotation_rate(args->id); 2114 u8 zoned = ata_id_zoned_cap(args->id); 2115 2116 rbuf[1] = 0xb1; 2117 rbuf[3] = 0x3c; 2118 rbuf[4] = media_rotation_rate >> 8; 2119 rbuf[5] = media_rotation_rate; 2120 rbuf[7] = form_factor; 2121 if (zoned) 2122 rbuf[8] = (zoned << 4); 2123 2124 return 0; 2125 } 2126 2127 static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf) 2128 { 2129 /* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */ 2130 rbuf[1] = 0xb2; 2131 rbuf[3] = 0x4; 2132 rbuf[5] = 1 << 6; /* TPWS */ 2133 2134 return 0; 2135 } 2136 2137 static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf) 2138 { 2139 /* 2140 * zbc-r05 SCSI Zoned Block device characteristics VPD page 2141 */ 2142 rbuf[1] = 0xb6; 2143 rbuf[3] = 0x3C; 2144 2145 /* 2146 * URSWRZ bit is only meaningful for host-managed ZAC drives 2147 */ 2148 if (args->dev->zac_zoned_cap & 1) 2149 rbuf[4] |= 1; 2150 put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]); 2151 put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]); 2152 put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]); 2153 2154 return 0; 2155 } 2156 2157 static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf) 2158 { 2159 struct ata_cpr_log *cpr_log = args->dev->cpr_log; 2160 u8 *desc = &rbuf[64]; 2161 int i; 2162 2163 /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */ 2164 rbuf[1] = 0xb9; 2165 put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]); 2166 2167 for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) { 2168 desc[0] = cpr_log->cpr[i].num; 2169 desc[1] = cpr_log->cpr[i].num_storage_elements; 2170 put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]); 2171 put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]); 2172 } 2173 2174 return 0; 2175 } 2176 2177 /** 2178 * modecpy - Prepare response for MODE SENSE 2179 * @dest: output buffer 2180 * @src: data being copied 2181 * @n: length of mode page 2182 * @changeable: whether changeable parameters are requested 2183 * 2184 * Generate a generic MODE SENSE page for either current or changeable 2185 * parameters. 2186 * 2187 * LOCKING: 2188 * None. 2189 */ 2190 static void modecpy(u8 *dest, const u8 *src, int n, bool changeable) 2191 { 2192 if (changeable) { 2193 memcpy(dest, src, 2); 2194 memset(dest + 2, 0, n - 2); 2195 } else { 2196 memcpy(dest, src, n); 2197 } 2198 } 2199 2200 /** 2201 * ata_msense_caching - Simulate MODE SENSE caching info page 2202 * @id: device IDENTIFY data 2203 * @buf: output buffer 2204 * @changeable: whether changeable parameters are requested 2205 * 2206 * Generate a caching info page, which conditionally indicates 2207 * write caching to the SCSI layer, depending on device 2208 * capabilities. 2209 * 2210 * LOCKING: 2211 * None. 2212 */ 2213 static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable) 2214 { 2215 modecpy(buf, def_cache_mpage, sizeof(def_cache_mpage), changeable); 2216 if (changeable) { 2217 buf[2] |= (1 << 2); /* ata_mselect_caching() */ 2218 } else { 2219 buf[2] |= (ata_id_wcache_enabled(id) << 2); /* write cache enable */ 2220 buf[12] |= (!ata_id_rahead_enabled(id) << 5); /* disable read ahead */ 2221 } 2222 return sizeof(def_cache_mpage); 2223 } 2224 2225 /* 2226 * Simulate MODE SENSE control mode page, sub-page 0. 2227 */ 2228 static unsigned int ata_msense_control_spg0(struct ata_device *dev, u8 *buf, 2229 bool changeable) 2230 { 2231 modecpy(buf, def_control_mpage, 2232 sizeof(def_control_mpage), changeable); 2233 if (changeable) { 2234 /* ata_mselect_control() */ 2235 buf[2] |= (1 << 2); 2236 } else { 2237 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); 2238 2239 /* descriptor format sense data */ 2240 buf[2] |= (d_sense << 2); 2241 } 2242 2243 return sizeof(def_control_mpage); 2244 } 2245 2246 /* 2247 * Translate an ATA duration limit in microseconds to a SCSI duration limit 2248 * using the t2cdlunits 0xa (10ms). Since the SCSI duration limits are 2-bytes 2249 * only, take care of overflows. 2250 */ 2251 static inline u16 ata_xlat_cdl_limit(u8 *buf) 2252 { 2253 u32 limit = get_unaligned_le32(buf); 2254 2255 return min_t(u32, limit / 10000, 65535); 2256 } 2257 2258 /* 2259 * Simulate MODE SENSE control mode page, sub-pages 07h and 08h 2260 * (command duration limits T2A and T2B mode pages). 2261 */ 2262 static unsigned int ata_msense_control_spgt2(struct ata_device *dev, u8 *buf, 2263 u8 spg) 2264 { 2265 u8 *b, *cdl = dev->cdl, *desc; 2266 u32 policy; 2267 int i; 2268 2269 /* 2270 * Fill the subpage. The first four bytes of the T2A/T2B mode pages 2271 * are a header. The PAGE LENGTH field is the size of the page 2272 * excluding the header. 2273 */ 2274 buf[0] = CONTROL_MPAGE; 2275 buf[1] = spg; 2276 put_unaligned_be16(CDL_T2_SUB_MPAGE_LEN - 4, &buf[2]); 2277 if (spg == CDL_T2A_SUB_MPAGE) { 2278 /* 2279 * Read descriptors map to the T2A page: 2280 * set perf_vs_duration_guidleine. 2281 */ 2282 buf[7] = (cdl[0] & 0x03) << 4; 2283 desc = cdl + 64; 2284 } else { 2285 /* Write descriptors map to the T2B page */ 2286 desc = cdl + 288; 2287 } 2288 2289 /* Fill the T2 page descriptors */ 2290 b = &buf[8]; 2291 policy = get_unaligned_le32(&cdl[0]); 2292 for (i = 0; i < 7; i++, b += 32, desc += 32) { 2293 /* t2cdlunits: fixed to 10ms */ 2294 b[0] = 0x0a; 2295 2296 /* Max inactive time and its policy */ 2297 put_unaligned_be16(ata_xlat_cdl_limit(&desc[8]), &b[2]); 2298 b[6] = ((policy >> 8) & 0x0f) << 4; 2299 2300 /* Max active time and its policy */ 2301 put_unaligned_be16(ata_xlat_cdl_limit(&desc[4]), &b[4]); 2302 b[6] |= (policy >> 4) & 0x0f; 2303 2304 /* Command duration guideline and its policy */ 2305 put_unaligned_be16(ata_xlat_cdl_limit(&desc[16]), &b[10]); 2306 b[14] = policy & 0x0f; 2307 } 2308 2309 return CDL_T2_SUB_MPAGE_LEN; 2310 } 2311 2312 /* 2313 * Simulate MODE SENSE control mode page, sub-page f2h 2314 * (ATA feature control mode page). 2315 */ 2316 static unsigned int ata_msense_control_ata_feature(struct ata_device *dev, 2317 u8 *buf) 2318 { 2319 /* PS=0, SPF=1 */ 2320 buf[0] = CONTROL_MPAGE | (1 << 6); 2321 buf[1] = ATA_FEATURE_SUB_MPAGE; 2322 2323 /* 2324 * The first four bytes of ATA Feature Control mode page are a header. 2325 * The PAGE LENGTH field is the size of the page excluding the header. 2326 */ 2327 put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]); 2328 2329 if (dev->flags & ATA_DFLAG_CDL) 2330 buf[4] = 0x02; /* Support T2A and T2B pages */ 2331 else 2332 buf[4] = 0; 2333 2334 return ATA_FEATURE_SUB_MPAGE_LEN; 2335 } 2336 2337 /** 2338 * ata_msense_control - Simulate MODE SENSE control mode page 2339 * @dev: ATA device of interest 2340 * @buf: output buffer 2341 * @spg: sub-page code 2342 * @changeable: whether changeable parameters are requested 2343 * 2344 * Generate a generic MODE SENSE control mode page. 2345 * 2346 * LOCKING: 2347 * None. 2348 */ 2349 static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf, 2350 u8 spg, bool changeable) 2351 { 2352 unsigned int n; 2353 2354 switch (spg) { 2355 case 0: 2356 return ata_msense_control_spg0(dev, buf, changeable); 2357 case CDL_T2A_SUB_MPAGE: 2358 case CDL_T2B_SUB_MPAGE: 2359 return ata_msense_control_spgt2(dev, buf, spg); 2360 case ATA_FEATURE_SUB_MPAGE: 2361 return ata_msense_control_ata_feature(dev, buf); 2362 case ALL_SUB_MPAGES: 2363 n = ata_msense_control_spg0(dev, buf, changeable); 2364 n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE); 2365 n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE); 2366 n += ata_msense_control_ata_feature(dev, buf + n); 2367 return n; 2368 default: 2369 return 0; 2370 } 2371 } 2372 2373 /** 2374 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page 2375 * @buf: output buffer 2376 * @changeable: whether changeable parameters are requested 2377 * 2378 * Generate a generic MODE SENSE r/w error recovery page. 2379 * 2380 * LOCKING: 2381 * None. 2382 */ 2383 static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable) 2384 { 2385 modecpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage), 2386 changeable); 2387 return sizeof(def_rw_recovery_mpage); 2388 } 2389 2390 /** 2391 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands 2392 * @args: device IDENTIFY data / SCSI command of interest. 2393 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 2394 * 2395 * Simulate MODE SENSE commands. Assume this is invoked for direct 2396 * access devices (e.g. disks) only. There should be no block 2397 * descriptor for other device types. 2398 * 2399 * LOCKING: 2400 * spin_lock_irqsave(host lock) 2401 */ 2402 static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf) 2403 { 2404 struct ata_device *dev = args->dev; 2405 u8 *scsicmd = args->cmd->cmnd, *p = rbuf; 2406 static const u8 sat_blk_desc[] = { 2407 0, 0, 0, 0, /* number of blocks: sat unspecified */ 2408 0, 2409 0, 0x2, 0x0 /* block length: 512 bytes */ 2410 }; 2411 u8 pg, spg; 2412 unsigned int ebd, page_control, six_byte; 2413 u8 dpofua = 0, bp = 0xff; 2414 u16 fp; 2415 2416 six_byte = (scsicmd[0] == MODE_SENSE); 2417 ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */ 2418 /* 2419 * LLBA bit in msense(10) ignored (compliant) 2420 */ 2421 2422 page_control = scsicmd[2] >> 6; 2423 switch (page_control) { 2424 case 0: /* current */ 2425 case 1: /* changeable */ 2426 case 2: /* defaults */ 2427 break; /* supported */ 2428 case 3: /* saved */ 2429 goto saving_not_supp; 2430 default: 2431 fp = 2; 2432 bp = 6; 2433 goto invalid_fld; 2434 } 2435 2436 if (six_byte) 2437 p += 4 + (ebd ? 8 : 0); 2438 else 2439 p += 8 + (ebd ? 8 : 0); 2440 2441 pg = scsicmd[2] & 0x3f; 2442 spg = scsicmd[3]; 2443 2444 /* 2445 * Supported subpages: all subpages and sub-pages 07h, 08h and f2h of 2446 * the control page. 2447 */ 2448 if (spg) { 2449 switch (spg) { 2450 case ALL_SUB_MPAGES: 2451 break; 2452 case CDL_T2A_SUB_MPAGE: 2453 case CDL_T2B_SUB_MPAGE: 2454 case ATA_FEATURE_SUB_MPAGE: 2455 if (dev->flags & ATA_DFLAG_CDL && pg == CONTROL_MPAGE) 2456 break; 2457 fallthrough; 2458 default: 2459 fp = 3; 2460 goto invalid_fld; 2461 } 2462 } 2463 2464 switch(pg) { 2465 case RW_RECOVERY_MPAGE: 2466 p += ata_msense_rw_recovery(p, page_control == 1); 2467 break; 2468 2469 case CACHE_MPAGE: 2470 p += ata_msense_caching(args->id, p, page_control == 1); 2471 break; 2472 2473 case CONTROL_MPAGE: 2474 p += ata_msense_control(args->dev, p, spg, page_control == 1); 2475 break; 2476 2477 case ALL_MPAGES: 2478 p += ata_msense_rw_recovery(p, page_control == 1); 2479 p += ata_msense_caching(args->id, p, page_control == 1); 2480 p += ata_msense_control(args->dev, p, spg, page_control == 1); 2481 break; 2482 2483 default: /* invalid page code */ 2484 fp = 2; 2485 goto invalid_fld; 2486 } 2487 2488 if (dev->flags & ATA_DFLAG_FUA) 2489 dpofua = 1 << 4; 2490 2491 if (six_byte) { 2492 rbuf[0] = p - rbuf - 1; 2493 rbuf[2] |= dpofua; 2494 if (ebd) { 2495 rbuf[3] = sizeof(sat_blk_desc); 2496 memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc)); 2497 } 2498 } else { 2499 put_unaligned_be16(p - rbuf - 2, &rbuf[0]); 2500 rbuf[3] |= dpofua; 2501 if (ebd) { 2502 rbuf[7] = sizeof(sat_blk_desc); 2503 memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc)); 2504 } 2505 } 2506 return 0; 2507 2508 invalid_fld: 2509 ata_scsi_set_invalid_field(dev, args->cmd, fp, bp); 2510 return 1; 2511 2512 saving_not_supp: 2513 ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0); 2514 /* "Saving parameters not supported" */ 2515 return 1; 2516 } 2517 2518 /** 2519 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands 2520 * @args: device IDENTIFY data / SCSI command of interest. 2521 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 2522 * 2523 * Simulate READ CAPACITY commands. 2524 * 2525 * LOCKING: 2526 * None. 2527 */ 2528 static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) 2529 { 2530 struct ata_device *dev = args->dev; 2531 u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */ 2532 u32 sector_size; /* physical sector size in bytes */ 2533 u8 log2_per_phys; 2534 u16 lowest_aligned; 2535 2536 sector_size = ata_id_logical_sector_size(dev->id); 2537 log2_per_phys = ata_id_log2_per_physical_sector(dev->id); 2538 lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys); 2539 2540 if (args->cmd->cmnd[0] == READ_CAPACITY) { 2541 if (last_lba >= 0xffffffffULL) 2542 last_lba = 0xffffffff; 2543 2544 /* sector count, 32-bit */ 2545 rbuf[0] = last_lba >> (8 * 3); 2546 rbuf[1] = last_lba >> (8 * 2); 2547 rbuf[2] = last_lba >> (8 * 1); 2548 rbuf[3] = last_lba; 2549 2550 /* sector size */ 2551 rbuf[4] = sector_size >> (8 * 3); 2552 rbuf[5] = sector_size >> (8 * 2); 2553 rbuf[6] = sector_size >> (8 * 1); 2554 rbuf[7] = sector_size; 2555 } else { 2556 /* sector count, 64-bit */ 2557 rbuf[0] = last_lba >> (8 * 7); 2558 rbuf[1] = last_lba >> (8 * 6); 2559 rbuf[2] = last_lba >> (8 * 5); 2560 rbuf[3] = last_lba >> (8 * 4); 2561 rbuf[4] = last_lba >> (8 * 3); 2562 rbuf[5] = last_lba >> (8 * 2); 2563 rbuf[6] = last_lba >> (8 * 1); 2564 rbuf[7] = last_lba; 2565 2566 /* sector size */ 2567 rbuf[ 8] = sector_size >> (8 * 3); 2568 rbuf[ 9] = sector_size >> (8 * 2); 2569 rbuf[10] = sector_size >> (8 * 1); 2570 rbuf[11] = sector_size; 2571 2572 rbuf[12] = 0; 2573 rbuf[13] = log2_per_phys; 2574 rbuf[14] = (lowest_aligned >> 8) & 0x3f; 2575 rbuf[15] = lowest_aligned; 2576 2577 if (ata_id_has_trim(args->id) && 2578 !(dev->horkage & ATA_HORKAGE_NOTRIM)) { 2579 rbuf[14] |= 0x80; /* LBPME */ 2580 2581 if (ata_id_has_zero_after_trim(args->id) && 2582 dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) { 2583 ata_dev_info(dev, "Enabling discard_zeroes_data\n"); 2584 rbuf[14] |= 0x40; /* LBPRZ */ 2585 } 2586 } 2587 if (ata_id_zoned_cap(args->id) || 2588 args->dev->class == ATA_DEV_ZAC) 2589 rbuf[12] = (1 << 4); /* RC_BASIS */ 2590 } 2591 return 0; 2592 } 2593 2594 /** 2595 * ata_scsiop_report_luns - Simulate REPORT LUNS command 2596 * @args: device IDENTIFY data / SCSI command of interest. 2597 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 2598 * 2599 * Simulate REPORT LUNS command. 2600 * 2601 * LOCKING: 2602 * spin_lock_irqsave(host lock) 2603 */ 2604 static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf) 2605 { 2606 rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */ 2607 2608 return 0; 2609 } 2610 2611 static void atapi_sense_complete(struct ata_queued_cmd *qc) 2612 { 2613 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) { 2614 /* FIXME: not quite right; we don't want the 2615 * translation of taskfile registers into 2616 * a sense descriptors, since that's only 2617 * correct for ATA, not ATAPI 2618 */ 2619 ata_gen_passthru_sense(qc); 2620 } 2621 2622 ata_qc_done(qc); 2623 } 2624 2625 /* is it pointless to prefer PIO for "safety reasons"? */ 2626 static inline int ata_pio_use_silly(struct ata_port *ap) 2627 { 2628 return (ap->flags & ATA_FLAG_PIO_DMA); 2629 } 2630 2631 static void atapi_request_sense(struct ata_queued_cmd *qc) 2632 { 2633 struct ata_port *ap = qc->ap; 2634 struct scsi_cmnd *cmd = qc->scsicmd; 2635 2636 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2637 2638 #ifdef CONFIG_ATA_SFF 2639 if (ap->ops->sff_tf_read) 2640 ap->ops->sff_tf_read(ap, &qc->tf); 2641 #endif 2642 2643 /* fill these in, for the case where they are -not- overwritten */ 2644 cmd->sense_buffer[0] = 0x70; 2645 cmd->sense_buffer[2] = qc->tf.error >> 4; 2646 2647 ata_qc_reinit(qc); 2648 2649 /* setup sg table and init transfer direction */ 2650 sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); 2651 ata_sg_init(qc, &qc->sgent, 1); 2652 qc->dma_dir = DMA_FROM_DEVICE; 2653 2654 memset(&qc->cdb, 0, qc->dev->cdb_len); 2655 qc->cdb[0] = REQUEST_SENSE; 2656 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; 2657 2658 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2659 qc->tf.command = ATA_CMD_PACKET; 2660 2661 if (ata_pio_use_silly(ap)) { 2662 qc->tf.protocol = ATAPI_PROT_DMA; 2663 qc->tf.feature |= ATAPI_PKT_DMA; 2664 } else { 2665 qc->tf.protocol = ATAPI_PROT_PIO; 2666 qc->tf.lbam = SCSI_SENSE_BUFFERSIZE; 2667 qc->tf.lbah = 0; 2668 } 2669 qc->nbytes = SCSI_SENSE_BUFFERSIZE; 2670 2671 qc->complete_fn = atapi_sense_complete; 2672 2673 ata_qc_issue(qc); 2674 } 2675 2676 /* 2677 * ATAPI devices typically report zero for their SCSI version, and sometimes 2678 * deviate from the spec WRT response data format. If SCSI version is 2679 * reported as zero like normal, then we make the following fixups: 2680 * 1) Fake MMC-5 version, to indicate to the Linux scsi midlayer this is a 2681 * modern device. 2682 * 2) Ensure response data format / ATAPI information are always correct. 2683 */ 2684 static void atapi_fixup_inquiry(struct scsi_cmnd *cmd) 2685 { 2686 u8 buf[4]; 2687 2688 sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4); 2689 if (buf[2] == 0) { 2690 buf[2] = 0x5; 2691 buf[3] = 0x32; 2692 } 2693 sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4); 2694 } 2695 2696 static void atapi_qc_complete(struct ata_queued_cmd *qc) 2697 { 2698 struct scsi_cmnd *cmd = qc->scsicmd; 2699 unsigned int err_mask = qc->err_mask; 2700 2701 /* handle completion from new EH */ 2702 if (unlikely(qc->ap->ops->error_handler && 2703 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) { 2704 2705 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) { 2706 /* FIXME: not quite right; we don't want the 2707 * translation of taskfile registers into a 2708 * sense descriptors, since that's only 2709 * correct for ATA, not ATAPI 2710 */ 2711 ata_gen_passthru_sense(qc); 2712 } 2713 2714 /* SCSI EH automatically locks door if sdev->locked is 2715 * set. Sometimes door lock request continues to 2716 * fail, for example, when no media is present. This 2717 * creates a loop - SCSI EH issues door lock which 2718 * fails and gets invoked again to acquire sense data 2719 * for the failed command. 2720 * 2721 * If door lock fails, always clear sdev->locked to 2722 * avoid this infinite loop. 2723 * 2724 * This may happen before SCSI scan is complete. Make 2725 * sure qc->dev->sdev isn't NULL before dereferencing. 2726 */ 2727 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev) 2728 qc->dev->sdev->locked = 0; 2729 2730 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; 2731 ata_qc_done(qc); 2732 return; 2733 } 2734 2735 /* successful completion or old EH failure path */ 2736 if (unlikely(err_mask & AC_ERR_DEV)) { 2737 cmd->result = SAM_STAT_CHECK_CONDITION; 2738 atapi_request_sense(qc); 2739 return; 2740 } else if (unlikely(err_mask)) { 2741 /* FIXME: not quite right; we don't want the 2742 * translation of taskfile registers into 2743 * a sense descriptors, since that's only 2744 * correct for ATA, not ATAPI 2745 */ 2746 ata_gen_passthru_sense(qc); 2747 } else { 2748 if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0) 2749 atapi_fixup_inquiry(cmd); 2750 cmd->result = SAM_STAT_GOOD; 2751 } 2752 2753 ata_qc_done(qc); 2754 } 2755 /** 2756 * atapi_xlat - Initialize PACKET taskfile 2757 * @qc: command structure to be initialized 2758 * 2759 * LOCKING: 2760 * spin_lock_irqsave(host lock) 2761 * 2762 * RETURNS: 2763 * Zero on success, non-zero on failure. 2764 */ 2765 static unsigned int atapi_xlat(struct ata_queued_cmd *qc) 2766 { 2767 struct scsi_cmnd *scmd = qc->scsicmd; 2768 struct ata_device *dev = qc->dev; 2769 int nodata = (scmd->sc_data_direction == DMA_NONE); 2770 int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO); 2771 unsigned int nbytes; 2772 2773 memset(qc->cdb, 0, dev->cdb_len); 2774 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); 2775 2776 qc->complete_fn = atapi_qc_complete; 2777 2778 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2779 if (scmd->sc_data_direction == DMA_TO_DEVICE) { 2780 qc->tf.flags |= ATA_TFLAG_WRITE; 2781 } 2782 2783 qc->tf.command = ATA_CMD_PACKET; 2784 ata_qc_set_pc_nbytes(qc); 2785 2786 /* check whether ATAPI DMA is safe */ 2787 if (!nodata && !using_pio && atapi_check_dma(qc)) 2788 using_pio = 1; 2789 2790 /* Some controller variants snoop this value for Packet 2791 * transfers to do state machine and FIFO management. Thus we 2792 * want to set it properly, and for DMA where it is 2793 * effectively meaningless. 2794 */ 2795 nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024); 2796 2797 /* Most ATAPI devices which honor transfer chunk size don't 2798 * behave according to the spec when odd chunk size which 2799 * matches the transfer length is specified. If the number of 2800 * bytes to transfer is 2n+1. According to the spec, what 2801 * should happen is to indicate that 2n+1 is going to be 2802 * transferred and transfer 2n+2 bytes where the last byte is 2803 * padding. 2804 * 2805 * In practice, this doesn't happen. ATAPI devices first 2806 * indicate and transfer 2n bytes and then indicate and 2807 * transfer 2 bytes where the last byte is padding. 2808 * 2809 * This inconsistency confuses several controllers which 2810 * perform PIO using DMA such as Intel AHCIs and sil3124/32. 2811 * These controllers use actual number of transferred bytes to 2812 * update DMA pointer and transfer of 4n+2 bytes make those 2813 * controller push DMA pointer by 4n+4 bytes because SATA data 2814 * FISes are aligned to 4 bytes. This causes data corruption 2815 * and buffer overrun. 2816 * 2817 * Always setting nbytes to even number solves this problem 2818 * because then ATAPI devices don't have to split data at 2n 2819 * boundaries. 2820 */ 2821 if (nbytes & 0x1) 2822 nbytes++; 2823 2824 qc->tf.lbam = (nbytes & 0xFF); 2825 qc->tf.lbah = (nbytes >> 8); 2826 2827 if (nodata) 2828 qc->tf.protocol = ATAPI_PROT_NODATA; 2829 else if (using_pio) 2830 qc->tf.protocol = ATAPI_PROT_PIO; 2831 else { 2832 /* DMA data xfer */ 2833 qc->tf.protocol = ATAPI_PROT_DMA; 2834 qc->tf.feature |= ATAPI_PKT_DMA; 2835 2836 if ((dev->flags & ATA_DFLAG_DMADIR) && 2837 (scmd->sc_data_direction != DMA_TO_DEVICE)) 2838 /* some SATA bridges need us to indicate data xfer direction */ 2839 qc->tf.feature |= ATAPI_DMADIR; 2840 } 2841 2842 2843 /* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE 2844 as ATAPI tape drives don't get this right otherwise */ 2845 return 0; 2846 } 2847 2848 static struct ata_device *ata_find_dev(struct ata_port *ap, unsigned int devno) 2849 { 2850 /* 2851 * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case), 2852 * or 2 (IDE master + slave case). However, the former case includes 2853 * libsas hosted devices which are numbered per scsi host, leading 2854 * to devno potentially being larger than 0 but with each struct 2855 * ata_device having its own struct ata_port and struct ata_link. 2856 * To accommodate these, ignore devno and always use device number 0. 2857 */ 2858 if (likely(!sata_pmp_attached(ap))) { 2859 int link_max_devices = ata_link_max_devices(&ap->link); 2860 2861 if (link_max_devices == 1) 2862 return &ap->link.device[0]; 2863 2864 if (devno < link_max_devices) 2865 return &ap->link.device[devno]; 2866 2867 return NULL; 2868 } 2869 2870 /* 2871 * For PMP-attached devices, the device number corresponds to C 2872 * (channel) of SCSI [H:C:I:L], indicating the port pmp link 2873 * for the device. 2874 */ 2875 if (devno < ap->nr_pmp_links) 2876 return &ap->pmp_link[devno].device[0]; 2877 2878 return NULL; 2879 } 2880 2881 static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, 2882 const struct scsi_device *scsidev) 2883 { 2884 int devno; 2885 2886 /* skip commands not addressed to targets we simulate */ 2887 if (!sata_pmp_attached(ap)) { 2888 if (unlikely(scsidev->channel || scsidev->lun)) 2889 return NULL; 2890 devno = scsidev->id; 2891 } else { 2892 if (unlikely(scsidev->id || scsidev->lun)) 2893 return NULL; 2894 devno = scsidev->channel; 2895 } 2896 2897 return ata_find_dev(ap, devno); 2898 } 2899 2900 /** 2901 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd 2902 * @ap: ATA port to which the device is attached 2903 * @scsidev: SCSI device from which we derive the ATA device 2904 * 2905 * Given various information provided in struct scsi_cmnd, 2906 * map that onto an ATA bus, and using that mapping 2907 * determine which ata_device is associated with the 2908 * SCSI command to be sent. 2909 * 2910 * LOCKING: 2911 * spin_lock_irqsave(host lock) 2912 * 2913 * RETURNS: 2914 * Associated ATA device, or %NULL if not found. 2915 */ 2916 struct ata_device * 2917 ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev) 2918 { 2919 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev); 2920 2921 if (unlikely(!dev || !ata_dev_enabled(dev))) 2922 return NULL; 2923 2924 return dev; 2925 } 2926 2927 /* 2928 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value. 2929 * @byte1: Byte 1 from pass-thru CDB. 2930 * 2931 * RETURNS: 2932 * ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise. 2933 */ 2934 static u8 2935 ata_scsi_map_proto(u8 byte1) 2936 { 2937 switch((byte1 & 0x1e) >> 1) { 2938 case 3: /* Non-data */ 2939 return ATA_PROT_NODATA; 2940 2941 case 6: /* DMA */ 2942 case 10: /* UDMA Data-in */ 2943 case 11: /* UDMA Data-Out */ 2944 return ATA_PROT_DMA; 2945 2946 case 4: /* PIO Data-in */ 2947 case 5: /* PIO Data-out */ 2948 return ATA_PROT_PIO; 2949 2950 case 12: /* FPDMA */ 2951 return ATA_PROT_NCQ; 2952 2953 case 0: /* Hard Reset */ 2954 case 1: /* SRST */ 2955 case 8: /* Device Diagnostic */ 2956 case 9: /* Device Reset */ 2957 case 7: /* DMA Queued */ 2958 case 15: /* Return Response Info */ 2959 default: /* Reserved */ 2960 break; 2961 } 2962 2963 return ATA_PROT_UNKNOWN; 2964 } 2965 2966 /** 2967 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile 2968 * @qc: command structure to be initialized 2969 * 2970 * Handles either 12, 16, or 32-byte versions of the CDB. 2971 * 2972 * RETURNS: 2973 * Zero on success, non-zero on failure. 2974 */ 2975 static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) 2976 { 2977 struct ata_taskfile *tf = &(qc->tf); 2978 struct scsi_cmnd *scmd = qc->scsicmd; 2979 struct ata_device *dev = qc->dev; 2980 const u8 *cdb = scmd->cmnd; 2981 u16 fp; 2982 u16 cdb_offset = 0; 2983 2984 /* 7Fh variable length cmd means a ata pass-thru(32) */ 2985 if (cdb[0] == VARIABLE_LENGTH_CMD) 2986 cdb_offset = 9; 2987 2988 tf->protocol = ata_scsi_map_proto(cdb[1 + cdb_offset]); 2989 if (tf->protocol == ATA_PROT_UNKNOWN) { 2990 fp = 1; 2991 goto invalid_fld; 2992 } 2993 2994 if ((cdb[2 + cdb_offset] & 0x3) == 0) { 2995 /* 2996 * When T_LENGTH is zero (No data is transferred), dir should 2997 * be DMA_NONE. 2998 */ 2999 if (scmd->sc_data_direction != DMA_NONE) { 3000 fp = 2 + cdb_offset; 3001 goto invalid_fld; 3002 } 3003 3004 if (ata_is_ncq(tf->protocol)) 3005 tf->protocol = ATA_PROT_NCQ_NODATA; 3006 } 3007 3008 /* enable LBA */ 3009 tf->flags |= ATA_TFLAG_LBA; 3010 3011 /* 3012 * 12 and 16 byte CDBs use different offsets to 3013 * provide the various register values. 3014 */ 3015 switch (cdb[0]) { 3016 case ATA_16: 3017 /* 3018 * 16-byte CDB - may contain extended commands. 3019 * 3020 * If that is the case, copy the upper byte register values. 3021 */ 3022 if (cdb[1] & 0x01) { 3023 tf->hob_feature = cdb[3]; 3024 tf->hob_nsect = cdb[5]; 3025 tf->hob_lbal = cdb[7]; 3026 tf->hob_lbam = cdb[9]; 3027 tf->hob_lbah = cdb[11]; 3028 tf->flags |= ATA_TFLAG_LBA48; 3029 } else 3030 tf->flags &= ~ATA_TFLAG_LBA48; 3031 3032 /* 3033 * Always copy low byte, device and command registers. 3034 */ 3035 tf->feature = cdb[4]; 3036 tf->nsect = cdb[6]; 3037 tf->lbal = cdb[8]; 3038 tf->lbam = cdb[10]; 3039 tf->lbah = cdb[12]; 3040 tf->device = cdb[13]; 3041 tf->command = cdb[14]; 3042 break; 3043 case ATA_12: 3044 /* 3045 * 12-byte CDB - incapable of extended commands. 3046 */ 3047 tf->flags &= ~ATA_TFLAG_LBA48; 3048 3049 tf->feature = cdb[3]; 3050 tf->nsect = cdb[4]; 3051 tf->lbal = cdb[5]; 3052 tf->lbam = cdb[6]; 3053 tf->lbah = cdb[7]; 3054 tf->device = cdb[8]; 3055 tf->command = cdb[9]; 3056 break; 3057 default: 3058 /* 3059 * 32-byte CDB - may contain extended command fields. 3060 * 3061 * If that is the case, copy the upper byte register values. 3062 */ 3063 if (cdb[10] & 0x01) { 3064 tf->hob_feature = cdb[20]; 3065 tf->hob_nsect = cdb[22]; 3066 tf->hob_lbal = cdb[16]; 3067 tf->hob_lbam = cdb[15]; 3068 tf->hob_lbah = cdb[14]; 3069 tf->flags |= ATA_TFLAG_LBA48; 3070 } else 3071 tf->flags &= ~ATA_TFLAG_LBA48; 3072 3073 tf->feature = cdb[21]; 3074 tf->nsect = cdb[23]; 3075 tf->lbal = cdb[19]; 3076 tf->lbam = cdb[18]; 3077 tf->lbah = cdb[17]; 3078 tf->device = cdb[24]; 3079 tf->command = cdb[25]; 3080 tf->auxiliary = get_unaligned_be32(&cdb[28]); 3081 break; 3082 } 3083 3084 /* For NCQ commands copy the tag value */ 3085 if (ata_is_ncq(tf->protocol)) 3086 tf->nsect = qc->hw_tag << 3; 3087 3088 /* enforce correct master/slave bit */ 3089 tf->device = dev->devno ? 3090 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; 3091 3092 switch (tf->command) { 3093 /* READ/WRITE LONG use a non-standard sect_size */ 3094 case ATA_CMD_READ_LONG: 3095 case ATA_CMD_READ_LONG_ONCE: 3096 case ATA_CMD_WRITE_LONG: 3097 case ATA_CMD_WRITE_LONG_ONCE: 3098 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) { 3099 fp = 1; 3100 goto invalid_fld; 3101 } 3102 qc->sect_size = scsi_bufflen(scmd); 3103 break; 3104 3105 /* commands using reported Logical Block size (e.g. 512 or 4K) */ 3106 case ATA_CMD_CFA_WRITE_NE: 3107 case ATA_CMD_CFA_TRANS_SECT: 3108 case ATA_CMD_CFA_WRITE_MULT_NE: 3109 /* XXX: case ATA_CMD_CFA_WRITE_SECTORS_WITHOUT_ERASE: */ 3110 case ATA_CMD_READ: 3111 case ATA_CMD_READ_EXT: 3112 case ATA_CMD_READ_QUEUED: 3113 /* XXX: case ATA_CMD_READ_QUEUED_EXT: */ 3114 case ATA_CMD_FPDMA_READ: 3115 case ATA_CMD_READ_MULTI: 3116 case ATA_CMD_READ_MULTI_EXT: 3117 case ATA_CMD_PIO_READ: 3118 case ATA_CMD_PIO_READ_EXT: 3119 case ATA_CMD_READ_STREAM_DMA_EXT: 3120 case ATA_CMD_READ_STREAM_EXT: 3121 case ATA_CMD_VERIFY: 3122 case ATA_CMD_VERIFY_EXT: 3123 case ATA_CMD_WRITE: 3124 case ATA_CMD_WRITE_EXT: 3125 case ATA_CMD_WRITE_FUA_EXT: 3126 case ATA_CMD_WRITE_QUEUED: 3127 case ATA_CMD_WRITE_QUEUED_FUA_EXT: 3128 case ATA_CMD_FPDMA_WRITE: 3129 case ATA_CMD_WRITE_MULTI: 3130 case ATA_CMD_WRITE_MULTI_EXT: 3131 case ATA_CMD_WRITE_MULTI_FUA_EXT: 3132 case ATA_CMD_PIO_WRITE: 3133 case ATA_CMD_PIO_WRITE_EXT: 3134 case ATA_CMD_WRITE_STREAM_DMA_EXT: 3135 case ATA_CMD_WRITE_STREAM_EXT: 3136 qc->sect_size = scmd->device->sector_size; 3137 break; 3138 3139 /* Everything else uses 512 byte "sectors" */ 3140 default: 3141 qc->sect_size = ATA_SECT_SIZE; 3142 } 3143 3144 /* 3145 * Set flags so that all registers will be written, pass on 3146 * write indication (used for PIO/DMA setup), result TF is 3147 * copied back and we don't whine too much about its failure. 3148 */ 3149 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3150 if (scmd->sc_data_direction == DMA_TO_DEVICE) 3151 tf->flags |= ATA_TFLAG_WRITE; 3152 3153 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; 3154 3155 /* 3156 * Set transfer length. 3157 * 3158 * TODO: find out if we need to do more here to 3159 * cover scatter/gather case. 3160 */ 3161 ata_qc_set_pc_nbytes(qc); 3162 3163 /* We may not issue DMA commands if no DMA mode is set */ 3164 if (tf->protocol == ATA_PROT_DMA && !ata_dma_enabled(dev)) { 3165 fp = 1; 3166 goto invalid_fld; 3167 } 3168 3169 /* We may not issue NCQ commands to devices not supporting NCQ */ 3170 if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) { 3171 fp = 1; 3172 goto invalid_fld; 3173 } 3174 3175 /* sanity check for pio multi commands */ 3176 if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) { 3177 fp = 1; 3178 goto invalid_fld; 3179 } 3180 3181 if (is_multi_taskfile(tf)) { 3182 unsigned int multi_count = 1 << (cdb[1] >> 5); 3183 3184 /* compare the passed through multi_count 3185 * with the cached multi_count of libata 3186 */ 3187 if (multi_count != dev->multi_count) 3188 ata_dev_warn(dev, "invalid multi_count %u ignored\n", 3189 multi_count); 3190 } 3191 3192 /* 3193 * Filter SET_FEATURES - XFER MODE command -- otherwise, 3194 * SET_FEATURES - XFER MODE must be preceded/succeeded 3195 * by an update to hardware-specific registers for each 3196 * controller (i.e. the reason for ->set_piomode(), 3197 * ->set_dmamode(), and ->post_set_mode() hooks). 3198 */ 3199 if (tf->command == ATA_CMD_SET_FEATURES && 3200 tf->feature == SETFEATURES_XFER) { 3201 fp = (cdb[0] == ATA_16) ? 4 : 3; 3202 goto invalid_fld; 3203 } 3204 3205 /* 3206 * Filter TPM commands by default. These provide an 3207 * essentially uncontrolled encrypted "back door" between 3208 * applications and the disk. Set libata.allow_tpm=1 if you 3209 * have a real reason for wanting to use them. This ensures 3210 * that installed software cannot easily mess stuff up without 3211 * user intent. DVR type users will probably ship with this enabled 3212 * for movie content management. 3213 * 3214 * Note that for ATA8 we can issue a DCS change and DCS freeze lock 3215 * for this and should do in future but that it is not sufficient as 3216 * DCS is an optional feature set. Thus we also do the software filter 3217 * so that we comply with the TC consortium stated goal that the user 3218 * can turn off TC features of their system. 3219 */ 3220 if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) { 3221 fp = (cdb[0] == ATA_16) ? 14 : 9; 3222 goto invalid_fld; 3223 } 3224 3225 return 0; 3226 3227 invalid_fld: 3228 ata_scsi_set_invalid_field(dev, scmd, fp, 0xff); 3229 return 1; 3230 } 3231 3232 /** 3233 * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim 3234 * @cmd: SCSI command being translated 3235 * @trmax: Maximum number of entries that will fit in sector_size bytes. 3236 * @sector: Starting sector 3237 * @count: Total Range of request in logical sectors 3238 * 3239 * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted 3240 * descriptor. 3241 * 3242 * Upto 64 entries of the format: 3243 * 63:48 Range Length 3244 * 47:0 LBA 3245 * 3246 * Range Length of 0 is ignored. 3247 * LBA's should be sorted order and not overlap. 3248 * 3249 * NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET 3250 * 3251 * Return: Number of bytes copied into sglist. 3252 */ 3253 static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax, 3254 u64 sector, u32 count) 3255 { 3256 struct scsi_device *sdp = cmd->device; 3257 size_t len = sdp->sector_size; 3258 size_t r; 3259 __le64 *buf; 3260 u32 i = 0; 3261 unsigned long flags; 3262 3263 WARN_ON(len > ATA_SCSI_RBUF_SIZE); 3264 3265 if (len > ATA_SCSI_RBUF_SIZE) 3266 len = ATA_SCSI_RBUF_SIZE; 3267 3268 spin_lock_irqsave(&ata_scsi_rbuf_lock, flags); 3269 buf = ((void *)ata_scsi_rbuf); 3270 memset(buf, 0, len); 3271 while (i < trmax) { 3272 u64 entry = sector | 3273 ((u64)(count > 0xffff ? 0xffff : count) << 48); 3274 buf[i++] = __cpu_to_le64(entry); 3275 if (count <= 0xffff) 3276 break; 3277 count -= 0xffff; 3278 sector += 0xffff; 3279 } 3280 r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len); 3281 spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags); 3282 3283 return r; 3284 } 3285 3286 /** 3287 * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same 3288 * @qc: Command to be translated 3289 * 3290 * Translate a SCSI WRITE SAME command to be either a DSM TRIM command or 3291 * an SCT Write Same command. 3292 * Based on WRITE SAME has the UNMAP flag: 3293 * 3294 * - When set translate to DSM TRIM 3295 * - When clear translate to SCT Write Same 3296 */ 3297 static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) 3298 { 3299 struct ata_taskfile *tf = &qc->tf; 3300 struct scsi_cmnd *scmd = qc->scsicmd; 3301 struct scsi_device *sdp = scmd->device; 3302 size_t len = sdp->sector_size; 3303 struct ata_device *dev = qc->dev; 3304 const u8 *cdb = scmd->cmnd; 3305 u64 block; 3306 u32 n_block; 3307 const u32 trmax = len >> 3; 3308 u32 size; 3309 u16 fp; 3310 u8 bp = 0xff; 3311 u8 unmap = cdb[1] & 0x8; 3312 3313 /* we may not issue DMA commands if no DMA mode is set */ 3314 if (unlikely(!ata_dma_enabled(dev))) 3315 goto invalid_opcode; 3316 3317 /* 3318 * We only allow sending this command through the block layer, 3319 * as it modifies the DATA OUT buffer, which would corrupt user 3320 * memory for SG_IO commands. 3321 */ 3322 if (unlikely(blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))) 3323 goto invalid_opcode; 3324 3325 if (unlikely(scmd->cmd_len < 16)) { 3326 fp = 15; 3327 goto invalid_fld; 3328 } 3329 scsi_16_lba_len(cdb, &block, &n_block); 3330 3331 if (!unmap || 3332 (dev->horkage & ATA_HORKAGE_NOTRIM) || 3333 !ata_id_has_trim(dev->id)) { 3334 fp = 1; 3335 bp = 3; 3336 goto invalid_fld; 3337 } 3338 /* If the request is too large the cmd is invalid */ 3339 if (n_block > 0xffff * trmax) { 3340 fp = 2; 3341 goto invalid_fld; 3342 } 3343 3344 /* 3345 * WRITE SAME always has a sector sized buffer as payload, this 3346 * should never be a multiple entry S/G list. 3347 */ 3348 if (!scsi_sg_count(scmd)) 3349 goto invalid_param_len; 3350 3351 /* 3352 * size must match sector size in bytes 3353 * For DATA SET MANAGEMENT TRIM in ACS-2 nsect (aka count) 3354 * is defined as number of 512 byte blocks to be transferred. 3355 */ 3356 3357 size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block); 3358 if (size != len) 3359 goto invalid_param_len; 3360 3361 if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) { 3362 /* Newer devices support queued TRIM commands */ 3363 tf->protocol = ATA_PROT_NCQ; 3364 tf->command = ATA_CMD_FPDMA_SEND; 3365 tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; 3366 tf->nsect = qc->hw_tag << 3; 3367 tf->hob_feature = (size / 512) >> 8; 3368 tf->feature = size / 512; 3369 3370 tf->auxiliary = 1; 3371 } else { 3372 tf->protocol = ATA_PROT_DMA; 3373 tf->hob_feature = 0; 3374 tf->feature = ATA_DSM_TRIM; 3375 tf->hob_nsect = (size / 512) >> 8; 3376 tf->nsect = size / 512; 3377 tf->command = ATA_CMD_DSM; 3378 } 3379 3380 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | 3381 ATA_TFLAG_WRITE; 3382 3383 ata_qc_set_pc_nbytes(qc); 3384 3385 return 0; 3386 3387 invalid_fld: 3388 ata_scsi_set_invalid_field(dev, scmd, fp, bp); 3389 return 1; 3390 invalid_param_len: 3391 /* "Parameter list length error" */ 3392 ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); 3393 return 1; 3394 invalid_opcode: 3395 /* "Invalid command operation code" */ 3396 ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x20, 0x0); 3397 return 1; 3398 } 3399 3400 /** 3401 * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN 3402 * @args: device MAINTENANCE_IN data / SCSI command of interest. 3403 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 3404 * 3405 * Yields a subset to satisfy scsi_report_opcode() 3406 * 3407 * LOCKING: 3408 * spin_lock_irqsave(host lock) 3409 */ 3410 static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf) 3411 { 3412 struct ata_device *dev = args->dev; 3413 u8 *cdb = args->cmd->cmnd; 3414 u8 supported = 0, cdlp = 0, rwcdlp = 0; 3415 unsigned int err = 0; 3416 3417 if (cdb[2] != 1 && cdb[2] != 3) { 3418 ata_dev_warn(dev, "invalid command format %d\n", cdb[2]); 3419 err = 2; 3420 goto out; 3421 } 3422 3423 switch (cdb[3]) { 3424 case INQUIRY: 3425 case MODE_SENSE: 3426 case MODE_SENSE_10: 3427 case READ_CAPACITY: 3428 case SERVICE_ACTION_IN_16: 3429 case REPORT_LUNS: 3430 case REQUEST_SENSE: 3431 case SYNCHRONIZE_CACHE: 3432 case SYNCHRONIZE_CACHE_16: 3433 case REZERO_UNIT: 3434 case SEEK_6: 3435 case SEEK_10: 3436 case TEST_UNIT_READY: 3437 case SEND_DIAGNOSTIC: 3438 case MAINTENANCE_IN: 3439 case READ_6: 3440 case READ_10: 3441 case WRITE_6: 3442 case WRITE_10: 3443 case ATA_12: 3444 case ATA_16: 3445 case VERIFY: 3446 case VERIFY_16: 3447 case MODE_SELECT: 3448 case MODE_SELECT_10: 3449 case START_STOP: 3450 supported = 3; 3451 break; 3452 case READ_16: 3453 supported = 3; 3454 if (dev->flags & ATA_DFLAG_CDL) { 3455 /* 3456 * CDL read descriptors map to the T2A page, that is, 3457 * rwcdlp = 0x01 and cdlp = 0x01 3458 */ 3459 rwcdlp = 0x01; 3460 cdlp = 0x01 << 3; 3461 } 3462 break; 3463 case WRITE_16: 3464 supported = 3; 3465 if (dev->flags & ATA_DFLAG_CDL) { 3466 /* 3467 * CDL write descriptors map to the T2B page, that is, 3468 * rwcdlp = 0x01 and cdlp = 0x02 3469 */ 3470 rwcdlp = 0x01; 3471 cdlp = 0x02 << 3; 3472 } 3473 break; 3474 case ZBC_IN: 3475 case ZBC_OUT: 3476 if (ata_id_zoned_cap(dev->id) || 3477 dev->class == ATA_DEV_ZAC) 3478 supported = 3; 3479 break; 3480 case SECURITY_PROTOCOL_IN: 3481 case SECURITY_PROTOCOL_OUT: 3482 if (dev->flags & ATA_DFLAG_TRUSTED) 3483 supported = 3; 3484 break; 3485 default: 3486 break; 3487 } 3488 out: 3489 /* One command format */ 3490 rbuf[0] = rwcdlp; 3491 rbuf[1] = cdlp | supported; 3492 return err; 3493 } 3494 3495 /** 3496 * ata_scsi_report_zones_complete - convert ATA output 3497 * @qc: command structure returning the data 3498 * 3499 * Convert T-13 little-endian field representation into 3500 * T-10 big-endian field representation. 3501 * What a mess. 3502 */ 3503 static void ata_scsi_report_zones_complete(struct ata_queued_cmd *qc) 3504 { 3505 struct scsi_cmnd *scmd = qc->scsicmd; 3506 struct sg_mapping_iter miter; 3507 unsigned long flags; 3508 unsigned int bytes = 0; 3509 3510 sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd), 3511 SG_MITER_TO_SG | SG_MITER_ATOMIC); 3512 3513 local_irq_save(flags); 3514 while (sg_miter_next(&miter)) { 3515 unsigned int offset = 0; 3516 3517 if (bytes == 0) { 3518 char *hdr; 3519 u32 list_length; 3520 u64 max_lba, opt_lba; 3521 u16 same; 3522 3523 /* Swizzle header */ 3524 hdr = miter.addr; 3525 list_length = get_unaligned_le32(&hdr[0]); 3526 same = get_unaligned_le16(&hdr[4]); 3527 max_lba = get_unaligned_le64(&hdr[8]); 3528 opt_lba = get_unaligned_le64(&hdr[16]); 3529 put_unaligned_be32(list_length, &hdr[0]); 3530 hdr[4] = same & 0xf; 3531 put_unaligned_be64(max_lba, &hdr[8]); 3532 put_unaligned_be64(opt_lba, &hdr[16]); 3533 offset += 64; 3534 bytes += 64; 3535 } 3536 while (offset < miter.length) { 3537 char *rec; 3538 u8 cond, type, non_seq, reset; 3539 u64 size, start, wp; 3540 3541 /* Swizzle zone descriptor */ 3542 rec = miter.addr + offset; 3543 type = rec[0] & 0xf; 3544 cond = (rec[1] >> 4) & 0xf; 3545 non_seq = (rec[1] & 2); 3546 reset = (rec[1] & 1); 3547 size = get_unaligned_le64(&rec[8]); 3548 start = get_unaligned_le64(&rec[16]); 3549 wp = get_unaligned_le64(&rec[24]); 3550 rec[0] = type; 3551 rec[1] = (cond << 4) | non_seq | reset; 3552 put_unaligned_be64(size, &rec[8]); 3553 put_unaligned_be64(start, &rec[16]); 3554 put_unaligned_be64(wp, &rec[24]); 3555 WARN_ON(offset + 64 > miter.length); 3556 offset += 64; 3557 bytes += 64; 3558 } 3559 } 3560 sg_miter_stop(&miter); 3561 local_irq_restore(flags); 3562 3563 ata_scsi_qc_complete(qc); 3564 } 3565 3566 static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc) 3567 { 3568 struct ata_taskfile *tf = &qc->tf; 3569 struct scsi_cmnd *scmd = qc->scsicmd; 3570 const u8 *cdb = scmd->cmnd; 3571 u16 sect, fp = (u16)-1; 3572 u8 sa, options, bp = 0xff; 3573 u64 block; 3574 u32 n_block; 3575 3576 if (unlikely(scmd->cmd_len < 16)) { 3577 ata_dev_warn(qc->dev, "invalid cdb length %d\n", 3578 scmd->cmd_len); 3579 fp = 15; 3580 goto invalid_fld; 3581 } 3582 scsi_16_lba_len(cdb, &block, &n_block); 3583 if (n_block != scsi_bufflen(scmd)) { 3584 ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n", 3585 n_block, scsi_bufflen(scmd)); 3586 goto invalid_param_len; 3587 } 3588 sa = cdb[1] & 0x1f; 3589 if (sa != ZI_REPORT_ZONES) { 3590 ata_dev_warn(qc->dev, "invalid service action %d\n", sa); 3591 fp = 1; 3592 goto invalid_fld; 3593 } 3594 /* 3595 * ZAC allows only for transfers in 512 byte blocks, 3596 * and uses a 16 bit value for the transfer count. 3597 */ 3598 if ((n_block / 512) > 0xffff || n_block < 512 || (n_block % 512)) { 3599 ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block); 3600 goto invalid_param_len; 3601 } 3602 sect = n_block / 512; 3603 options = cdb[14] & 0xbf; 3604 3605 if (ata_ncq_enabled(qc->dev) && 3606 ata_fpdma_zac_mgmt_in_supported(qc->dev)) { 3607 tf->protocol = ATA_PROT_NCQ; 3608 tf->command = ATA_CMD_FPDMA_RECV; 3609 tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f; 3610 tf->nsect = qc->hw_tag << 3; 3611 tf->feature = sect & 0xff; 3612 tf->hob_feature = (sect >> 8) & 0xff; 3613 tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8); 3614 } else { 3615 tf->command = ATA_CMD_ZAC_MGMT_IN; 3616 tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES; 3617 tf->protocol = ATA_PROT_DMA; 3618 tf->hob_feature = options; 3619 tf->hob_nsect = (sect >> 8) & 0xff; 3620 tf->nsect = sect & 0xff; 3621 } 3622 tf->device = ATA_LBA; 3623 tf->lbah = (block >> 16) & 0xff; 3624 tf->lbam = (block >> 8) & 0xff; 3625 tf->lbal = block & 0xff; 3626 tf->hob_lbah = (block >> 40) & 0xff; 3627 tf->hob_lbam = (block >> 32) & 0xff; 3628 tf->hob_lbal = (block >> 24) & 0xff; 3629 3630 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; 3631 qc->flags |= ATA_QCFLAG_RESULT_TF; 3632 3633 ata_qc_set_pc_nbytes(qc); 3634 3635 qc->complete_fn = ata_scsi_report_zones_complete; 3636 3637 return 0; 3638 3639 invalid_fld: 3640 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); 3641 return 1; 3642 3643 invalid_param_len: 3644 /* "Parameter list length error" */ 3645 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); 3646 return 1; 3647 } 3648 3649 static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) 3650 { 3651 struct ata_taskfile *tf = &qc->tf; 3652 struct scsi_cmnd *scmd = qc->scsicmd; 3653 struct ata_device *dev = qc->dev; 3654 const u8 *cdb = scmd->cmnd; 3655 u8 all, sa; 3656 u64 block; 3657 u32 n_block; 3658 u16 fp = (u16)-1; 3659 3660 if (unlikely(scmd->cmd_len < 16)) { 3661 fp = 15; 3662 goto invalid_fld; 3663 } 3664 3665 sa = cdb[1] & 0x1f; 3666 if ((sa != ZO_CLOSE_ZONE) && (sa != ZO_FINISH_ZONE) && 3667 (sa != ZO_OPEN_ZONE) && (sa != ZO_RESET_WRITE_POINTER)) { 3668 fp = 1; 3669 goto invalid_fld; 3670 } 3671 3672 scsi_16_lba_len(cdb, &block, &n_block); 3673 if (n_block) { 3674 /* 3675 * ZAC MANAGEMENT OUT doesn't define any length 3676 */ 3677 goto invalid_param_len; 3678 } 3679 3680 all = cdb[14] & 0x1; 3681 if (all) { 3682 /* 3683 * Ignore the block address (zone ID) as defined by ZBC. 3684 */ 3685 block = 0; 3686 } else if (block >= dev->n_sectors) { 3687 /* 3688 * Block must be a valid zone ID (a zone start LBA). 3689 */ 3690 fp = 2; 3691 goto invalid_fld; 3692 } 3693 3694 if (ata_ncq_enabled(qc->dev) && 3695 ata_fpdma_zac_mgmt_out_supported(qc->dev)) { 3696 tf->protocol = ATA_PROT_NCQ_NODATA; 3697 tf->command = ATA_CMD_NCQ_NON_DATA; 3698 tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT; 3699 tf->nsect = qc->hw_tag << 3; 3700 tf->auxiliary = sa | ((u16)all << 8); 3701 } else { 3702 tf->protocol = ATA_PROT_NODATA; 3703 tf->command = ATA_CMD_ZAC_MGMT_OUT; 3704 tf->feature = sa; 3705 tf->hob_feature = all; 3706 } 3707 tf->lbah = (block >> 16) & 0xff; 3708 tf->lbam = (block >> 8) & 0xff; 3709 tf->lbal = block & 0xff; 3710 tf->hob_lbah = (block >> 40) & 0xff; 3711 tf->hob_lbam = (block >> 32) & 0xff; 3712 tf->hob_lbal = (block >> 24) & 0xff; 3713 tf->device = ATA_LBA; 3714 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; 3715 3716 return 0; 3717 3718 invalid_fld: 3719 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); 3720 return 1; 3721 invalid_param_len: 3722 /* "Parameter list length error" */ 3723 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); 3724 return 1; 3725 } 3726 3727 /** 3728 * ata_mselect_caching - Simulate MODE SELECT for caching info page 3729 * @qc: Storage for translated ATA taskfile 3730 * @buf: input buffer 3731 * @len: number of valid bytes in the input buffer 3732 * @fp: out parameter for the failed field on error 3733 * 3734 * Prepare a taskfile to modify caching information for the device. 3735 * 3736 * LOCKING: 3737 * None. 3738 */ 3739 static int ata_mselect_caching(struct ata_queued_cmd *qc, 3740 const u8 *buf, int len, u16 *fp) 3741 { 3742 struct ata_taskfile *tf = &qc->tf; 3743 struct ata_device *dev = qc->dev; 3744 u8 mpage[CACHE_MPAGE_LEN]; 3745 u8 wce; 3746 int i; 3747 3748 /* 3749 * The first two bytes of def_cache_mpage are a header, so offsets 3750 * in mpage are off by 2 compared to buf. Same for len. 3751 */ 3752 3753 if (len != CACHE_MPAGE_LEN - 2) { 3754 *fp = min(len, CACHE_MPAGE_LEN - 2); 3755 return -EINVAL; 3756 } 3757 3758 wce = buf[0] & (1 << 2); 3759 3760 /* 3761 * Check that read-only bits are not modified. 3762 */ 3763 ata_msense_caching(dev->id, mpage, false); 3764 for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) { 3765 if (i == 0) 3766 continue; 3767 if (mpage[i + 2] != buf[i]) { 3768 *fp = i; 3769 return -EINVAL; 3770 } 3771 } 3772 3773 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 3774 tf->protocol = ATA_PROT_NODATA; 3775 tf->nsect = 0; 3776 tf->command = ATA_CMD_SET_FEATURES; 3777 tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF; 3778 return 0; 3779 } 3780 3781 /* 3782 * Simulate MODE SELECT control mode page, sub-page 0. 3783 */ 3784 static int ata_mselect_control_spg0(struct ata_queued_cmd *qc, 3785 const u8 *buf, int len, u16 *fp) 3786 { 3787 struct ata_device *dev = qc->dev; 3788 u8 mpage[CONTROL_MPAGE_LEN]; 3789 u8 d_sense; 3790 int i; 3791 3792 /* 3793 * The first two bytes of def_control_mpage are a header, so offsets 3794 * in mpage are off by 2 compared to buf. Same for len. 3795 */ 3796 3797 if (len != CONTROL_MPAGE_LEN - 2) { 3798 *fp = min(len, CONTROL_MPAGE_LEN - 2); 3799 return -EINVAL; 3800 } 3801 3802 d_sense = buf[0] & (1 << 2); 3803 3804 /* 3805 * Check that read-only bits are not modified. 3806 */ 3807 ata_msense_control_spg0(dev, mpage, false); 3808 for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) { 3809 if (i == 0) 3810 continue; 3811 if (mpage[2 + i] != buf[i]) { 3812 *fp = i; 3813 return -EINVAL; 3814 } 3815 } 3816 if (d_sense & (1 << 2)) 3817 dev->flags |= ATA_DFLAG_D_SENSE; 3818 else 3819 dev->flags &= ~ATA_DFLAG_D_SENSE; 3820 return 0; 3821 } 3822 3823 /* 3824 * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode 3825 * page) into a SET FEATURES command. 3826 */ 3827 static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc, 3828 const u8 *buf, int len, 3829 u16 *fp) 3830 { 3831 struct ata_device *dev = qc->dev; 3832 struct ata_taskfile *tf = &qc->tf; 3833 u8 cdl_action; 3834 3835 /* 3836 * The first four bytes of ATA Feature Control mode page are a header, 3837 * so offsets in mpage are off by 4 compared to buf. Same for len. 3838 */ 3839 if (len != ATA_FEATURE_SUB_MPAGE_LEN - 4) { 3840 *fp = min(len, ATA_FEATURE_SUB_MPAGE_LEN - 4); 3841 return -EINVAL; 3842 } 3843 3844 /* Check cdl_ctrl */ 3845 switch (buf[0] & 0x03) { 3846 case 0: 3847 /* Disable CDL */ 3848 cdl_action = 0; 3849 dev->flags &= ~ATA_DFLAG_CDL_ENABLED; 3850 break; 3851 case 0x02: 3852 /* Enable CDL T2A/T2B: NCQ priority must be disabled */ 3853 if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) { 3854 ata_dev_err(dev, 3855 "NCQ priority must be disabled to enable CDL\n"); 3856 return -EINVAL; 3857 } 3858 cdl_action = 1; 3859 dev->flags |= ATA_DFLAG_CDL_ENABLED; 3860 break; 3861 default: 3862 *fp = 0; 3863 return -EINVAL; 3864 } 3865 3866 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 3867 tf->protocol = ATA_PROT_NODATA; 3868 tf->command = ATA_CMD_SET_FEATURES; 3869 tf->feature = SETFEATURES_CDL; 3870 tf->nsect = cdl_action; 3871 3872 return 1; 3873 } 3874 3875 /** 3876 * ata_mselect_control - Simulate MODE SELECT for control page 3877 * @qc: Storage for translated ATA taskfile 3878 * @spg: target sub-page of the control page 3879 * @buf: input buffer 3880 * @len: number of valid bytes in the input buffer 3881 * @fp: out parameter for the failed field on error 3882 * 3883 * Prepare a taskfile to modify caching information for the device. 3884 * 3885 * LOCKING: 3886 * None. 3887 */ 3888 static int ata_mselect_control(struct ata_queued_cmd *qc, u8 spg, 3889 const u8 *buf, int len, u16 *fp) 3890 { 3891 switch (spg) { 3892 case 0: 3893 return ata_mselect_control_spg0(qc, buf, len, fp); 3894 case ATA_FEATURE_SUB_MPAGE: 3895 return ata_mselect_control_ata_feature(qc, buf, len, fp); 3896 default: 3897 return -EINVAL; 3898 } 3899 } 3900 3901 /** 3902 * ata_scsi_mode_select_xlat - Simulate MODE SELECT 6, 10 commands 3903 * @qc: Storage for translated ATA taskfile 3904 * 3905 * Converts a MODE SELECT command to an ATA SET FEATURES taskfile. 3906 * Assume this is invoked for direct access devices (e.g. disks) only. 3907 * There should be no block descriptor for other device types. 3908 * 3909 * LOCKING: 3910 * spin_lock_irqsave(host lock) 3911 */ 3912 static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) 3913 { 3914 struct scsi_cmnd *scmd = qc->scsicmd; 3915 const u8 *cdb = scmd->cmnd; 3916 u8 pg, spg; 3917 unsigned six_byte, pg_len, hdr_len, bd_len; 3918 int len, ret; 3919 u16 fp = (u16)-1; 3920 u8 bp = 0xff; 3921 u8 buffer[64]; 3922 const u8 *p = buffer; 3923 3924 six_byte = (cdb[0] == MODE_SELECT); 3925 if (six_byte) { 3926 if (scmd->cmd_len < 5) { 3927 fp = 4; 3928 goto invalid_fld; 3929 } 3930 3931 len = cdb[4]; 3932 hdr_len = 4; 3933 } else { 3934 if (scmd->cmd_len < 9) { 3935 fp = 8; 3936 goto invalid_fld; 3937 } 3938 3939 len = get_unaligned_be16(&cdb[7]); 3940 hdr_len = 8; 3941 } 3942 3943 /* We only support PF=1, SP=0. */ 3944 if ((cdb[1] & 0x11) != 0x10) { 3945 fp = 1; 3946 bp = (cdb[1] & 0x01) ? 1 : 5; 3947 goto invalid_fld; 3948 } 3949 3950 /* Test early for possible overrun. */ 3951 if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) 3952 goto invalid_param_len; 3953 3954 /* Move past header and block descriptors. */ 3955 if (len < hdr_len) 3956 goto invalid_param_len; 3957 3958 if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd), 3959 buffer, sizeof(buffer))) 3960 goto invalid_param_len; 3961 3962 if (six_byte) 3963 bd_len = p[3]; 3964 else 3965 bd_len = get_unaligned_be16(&p[6]); 3966 3967 len -= hdr_len; 3968 p += hdr_len; 3969 if (len < bd_len) 3970 goto invalid_param_len; 3971 if (bd_len != 0 && bd_len != 8) { 3972 fp = (six_byte) ? 3 : 6; 3973 fp += bd_len + hdr_len; 3974 goto invalid_param; 3975 } 3976 3977 len -= bd_len; 3978 p += bd_len; 3979 if (len == 0) 3980 goto skip; 3981 3982 /* Parse both possible formats for the mode page headers. */ 3983 pg = p[0] & 0x3f; 3984 if (p[0] & 0x40) { 3985 if (len < 4) 3986 goto invalid_param_len; 3987 3988 spg = p[1]; 3989 pg_len = get_unaligned_be16(&p[2]); 3990 p += 4; 3991 len -= 4; 3992 } else { 3993 if (len < 2) 3994 goto invalid_param_len; 3995 3996 spg = 0; 3997 pg_len = p[1]; 3998 p += 2; 3999 len -= 2; 4000 } 4001 4002 /* 4003 * Supported subpages: all subpages and ATA feature sub-page f2h of 4004 * the control page. 4005 */ 4006 if (spg) { 4007 switch (spg) { 4008 case ALL_SUB_MPAGES: 4009 /* All subpages is not supported for the control page */ 4010 if (pg == CONTROL_MPAGE) { 4011 fp = (p[0] & 0x40) ? 1 : 0; 4012 fp += hdr_len + bd_len; 4013 goto invalid_param; 4014 } 4015 break; 4016 case ATA_FEATURE_SUB_MPAGE: 4017 if (qc->dev->flags & ATA_DFLAG_CDL && 4018 pg == CONTROL_MPAGE) 4019 break; 4020 fallthrough; 4021 default: 4022 fp = (p[0] & 0x40) ? 1 : 0; 4023 fp += hdr_len + bd_len; 4024 goto invalid_param; 4025 } 4026 } 4027 if (pg_len > len) 4028 goto invalid_param_len; 4029 4030 switch (pg) { 4031 case CACHE_MPAGE: 4032 if (ata_mselect_caching(qc, p, pg_len, &fp) < 0) { 4033 fp += hdr_len + bd_len; 4034 goto invalid_param; 4035 } 4036 break; 4037 case CONTROL_MPAGE: 4038 ret = ata_mselect_control(qc, spg, p, pg_len, &fp); 4039 if (ret < 0) { 4040 fp += hdr_len + bd_len; 4041 goto invalid_param; 4042 } 4043 if (!ret) 4044 goto skip; /* No ATA command to send */ 4045 break; 4046 default: 4047 /* Invalid page code */ 4048 fp = bd_len + hdr_len; 4049 goto invalid_param; 4050 } 4051 4052 /* 4053 * Only one page has changeable data, so we only support setting one 4054 * page at a time. 4055 */ 4056 if (len > pg_len) 4057 goto invalid_param; 4058 4059 return 0; 4060 4061 invalid_fld: 4062 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); 4063 return 1; 4064 4065 invalid_param: 4066 ata_scsi_set_invalid_parameter(qc->dev, scmd, fp); 4067 return 1; 4068 4069 invalid_param_len: 4070 /* "Parameter list length error" */ 4071 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); 4072 return 1; 4073 4074 skip: 4075 scmd->result = SAM_STAT_GOOD; 4076 return 1; 4077 } 4078 4079 static u8 ata_scsi_trusted_op(u32 len, bool send, bool dma) 4080 { 4081 if (len == 0) 4082 return ATA_CMD_TRUSTED_NONDATA; 4083 else if (send) 4084 return dma ? ATA_CMD_TRUSTED_SND_DMA : ATA_CMD_TRUSTED_SND; 4085 else 4086 return dma ? ATA_CMD_TRUSTED_RCV_DMA : ATA_CMD_TRUSTED_RCV; 4087 } 4088 4089 static unsigned int ata_scsi_security_inout_xlat(struct ata_queued_cmd *qc) 4090 { 4091 struct scsi_cmnd *scmd = qc->scsicmd; 4092 const u8 *cdb = scmd->cmnd; 4093 struct ata_taskfile *tf = &qc->tf; 4094 u8 secp = cdb[1]; 4095 bool send = (cdb[0] == SECURITY_PROTOCOL_OUT); 4096 u16 spsp = get_unaligned_be16(&cdb[2]); 4097 u32 len = get_unaligned_be32(&cdb[6]); 4098 bool dma = !(qc->dev->flags & ATA_DFLAG_PIO); 4099 4100 /* 4101 * We don't support the ATA "security" protocol. 4102 */ 4103 if (secp == 0xef) { 4104 ata_scsi_set_invalid_field(qc->dev, scmd, 1, 0); 4105 return 1; 4106 } 4107 4108 if (cdb[4] & 7) { /* INC_512 */ 4109 if (len > 0xffff) { 4110 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); 4111 return 1; 4112 } 4113 } else { 4114 if (len > 0x01fffe00) { 4115 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); 4116 return 1; 4117 } 4118 4119 /* convert to the sector-based ATA addressing */ 4120 len = (len + 511) / 512; 4121 } 4122 4123 tf->protocol = dma ? ATA_PROT_DMA : ATA_PROT_PIO; 4124 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR | ATA_TFLAG_LBA; 4125 if (send) 4126 tf->flags |= ATA_TFLAG_WRITE; 4127 tf->command = ata_scsi_trusted_op(len, send, dma); 4128 tf->feature = secp; 4129 tf->lbam = spsp & 0xff; 4130 tf->lbah = spsp >> 8; 4131 4132 if (len) { 4133 tf->nsect = len & 0xff; 4134 tf->lbal = len >> 8; 4135 } else { 4136 if (!send) 4137 tf->lbah = (1 << 7); 4138 } 4139 4140 ata_qc_set_pc_nbytes(qc); 4141 return 0; 4142 } 4143 4144 /** 4145 * ata_scsi_var_len_cdb_xlat - SATL variable length CDB to Handler 4146 * @qc: Command to be translated 4147 * 4148 * Translate a SCSI variable length CDB to specified commands. 4149 * It checks a service action value in CDB to call corresponding handler. 4150 * 4151 * RETURNS: 4152 * Zero on success, non-zero on failure 4153 * 4154 */ 4155 static unsigned int ata_scsi_var_len_cdb_xlat(struct ata_queued_cmd *qc) 4156 { 4157 struct scsi_cmnd *scmd = qc->scsicmd; 4158 const u8 *cdb = scmd->cmnd; 4159 const u16 sa = get_unaligned_be16(&cdb[8]); 4160 4161 /* 4162 * if service action represents a ata pass-thru(32) command, 4163 * then pass it to ata_scsi_pass_thru handler. 4164 */ 4165 if (sa == ATA_32) 4166 return ata_scsi_pass_thru(qc); 4167 4168 /* unsupported service action */ 4169 return 1; 4170 } 4171 4172 /** 4173 * ata_get_xlat_func - check if SCSI to ATA translation is possible 4174 * @dev: ATA device 4175 * @cmd: SCSI command opcode to consider 4176 * 4177 * Look up the SCSI command given, and determine whether the 4178 * SCSI command is to be translated or simulated. 4179 * 4180 * RETURNS: 4181 * Pointer to translation function if possible, %NULL if not. 4182 */ 4183 4184 static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) 4185 { 4186 switch (cmd) { 4187 case READ_6: 4188 case READ_10: 4189 case READ_16: 4190 4191 case WRITE_6: 4192 case WRITE_10: 4193 case WRITE_16: 4194 return ata_scsi_rw_xlat; 4195 4196 case WRITE_SAME_16: 4197 return ata_scsi_write_same_xlat; 4198 4199 case SYNCHRONIZE_CACHE: 4200 case SYNCHRONIZE_CACHE_16: 4201 if (ata_try_flush_cache(dev)) 4202 return ata_scsi_flush_xlat; 4203 break; 4204 4205 case VERIFY: 4206 case VERIFY_16: 4207 return ata_scsi_verify_xlat; 4208 4209 case ATA_12: 4210 case ATA_16: 4211 return ata_scsi_pass_thru; 4212 4213 case VARIABLE_LENGTH_CMD: 4214 return ata_scsi_var_len_cdb_xlat; 4215 4216 case MODE_SELECT: 4217 case MODE_SELECT_10: 4218 return ata_scsi_mode_select_xlat; 4219 4220 case ZBC_IN: 4221 return ata_scsi_zbc_in_xlat; 4222 4223 case ZBC_OUT: 4224 return ata_scsi_zbc_out_xlat; 4225 4226 case SECURITY_PROTOCOL_IN: 4227 case SECURITY_PROTOCOL_OUT: 4228 if (!(dev->flags & ATA_DFLAG_TRUSTED)) 4229 break; 4230 return ata_scsi_security_inout_xlat; 4231 4232 case START_STOP: 4233 return ata_scsi_start_stop_xlat; 4234 } 4235 4236 return NULL; 4237 } 4238 4239 int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev) 4240 { 4241 struct ata_port *ap = dev->link->ap; 4242 u8 scsi_op = scmd->cmnd[0]; 4243 ata_xlat_func_t xlat_func; 4244 4245 /* 4246 * scsi_queue_rq() will defer commands if scsi_host_in_recovery(). 4247 * However, this check is done without holding the ap->lock (a libata 4248 * specific lock), so we can have received an error irq since then, 4249 * therefore we must check if EH is pending, while holding ap->lock. 4250 */ 4251 if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) 4252 return SCSI_MLQUEUE_DEVICE_BUSY; 4253 4254 if (unlikely(!scmd->cmd_len)) 4255 goto bad_cdb_len; 4256 4257 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { 4258 if (unlikely(scmd->cmd_len > dev->cdb_len)) 4259 goto bad_cdb_len; 4260 4261 xlat_func = ata_get_xlat_func(dev, scsi_op); 4262 } else if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { 4263 /* relay SCSI command to ATAPI device */ 4264 int len = COMMAND_SIZE(scsi_op); 4265 4266 if (unlikely(len > scmd->cmd_len || 4267 len > dev->cdb_len || 4268 scmd->cmd_len > ATAPI_CDB_LEN)) 4269 goto bad_cdb_len; 4270 4271 xlat_func = atapi_xlat; 4272 } else { 4273 /* ATA_16 passthru, treat as an ATA command */ 4274 if (unlikely(scmd->cmd_len > 16)) 4275 goto bad_cdb_len; 4276 4277 xlat_func = ata_get_xlat_func(dev, scsi_op); 4278 } 4279 4280 if (xlat_func) 4281 return ata_scsi_translate(dev, scmd, xlat_func); 4282 4283 ata_scsi_simulate(dev, scmd); 4284 4285 return 0; 4286 4287 bad_cdb_len: 4288 scmd->result = DID_ERROR << 16; 4289 scsi_done(scmd); 4290 return 0; 4291 } 4292 4293 /** 4294 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device 4295 * @shost: SCSI host of command to be sent 4296 * @cmd: SCSI command to be sent 4297 * 4298 * In some cases, this function translates SCSI commands into 4299 * ATA taskfiles, and queues the taskfiles to be sent to 4300 * hardware. In other cases, this function simulates a 4301 * SCSI device by evaluating and responding to certain 4302 * SCSI commands. This creates the overall effect of 4303 * ATA and ATAPI devices appearing as SCSI devices. 4304 * 4305 * LOCKING: 4306 * ATA host lock 4307 * 4308 * RETURNS: 4309 * Return value from __ata_scsi_queuecmd() if @cmd can be queued, 4310 * 0 otherwise. 4311 */ 4312 int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) 4313 { 4314 struct ata_port *ap; 4315 struct ata_device *dev; 4316 struct scsi_device *scsidev = cmd->device; 4317 int rc = 0; 4318 unsigned long irq_flags; 4319 4320 ap = ata_shost_to_port(shost); 4321 4322 spin_lock_irqsave(ap->lock, irq_flags); 4323 4324 dev = ata_scsi_find_dev(ap, scsidev); 4325 if (likely(dev)) 4326 rc = __ata_scsi_queuecmd(cmd, dev); 4327 else { 4328 cmd->result = (DID_BAD_TARGET << 16); 4329 scsi_done(cmd); 4330 } 4331 4332 spin_unlock_irqrestore(ap->lock, irq_flags); 4333 4334 return rc; 4335 } 4336 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 4337 4338 /** 4339 * ata_scsi_simulate - simulate SCSI command on ATA device 4340 * @dev: the target device 4341 * @cmd: SCSI command being sent to device. 4342 * 4343 * Interprets and directly executes a select list of SCSI commands 4344 * that can be handled internally. 4345 * 4346 * LOCKING: 4347 * spin_lock_irqsave(host lock) 4348 */ 4349 4350 void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) 4351 { 4352 struct ata_scsi_args args; 4353 const u8 *scsicmd = cmd->cmnd; 4354 u8 tmp8; 4355 4356 args.dev = dev; 4357 args.id = dev->id; 4358 args.cmd = cmd; 4359 4360 switch(scsicmd[0]) { 4361 case INQUIRY: 4362 if (scsicmd[1] & 2) /* is CmdDt set? */ 4363 ata_scsi_set_invalid_field(dev, cmd, 1, 0xff); 4364 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ 4365 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 4366 else switch (scsicmd[2]) { 4367 case 0x00: 4368 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); 4369 break; 4370 case 0x80: 4371 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); 4372 break; 4373 case 0x83: 4374 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); 4375 break; 4376 case 0x89: 4377 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89); 4378 break; 4379 case 0xb0: 4380 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0); 4381 break; 4382 case 0xb1: 4383 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1); 4384 break; 4385 case 0xb2: 4386 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2); 4387 break; 4388 case 0xb6: 4389 if (dev->flags & ATA_DFLAG_ZAC) 4390 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6); 4391 else 4392 ata_scsi_set_invalid_field(dev, cmd, 2, 0xff); 4393 break; 4394 case 0xb9: 4395 if (dev->cpr_log) 4396 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9); 4397 else 4398 ata_scsi_set_invalid_field(dev, cmd, 2, 0xff); 4399 break; 4400 default: 4401 ata_scsi_set_invalid_field(dev, cmd, 2, 0xff); 4402 break; 4403 } 4404 break; 4405 4406 case MODE_SENSE: 4407 case MODE_SENSE_10: 4408 ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); 4409 break; 4410 4411 case READ_CAPACITY: 4412 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 4413 break; 4414 4415 case SERVICE_ACTION_IN_16: 4416 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 4417 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 4418 else 4419 ata_scsi_set_invalid_field(dev, cmd, 1, 0xff); 4420 break; 4421 4422 case REPORT_LUNS: 4423 ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); 4424 break; 4425 4426 case REQUEST_SENSE: 4427 ata_scsi_set_sense(dev, cmd, 0, 0, 0); 4428 break; 4429 4430 /* if we reach this, then writeback caching is disabled, 4431 * turning this into a no-op. 4432 */ 4433 case SYNCHRONIZE_CACHE: 4434 case SYNCHRONIZE_CACHE_16: 4435 fallthrough; 4436 4437 /* no-op's, complete with success */ 4438 case REZERO_UNIT: 4439 case SEEK_6: 4440 case SEEK_10: 4441 case TEST_UNIT_READY: 4442 break; 4443 4444 case SEND_DIAGNOSTIC: 4445 tmp8 = scsicmd[1] & ~(1 << 3); 4446 if (tmp8 != 0x4 || scsicmd[3] || scsicmd[4]) 4447 ata_scsi_set_invalid_field(dev, cmd, 1, 0xff); 4448 break; 4449 4450 case MAINTENANCE_IN: 4451 if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES) 4452 ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in); 4453 else 4454 ata_scsi_set_invalid_field(dev, cmd, 1, 0xff); 4455 break; 4456 4457 /* all other commands */ 4458 default: 4459 ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0); 4460 /* "Invalid command operation code" */ 4461 break; 4462 } 4463 4464 scsi_done(cmd); 4465 } 4466 4467 int ata_scsi_add_hosts(struct ata_host *host, const struct scsi_host_template *sht) 4468 { 4469 int i, rc; 4470 4471 for (i = 0; i < host->n_ports; i++) { 4472 struct ata_port *ap = host->ports[i]; 4473 struct Scsi_Host *shost; 4474 4475 rc = -ENOMEM; 4476 shost = scsi_host_alloc(sht, sizeof(struct ata_port *)); 4477 if (!shost) 4478 goto err_alloc; 4479 4480 shost->eh_noresume = 1; 4481 *(struct ata_port **)&shost->hostdata[0] = ap; 4482 ap->scsi_host = shost; 4483 4484 shost->transportt = ata_scsi_transport_template; 4485 shost->unique_id = ap->print_id; 4486 shost->max_id = 16; 4487 shost->max_lun = 1; 4488 shost->max_channel = 1; 4489 shost->max_cmd_len = 32; 4490 4491 /* Schedule policy is determined by ->qc_defer() 4492 * callback and it needs to see every deferred qc. 4493 * Set host_blocked to 1 to prevent SCSI midlayer from 4494 * automatically deferring requests. 4495 */ 4496 shost->max_host_blocked = 1; 4497 4498 rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev); 4499 if (rc) 4500 goto err_alloc; 4501 } 4502 4503 return 0; 4504 4505 err_alloc: 4506 while (--i >= 0) { 4507 struct Scsi_Host *shost = host->ports[i]->scsi_host; 4508 4509 /* scsi_host_put() is in ata_devres_release() */ 4510 scsi_remove_host(shost); 4511 } 4512 return rc; 4513 } 4514 4515 #ifdef CONFIG_OF 4516 static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap) 4517 { 4518 struct scsi_device *sdev = dev->sdev; 4519 struct device *d = ap->host->dev; 4520 struct device_node *np = d->of_node; 4521 struct device_node *child; 4522 4523 for_each_available_child_of_node(np, child) { 4524 int ret; 4525 u32 val; 4526 4527 ret = of_property_read_u32(child, "reg", &val); 4528 if (ret) 4529 continue; 4530 if (val == dev->devno) { 4531 dev_dbg(d, "found matching device node\n"); 4532 sdev->sdev_gendev.of_node = child; 4533 return; 4534 } 4535 } 4536 } 4537 #else 4538 static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap) 4539 { 4540 } 4541 #endif 4542 4543 void ata_scsi_scan_host(struct ata_port *ap, int sync) 4544 { 4545 int tries = 5; 4546 struct ata_device *last_failed_dev = NULL; 4547 struct ata_link *link; 4548 struct ata_device *dev; 4549 4550 repeat: 4551 ata_for_each_link(link, ap, EDGE) { 4552 ata_for_each_dev(dev, link, ENABLED) { 4553 struct scsi_device *sdev; 4554 int channel = 0, id = 0; 4555 4556 if (dev->sdev) 4557 continue; 4558 4559 if (ata_is_host_link(link)) 4560 id = dev->devno; 4561 else 4562 channel = link->pmp; 4563 4564 sdev = __scsi_add_device(ap->scsi_host, channel, id, 0, 4565 NULL); 4566 if (!IS_ERR(sdev)) { 4567 dev->sdev = sdev; 4568 ata_scsi_assign_ofnode(dev, ap); 4569 scsi_device_put(sdev); 4570 } else { 4571 dev->sdev = NULL; 4572 } 4573 } 4574 } 4575 4576 /* If we scanned while EH was in progress or allocation 4577 * failure occurred, scan would have failed silently. Check 4578 * whether all devices are attached. 4579 */ 4580 ata_for_each_link(link, ap, EDGE) { 4581 ata_for_each_dev(dev, link, ENABLED) { 4582 if (!dev->sdev) 4583 goto exit_loop; 4584 } 4585 } 4586 exit_loop: 4587 if (!link) 4588 return; 4589 4590 /* we're missing some SCSI devices */ 4591 if (sync) { 4592 /* If caller requested synchrnous scan && we've made 4593 * any progress, sleep briefly and repeat. 4594 */ 4595 if (dev != last_failed_dev) { 4596 msleep(100); 4597 last_failed_dev = dev; 4598 goto repeat; 4599 } 4600 4601 /* We might be failing to detect boot device, give it 4602 * a few more chances. 4603 */ 4604 if (--tries) { 4605 msleep(100); 4606 goto repeat; 4607 } 4608 4609 ata_port_err(ap, 4610 "WARNING: synchronous SCSI scan failed without making any progress, switching to async\n"); 4611 } 4612 4613 queue_delayed_work(system_long_wq, &ap->hotplug_task, 4614 round_jiffies_relative(HZ)); 4615 } 4616 4617 /** 4618 * ata_scsi_offline_dev - offline attached SCSI device 4619 * @dev: ATA device to offline attached SCSI device for 4620 * 4621 * This function is called from ata_eh_hotplug() and responsible 4622 * for taking the SCSI device attached to @dev offline. This 4623 * function is called with host lock which protects dev->sdev 4624 * against clearing. 4625 * 4626 * LOCKING: 4627 * spin_lock_irqsave(host lock) 4628 * 4629 * RETURNS: 4630 * 1 if attached SCSI device exists, 0 otherwise. 4631 */ 4632 int ata_scsi_offline_dev(struct ata_device *dev) 4633 { 4634 if (dev->sdev) { 4635 scsi_device_set_state(dev->sdev, SDEV_OFFLINE); 4636 return 1; 4637 } 4638 return 0; 4639 } 4640 4641 /** 4642 * ata_scsi_remove_dev - remove attached SCSI device 4643 * @dev: ATA device to remove attached SCSI device for 4644 * 4645 * This function is called from ata_eh_scsi_hotplug() and 4646 * responsible for removing the SCSI device attached to @dev. 4647 * 4648 * LOCKING: 4649 * Kernel thread context (may sleep). 4650 */ 4651 static void ata_scsi_remove_dev(struct ata_device *dev) 4652 { 4653 struct ata_port *ap = dev->link->ap; 4654 struct scsi_device *sdev; 4655 unsigned long flags; 4656 4657 /* Alas, we need to grab scan_mutex to ensure SCSI device 4658 * state doesn't change underneath us and thus 4659 * scsi_device_get() always succeeds. The mutex locking can 4660 * be removed if there is __scsi_device_get() interface which 4661 * increments reference counts regardless of device state. 4662 */ 4663 mutex_lock(&ap->scsi_host->scan_mutex); 4664 spin_lock_irqsave(ap->lock, flags); 4665 4666 /* clearing dev->sdev is protected by host lock */ 4667 sdev = dev->sdev; 4668 dev->sdev = NULL; 4669 4670 if (sdev) { 4671 /* If user initiated unplug races with us, sdev can go 4672 * away underneath us after the host lock and 4673 * scan_mutex are released. Hold onto it. 4674 */ 4675 if (scsi_device_get(sdev) == 0) { 4676 /* The following ensures the attached sdev is 4677 * offline on return from ata_scsi_offline_dev() 4678 * regardless it wins or loses the race 4679 * against this function. 4680 */ 4681 scsi_device_set_state(sdev, SDEV_OFFLINE); 4682 } else { 4683 WARN_ON(1); 4684 sdev = NULL; 4685 } 4686 } 4687 4688 spin_unlock_irqrestore(ap->lock, flags); 4689 mutex_unlock(&ap->scsi_host->scan_mutex); 4690 4691 if (sdev) { 4692 ata_dev_info(dev, "detaching (SCSI %s)\n", 4693 dev_name(&sdev->sdev_gendev)); 4694 4695 scsi_remove_device(sdev); 4696 scsi_device_put(sdev); 4697 } 4698 } 4699 4700 static void ata_scsi_handle_link_detach(struct ata_link *link) 4701 { 4702 struct ata_port *ap = link->ap; 4703 struct ata_device *dev; 4704 4705 ata_for_each_dev(dev, link, ALL) { 4706 unsigned long flags; 4707 4708 if (!(dev->flags & ATA_DFLAG_DETACHED)) 4709 continue; 4710 4711 spin_lock_irqsave(ap->lock, flags); 4712 dev->flags &= ~ATA_DFLAG_DETACHED; 4713 spin_unlock_irqrestore(ap->lock, flags); 4714 4715 if (zpodd_dev_enabled(dev)) 4716 zpodd_exit(dev); 4717 4718 ata_scsi_remove_dev(dev); 4719 } 4720 } 4721 4722 /** 4723 * ata_scsi_media_change_notify - send media change event 4724 * @dev: Pointer to the disk device with media change event 4725 * 4726 * Tell the block layer to send a media change notification 4727 * event. 4728 * 4729 * LOCKING: 4730 * spin_lock_irqsave(host lock) 4731 */ 4732 void ata_scsi_media_change_notify(struct ata_device *dev) 4733 { 4734 if (dev->sdev) 4735 sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE, 4736 GFP_ATOMIC); 4737 } 4738 4739 /** 4740 * ata_scsi_hotplug - SCSI part of hotplug 4741 * @work: Pointer to ATA port to perform SCSI hotplug on 4742 * 4743 * Perform SCSI part of hotplug. It's executed from a separate 4744 * workqueue after EH completes. This is necessary because SCSI 4745 * hot plugging requires working EH and hot unplugging is 4746 * synchronized with hot plugging with a mutex. 4747 * 4748 * LOCKING: 4749 * Kernel thread context (may sleep). 4750 */ 4751 void ata_scsi_hotplug(struct work_struct *work) 4752 { 4753 struct ata_port *ap = 4754 container_of(work, struct ata_port, hotplug_task.work); 4755 int i; 4756 4757 if (ap->pflags & ATA_PFLAG_UNLOADING) 4758 return; 4759 4760 mutex_lock(&ap->scsi_scan_mutex); 4761 4762 /* Unplug detached devices. We cannot use link iterator here 4763 * because PMP links have to be scanned even if PMP is 4764 * currently not attached. Iterate manually. 4765 */ 4766 ata_scsi_handle_link_detach(&ap->link); 4767 if (ap->pmp_link) 4768 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 4769 ata_scsi_handle_link_detach(&ap->pmp_link[i]); 4770 4771 /* scan for new ones */ 4772 ata_scsi_scan_host(ap, 0); 4773 4774 mutex_unlock(&ap->scsi_scan_mutex); 4775 } 4776 4777 /** 4778 * ata_scsi_user_scan - indication for user-initiated bus scan 4779 * @shost: SCSI host to scan 4780 * @channel: Channel to scan 4781 * @id: ID to scan 4782 * @lun: LUN to scan 4783 * 4784 * This function is called when user explicitly requests bus 4785 * scan. Set probe pending flag and invoke EH. 4786 * 4787 * LOCKING: 4788 * SCSI layer (we don't care) 4789 * 4790 * RETURNS: 4791 * Zero. 4792 */ 4793 int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, 4794 unsigned int id, u64 lun) 4795 { 4796 struct ata_port *ap = ata_shost_to_port(shost); 4797 unsigned long flags; 4798 int devno, rc = 0; 4799 4800 if (!ap->ops->error_handler) 4801 return -EOPNOTSUPP; 4802 4803 if (lun != SCAN_WILD_CARD && lun) 4804 return -EINVAL; 4805 4806 if (!sata_pmp_attached(ap)) { 4807 if (channel != SCAN_WILD_CARD && channel) 4808 return -EINVAL; 4809 devno = id; 4810 } else { 4811 if (id != SCAN_WILD_CARD && id) 4812 return -EINVAL; 4813 devno = channel; 4814 } 4815 4816 spin_lock_irqsave(ap->lock, flags); 4817 4818 if (devno == SCAN_WILD_CARD) { 4819 struct ata_link *link; 4820 4821 ata_for_each_link(link, ap, EDGE) { 4822 struct ata_eh_info *ehi = &link->eh_info; 4823 ehi->probe_mask |= ATA_ALL_DEVICES; 4824 ehi->action |= ATA_EH_RESET; 4825 } 4826 } else { 4827 struct ata_device *dev = ata_find_dev(ap, devno); 4828 4829 if (dev) { 4830 struct ata_eh_info *ehi = &dev->link->eh_info; 4831 ehi->probe_mask |= 1 << dev->devno; 4832 ehi->action |= ATA_EH_RESET; 4833 } else 4834 rc = -EINVAL; 4835 } 4836 4837 if (rc == 0) { 4838 ata_port_schedule_eh(ap); 4839 spin_unlock_irqrestore(ap->lock, flags); 4840 ata_port_wait_eh(ap); 4841 } else 4842 spin_unlock_irqrestore(ap->lock, flags); 4843 4844 return rc; 4845 } 4846 4847 /** 4848 * ata_scsi_dev_rescan - initiate scsi_rescan_device() 4849 * @work: Pointer to ATA port to perform scsi_rescan_device() 4850 * 4851 * After ATA pass thru (SAT) commands are executed successfully, 4852 * libata need to propagate the changes to SCSI layer. 4853 * 4854 * LOCKING: 4855 * Kernel thread context (may sleep). 4856 */ 4857 void ata_scsi_dev_rescan(struct work_struct *work) 4858 { 4859 struct ata_port *ap = 4860 container_of(work, struct ata_port, scsi_rescan_task.work); 4861 struct ata_link *link; 4862 struct ata_device *dev; 4863 unsigned long flags; 4864 bool delay_rescan = false; 4865 4866 mutex_lock(&ap->scsi_scan_mutex); 4867 spin_lock_irqsave(ap->lock, flags); 4868 4869 ata_for_each_link(link, ap, EDGE) { 4870 ata_for_each_dev(dev, link, ENABLED) { 4871 struct scsi_device *sdev = dev->sdev; 4872 4873 if (!sdev) 4874 continue; 4875 if (scsi_device_get(sdev)) 4876 continue; 4877 4878 /* 4879 * If the rescan work was scheduled because of a resume 4880 * event, the port is already fully resumed, but the 4881 * SCSI device may not yet be fully resumed. In such 4882 * case, executing scsi_rescan_device() may cause a 4883 * deadlock with the PM code on device_lock(). Prevent 4884 * this by giving up and retrying rescan after a short 4885 * delay. 4886 */ 4887 delay_rescan = sdev->sdev_gendev.power.is_suspended; 4888 if (delay_rescan) { 4889 scsi_device_put(sdev); 4890 break; 4891 } 4892 4893 spin_unlock_irqrestore(ap->lock, flags); 4894 scsi_rescan_device(&(sdev->sdev_gendev)); 4895 scsi_device_put(sdev); 4896 spin_lock_irqsave(ap->lock, flags); 4897 } 4898 } 4899 4900 spin_unlock_irqrestore(ap->lock, flags); 4901 mutex_unlock(&ap->scsi_scan_mutex); 4902 4903 if (delay_rescan) 4904 schedule_delayed_work(&ap->scsi_rescan_task, 4905 msecs_to_jiffies(5)); 4906 } 4907