1 /* 2 * libata-scsi.c - helper library for ATA 3 * 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails. 7 * 8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved. 9 * Copyright 2003-2004 Jeff Garzik 10 * 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2, or (at your option) 15 * any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; see the file COPYING. If not, write to 24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 25 * 26 * 27 * libata documentation is available via 'make {ps|pdf}docs', 28 * as Documentation/DocBook/libata.* 29 * 30 * Hardware documentation available from 31 * - http://www.t10.org/ 32 * - http://www.t13.org/ 33 * 34 */ 35 36 #include <linux/kernel.h> 37 #include <linux/blkdev.h> 38 #include <linux/spinlock.h> 39 #include <scsi/scsi.h> 40 #include <scsi/scsi_host.h> 41 #include <scsi/scsi_cmnd.h> 42 #include <scsi/scsi_eh.h> 43 #include <scsi/scsi_device.h> 44 #include <scsi/scsi_tcq.h> 45 #include <scsi/scsi_transport.h> 46 #include <linux/libata.h> 47 #include <linux/hdreg.h> 48 #include <linux/uaccess.h> 49 50 #include "libata.h" 51 52 #define SECTOR_SIZE 512 53 54 typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc); 55 56 static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, 57 const struct scsi_device *scsidev); 58 static struct ata_device *ata_scsi_find_dev(struct ata_port *ap, 59 const struct scsi_device *scsidev); 60 static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, 61 unsigned int id, unsigned int lun); 62 63 64 #define RW_RECOVERY_MPAGE 0x1 65 #define RW_RECOVERY_MPAGE_LEN 12 66 #define CACHE_MPAGE 0x8 67 #define CACHE_MPAGE_LEN 20 68 #define CONTROL_MPAGE 0xa 69 #define CONTROL_MPAGE_LEN 12 70 #define ALL_MPAGES 0x3f 71 #define ALL_SUB_MPAGES 0xff 72 73 74 static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = { 75 RW_RECOVERY_MPAGE, 76 RW_RECOVERY_MPAGE_LEN - 2, 77 (1 << 7), /* AWRE */ 78 0, /* read retry count */ 79 0, 0, 0, 0, 80 0, /* write retry count */ 81 0, 0, 0 82 }; 83 84 static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = { 85 CACHE_MPAGE, 86 CACHE_MPAGE_LEN - 2, 87 0, /* contains WCE, needs to be 0 for logic */ 88 0, 0, 0, 0, 0, 0, 0, 0, 0, 89 0, /* contains DRA, needs to be 0 for logic */ 90 0, 0, 0, 0, 0, 0, 0 91 }; 92 93 static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = { 94 CONTROL_MPAGE, 95 CONTROL_MPAGE_LEN - 2, 96 2, /* DSENSE=0, GLTSD=1 */ 97 0, /* [QAM+QERR may be 1, see 05-359r1] */ 98 0, 0, 0, 0, 0xff, 0xff, 99 0, 30 /* extended self test time, see 05-359r1 */ 100 }; 101 102 /* 103 * libata transport template. libata doesn't do real transport stuff. 104 * It just needs the eh_timed_out hook. 
 */
static struct scsi_transport_template ata_scsi_transport_template = {
        .eh_strategy_handler    = ata_scsi_error,
        .eh_timed_out           = ata_scsi_timed_out,
        .user_scan              = ata_scsi_user_scan,
};


static const struct {
        enum link_pm    value;
        const char      *name;
} link_pm_policy[] = {
        { NOT_AVAILABLE, "max_performance" },
        { MIN_POWER, "min_power" },
        { MAX_PERFORMANCE, "max_performance" },
        { MEDIUM_POWER, "medium_power" },
};

static const char *ata_scsi_lpm_get(enum link_pm policy)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++)
                if (link_pm_policy[i].value == policy)
                        return link_pm_policy[i].name;

        return NULL;
}

static ssize_t ata_scsi_lpm_put(struct class_device *class_dev,
                                const char *buf, size_t count)
{
        struct Scsi_Host *shost = class_to_shost(class_dev);
        struct ata_port *ap = ata_shost_to_port(shost);
        enum link_pm policy = 0;
        int i;

        /*
         * we are skipping array location 0 on purpose - this
         * is because a value of NOT_AVAILABLE is displayed
         * to the user as max_performance, but when the user
         * writes "max_performance", they actually want the
         * value to match MAX_PERFORMANCE.
         */
        for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) {
                const int len = strlen(link_pm_policy[i].name);
                if (strncmp(link_pm_policy[i].name, buf, len) == 0 &&
                    buf[len] == '\n') {
                        policy = link_pm_policy[i].value;
                        break;
                }
        }
        if (!policy)
                return -EINVAL;

        ata_lpm_schedule(ap, policy);
        return count;
}

static ssize_t
ata_scsi_lpm_show(struct class_device *class_dev, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(class_dev);
        struct ata_port *ap = ata_shost_to_port(shost);
        const char *policy =
                ata_scsi_lpm_get(ap->pm_policy);

        if (!policy)
                return -EINVAL;

        return snprintf(buf, 23, "%s\n", policy);
}
CLASS_DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
                  ata_scsi_lpm_show, ata_scsi_lpm_put);
EXPORT_SYMBOL_GPL(class_device_attr_link_power_management_policy);

static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
                                   void (*done)(struct scsi_cmnd *))
{
        ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
        /* "Invalid field in cdb" */
        done(cmd);
}

/**
 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
 * @sdev: SCSI device for which BIOS geometry is to be determined
 * @bdev: block device associated with @sdev
 * @capacity: capacity of SCSI device
 * @geom: location to which geometry will be output
 *
 * Generic bios head/sector/cylinder calculator
 * used by sd.  Most BIOSes nowadays expect a XXX/255/63 (CHS)
 * mapping.  Some situations may arise where the disk is not
 * bootable if this is not used.
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero.
 */
int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
                       sector_t capacity, int geom[])
{
        geom[0] = 255;
        geom[1] = 63;
        sector_div(capacity, 255*63);
        geom[2] = capacity;

        return 0;
}

/**
 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
 * @sdev: SCSI device to get identify data for
 * @arg: User buffer area for identify data
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
225 * 226 * RETURNS: 227 * Zero on success, negative errno on error. 228 */ 229 static int ata_get_identity(struct scsi_device *sdev, void __user *arg) 230 { 231 struct ata_port *ap = ata_shost_to_port(sdev->host); 232 struct ata_device *dev = ata_scsi_find_dev(ap, sdev); 233 u16 __user *dst = arg; 234 char buf[40]; 235 236 if (!dev) 237 return -ENOMSG; 238 239 if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16))) 240 return -EFAULT; 241 242 ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN); 243 if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN)) 244 return -EFAULT; 245 246 ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN); 247 if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN)) 248 return -EFAULT; 249 250 ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN); 251 if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN)) 252 return -EFAULT; 253 254 return 0; 255 } 256 257 /** 258 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl 259 * @scsidev: Device to which we are issuing command 260 * @arg: User provided data for issuing command 261 * 262 * LOCKING: 263 * Defined by the SCSI layer. We don't really care. 264 * 265 * RETURNS: 266 * Zero on success, negative errno on error. 267 */ 268 int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) 269 { 270 int rc = 0; 271 u8 scsi_cmd[MAX_COMMAND_SIZE]; 272 u8 args[4], *argbuf = NULL, *sensebuf = NULL; 273 int argsize = 0; 274 enum dma_data_direction data_dir; 275 int cmd_result; 276 277 if (arg == NULL) 278 return -EINVAL; 279 280 if (copy_from_user(args, arg, sizeof(args))) 281 return -EFAULT; 282 283 sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); 284 if (!sensebuf) 285 return -ENOMEM; 286 287 memset(scsi_cmd, 0, sizeof(scsi_cmd)); 288 289 if (args[3]) { 290 argsize = SECTOR_SIZE * args[3]; 291 argbuf = kmalloc(argsize, GFP_KERNEL); 292 if (argbuf == NULL) { 293 rc = -ENOMEM; 294 goto error; 295 } 296 297 scsi_cmd[1] = (4 << 1); /* PIO Data-in */ 298 scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev, 299 block count in sector count field */ 300 data_dir = DMA_FROM_DEVICE; 301 } else { 302 scsi_cmd[1] = (3 << 1); /* Non-data */ 303 scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */ 304 data_dir = DMA_NONE; 305 } 306 307 scsi_cmd[0] = ATA_16; 308 309 scsi_cmd[4] = args[2]; 310 if (args[0] == WIN_SMART) { /* hack -- ide driver does this too... */ 311 scsi_cmd[6] = args[3]; 312 scsi_cmd[8] = args[1]; 313 scsi_cmd[10] = 0x4f; 314 scsi_cmd[12] = 0xc2; 315 } else { 316 scsi_cmd[6] = args[1]; 317 } 318 scsi_cmd[14] = args[0]; 319 320 /* Good values for timeout and retries? Values below 321 from scsi_ioctl_send_command() for default case... */ 322 cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize, 323 sensebuf, (10*HZ), 5, 0); 324 325 if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ 326 u8 *desc = sensebuf + 8; 327 cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */ 328 329 /* If we set cc then ATA pass-through will cause a 330 * check condition even if no error. Filter that. 
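	 *
	 * When the command itself succeeded, that "courtesy" CHECK
	 * CONDITION carries an all-zero sense key/ASC/ASCQ, which is
	 * exactly what the scsi_normalize_sense() call below detects
	 * before the status bit is cleared; only a genuine error is
	 * propagated to the caller as -EIO further down.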
*/ 331 if (cmd_result & SAM_STAT_CHECK_CONDITION) { 332 struct scsi_sense_hdr sshdr; 333 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, 334 &sshdr); 335 if (sshdr.sense_key == 0 && 336 sshdr.asc == 0 && sshdr.ascq == 0) 337 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 338 } 339 340 /* Send userspace a few ATA registers (same as drivers/ide) */ 341 if (sensebuf[0] == 0x72 && /* format is "descriptor" */ 342 desc[0] == 0x09) { /* code is "ATA Descriptor" */ 343 args[0] = desc[13]; /* status */ 344 args[1] = desc[3]; /* error */ 345 args[2] = desc[5]; /* sector count (0:7) */ 346 if (copy_to_user(arg, args, sizeof(args))) 347 rc = -EFAULT; 348 } 349 } 350 351 352 if (cmd_result) { 353 rc = -EIO; 354 goto error; 355 } 356 357 if ((argbuf) 358 && copy_to_user(arg + sizeof(args), argbuf, argsize)) 359 rc = -EFAULT; 360 error: 361 kfree(sensebuf); 362 kfree(argbuf); 363 return rc; 364 } 365 366 /** 367 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl 368 * @scsidev: Device to which we are issuing command 369 * @arg: User provided data for issuing command 370 * 371 * LOCKING: 372 * Defined by the SCSI layer. We don't really care. 373 * 374 * RETURNS: 375 * Zero on success, negative errno on error. 376 */ 377 int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) 378 { 379 int rc = 0; 380 u8 scsi_cmd[MAX_COMMAND_SIZE]; 381 u8 args[7], *sensebuf = NULL; 382 int cmd_result; 383 384 if (arg == NULL) 385 return -EINVAL; 386 387 if (copy_from_user(args, arg, sizeof(args))) 388 return -EFAULT; 389 390 sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); 391 if (!sensebuf) 392 return -ENOMEM; 393 394 memset(scsi_cmd, 0, sizeof(scsi_cmd)); 395 scsi_cmd[0] = ATA_16; 396 scsi_cmd[1] = (3 << 1); /* Non-data */ 397 scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */ 398 scsi_cmd[4] = args[1]; 399 scsi_cmd[6] = args[2]; 400 scsi_cmd[8] = args[3]; 401 scsi_cmd[10] = args[4]; 402 scsi_cmd[12] = args[5]; 403 scsi_cmd[13] = args[6] & 0x4f; 404 scsi_cmd[14] = args[0]; 405 406 /* Good values for timeout and retries? Values below 407 from scsi_ioctl_send_command() for default case... */ 408 cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0, 409 sensebuf, (10*HZ), 5, 0); 410 411 if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ 412 u8 *desc = sensebuf + 8; 413 cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */ 414 415 /* If we set cc then ATA pass-through will cause a 416 * check condition even if no error. Filter that. 
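	 *
	 * Note for readers (illustrative, mirrors the copy-back code
	 * below): the registers returned to userspace reuse the same
	 * seven-byte layout that HDIO_DRIVE_TASK takes on input, i.e.
	 * { command/status, feature/error, nsect, lbal, lbam, lbah,
	 * select }, since command/status and feature/error share the
	 * same taskfile registers.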
*/ 417 if (cmd_result & SAM_STAT_CHECK_CONDITION) { 418 struct scsi_sense_hdr sshdr; 419 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, 420 &sshdr); 421 if (sshdr.sense_key == 0 && 422 sshdr.asc == 0 && sshdr.ascq == 0) 423 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 424 } 425 426 /* Send userspace ATA registers */ 427 if (sensebuf[0] == 0x72 && /* format is "descriptor" */ 428 desc[0] == 0x09) {/* code is "ATA Descriptor" */ 429 args[0] = desc[13]; /* status */ 430 args[1] = desc[3]; /* error */ 431 args[2] = desc[5]; /* sector count (0:7) */ 432 args[3] = desc[7]; /* lbal */ 433 args[4] = desc[9]; /* lbam */ 434 args[5] = desc[11]; /* lbah */ 435 args[6] = desc[12]; /* select */ 436 if (copy_to_user(arg, args, sizeof(args))) 437 rc = -EFAULT; 438 } 439 } 440 441 if (cmd_result) { 442 rc = -EIO; 443 goto error; 444 } 445 446 error: 447 kfree(sensebuf); 448 return rc; 449 } 450 451 int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) 452 { 453 int val = -EINVAL, rc = -EINVAL; 454 455 switch (cmd) { 456 case ATA_IOC_GET_IO32: 457 val = 0; 458 if (copy_to_user(arg, &val, 1)) 459 return -EFAULT; 460 return 0; 461 462 case ATA_IOC_SET_IO32: 463 val = (unsigned long) arg; 464 if (val != 0) 465 return -EINVAL; 466 return 0; 467 468 case HDIO_GET_IDENTITY: 469 return ata_get_identity(scsidev, arg); 470 471 case HDIO_DRIVE_CMD: 472 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 473 return -EACCES; 474 return ata_cmd_ioctl(scsidev, arg); 475 476 case HDIO_DRIVE_TASK: 477 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 478 return -EACCES; 479 return ata_task_ioctl(scsidev, arg); 480 481 default: 482 rc = -ENOTTY; 483 break; 484 } 485 486 return rc; 487 } 488 489 /** 490 * ata_scsi_qc_new - acquire new ata_queued_cmd reference 491 * @dev: ATA device to which the new command is attached 492 * @cmd: SCSI command that originated this ATA command 493 * @done: SCSI command completion function 494 * 495 * Obtain a reference to an unused ata_queued_cmd structure, 496 * which is the basic libata structure representing a single 497 * ATA command sent to the hardware. 498 * 499 * If a command was available, fill in the SCSI-specific 500 * portions of the structure with information on the 501 * current command. 502 * 503 * LOCKING: 504 * spin_lock_irqsave(host lock) 505 * 506 * RETURNS: 507 * Command allocated, or %NULL if none available. 508 */ 509 static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev, 510 struct scsi_cmnd *cmd, 511 void (*done)(struct scsi_cmnd *)) 512 { 513 struct ata_queued_cmd *qc; 514 515 qc = ata_qc_new_init(dev); 516 if (qc) { 517 qc->scsicmd = cmd; 518 qc->scsidone = done; 519 520 qc->sg = scsi_sglist(cmd); 521 qc->n_elem = scsi_sg_count(cmd); 522 } else { 523 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); 524 done(cmd); 525 } 526 527 return qc; 528 } 529 530 /** 531 * ata_dump_status - user friendly display of error info 532 * @id: id of the port in question 533 * @tf: ptr to filled out taskfile 534 * 535 * Decode and dump the ATA error/status registers for the user so 536 * that they have some idea what really happened at the non 537 * make-believe layer. 
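 *
 * Example of the resulting log output (illustrative values, not
 * taken from a real failure):
 *
 *   ata1: status=0x51 { DriveReady SeekComplete Error }
 *   ata1: error=0x40 { UncorrectableError }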
538 * 539 * LOCKING: 540 * inherited from caller 541 */ 542 static void ata_dump_status(unsigned id, struct ata_taskfile *tf) 543 { 544 u8 stat = tf->command, err = tf->feature; 545 546 printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat); 547 if (stat & ATA_BUSY) { 548 printk("Busy }\n"); /* Data is not valid in this case */ 549 } else { 550 if (stat & 0x40) printk("DriveReady "); 551 if (stat & 0x20) printk("DeviceFault "); 552 if (stat & 0x10) printk("SeekComplete "); 553 if (stat & 0x08) printk("DataRequest "); 554 if (stat & 0x04) printk("CorrectedError "); 555 if (stat & 0x02) printk("Index "); 556 if (stat & 0x01) printk("Error "); 557 printk("}\n"); 558 559 if (err) { 560 printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err); 561 if (err & 0x04) printk("DriveStatusError "); 562 if (err & 0x80) { 563 if (err & 0x04) printk("BadCRC "); 564 else printk("Sector "); 565 } 566 if (err & 0x40) printk("UncorrectableError "); 567 if (err & 0x10) printk("SectorIdNotFound "); 568 if (err & 0x02) printk("TrackZeroNotFound "); 569 if (err & 0x01) printk("AddrMarkNotFound "); 570 printk("}\n"); 571 } 572 } 573 } 574 575 /** 576 * ata_to_sense_error - convert ATA error to SCSI error 577 * @id: ATA device number 578 * @drv_stat: value contained in ATA status register 579 * @drv_err: value contained in ATA error register 580 * @sk: the sense key we'll fill out 581 * @asc: the additional sense code we'll fill out 582 * @ascq: the additional sense code qualifier we'll fill out 583 * @verbose: be verbose 584 * 585 * Converts an ATA error into a SCSI error. Fill out pointers to 586 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor 587 * format sense blocks. 588 * 589 * LOCKING: 590 * spin_lock_irqsave(host lock) 591 */ 592 static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, 593 u8 *asc, u8 *ascq, int verbose) 594 { 595 int i; 596 597 /* Based on the 3ware driver translation table */ 598 static const unsigned char sense_table[][4] = { 599 /* BBD|ECC|ID|MAR */ 600 {0xd1, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command 601 /* BBD|ECC|ID */ 602 {0xd0, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command 603 /* ECC|MC|MARK */ 604 {0x61, HARDWARE_ERROR, 0x00, 0x00}, // Device fault Hardware error 605 /* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */ 606 {0x84, ABORTED_COMMAND, 0x47, 0x00}, // Data CRC error SCSI parity error 607 /* MC|ID|ABRT|TRK0|MARK */ 608 {0x37, NOT_READY, 0x04, 0x00}, // Unit offline Not ready 609 /* MCR|MARK */ 610 {0x09, NOT_READY, 0x04, 0x00}, // Unrecovered disk error Not ready 611 /* Bad address mark */ 612 {0x01, MEDIUM_ERROR, 0x13, 0x00}, // Address mark not found Address mark not found for data field 613 /* TRK0 */ 614 {0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error 615 /* Abort & !ICRC */ 616 {0x04, ABORTED_COMMAND, 0x00, 0x00}, // Aborted command Aborted command 617 /* Media change request */ 618 {0x08, NOT_READY, 0x04, 0x00}, // Media change request FIXME: faking offline 619 /* SRV */ 620 {0x10, ABORTED_COMMAND, 0x14, 0x00}, // ID not found Recorded entity not found 621 /* Media change */ 622 {0x08, NOT_READY, 0x04, 0x00}, // Media change FIXME: faking offline 623 /* ECC */ 624 {0x40, MEDIUM_ERROR, 0x11, 0x04}, // Uncorrectable ECC error Unrecovered read error 625 /* BBD - block marked bad */ 626 {0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error 627 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark 628 }; 629 static const unsigned char stat_table[][4] = { 630 /* 
Must be first because BUSY means no other bits valid */ 631 {0x80, ABORTED_COMMAND, 0x47, 0x00}, // Busy, fake parity for now 632 {0x20, HARDWARE_ERROR, 0x00, 0x00}, // Device fault 633 {0x08, ABORTED_COMMAND, 0x47, 0x00}, // Timed out in xfer, fake parity for now 634 {0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered 635 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark 636 }; 637 638 /* 639 * Is this an error we can process/parse 640 */ 641 if (drv_stat & ATA_BUSY) { 642 drv_err = 0; /* Ignore the err bits, they're invalid */ 643 } 644 645 if (drv_err) { 646 /* Look for drv_err */ 647 for (i = 0; sense_table[i][0] != 0xFF; i++) { 648 /* Look for best matches first */ 649 if ((sense_table[i][0] & drv_err) == 650 sense_table[i][0]) { 651 *sk = sense_table[i][1]; 652 *asc = sense_table[i][2]; 653 *ascq = sense_table[i][3]; 654 goto translate_done; 655 } 656 } 657 /* No immediate match */ 658 if (verbose) 659 printk(KERN_WARNING "ata%u: no sense translation for " 660 "error 0x%02x\n", id, drv_err); 661 } 662 663 /* Fall back to interpreting status bits */ 664 for (i = 0; stat_table[i][0] != 0xFF; i++) { 665 if (stat_table[i][0] & drv_stat) { 666 *sk = stat_table[i][1]; 667 *asc = stat_table[i][2]; 668 *ascq = stat_table[i][3]; 669 goto translate_done; 670 } 671 } 672 /* No error? Undecoded? */ 673 if (verbose) 674 printk(KERN_WARNING "ata%u: no sense translation for " 675 "status: 0x%02x\n", id, drv_stat); 676 677 /* We need a sensible error return here, which is tricky, and one 678 that won't cause people to do things like return a disk wrongly */ 679 *sk = ABORTED_COMMAND; 680 *asc = 0x00; 681 *ascq = 0x00; 682 683 translate_done: 684 if (verbose) 685 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x " 686 "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n", 687 id, drv_stat, drv_err, *sk, *asc, *ascq); 688 return; 689 } 690 691 /* 692 * ata_gen_passthru_sense - Generate check condition sense block. 693 * @qc: Command that completed. 694 * 695 * This function is specific to the ATA descriptor format sense 696 * block specified for the ATA pass through commands. Regardless 697 * of whether the command errored or not, return a sense 698 * block. Copy all controller registers into the sense 699 * block. Clear sense key, ASC & ASCQ if there is no error. 700 * 701 * LOCKING: 702 * None. 703 */ 704 static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) 705 { 706 struct scsi_cmnd *cmd = qc->scsicmd; 707 struct ata_taskfile *tf = &qc->result_tf; 708 unsigned char *sb = cmd->sense_buffer; 709 unsigned char *desc = sb + 8; 710 int verbose = qc->ap->ops->error_handler == NULL; 711 712 memset(sb, 0, SCSI_SENSE_BUFFERSIZE); 713 714 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 715 716 /* 717 * Use ata_to_sense_error() to map status register bits 718 * onto sense key, asc & ascq. 719 */ 720 if (qc->err_mask || 721 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 722 ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, 723 &sb[1], &sb[2], &sb[3], verbose); 724 sb[1] &= 0x0f; 725 } 726 727 /* 728 * Sense data is current and format is descriptor. 729 */ 730 sb[0] = 0x72; 731 732 desc[0] = 0x09; 733 734 /* set length of additional sense data */ 735 sb[7] = 14; 736 desc[1] = 12; 737 738 /* 739 * Copy registers into sense buffer. 
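	 *
	 * The assignments below follow the SAT "ATA Status Return"
	 * sense data descriptor (descriptor code 09h): bytes 3, 5, 7,
	 * 9 and 11 carry the error, count and LBA low/mid/high
	 * registers, byte 12 the device register and byte 13 the
	 * status register.  For LBA48 commands the EXTEND bit in
	 * byte 2 is set and the even bytes 4-10 carry the HOB values.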
 */
        desc[2] = 0x00;
        desc[3] = tf->feature;  /* == error reg */
        desc[5] = tf->nsect;
        desc[7] = tf->lbal;
        desc[9] = tf->lbam;
        desc[11] = tf->lbah;
        desc[12] = tf->device;
        desc[13] = tf->command; /* == status reg */

        /*
         * Fill in Extend bit, and the high order bytes
         * if applicable.
         */
        if (tf->flags & ATA_TFLAG_LBA48) {
                desc[2] |= 0x01;
                desc[4] = tf->hob_nsect;
                desc[6] = tf->hob_lbal;
                desc[8] = tf->hob_lbam;
                desc[10] = tf->hob_lbah;
        }
}

/**
 * ata_gen_ata_sense - generate a SCSI sense block
 * @qc: Command that we are erroring out
 *
 * Generate sense block for a failed ATA command @qc.  Descriptor
 * format is used to accommodate the LBA48 block address.
 *
 * LOCKING:
 * None.
 */
static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
{
        struct ata_device *dev = qc->dev;
        struct scsi_cmnd *cmd = qc->scsicmd;
        struct ata_taskfile *tf = &qc->result_tf;
        unsigned char *sb = cmd->sense_buffer;
        unsigned char *desc = sb + 8;
        int verbose = qc->ap->ops->error_handler == NULL;
        u64 block;

        memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

        cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

        /* sense data is current and format is descriptor */
        sb[0] = 0x72;

        /* Use ata_to_sense_error() to map status register bits
         * onto sense key, asc & ascq.
         */
        if (qc->err_mask ||
            tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
                ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
                                   &sb[1], &sb[2], &sb[3], verbose);
                sb[1] &= 0x0f;
        }

        block = ata_tf_read_block(&qc->result_tf, dev);

        /* information sense data descriptor */
        sb[7] = 12;
        desc[0] = 0x00;
        desc[1] = 10;

        desc[2] |= 0x80;        /* valid */
        desc[6] = block >> 40;
        desc[7] = block >> 32;
        desc[8] = block >> 24;
        desc[9] = block >> 16;
        desc[10] = block >> 8;
        desc[11] = block;
}

static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
        sdev->use_10_for_rw = 1;
        sdev->use_10_for_ms = 1;

        /* Schedule policy is determined by ->qc_defer() callback and
         * it needs to see every deferred qc.  Set dev_blocked to 1 to
         * prevent SCSI midlayer from automatically deferring
         * requests.
         */
        sdev->max_device_blocked = 1;
}

static void ata_scsi_dev_config(struct scsi_device *sdev,
                                struct ata_device *dev)
{
        /* configure max sectors */
        blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);

        /* SATA DMA transfers must be multiples of 4 bytes, so
         * we need to pad ATAPI transfers using an extra sg.
         * Decrement max hw segments accordingly.
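         *
         * For example (illustrative): a 510-byte ATAPI transfer is
         * padded out to a 4-byte multiple using the port's DMA pad
         * buffer, which occupies one extra scatterlist entry; that
         * is why one hw segment is reserved below.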
838 */ 839 if (dev->class == ATA_DEV_ATAPI) { 840 struct request_queue *q = sdev->request_queue; 841 blk_queue_max_hw_segments(q, q->max_hw_segments - 1); 842 843 /* set the min alignment */ 844 blk_queue_update_dma_alignment(sdev->request_queue, 845 ATA_DMA_PAD_SZ - 1); 846 } else 847 /* ATA devices must be sector aligned */ 848 blk_queue_update_dma_alignment(sdev->request_queue, 849 ATA_SECT_SIZE - 1); 850 851 if (dev->class == ATA_DEV_ATA) 852 sdev->manage_start_stop = 1; 853 854 if (dev->flags & ATA_DFLAG_AN) 855 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); 856 857 if (dev->flags & ATA_DFLAG_NCQ) { 858 int depth; 859 860 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id)); 861 depth = min(ATA_MAX_QUEUE - 1, depth); 862 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); 863 } 864 } 865 866 /** 867 * ata_scsi_slave_config - Set SCSI device attributes 868 * @sdev: SCSI device to examine 869 * 870 * This is called before we actually start reading 871 * and writing to the device, to configure certain 872 * SCSI mid-layer behaviors. 873 * 874 * LOCKING: 875 * Defined by SCSI layer. We don't really care. 876 */ 877 878 int ata_scsi_slave_config(struct scsi_device *sdev) 879 { 880 struct ata_port *ap = ata_shost_to_port(sdev->host); 881 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); 882 883 ata_scsi_sdev_config(sdev); 884 885 if (dev) 886 ata_scsi_dev_config(sdev, dev); 887 888 return 0; 889 } 890 891 /** 892 * ata_scsi_slave_destroy - SCSI device is about to be destroyed 893 * @sdev: SCSI device to be destroyed 894 * 895 * @sdev is about to be destroyed for hot/warm unplugging. If 896 * this unplugging was initiated by libata as indicated by NULL 897 * dev->sdev, this function doesn't have to do anything. 898 * Otherwise, SCSI layer initiated warm-unplug is in progress. 899 * Clear dev->sdev, schedule the device for ATA detach and invoke 900 * EH. 901 * 902 * LOCKING: 903 * Defined by SCSI layer. We don't really care. 904 */ 905 void ata_scsi_slave_destroy(struct scsi_device *sdev) 906 { 907 struct ata_port *ap = ata_shost_to_port(sdev->host); 908 unsigned long flags; 909 struct ata_device *dev; 910 911 if (!ap->ops->error_handler) 912 return; 913 914 spin_lock_irqsave(ap->lock, flags); 915 dev = __ata_scsi_find_dev(ap, sdev); 916 if (dev && dev->sdev) { 917 /* SCSI device already in CANCEL state, no need to offline it */ 918 dev->sdev = NULL; 919 dev->flags |= ATA_DFLAG_DETACH; 920 ata_port_schedule_eh(ap); 921 } 922 spin_unlock_irqrestore(ap->lock, flags); 923 } 924 925 /** 926 * ata_scsi_change_queue_depth - SCSI callback for queue depth config 927 * @sdev: SCSI device to configure queue depth for 928 * @queue_depth: new queue depth 929 * 930 * This is libata standard hostt->change_queue_depth callback. 931 * SCSI will call into this callback when user tries to set queue 932 * depth via sysfs. 933 * 934 * LOCKING: 935 * SCSI layer (we don't care) 936 * 937 * RETURNS: 938 * Newly configured queue depth. 939 */ 940 int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth) 941 { 942 struct ata_port *ap = ata_shost_to_port(sdev->host); 943 struct ata_device *dev; 944 unsigned long flags; 945 946 if (queue_depth < 1 || queue_depth == sdev->queue_depth) 947 return sdev->queue_depth; 948 949 dev = ata_scsi_find_dev(ap, sdev); 950 if (!dev || !ata_dev_enabled(dev)) 951 return sdev->queue_depth; 952 953 /* NCQ enabled? 
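	 * If the requested depth is 1, or the device is not currently
	 * using NCQ, ATA_DFLAG_NCQ_OFF is set and the depth forced to 1.
	 * The value is then clamped to the host's can_queue, the
	 * IDENTIFY-reported queue depth and ATA_MAX_QUEUE - 1 before
	 * scsi_adjust_queue_depth() applies it.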
*/ 954 spin_lock_irqsave(ap->lock, flags); 955 dev->flags &= ~ATA_DFLAG_NCQ_OFF; 956 if (queue_depth == 1 || !ata_ncq_enabled(dev)) { 957 dev->flags |= ATA_DFLAG_NCQ_OFF; 958 queue_depth = 1; 959 } 960 spin_unlock_irqrestore(ap->lock, flags); 961 962 /* limit and apply queue depth */ 963 queue_depth = min(queue_depth, sdev->host->can_queue); 964 queue_depth = min(queue_depth, ata_id_queue_depth(dev->id)); 965 queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1); 966 967 if (sdev->queue_depth == queue_depth) 968 return -EINVAL; 969 970 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth); 971 return queue_depth; 972 } 973 974 /* XXX: for spindown warning */ 975 static void ata_delayed_done_timerfn(unsigned long arg) 976 { 977 struct scsi_cmnd *scmd = (void *)arg; 978 979 scmd->scsi_done(scmd); 980 } 981 982 /* XXX: for spindown warning */ 983 static void ata_delayed_done(struct scsi_cmnd *scmd) 984 { 985 static struct timer_list timer; 986 987 setup_timer(&timer, ata_delayed_done_timerfn, (unsigned long)scmd); 988 mod_timer(&timer, jiffies + 5 * HZ); 989 } 990 991 /** 992 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command 993 * @qc: Storage for translated ATA taskfile 994 * 995 * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY 996 * (to start). Perhaps these commands should be preceded by 997 * CHECK POWER MODE to see what power mode the device is already in. 998 * [See SAT revision 5 at www.t10.org] 999 * 1000 * LOCKING: 1001 * spin_lock_irqsave(host lock) 1002 * 1003 * RETURNS: 1004 * Zero on success, non-zero on error. 1005 */ 1006 static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) 1007 { 1008 struct scsi_cmnd *scmd = qc->scsicmd; 1009 struct ata_taskfile *tf = &qc->tf; 1010 const u8 *cdb = scmd->cmnd; 1011 1012 if (scmd->cmd_len < 5) 1013 goto invalid_fld; 1014 1015 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 1016 tf->protocol = ATA_PROT_NODATA; 1017 if (cdb[1] & 0x1) { 1018 ; /* ignore IMMED bit, violates sat-r05 */ 1019 } 1020 if (cdb[4] & 0x2) 1021 goto invalid_fld; /* LOEJ bit set not supported */ 1022 if (((cdb[4] >> 4) & 0xf) != 0) 1023 goto invalid_fld; /* power conditions not supported */ 1024 1025 if (qc->dev->horkage & ATA_HORKAGE_SKIP_PM) { 1026 /* the device lacks PM support, finish without doing anything */ 1027 scmd->result = SAM_STAT_GOOD; 1028 return 1; 1029 } 1030 1031 if (cdb[4] & 0x1) { 1032 tf->nsect = 1; /* 1 sector, lba=0 */ 1033 1034 if (qc->dev->flags & ATA_DFLAG_LBA) { 1035 tf->flags |= ATA_TFLAG_LBA; 1036 1037 tf->lbah = 0x0; 1038 tf->lbam = 0x0; 1039 tf->lbal = 0x0; 1040 tf->device |= ATA_LBA; 1041 } else { 1042 /* CHS */ 1043 tf->lbal = 0x1; /* sect */ 1044 tf->lbam = 0x0; /* cyl low */ 1045 tf->lbah = 0x0; /* cyl high */ 1046 } 1047 1048 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ 1049 } else { 1050 /* XXX: This is for backward compatibility, will be 1051 * removed. Read Documentation/feature-removal-schedule.txt 1052 * for more info. 1053 */ 1054 if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) && 1055 (system_state == SYSTEM_HALT || 1056 system_state == SYSTEM_POWER_OFF)) { 1057 static unsigned long warned; 1058 1059 if (!test_and_set_bit(0, &warned)) { 1060 ata_dev_printk(qc->dev, KERN_WARNING, 1061 "DISK MIGHT NOT BE SPUN DOWN PROPERLY. " 1062 "UPDATE SHUTDOWN UTILITY\n"); 1063 ata_dev_printk(qc->dev, KERN_WARNING, 1064 "For more info, visit " 1065 "http://linux-ata.org/shutdown.html\n"); 1066 1067 /* ->scsi_done is not used, use it for 1068 * delayed completion. 
1069 */ 1070 scmd->scsi_done = qc->scsidone; 1071 qc->scsidone = ata_delayed_done; 1072 } 1073 scmd->result = SAM_STAT_GOOD; 1074 return 1; 1075 } 1076 1077 /* Issue ATA STANDBY IMMEDIATE command */ 1078 tf->command = ATA_CMD_STANDBYNOW1; 1079 } 1080 1081 /* 1082 * Standby and Idle condition timers could be implemented but that 1083 * would require libata to implement the Power condition mode page 1084 * and allow the user to change it. Changing mode pages requires 1085 * MODE SELECT to be implemented. 1086 */ 1087 1088 return 0; 1089 1090 invalid_fld: 1091 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0); 1092 /* "Invalid field in cbd" */ 1093 return 1; 1094 } 1095 1096 1097 /** 1098 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command 1099 * @qc: Storage for translated ATA taskfile 1100 * 1101 * Sets up an ATA taskfile to issue FLUSH CACHE or 1102 * FLUSH CACHE EXT. 1103 * 1104 * LOCKING: 1105 * spin_lock_irqsave(host lock) 1106 * 1107 * RETURNS: 1108 * Zero on success, non-zero on error. 1109 */ 1110 static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc) 1111 { 1112 struct ata_taskfile *tf = &qc->tf; 1113 1114 tf->flags |= ATA_TFLAG_DEVICE; 1115 tf->protocol = ATA_PROT_NODATA; 1116 1117 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT) 1118 tf->command = ATA_CMD_FLUSH_EXT; 1119 else 1120 tf->command = ATA_CMD_FLUSH; 1121 1122 /* flush is critical for IO integrity, consider it an IO command */ 1123 qc->flags |= ATA_QCFLAG_IO; 1124 1125 return 0; 1126 } 1127 1128 /** 1129 * scsi_6_lba_len - Get LBA and transfer length 1130 * @cdb: SCSI command to translate 1131 * 1132 * Calculate LBA and transfer length for 6-byte commands. 1133 * 1134 * RETURNS: 1135 * @plba: the LBA 1136 * @plen: the transfer length 1137 */ 1138 static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen) 1139 { 1140 u64 lba = 0; 1141 u32 len; 1142 1143 VPRINTK("six-byte command\n"); 1144 1145 lba |= ((u64)(cdb[1] & 0x1f)) << 16; 1146 lba |= ((u64)cdb[2]) << 8; 1147 lba |= ((u64)cdb[3]); 1148 1149 len = cdb[4]; 1150 1151 *plba = lba; 1152 *plen = len; 1153 } 1154 1155 /** 1156 * scsi_10_lba_len - Get LBA and transfer length 1157 * @cdb: SCSI command to translate 1158 * 1159 * Calculate LBA and transfer length for 10-byte commands. 1160 * 1161 * RETURNS: 1162 * @plba: the LBA 1163 * @plen: the transfer length 1164 */ 1165 static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen) 1166 { 1167 u64 lba = 0; 1168 u32 len = 0; 1169 1170 VPRINTK("ten-byte command\n"); 1171 1172 lba |= ((u64)cdb[2]) << 24; 1173 lba |= ((u64)cdb[3]) << 16; 1174 lba |= ((u64)cdb[4]) << 8; 1175 lba |= ((u64)cdb[5]); 1176 1177 len |= ((u32)cdb[7]) << 8; 1178 len |= ((u32)cdb[8]); 1179 1180 *plba = lba; 1181 *plen = len; 1182 } 1183 1184 /** 1185 * scsi_16_lba_len - Get LBA and transfer length 1186 * @cdb: SCSI command to translate 1187 * 1188 * Calculate LBA and transfer length for 16-byte commands. 
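 *
 * For example (illustrative values): a READ (16) CDB whose LBA
 * field (bytes 2-9) is 00 00 00 00 00 00 10 00 and whose transfer
 * length field (bytes 10-13) is 00 00 00 08 decodes to LBA 0x1000
 * (4096) and a length of 8 blocks.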
1189 * 1190 * RETURNS: 1191 * @plba: the LBA 1192 * @plen: the transfer length 1193 */ 1194 static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen) 1195 { 1196 u64 lba = 0; 1197 u32 len = 0; 1198 1199 VPRINTK("sixteen-byte command\n"); 1200 1201 lba |= ((u64)cdb[2]) << 56; 1202 lba |= ((u64)cdb[3]) << 48; 1203 lba |= ((u64)cdb[4]) << 40; 1204 lba |= ((u64)cdb[5]) << 32; 1205 lba |= ((u64)cdb[6]) << 24; 1206 lba |= ((u64)cdb[7]) << 16; 1207 lba |= ((u64)cdb[8]) << 8; 1208 lba |= ((u64)cdb[9]); 1209 1210 len |= ((u32)cdb[10]) << 24; 1211 len |= ((u32)cdb[11]) << 16; 1212 len |= ((u32)cdb[12]) << 8; 1213 len |= ((u32)cdb[13]); 1214 1215 *plba = lba; 1216 *plen = len; 1217 } 1218 1219 /** 1220 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one 1221 * @qc: Storage for translated ATA taskfile 1222 * 1223 * Converts SCSI VERIFY command to an ATA READ VERIFY command. 1224 * 1225 * LOCKING: 1226 * spin_lock_irqsave(host lock) 1227 * 1228 * RETURNS: 1229 * Zero on success, non-zero on error. 1230 */ 1231 static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc) 1232 { 1233 struct scsi_cmnd *scmd = qc->scsicmd; 1234 struct ata_taskfile *tf = &qc->tf; 1235 struct ata_device *dev = qc->dev; 1236 u64 dev_sectors = qc->dev->n_sectors; 1237 const u8 *cdb = scmd->cmnd; 1238 u64 block; 1239 u32 n_block; 1240 1241 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1242 tf->protocol = ATA_PROT_NODATA; 1243 1244 if (cdb[0] == VERIFY) { 1245 if (scmd->cmd_len < 10) 1246 goto invalid_fld; 1247 scsi_10_lba_len(cdb, &block, &n_block); 1248 } else if (cdb[0] == VERIFY_16) { 1249 if (scmd->cmd_len < 16) 1250 goto invalid_fld; 1251 scsi_16_lba_len(cdb, &block, &n_block); 1252 } else 1253 goto invalid_fld; 1254 1255 if (!n_block) 1256 goto nothing_to_do; 1257 if (block >= dev_sectors) 1258 goto out_of_range; 1259 if ((block + n_block) > dev_sectors) 1260 goto out_of_range; 1261 1262 if (dev->flags & ATA_DFLAG_LBA) { 1263 tf->flags |= ATA_TFLAG_LBA; 1264 1265 if (lba_28_ok(block, n_block)) { 1266 /* use LBA28 */ 1267 tf->command = ATA_CMD_VERIFY; 1268 tf->device |= (block >> 24) & 0xf; 1269 } else if (lba_48_ok(block, n_block)) { 1270 if (!(dev->flags & ATA_DFLAG_LBA48)) 1271 goto out_of_range; 1272 1273 /* use LBA48 */ 1274 tf->flags |= ATA_TFLAG_LBA48; 1275 tf->command = ATA_CMD_VERIFY_EXT; 1276 1277 tf->hob_nsect = (n_block >> 8) & 0xff; 1278 1279 tf->hob_lbah = (block >> 40) & 0xff; 1280 tf->hob_lbam = (block >> 32) & 0xff; 1281 tf->hob_lbal = (block >> 24) & 0xff; 1282 } else 1283 /* request too large even for LBA48 */ 1284 goto out_of_range; 1285 1286 tf->nsect = n_block & 0xff; 1287 1288 tf->lbah = (block >> 16) & 0xff; 1289 tf->lbam = (block >> 8) & 0xff; 1290 tf->lbal = block & 0xff; 1291 1292 tf->device |= ATA_LBA; 1293 } else { 1294 /* CHS */ 1295 u32 sect, head, cyl, track; 1296 1297 if (!lba_28_ok(block, n_block)) 1298 goto out_of_range; 1299 1300 /* Convert LBA to CHS */ 1301 track = (u32)block / dev->sectors; 1302 cyl = track / dev->heads; 1303 head = track % dev->heads; 1304 sect = (u32)block % dev->sectors + 1; 1305 1306 DPRINTK("block %u track %u cyl %u head %u sect %u\n", 1307 (u32)block, track, cyl, head, sect); 1308 1309 /* Check whether the converted CHS can fit. 
1310 Cylinder: 0-65535 1311 Head: 0-15 1312 Sector: 1-255*/ 1313 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) 1314 goto out_of_range; 1315 1316 tf->command = ATA_CMD_VERIFY; 1317 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ 1318 tf->lbal = sect; 1319 tf->lbam = cyl; 1320 tf->lbah = cyl >> 8; 1321 tf->device |= head; 1322 } 1323 1324 return 0; 1325 1326 invalid_fld: 1327 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0); 1328 /* "Invalid field in cbd" */ 1329 return 1; 1330 1331 out_of_range: 1332 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0); 1333 /* "Logical Block Address out of range" */ 1334 return 1; 1335 1336 nothing_to_do: 1337 scmd->result = SAM_STAT_GOOD; 1338 return 1; 1339 } 1340 1341 /** 1342 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one 1343 * @qc: Storage for translated ATA taskfile 1344 * 1345 * Converts any of six SCSI read/write commands into the 1346 * ATA counterpart, including starting sector (LBA), 1347 * sector count, and taking into account the device's LBA48 1348 * support. 1349 * 1350 * Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and 1351 * %WRITE_16 are currently supported. 1352 * 1353 * LOCKING: 1354 * spin_lock_irqsave(host lock) 1355 * 1356 * RETURNS: 1357 * Zero on success, non-zero on error. 1358 */ 1359 static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) 1360 { 1361 struct scsi_cmnd *scmd = qc->scsicmd; 1362 const u8 *cdb = scmd->cmnd; 1363 unsigned int tf_flags = 0; 1364 u64 block; 1365 u32 n_block; 1366 int rc; 1367 1368 if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16) 1369 tf_flags |= ATA_TFLAG_WRITE; 1370 1371 /* Calculate the SCSI LBA, transfer length and FUA. */ 1372 switch (cdb[0]) { 1373 case READ_10: 1374 case WRITE_10: 1375 if (unlikely(scmd->cmd_len < 10)) 1376 goto invalid_fld; 1377 scsi_10_lba_len(cdb, &block, &n_block); 1378 if (unlikely(cdb[1] & (1 << 3))) 1379 tf_flags |= ATA_TFLAG_FUA; 1380 break; 1381 case READ_6: 1382 case WRITE_6: 1383 if (unlikely(scmd->cmd_len < 6)) 1384 goto invalid_fld; 1385 scsi_6_lba_len(cdb, &block, &n_block); 1386 1387 /* for 6-byte r/w commands, transfer length 0 1388 * means 256 blocks of data, not 0 block. 1389 */ 1390 if (!n_block) 1391 n_block = 256; 1392 break; 1393 case READ_16: 1394 case WRITE_16: 1395 if (unlikely(scmd->cmd_len < 16)) 1396 goto invalid_fld; 1397 scsi_16_lba_len(cdb, &block, &n_block); 1398 if (unlikely(cdb[1] & (1 << 3))) 1399 tf_flags |= ATA_TFLAG_FUA; 1400 break; 1401 default: 1402 DPRINTK("no-byte command\n"); 1403 goto invalid_fld; 1404 } 1405 1406 /* Check and compose ATA command */ 1407 if (!n_block) 1408 /* For 10-byte and 16-byte SCSI R/W commands, transfer 1409 * length 0 means transfer 0 block of data. 1410 * However, for ATA R/W commands, sector count 0 means 1411 * 256 or 65536 sectors, not 0 sectors as in SCSI. 1412 * 1413 * WARNING: one or two older ATA drives treat 0 as 0... 
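		 *
		 * Concretely (illustrative): a READ (10) with a
		 * transfer length of 0 is completed right here as
		 * "nothing to do", whereas blindly passing that 0
		 * through as an ATA sector count would have asked
		 * the drive for 256 (or 65536) sectors.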
1414 */ 1415 goto nothing_to_do; 1416 1417 qc->flags |= ATA_QCFLAG_IO; 1418 qc->nbytes = n_block * ATA_SECT_SIZE; 1419 1420 rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags, 1421 qc->tag); 1422 if (likely(rc == 0)) 1423 return 0; 1424 1425 if (rc == -ERANGE) 1426 goto out_of_range; 1427 /* treat all other errors as -EINVAL, fall through */ 1428 invalid_fld: 1429 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0); 1430 /* "Invalid field in cbd" */ 1431 return 1; 1432 1433 out_of_range: 1434 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0); 1435 /* "Logical Block Address out of range" */ 1436 return 1; 1437 1438 nothing_to_do: 1439 scmd->result = SAM_STAT_GOOD; 1440 return 1; 1441 } 1442 1443 static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) 1444 { 1445 struct ata_port *ap = qc->ap; 1446 struct scsi_cmnd *cmd = qc->scsicmd; 1447 u8 *cdb = cmd->cmnd; 1448 int need_sense = (qc->err_mask != 0); 1449 1450 /* For ATA pass thru (SAT) commands, generate a sense block if 1451 * user mandated it or if there's an error. Note that if we 1452 * generate because the user forced us to, a check condition 1453 * is generated and the ATA register values are returned 1454 * whether the command completed successfully or not. If there 1455 * was no error, SK, ASC and ASCQ will all be zero. 1456 */ 1457 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && 1458 ((cdb[2] & 0x20) || need_sense)) { 1459 ata_gen_passthru_sense(qc); 1460 } else { 1461 if (!need_sense) { 1462 cmd->result = SAM_STAT_GOOD; 1463 } else { 1464 /* TODO: decide which descriptor format to use 1465 * for 48b LBA devices and call that here 1466 * instead of the fixed desc, which is only 1467 * good for smaller LBA (and maybe CHS?) 1468 * devices. 1469 */ 1470 ata_gen_ata_sense(qc); 1471 } 1472 } 1473 1474 /* XXX: track spindown state for spindown skipping and warning */ 1475 if (unlikely(qc->tf.command == ATA_CMD_STANDBY || 1476 qc->tf.command == ATA_CMD_STANDBYNOW1)) 1477 qc->dev->flags |= ATA_DFLAG_SPUNDOWN; 1478 else if (likely(system_state != SYSTEM_HALT && 1479 system_state != SYSTEM_POWER_OFF)) 1480 qc->dev->flags &= ~ATA_DFLAG_SPUNDOWN; 1481 1482 if (need_sense && !ap->ops->error_handler) 1483 ata_dump_status(ap->print_id, &qc->result_tf); 1484 1485 qc->scsidone(cmd); 1486 1487 ata_qc_free(qc); 1488 } 1489 1490 /** 1491 * ata_scsi_translate - Translate then issue SCSI command to ATA device 1492 * @dev: ATA device to which the command is addressed 1493 * @cmd: SCSI command to execute 1494 * @done: SCSI command completion function 1495 * @xlat_func: Actor which translates @cmd to an ATA taskfile 1496 * 1497 * Our ->queuecommand() function has decided that the SCSI 1498 * command issued can be directly translated into an ATA 1499 * command, rather than handled internally. 1500 * 1501 * This function sets up an ata_queued_cmd structure for the 1502 * SCSI command, and sends that ata_queued_cmd to the hardware. 1503 * 1504 * The xlat_func argument (actor) returns 0 if ready to execute 1505 * ATA command, else 1 to finish translation. If 1 is returned 1506 * then cmd->result (and possibly cmd->sense_buffer) are assumed 1507 * to be set reflecting an error condition or clean (early) 1508 * termination. 1509 * 1510 * LOCKING: 1511 * spin_lock_irqsave(host lock) 1512 * 1513 * RETURNS: 1514 * 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command 1515 * needs to be deferred. 
1516 */ 1517 static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, 1518 void (*done)(struct scsi_cmnd *), 1519 ata_xlat_func_t xlat_func) 1520 { 1521 struct ata_port *ap = dev->link->ap; 1522 struct ata_queued_cmd *qc; 1523 int rc; 1524 1525 VPRINTK("ENTER\n"); 1526 1527 qc = ata_scsi_qc_new(dev, cmd, done); 1528 if (!qc) 1529 goto err_mem; 1530 1531 /* data is present; dma-map it */ 1532 if (cmd->sc_data_direction == DMA_FROM_DEVICE || 1533 cmd->sc_data_direction == DMA_TO_DEVICE) { 1534 if (unlikely(scsi_bufflen(cmd) < 1)) { 1535 ata_dev_printk(dev, KERN_WARNING, 1536 "WARNING: zero len r/w req\n"); 1537 goto err_did; 1538 } 1539 1540 ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd)); 1541 1542 qc->dma_dir = cmd->sc_data_direction; 1543 } 1544 1545 qc->complete_fn = ata_scsi_qc_complete; 1546 1547 if (xlat_func(qc)) 1548 goto early_finish; 1549 1550 if (ap->ops->qc_defer) { 1551 if ((rc = ap->ops->qc_defer(qc))) 1552 goto defer; 1553 } 1554 1555 /* select device, send command to hardware */ 1556 ata_qc_issue(qc); 1557 1558 VPRINTK("EXIT\n"); 1559 return 0; 1560 1561 early_finish: 1562 ata_qc_free(qc); 1563 qc->scsidone(cmd); 1564 DPRINTK("EXIT - early finish (good or error)\n"); 1565 return 0; 1566 1567 err_did: 1568 ata_qc_free(qc); 1569 cmd->result = (DID_ERROR << 16); 1570 qc->scsidone(cmd); 1571 err_mem: 1572 DPRINTK("EXIT - internal\n"); 1573 return 0; 1574 1575 defer: 1576 ata_qc_free(qc); 1577 DPRINTK("EXIT - defer\n"); 1578 if (rc == ATA_DEFER_LINK) 1579 return SCSI_MLQUEUE_DEVICE_BUSY; 1580 else 1581 return SCSI_MLQUEUE_HOST_BUSY; 1582 } 1583 1584 /** 1585 * ata_scsi_rbuf_get - Map response buffer. 1586 * @cmd: SCSI command containing buffer to be mapped. 1587 * @buf_out: Pointer to mapped area. 1588 * 1589 * Maps buffer contained within SCSI command @cmd. 1590 * 1591 * LOCKING: 1592 * spin_lock_irqsave(host lock) 1593 * 1594 * RETURNS: 1595 * Length of response buffer. 1596 */ 1597 1598 static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out) 1599 { 1600 u8 *buf; 1601 unsigned int buflen; 1602 1603 struct scatterlist *sg = scsi_sglist(cmd); 1604 1605 if (sg) { 1606 buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; 1607 buflen = sg->length; 1608 } else { 1609 buf = NULL; 1610 buflen = 0; 1611 } 1612 1613 *buf_out = buf; 1614 return buflen; 1615 } 1616 1617 /** 1618 * ata_scsi_rbuf_put - Unmap response buffer. 1619 * @cmd: SCSI command containing buffer to be unmapped. 1620 * @buf: buffer to unmap 1621 * 1622 * Unmaps response buffer contained within @cmd. 1623 * 1624 * LOCKING: 1625 * spin_lock_irqsave(host lock) 1626 */ 1627 1628 static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) 1629 { 1630 struct scatterlist *sg = scsi_sglist(cmd); 1631 if (sg) 1632 kunmap_atomic(buf - sg->offset, KM_IRQ0); 1633 } 1634 1635 /** 1636 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators 1637 * @args: device IDENTIFY data / SCSI command of interest. 1638 * @actor: Callback hook for desired SCSI command simulator 1639 * 1640 * Takes care of the hard work of simulating a SCSI command... 1641 * Mapping the response buffer, calling the command's handler, 1642 * and handling the handler's return value. This return value 1643 * indicates whether the handler wishes the SCSI command to be 1644 * completed successfully (0), or not (in which case cmd->result 1645 * and sense buffer are assumed to be set). 
1646 * 1647 * LOCKING: 1648 * spin_lock_irqsave(host lock) 1649 */ 1650 1651 void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 1652 unsigned int (*actor) (struct ata_scsi_args *args, 1653 u8 *rbuf, unsigned int buflen)) 1654 { 1655 u8 *rbuf; 1656 unsigned int buflen, rc; 1657 struct scsi_cmnd *cmd = args->cmd; 1658 1659 buflen = ata_scsi_rbuf_get(cmd, &rbuf); 1660 memset(rbuf, 0, buflen); 1661 rc = actor(args, rbuf, buflen); 1662 ata_scsi_rbuf_put(cmd, rbuf); 1663 1664 if (rc == 0) 1665 cmd->result = SAM_STAT_GOOD; 1666 args->done(cmd); 1667 } 1668 1669 /** 1670 * ATA_SCSI_RBUF_SET - helper to set values in SCSI response buffer 1671 * @idx: byte index into SCSI response buffer 1672 * @val: value to set 1673 * 1674 * To be used by SCSI command simulator functions. This macros 1675 * expects two local variables, u8 *rbuf and unsigned int buflen, 1676 * are in scope. 1677 * 1678 * LOCKING: 1679 * None. 1680 */ 1681 #define ATA_SCSI_RBUF_SET(idx, val) do { \ 1682 if ((idx) < buflen) rbuf[(idx)] = (u8)(val); \ 1683 } while (0) 1684 1685 /** 1686 * ata_scsiop_inq_std - Simulate INQUIRY command 1687 * @args: device IDENTIFY data / SCSI command of interest. 1688 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1689 * @buflen: Response buffer length. 1690 * 1691 * Returns standard device identification data associated 1692 * with non-VPD INQUIRY command output. 1693 * 1694 * LOCKING: 1695 * spin_lock_irqsave(host lock) 1696 */ 1697 1698 unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 1699 unsigned int buflen) 1700 { 1701 u8 hdr[] = { 1702 TYPE_DISK, 1703 0, 1704 0x5, /* claim SPC-3 version compatibility */ 1705 2, 1706 95 - 4 1707 }; 1708 1709 /* set scsi removeable (RMB) bit per ata bit */ 1710 if (ata_id_removeable(args->id)) 1711 hdr[1] |= (1 << 7); 1712 1713 VPRINTK("ENTER\n"); 1714 1715 memcpy(rbuf, hdr, sizeof(hdr)); 1716 1717 if (buflen > 35) { 1718 memcpy(&rbuf[8], "ATA ", 8); 1719 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16); 1720 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4); 1721 if (rbuf[32] == 0 || rbuf[32] == ' ') 1722 memcpy(&rbuf[32], "n/a ", 4); 1723 } 1724 1725 if (buflen > 63) { 1726 const u8 versions[] = { 1727 0x60, /* SAM-3 (no version claimed) */ 1728 1729 0x03, 1730 0x20, /* SBC-2 (no version claimed) */ 1731 1732 0x02, 1733 0x60 /* SPC-3 (no version claimed) */ 1734 }; 1735 1736 memcpy(rbuf + 59, versions, sizeof(versions)); 1737 } 1738 1739 return 0; 1740 } 1741 1742 /** 1743 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages 1744 * @args: device IDENTIFY data / SCSI command of interest. 1745 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1746 * @buflen: Response buffer length. 1747 * 1748 * Returns list of inquiry VPD pages available. 1749 * 1750 * LOCKING: 1751 * spin_lock_irqsave(host lock) 1752 */ 1753 1754 unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, 1755 unsigned int buflen) 1756 { 1757 const u8 pages[] = { 1758 0x00, /* page 0x00, this page */ 1759 0x80, /* page 0x80, unit serial no page */ 1760 0x83 /* page 0x83, device ident page */ 1761 }; 1762 rbuf[3] = sizeof(pages); /* number of supported VPD pages */ 1763 1764 if (buflen > 6) 1765 memcpy(rbuf + 4, pages, sizeof(pages)); 1766 1767 return 0; 1768 } 1769 1770 /** 1771 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number 1772 * @args: device IDENTIFY data / SCSI command of interest. 1773 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 
1774 * @buflen: Response buffer length. 1775 * 1776 * Returns ATA device serial number. 1777 * 1778 * LOCKING: 1779 * spin_lock_irqsave(host lock) 1780 */ 1781 1782 unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, 1783 unsigned int buflen) 1784 { 1785 const u8 hdr[] = { 1786 0, 1787 0x80, /* this page code */ 1788 0, 1789 ATA_ID_SERNO_LEN, /* page len */ 1790 }; 1791 memcpy(rbuf, hdr, sizeof(hdr)); 1792 1793 if (buflen > (ATA_ID_SERNO_LEN + 4 - 1)) 1794 ata_id_string(args->id, (unsigned char *) &rbuf[4], 1795 ATA_ID_SERNO, ATA_ID_SERNO_LEN); 1796 1797 return 0; 1798 } 1799 1800 /** 1801 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity 1802 * @args: device IDENTIFY data / SCSI command of interest. 1803 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1804 * @buflen: Response buffer length. 1805 * 1806 * Yields two logical unit device identification designators: 1807 * - vendor specific ASCII containing the ATA serial number 1808 * - SAT defined "t10 vendor id based" containing ASCII vendor 1809 * name ("ATA "), model and serial numbers. 1810 * 1811 * LOCKING: 1812 * spin_lock_irqsave(host lock) 1813 */ 1814 1815 unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, 1816 unsigned int buflen) 1817 { 1818 int num; 1819 const int sat_model_serial_desc_len = 68; 1820 1821 rbuf[1] = 0x83; /* this page code */ 1822 num = 4; 1823 1824 if (buflen > (ATA_ID_SERNO_LEN + num + 3)) { 1825 /* piv=0, assoc=lu, code_set=ACSII, designator=vendor */ 1826 rbuf[num + 0] = 2; 1827 rbuf[num + 3] = ATA_ID_SERNO_LEN; 1828 num += 4; 1829 ata_id_string(args->id, (unsigned char *) rbuf + num, 1830 ATA_ID_SERNO, ATA_ID_SERNO_LEN); 1831 num += ATA_ID_SERNO_LEN; 1832 } 1833 if (buflen > (sat_model_serial_desc_len + num + 3)) { 1834 /* SAT defined lu model and serial numbers descriptor */ 1835 /* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */ 1836 rbuf[num + 0] = 2; 1837 rbuf[num + 1] = 1; 1838 rbuf[num + 3] = sat_model_serial_desc_len; 1839 num += 4; 1840 memcpy(rbuf + num, "ATA ", 8); 1841 num += 8; 1842 ata_id_string(args->id, (unsigned char *) rbuf + num, 1843 ATA_ID_PROD, ATA_ID_PROD_LEN); 1844 num += ATA_ID_PROD_LEN; 1845 ata_id_string(args->id, (unsigned char *) rbuf + num, 1846 ATA_ID_SERNO, ATA_ID_SERNO_LEN); 1847 num += ATA_ID_SERNO_LEN; 1848 } 1849 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */ 1850 return 0; 1851 } 1852 1853 /** 1854 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info 1855 * @args: device IDENTIFY data / SCSI command of interest. 1856 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1857 * @buflen: Response buffer length. 1858 * 1859 * Yields SAT-specified ATA VPD page. 
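 *
 * Layout assembled below (per SAT): bytes 8-15 carry the SAT
 * vendor string ("linux   "), bytes 16-31 the product string
 * ("libata          "), bytes 32-35 the device firmware revision,
 * bytes 36-55 a synthesized D2H Register FIS standing in for the
 * device signature, byte 56 the IDENTIFY DEVICE command code, and
 * bytes 60 onward the raw IDENTIFY data.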
1860 * 1861 * LOCKING: 1862 * spin_lock_irqsave(host lock) 1863 */ 1864 1865 unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf, 1866 unsigned int buflen) 1867 { 1868 u8 pbuf[60]; 1869 struct ata_taskfile tf; 1870 unsigned int i; 1871 1872 if (!buflen) 1873 return 0; 1874 1875 memset(&pbuf, 0, sizeof(pbuf)); 1876 memset(&tf, 0, sizeof(tf)); 1877 1878 pbuf[1] = 0x89; /* our page code */ 1879 pbuf[2] = (0x238 >> 8); /* page size fixed at 238h */ 1880 pbuf[3] = (0x238 & 0xff); 1881 1882 memcpy(&pbuf[8], "linux ", 8); 1883 memcpy(&pbuf[16], "libata ", 16); 1884 memcpy(&pbuf[32], DRV_VERSION, 4); 1885 ata_id_string(args->id, &pbuf[32], ATA_ID_FW_REV, 4); 1886 1887 /* we don't store the ATA device signature, so we fake it */ 1888 1889 tf.command = ATA_DRDY; /* really, this is Status reg */ 1890 tf.lbal = 0x1; 1891 tf.nsect = 0x1; 1892 1893 ata_tf_to_fis(&tf, 0, 1, &pbuf[36]); /* TODO: PMP? */ 1894 pbuf[36] = 0x34; /* force D2H Reg FIS (34h) */ 1895 1896 pbuf[56] = ATA_CMD_ID_ATA; 1897 1898 i = min(buflen, 60U); 1899 memcpy(rbuf, &pbuf[0], i); 1900 buflen -= i; 1901 1902 if (!buflen) 1903 return 0; 1904 1905 memcpy(&rbuf[60], &args->id[0], min(buflen, 512U)); 1906 return 0; 1907 } 1908 1909 /** 1910 * ata_scsiop_noop - Command handler that simply returns success. 1911 * @args: device IDENTIFY data / SCSI command of interest. 1912 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1913 * @buflen: Response buffer length. 1914 * 1915 * No operation. Simply returns success to caller, to indicate 1916 * that the caller should successfully complete this SCSI command. 1917 * 1918 * LOCKING: 1919 * spin_lock_irqsave(host lock) 1920 */ 1921 1922 unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf, 1923 unsigned int buflen) 1924 { 1925 VPRINTK("ENTER\n"); 1926 return 0; 1927 } 1928 1929 /** 1930 * ata_msense_push - Push data onto MODE SENSE data output buffer 1931 * @ptr_io: (input/output) Location to store more output data 1932 * @last: End of output data buffer 1933 * @buf: Pointer to BLOB being added to output buffer 1934 * @buflen: Length of BLOB 1935 * 1936 * Store MODE SENSE data on an output buffer. 1937 * 1938 * LOCKING: 1939 * None. 1940 */ 1941 1942 static void ata_msense_push(u8 **ptr_io, const u8 *last, 1943 const u8 *buf, unsigned int buflen) 1944 { 1945 u8 *ptr = *ptr_io; 1946 1947 if ((ptr + buflen - 1) > last) 1948 return; 1949 1950 memcpy(ptr, buf, buflen); 1951 1952 ptr += buflen; 1953 1954 *ptr_io = ptr; 1955 } 1956 1957 /** 1958 * ata_msense_caching - Simulate MODE SENSE caching info page 1959 * @id: device IDENTIFY data 1960 * @ptr_io: (input/output) Location to store more output data 1961 * @last: End of output data buffer 1962 * 1963 * Generate a caching info page, which conditionally indicates 1964 * write caching to the SCSI layer, depending on device 1965 * capabilities. 1966 * 1967 * LOCKING: 1968 * None. 
1969 */ 1970 1971 static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io, 1972 const u8 *last) 1973 { 1974 u8 page[CACHE_MPAGE_LEN]; 1975 1976 memcpy(page, def_cache_mpage, sizeof(page)); 1977 if (ata_id_wcache_enabled(id)) 1978 page[2] |= (1 << 2); /* write cache enable */ 1979 if (!ata_id_rahead_enabled(id)) 1980 page[12] |= (1 << 5); /* disable read ahead */ 1981 1982 ata_msense_push(ptr_io, last, page, sizeof(page)); 1983 return sizeof(page); 1984 } 1985 1986 /** 1987 * ata_msense_ctl_mode - Simulate MODE SENSE control mode page 1988 * @dev: Device associated with this MODE SENSE command 1989 * @ptr_io: (input/output) Location to store more output data 1990 * @last: End of output data buffer 1991 * 1992 * Generate a generic MODE SENSE control mode page. 1993 * 1994 * LOCKING: 1995 * None. 1996 */ 1997 1998 static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last) 1999 { 2000 ata_msense_push(ptr_io, last, def_control_mpage, 2001 sizeof(def_control_mpage)); 2002 return sizeof(def_control_mpage); 2003 } 2004 2005 /** 2006 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page 2007 * @dev: Device associated with this MODE SENSE command 2008 * @ptr_io: (input/output) Location to store more output data 2009 * @last: End of output data buffer 2010 * 2011 * Generate a generic MODE SENSE r/w error recovery page. 2012 * 2013 * LOCKING: 2014 * None. 2015 */ 2016 2017 static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last) 2018 { 2019 2020 ata_msense_push(ptr_io, last, def_rw_recovery_mpage, 2021 sizeof(def_rw_recovery_mpage)); 2022 return sizeof(def_rw_recovery_mpage); 2023 } 2024 2025 /* 2026 * We can turn this into a real blacklist if it's needed, for now just 2027 * blacklist any Maxtor BANC1G10 revision firmware 2028 */ 2029 static int ata_dev_supports_fua(u16 *id) 2030 { 2031 unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1]; 2032 2033 if (!libata_fua) 2034 return 0; 2035 if (!ata_id_has_fua(id)) 2036 return 0; 2037 2038 ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model)); 2039 ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw)); 2040 2041 if (strcmp(model, "Maxtor")) 2042 return 1; 2043 if (strcmp(fw, "BANC1G10")) 2044 return 1; 2045 2046 return 0; /* blacklisted */ 2047 } 2048 2049 /** 2050 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands 2051 * @args: device IDENTIFY data / SCSI command of interest. 2052 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 2053 * @buflen: Response buffer length. 2054 * 2055 * Simulate MODE SENSE commands. Assume this is invoked for direct 2056 * access devices (e.g. disks) only. There should be no block 2057 * descriptor for other device types. 
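 * Only the current values of the r/w error recovery, caching and
 * control mode pages (plus ALL_MPAGES) are reported; subpages other
 * than ALL_SUB_MPAGES are rejected as invalid.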
2058 *
2059 * LOCKING:
2060 * spin_lock_irqsave(host lock)
2061 */
2062
2063 unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
2064 unsigned int buflen)
2065 {
2066 struct ata_device *dev = args->dev;
2067 u8 *scsicmd = args->cmd->cmnd, *p, *last;
2068 const u8 sat_blk_desc[] = {
2069 0, 0, 0, 0, /* number of blocks: sat unspecified */
2070 0,
2071 0, 0x2, 0x0 /* block length: 512 bytes */
2072 };
2073 u8 pg, spg;
2074 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen;
2075 u8 dpofua;
2076
2077 VPRINTK("ENTER\n");
2078
2079 six_byte = (scsicmd[0] == MODE_SENSE);
2080 ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == ebd */
2081 /*
2082 * LLBA bit in msense(10) ignored (compliant)
2083 */
2084
2085 page_control = scsicmd[2] >> 6;
2086 switch (page_control) {
2087 case 0: /* current */
2088 break; /* supported */
2089 case 3: /* saved */
2090 goto saving_not_supp;
2091 case 1: /* changeable */
2092 case 2: /* defaults */
2093 default:
2094 goto invalid_fld;
2095 }
2096
2097 if (six_byte) {
2098 output_len = 4 + (ebd ? 8 : 0);
2099 alloc_len = scsicmd[4];
2100 } else {
2101 output_len = 8 + (ebd ? 8 : 0);
2102 alloc_len = (scsicmd[7] << 8) + scsicmd[8];
2103 }
2104 minlen = (alloc_len < buflen) ? alloc_len : buflen;
2105
2106 p = rbuf + output_len;
2107 last = rbuf + minlen - 1;
2108
2109 pg = scsicmd[2] & 0x3f;
2110 spg = scsicmd[3];
2111 /*
2112 * No mode subpages supported (yet) but asking for _all_
2113 * subpages may be valid
2114 */
2115 if (spg && (spg != ALL_SUB_MPAGES))
2116 goto invalid_fld;
2117
2118 switch(pg) {
2119 case RW_RECOVERY_MPAGE:
2120 output_len += ata_msense_rw_recovery(&p, last);
2121 break;
2122
2123 case CACHE_MPAGE:
2124 output_len += ata_msense_caching(args->id, &p, last);
2125 break;
2126
2127 case CONTROL_MPAGE: {
2128 output_len += ata_msense_ctl_mode(&p, last);
2129 break;
2130 }
2131
2132 case ALL_MPAGES:
2133 output_len += ata_msense_rw_recovery(&p, last);
2134 output_len += ata_msense_caching(args->id, &p, last);
2135 output_len += ata_msense_ctl_mode(&p, last);
2136 break;
2137
2138 default: /* invalid page code */
2139 goto invalid_fld;
2140 }
2141
2142 if (minlen < 1)
2143 return 0;
2144
2145 dpofua = 0;
2146 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
2147 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
2148 dpofua = 1 << 4;
2149
2150 if (six_byte) {
2151 output_len--;
2152 rbuf[0] = output_len;
2153 if (minlen > 2)
2154 rbuf[2] |= dpofua;
2155 if (ebd) {
2156 if (minlen > 3)
2157 rbuf[3] = sizeof(sat_blk_desc);
2158 if (minlen > 11)
2159 memcpy(rbuf + 4, sat_blk_desc,
2160 sizeof(sat_blk_desc));
2161 }
2162 } else {
2163 output_len -= 2;
2164 rbuf[0] = output_len >> 8;
2165 if (minlen > 1)
2166 rbuf[1] = output_len;
2167 if (minlen > 3)
2168 rbuf[3] |= dpofua;
2169 if (ebd) {
2170 if (minlen > 7)
2171 rbuf[7] = sizeof(sat_blk_desc);
2172 if (minlen > 15)
2173 memcpy(rbuf + 8, sat_blk_desc,
2174 sizeof(sat_blk_desc));
2175 }
2176 }
2177 return 0;
2178
2179 invalid_fld:
2180 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
2181 /* "Invalid field in cdb" */
2182 return 1;
2183
2184 saving_not_supp:
2185 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
2186 /* "Saving parameters not supported" */
2187 return 1;
2188 }
2189
2190 /**
2191 * ata_scsiop_read_cap - Simulate READ CAPACITY and READ CAPACITY(16) commands
2192 * @args: device IDENTIFY data / SCSI command of interest.
2193 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2194 * @buflen: Response buffer length. 2195 * 2196 * Simulate READ CAPACITY commands. 2197 * 2198 * LOCKING: 2199 * None. 2200 */ 2201 unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, 2202 unsigned int buflen) 2203 { 2204 u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */ 2205 2206 VPRINTK("ENTER\n"); 2207 2208 if (args->cmd->cmnd[0] == READ_CAPACITY) { 2209 if (last_lba >= 0xffffffffULL) 2210 last_lba = 0xffffffff; 2211 2212 /* sector count, 32-bit */ 2213 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 3)); 2214 ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 2)); 2215 ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 1)); 2216 ATA_SCSI_RBUF_SET(3, last_lba); 2217 2218 /* sector size */ 2219 ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8); 2220 ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE & 0xff); 2221 } else { 2222 /* sector count, 64-bit */ 2223 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7)); 2224 ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 6)); 2225 ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 5)); 2226 ATA_SCSI_RBUF_SET(3, last_lba >> (8 * 4)); 2227 ATA_SCSI_RBUF_SET(4, last_lba >> (8 * 3)); 2228 ATA_SCSI_RBUF_SET(5, last_lba >> (8 * 2)); 2229 ATA_SCSI_RBUF_SET(6, last_lba >> (8 * 1)); 2230 ATA_SCSI_RBUF_SET(7, last_lba); 2231 2232 /* sector size */ 2233 ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8); 2234 ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE & 0xff); 2235 } 2236 2237 return 0; 2238 } 2239 2240 /** 2241 * ata_scsiop_report_luns - Simulate REPORT LUNS command 2242 * @args: device IDENTIFY data / SCSI command of interest. 2243 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 2244 * @buflen: Response buffer length. 2245 * 2246 * Simulate REPORT LUNS command. 2247 * 2248 * LOCKING: 2249 * spin_lock_irqsave(host lock) 2250 */ 2251 2252 unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, 2253 unsigned int buflen) 2254 { 2255 VPRINTK("ENTER\n"); 2256 rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */ 2257 2258 return 0; 2259 } 2260 2261 /** 2262 * ata_scsi_set_sense - Set SCSI sense data and status 2263 * @cmd: SCSI request to be handled 2264 * @sk: SCSI-defined sense key 2265 * @asc: SCSI-defined additional sense code 2266 * @ascq: SCSI-defined additional sense code qualifier 2267 * 2268 * Helper function that builds a valid fixed format, current 2269 * response code and the given sense key (sk), additional sense 2270 * code (asc) and additional sense code qualifier (ascq) with 2271 * a SCSI command status of %SAM_STAT_CHECK_CONDITION and 2272 * DRIVER_SENSE set in the upper bits of scsi_cmnd::result . 2273 * 2274 * LOCKING: 2275 * Not required 2276 */ 2277 2278 void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) 2279 { 2280 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 2281 2282 cmd->sense_buffer[0] = 0x70; /* fixed format, current */ 2283 cmd->sense_buffer[2] = sk; 2284 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */ 2285 cmd->sense_buffer[12] = asc; 2286 cmd->sense_buffer[13] = ascq; 2287 } 2288 2289 /** 2290 * ata_scsi_badcmd - End a SCSI request with an error 2291 * @cmd: SCSI request to be handled 2292 * @done: SCSI command completion function 2293 * @asc: SCSI-defined additional sense code 2294 * @ascq: SCSI-defined additional sense code qualifier 2295 * 2296 * Helper function that completes a SCSI command with 2297 * %SAM_STAT_CHECK_CONDITION, with a sense key %ILLEGAL_REQUEST 2298 * and the specified additional sense codes. 
2299 * 2300 * LOCKING: 2301 * spin_lock_irqsave(host lock) 2302 */ 2303 2304 void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) 2305 { 2306 DPRINTK("ENTER\n"); 2307 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq); 2308 2309 done(cmd); 2310 } 2311 2312 static void atapi_sense_complete(struct ata_queued_cmd *qc) 2313 { 2314 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) { 2315 /* FIXME: not quite right; we don't want the 2316 * translation of taskfile registers into 2317 * a sense descriptors, since that's only 2318 * correct for ATA, not ATAPI 2319 */ 2320 ata_gen_passthru_sense(qc); 2321 } 2322 2323 qc->scsidone(qc->scsicmd); 2324 ata_qc_free(qc); 2325 } 2326 2327 /* is it pointless to prefer PIO for "safety reasons"? */ 2328 static inline int ata_pio_use_silly(struct ata_port *ap) 2329 { 2330 return (ap->flags & ATA_FLAG_PIO_DMA); 2331 } 2332 2333 static void atapi_request_sense(struct ata_queued_cmd *qc) 2334 { 2335 struct ata_port *ap = qc->ap; 2336 struct scsi_cmnd *cmd = qc->scsicmd; 2337 2338 DPRINTK("ATAPI request sense\n"); 2339 2340 /* FIXME: is this needed? */ 2341 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2342 2343 ap->ops->tf_read(ap, &qc->tf); 2344 2345 /* fill these in, for the case where they are -not- overwritten */ 2346 cmd->sense_buffer[0] = 0x70; 2347 cmd->sense_buffer[2] = qc->tf.feature >> 4; 2348 2349 ata_qc_reinit(qc); 2350 2351 /* setup sg table and init transfer direction */ 2352 sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); 2353 ata_sg_init(qc, &qc->sgent, 1); 2354 qc->dma_dir = DMA_FROM_DEVICE; 2355 2356 memset(&qc->cdb, 0, qc->dev->cdb_len); 2357 qc->cdb[0] = REQUEST_SENSE; 2358 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; 2359 2360 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2361 qc->tf.command = ATA_CMD_PACKET; 2362 2363 if (ata_pio_use_silly(ap)) { 2364 qc->tf.protocol = ATAPI_PROT_DMA; 2365 qc->tf.feature |= ATAPI_PKT_DMA; 2366 } else { 2367 qc->tf.protocol = ATAPI_PROT_PIO; 2368 qc->tf.lbam = SCSI_SENSE_BUFFERSIZE; 2369 qc->tf.lbah = 0; 2370 } 2371 qc->nbytes = SCSI_SENSE_BUFFERSIZE; 2372 2373 qc->complete_fn = atapi_sense_complete; 2374 2375 ata_qc_issue(qc); 2376 2377 DPRINTK("EXIT\n"); 2378 } 2379 2380 static void atapi_qc_complete(struct ata_queued_cmd *qc) 2381 { 2382 struct scsi_cmnd *cmd = qc->scsicmd; 2383 unsigned int err_mask = qc->err_mask; 2384 2385 VPRINTK("ENTER, err_mask 0x%X\n", err_mask); 2386 2387 /* handle completion from new EH */ 2388 if (unlikely(qc->ap->ops->error_handler && 2389 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) { 2390 2391 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) { 2392 /* FIXME: not quite right; we don't want the 2393 * translation of taskfile registers into a 2394 * sense descriptors, since that's only 2395 * correct for ATA, not ATAPI 2396 */ 2397 ata_gen_passthru_sense(qc); 2398 } 2399 2400 /* SCSI EH automatically locks door if sdev->locked is 2401 * set. Sometimes door lock request continues to 2402 * fail, for example, when no media is present. This 2403 * creates a loop - SCSI EH issues door lock which 2404 * fails and gets invoked again to acquire sense data 2405 * for the failed command. 2406 * 2407 * If door lock fails, always clear sdev->locked to 2408 * avoid this infinite loop. 
2409 */ 2410 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) 2411 qc->dev->sdev->locked = 0; 2412 2413 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; 2414 qc->scsidone(cmd); 2415 ata_qc_free(qc); 2416 return; 2417 } 2418 2419 /* successful completion or old EH failure path */ 2420 if (unlikely(err_mask & AC_ERR_DEV)) { 2421 cmd->result = SAM_STAT_CHECK_CONDITION; 2422 atapi_request_sense(qc); 2423 return; 2424 } else if (unlikely(err_mask)) { 2425 /* FIXME: not quite right; we don't want the 2426 * translation of taskfile registers into 2427 * a sense descriptors, since that's only 2428 * correct for ATA, not ATAPI 2429 */ 2430 ata_gen_passthru_sense(qc); 2431 } else { 2432 u8 *scsicmd = cmd->cmnd; 2433 2434 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { 2435 u8 *buf = NULL; 2436 unsigned int buflen; 2437 2438 buflen = ata_scsi_rbuf_get(cmd, &buf); 2439 2440 /* ATAPI devices typically report zero for their SCSI version, 2441 * and sometimes deviate from the spec WRT response data 2442 * format. If SCSI version is reported as zero like normal, 2443 * then we make the following fixups: 1) Fake MMC-5 version, 2444 * to indicate to the Linux scsi midlayer this is a modern 2445 * device. 2) Ensure response data format / ATAPI information 2446 * are always correct. 2447 */ 2448 if (buf[2] == 0) { 2449 buf[2] = 0x5; 2450 buf[3] = 0x32; 2451 } 2452 2453 ata_scsi_rbuf_put(cmd, buf); 2454 } 2455 2456 cmd->result = SAM_STAT_GOOD; 2457 } 2458 2459 qc->scsidone(cmd); 2460 ata_qc_free(qc); 2461 } 2462 /** 2463 * atapi_xlat - Initialize PACKET taskfile 2464 * @qc: command structure to be initialized 2465 * 2466 * LOCKING: 2467 * spin_lock_irqsave(host lock) 2468 * 2469 * RETURNS: 2470 * Zero on success, non-zero on failure. 2471 */ 2472 static unsigned int atapi_xlat(struct ata_queued_cmd *qc) 2473 { 2474 struct scsi_cmnd *scmd = qc->scsicmd; 2475 struct ata_device *dev = qc->dev; 2476 int using_pio = (dev->flags & ATA_DFLAG_PIO); 2477 int nodata = (scmd->sc_data_direction == DMA_NONE); 2478 unsigned int nbytes; 2479 2480 memset(qc->cdb, 0, dev->cdb_len); 2481 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); 2482 2483 qc->complete_fn = atapi_qc_complete; 2484 2485 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2486 if (scmd->sc_data_direction == DMA_TO_DEVICE) { 2487 qc->tf.flags |= ATA_TFLAG_WRITE; 2488 DPRINTK("direction: write\n"); 2489 } 2490 2491 qc->tf.command = ATA_CMD_PACKET; 2492 qc->nbytes = scsi_bufflen(scmd); 2493 2494 /* check whether ATAPI DMA is safe */ 2495 if (!using_pio && ata_check_atapi_dma(qc)) 2496 using_pio = 1; 2497 2498 /* Some controller variants snoop this value for Packet 2499 * transfers to do state machine and FIFO management. Thus we 2500 * want to set it properly, and for DMA where it is 2501 * effectively meaningless. 2502 */ 2503 nbytes = min(qc->nbytes, (unsigned int)63 * 1024); 2504 2505 /* Most ATAPI devices which honor transfer chunk size don't 2506 * behave according to the spec when odd chunk size which 2507 * matches the transfer length is specified. If the number of 2508 * bytes to transfer is 2n+1. According to the spec, what 2509 * should happen is to indicate that 2n+1 is going to be 2510 * transferred and transfer 2n+2 bytes where the last byte is 2511 * padding. 2512 * 2513 * In practice, this doesn't happen. ATAPI devices first 2514 * indicate and transfer 2n bytes and then indicate and 2515 * transfer 2 bytes where the last byte is padding. 
2516 *
2517 * This inconsistency confuses several controllers which
2518 * perform PIO using DMA such as Intel AHCIs and sil3124/32.
2519 * These controllers use actual number of transferred bytes to
2520 * update DMA pointer and transfer of 4n+2 bytes makes those
2521 * controllers push DMA pointer by 4n+4 bytes because SATA data
2522 * FISes are aligned to 4 bytes. This causes data corruption
2523 * and buffer overrun.
2524 *
2525 * Always setting nbytes to even number solves this problem
2526 * because then ATAPI devices don't have to split data at 2n
2527 * boundaries.
2528 */
2529 if (nbytes & 0x1)
2530 nbytes++;
2531
2532 qc->tf.lbam = (nbytes & 0xFF);
2533 qc->tf.lbah = (nbytes >> 8);
2534
2535 if (using_pio || nodata) {
2536 /* no data, or PIO data xfer */
2537 if (nodata)
2538 qc->tf.protocol = ATAPI_PROT_NODATA;
2539 else
2540 qc->tf.protocol = ATAPI_PROT_PIO;
2541 } else {
2542 /* DMA data xfer */
2543 qc->tf.protocol = ATAPI_PROT_DMA;
2544 qc->tf.feature |= ATAPI_PKT_DMA;
2545
2546 if (atapi_dmadir && (scmd->sc_data_direction != DMA_TO_DEVICE))
2547 /* some SATA bridges need us to indicate data xfer direction */
2548 qc->tf.feature |= ATAPI_DMADIR;
2549 }
2550
2551
2552 /* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE
2553 as ATAPI tape drives don't get this right otherwise */
2554 return 0;
2555 }
2556
2557 static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
2558 {
2559 if (ap->nr_pmp_links == 0) {
2560 if (likely(devno < ata_link_max_devices(&ap->link)))
2561 return &ap->link.device[devno];
2562 } else {
2563 if (likely(devno < ap->nr_pmp_links))
2564 return &ap->pmp_link[devno].device[0];
2565 }
2566
2567 return NULL;
2568 }
2569
2570 static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
2571 const struct scsi_device *scsidev)
2572 {
2573 int devno;
2574
2575 /* skip commands not addressed to targets we simulate */
2576 if (ap->nr_pmp_links == 0) {
2577 if (unlikely(scsidev->channel || scsidev->lun))
2578 return NULL;
2579 devno = scsidev->id;
2580 } else {
2581 if (unlikely(scsidev->id || scsidev->lun))
2582 return NULL;
2583 devno = scsidev->channel;
2584 }
2585
2586 return ata_find_dev(ap, devno);
2587 }
2588
2589 /**
2590 * ata_scsi_dev_enabled - determine if device is enabled
2591 * @dev: ATA device
2592 *
2593 * Determine if commands should be sent to the specified device.
2594 *
2595 * LOCKING:
2596 * spin_lock_irqsave(host lock)
2597 *
2598 * RETURNS:
2599 * 0 if commands are not allowed / 1 if commands are allowed
2600 */
2601
2602 static int ata_scsi_dev_enabled(struct ata_device *dev)
2603 {
2604 if (unlikely(!ata_dev_enabled(dev)))
2605 return 0;
2606
2607 if (!atapi_enabled || (dev->link->ap->flags & ATA_FLAG_NO_ATAPI)) {
2608 if (unlikely(dev->class == ATA_DEV_ATAPI)) {
2609 ata_dev_printk(dev, KERN_WARNING,
2610 "WARNING: ATAPI is %s, device ignored.\n",
2611 atapi_enabled ? "not supported with this driver" : "disabled");
2612 return 0;
2613 }
2614 }
2615
2616 return 1;
2617 }
2618
2619 /**
2620 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
2621 * @ap: ATA port to which the device is attached
2622 * @scsidev: SCSI device from which we derive the ATA device
2623 *
2624 * Given various information provided in struct scsi_cmnd,
2625 * map that onto an ATA bus, and using that mapping
2626 * determine which ata_device is associated with the
2627 * SCSI command to be sent.
2628 *
2629 * LOCKING:
2630 * spin_lock_irqsave(host lock)
2631 *
2632 * RETURNS:
2633 * Associated ATA device, or %NULL if not found.
2634 */ 2635 static struct ata_device * 2636 ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev) 2637 { 2638 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev); 2639 2640 if (unlikely(!dev || !ata_scsi_dev_enabled(dev))) 2641 return NULL; 2642 2643 return dev; 2644 } 2645 2646 /* 2647 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value. 2648 * @byte1: Byte 1 from pass-thru CDB. 2649 * 2650 * RETURNS: 2651 * ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise. 2652 */ 2653 static u8 2654 ata_scsi_map_proto(u8 byte1) 2655 { 2656 switch((byte1 & 0x1e) >> 1) { 2657 case 3: /* Non-data */ 2658 return ATA_PROT_NODATA; 2659 2660 case 6: /* DMA */ 2661 case 10: /* UDMA Data-in */ 2662 case 11: /* UDMA Data-Out */ 2663 return ATA_PROT_DMA; 2664 2665 case 4: /* PIO Data-in */ 2666 case 5: /* PIO Data-out */ 2667 return ATA_PROT_PIO; 2668 2669 case 0: /* Hard Reset */ 2670 case 1: /* SRST */ 2671 case 8: /* Device Diagnostic */ 2672 case 9: /* Device Reset */ 2673 case 7: /* DMA Queued */ 2674 case 12: /* FPDMA */ 2675 case 15: /* Return Response Info */ 2676 default: /* Reserved */ 2677 break; 2678 } 2679 2680 return ATA_PROT_UNKNOWN; 2681 } 2682 2683 /** 2684 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile 2685 * @qc: command structure to be initialized 2686 * 2687 * Handles either 12 or 16-byte versions of the CDB. 2688 * 2689 * RETURNS: 2690 * Zero on success, non-zero on failure. 2691 */ 2692 static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) 2693 { 2694 struct ata_taskfile *tf = &(qc->tf); 2695 struct scsi_cmnd *scmd = qc->scsicmd; 2696 struct ata_device *dev = qc->dev; 2697 const u8 *cdb = scmd->cmnd; 2698 2699 if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN) 2700 goto invalid_fld; 2701 2702 /* 2703 * Filter TPM commands by default. These provide an 2704 * essentially uncontrolled encrypted "back door" between 2705 * applications and the disk. Set libata.allow_tpm=1 if you 2706 * have a real reason for wanting to use them. This ensures 2707 * that installed software cannot easily mess stuff up without 2708 * user intent. DVR type users will probably ship with this enabled 2709 * for movie content management. 2710 * 2711 * Note that for ATA8 we can issue a DCS change and DCS freeze lock 2712 * for this and should do in future but that it is not sufficient as 2713 * DCS is an optional feature set. Thus we also do the software filter 2714 * so that we comply with the TC consortium stated goal that the user 2715 * can turn off TC features of their system. 2716 */ 2717 if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) 2718 goto invalid_fld; 2719 2720 /* We may not issue DMA commands if no DMA mode is set */ 2721 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) 2722 goto invalid_fld; 2723 2724 /* 2725 * 12 and 16 byte CDBs use different offsets to 2726 * provide the various register values. 2727 */ 2728 if (cdb[0] == ATA_16) { 2729 /* 2730 * 16-byte CDB - may contain extended commands. 2731 * 2732 * If that is the case, copy the upper byte register values. 2733 */ 2734 if (cdb[1] & 0x01) { 2735 tf->hob_feature = cdb[3]; 2736 tf->hob_nsect = cdb[5]; 2737 tf->hob_lbal = cdb[7]; 2738 tf->hob_lbam = cdb[9]; 2739 tf->hob_lbah = cdb[11]; 2740 tf->flags |= ATA_TFLAG_LBA48; 2741 } else 2742 tf->flags &= ~ATA_TFLAG_LBA48; 2743 2744 /* 2745 * Always copy low byte, device and command registers. 
2746 */ 2747 tf->feature = cdb[4]; 2748 tf->nsect = cdb[6]; 2749 tf->lbal = cdb[8]; 2750 tf->lbam = cdb[10]; 2751 tf->lbah = cdb[12]; 2752 tf->device = cdb[13]; 2753 tf->command = cdb[14]; 2754 } else { 2755 /* 2756 * 12-byte CDB - incapable of extended commands. 2757 */ 2758 tf->flags &= ~ATA_TFLAG_LBA48; 2759 2760 tf->feature = cdb[3]; 2761 tf->nsect = cdb[4]; 2762 tf->lbal = cdb[5]; 2763 tf->lbam = cdb[6]; 2764 tf->lbah = cdb[7]; 2765 tf->device = cdb[8]; 2766 tf->command = cdb[9]; 2767 } 2768 2769 /* enforce correct master/slave bit */ 2770 tf->device = dev->devno ? 2771 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; 2772 2773 /* sanity check for pio multi commands */ 2774 if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) 2775 goto invalid_fld; 2776 2777 if (is_multi_taskfile(tf)) { 2778 unsigned int multi_count = 1 << (cdb[1] >> 5); 2779 2780 /* compare the passed through multi_count 2781 * with the cached multi_count of libata 2782 */ 2783 if (multi_count != dev->multi_count) 2784 ata_dev_printk(dev, KERN_WARNING, 2785 "invalid multi_count %u ignored\n", 2786 multi_count); 2787 } 2788 2789 /* READ/WRITE LONG use a non-standard sect_size */ 2790 qc->sect_size = ATA_SECT_SIZE; 2791 switch (tf->command) { 2792 case ATA_CMD_READ_LONG: 2793 case ATA_CMD_READ_LONG_ONCE: 2794 case ATA_CMD_WRITE_LONG: 2795 case ATA_CMD_WRITE_LONG_ONCE: 2796 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) 2797 goto invalid_fld; 2798 qc->sect_size = scsi_bufflen(scmd); 2799 } 2800 2801 /* 2802 * Filter SET_FEATURES - XFER MODE command -- otherwise, 2803 * SET_FEATURES - XFER MODE must be preceded/succeeded 2804 * by an update to hardware-specific registers for each 2805 * controller (i.e. the reason for ->set_piomode(), 2806 * ->set_dmamode(), and ->post_set_mode() hooks). 2807 */ 2808 if ((tf->command == ATA_CMD_SET_FEATURES) 2809 && (tf->feature == SETFEATURES_XFER)) 2810 goto invalid_fld; 2811 2812 /* 2813 * Set flags so that all registers will be written, 2814 * and pass on write indication (used for PIO/DMA 2815 * setup.) 2816 */ 2817 tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE); 2818 2819 if (scmd->sc_data_direction == DMA_TO_DEVICE) 2820 tf->flags |= ATA_TFLAG_WRITE; 2821 2822 /* 2823 * Set transfer length. 2824 * 2825 * TODO: find out if we need to do more here to 2826 * cover scatter/gather case. 2827 */ 2828 qc->nbytes = scsi_bufflen(scmd); 2829 2830 /* request result TF and be quiet about device error */ 2831 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; 2832 2833 return 0; 2834 2835 invalid_fld: 2836 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00); 2837 /* "Invalid field in cdb" */ 2838 return 1; 2839 } 2840 2841 /** 2842 * ata_get_xlat_func - check if SCSI to ATA translation is possible 2843 * @dev: ATA device 2844 * @cmd: SCSI command opcode to consider 2845 * 2846 * Look up the SCSI command given, and determine whether the 2847 * SCSI command is to be translated or simulated. 2848 * 2849 * RETURNS: 2850 * Pointer to translation function if possible, %NULL if not. 
2851 */ 2852 2853 static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) 2854 { 2855 switch (cmd) { 2856 case READ_6: 2857 case READ_10: 2858 case READ_16: 2859 2860 case WRITE_6: 2861 case WRITE_10: 2862 case WRITE_16: 2863 return ata_scsi_rw_xlat; 2864 2865 case SYNCHRONIZE_CACHE: 2866 if (ata_try_flush_cache(dev)) 2867 return ata_scsi_flush_xlat; 2868 break; 2869 2870 case VERIFY: 2871 case VERIFY_16: 2872 return ata_scsi_verify_xlat; 2873 2874 case ATA_12: 2875 case ATA_16: 2876 return ata_scsi_pass_thru; 2877 2878 case START_STOP: 2879 return ata_scsi_start_stop_xlat; 2880 } 2881 2882 return NULL; 2883 } 2884 2885 /** 2886 * ata_scsi_dump_cdb - dump SCSI command contents to dmesg 2887 * @ap: ATA port to which the command was being sent 2888 * @cmd: SCSI command to dump 2889 * 2890 * Prints the contents of a SCSI command via printk(). 2891 */ 2892 2893 static inline void ata_scsi_dump_cdb(struct ata_port *ap, 2894 struct scsi_cmnd *cmd) 2895 { 2896 #ifdef ATA_DEBUG 2897 struct scsi_device *scsidev = cmd->device; 2898 u8 *scsicmd = cmd->cmnd; 2899 2900 DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", 2901 ap->print_id, 2902 scsidev->channel, scsidev->id, scsidev->lun, 2903 scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3], 2904 scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7], 2905 scsicmd[8]); 2906 #endif 2907 } 2908 2909 static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, 2910 void (*done)(struct scsi_cmnd *), 2911 struct ata_device *dev) 2912 { 2913 u8 scsi_op = scmd->cmnd[0]; 2914 ata_xlat_func_t xlat_func; 2915 int rc = 0; 2916 2917 if (dev->class == ATA_DEV_ATA) { 2918 if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) 2919 goto bad_cdb_len; 2920 2921 xlat_func = ata_get_xlat_func(dev, scsi_op); 2922 } else { 2923 if (unlikely(!scmd->cmd_len)) 2924 goto bad_cdb_len; 2925 2926 xlat_func = NULL; 2927 if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { 2928 /* relay SCSI command to ATAPI device */ 2929 int len = COMMAND_SIZE(scsi_op); 2930 if (unlikely(len > scmd->cmd_len || len > dev->cdb_len)) 2931 goto bad_cdb_len; 2932 2933 xlat_func = atapi_xlat; 2934 } else { 2935 /* ATA_16 passthru, treat as an ATA command */ 2936 if (unlikely(scmd->cmd_len > 16)) 2937 goto bad_cdb_len; 2938 2939 xlat_func = ata_get_xlat_func(dev, scsi_op); 2940 } 2941 } 2942 2943 if (xlat_func) 2944 rc = ata_scsi_translate(dev, scmd, done, xlat_func); 2945 else 2946 ata_scsi_simulate(dev, scmd, done); 2947 2948 return rc; 2949 2950 bad_cdb_len: 2951 DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n", 2952 scmd->cmd_len, scsi_op, dev->cdb_len); 2953 scmd->result = DID_ERROR << 16; 2954 done(scmd); 2955 return 0; 2956 } 2957 2958 /** 2959 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device 2960 * @cmd: SCSI command to be sent 2961 * @done: Completion function, called when command is complete 2962 * 2963 * In some cases, this function translates SCSI commands into 2964 * ATA taskfiles, and queues the taskfiles to be sent to 2965 * hardware. In other cases, this function simulates a 2966 * SCSI device by evaluating and responding to certain 2967 * SCSI commands. This creates the overall effect of 2968 * ATA and ATAPI devices appearing as SCSI devices. 2969 * 2970 * LOCKING: 2971 * Releases scsi-layer-held lock, and obtains host lock. 2972 * 2973 * RETURNS: 2974 * Return value from __ata_scsi_queuecmd() if @cmd can be queued, 2975 * 0 otherwise. 
2976 */ 2977 int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 2978 { 2979 struct ata_port *ap; 2980 struct ata_device *dev; 2981 struct scsi_device *scsidev = cmd->device; 2982 struct Scsi_Host *shost = scsidev->host; 2983 int rc = 0; 2984 2985 ap = ata_shost_to_port(shost); 2986 2987 spin_unlock(shost->host_lock); 2988 spin_lock(ap->lock); 2989 2990 ata_scsi_dump_cdb(ap, cmd); 2991 2992 dev = ata_scsi_find_dev(ap, scsidev); 2993 if (likely(dev)) 2994 rc = __ata_scsi_queuecmd(cmd, done, dev); 2995 else { 2996 cmd->result = (DID_BAD_TARGET << 16); 2997 done(cmd); 2998 } 2999 3000 spin_unlock(ap->lock); 3001 spin_lock(shost->host_lock); 3002 return rc; 3003 } 3004 3005 /** 3006 * ata_scsi_simulate - simulate SCSI command on ATA device 3007 * @dev: the target device 3008 * @cmd: SCSI command being sent to device. 3009 * @done: SCSI command completion function. 3010 * 3011 * Interprets and directly executes a select list of SCSI commands 3012 * that can be handled internally. 3013 * 3014 * LOCKING: 3015 * spin_lock_irqsave(host lock) 3016 */ 3017 3018 void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, 3019 void (*done)(struct scsi_cmnd *)) 3020 { 3021 struct ata_scsi_args args; 3022 const u8 *scsicmd = cmd->cmnd; 3023 u8 tmp8; 3024 3025 args.dev = dev; 3026 args.id = dev->id; 3027 args.cmd = cmd; 3028 args.done = done; 3029 3030 switch(scsicmd[0]) { 3031 /* TODO: worth improving? */ 3032 case FORMAT_UNIT: 3033 ata_scsi_invalid_field(cmd, done); 3034 break; 3035 3036 case INQUIRY: 3037 if (scsicmd[1] & 2) /* is CmdDt set? */ 3038 ata_scsi_invalid_field(cmd, done); 3039 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ 3040 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 3041 else switch (scsicmd[2]) { 3042 case 0x00: 3043 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); 3044 break; 3045 case 0x80: 3046 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); 3047 break; 3048 case 0x83: 3049 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); 3050 break; 3051 case 0x89: 3052 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89); 3053 break; 3054 default: 3055 ata_scsi_invalid_field(cmd, done); 3056 break; 3057 } 3058 break; 3059 3060 case MODE_SENSE: 3061 case MODE_SENSE_10: 3062 ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); 3063 break; 3064 3065 case MODE_SELECT: /* unconditionally return */ 3066 case MODE_SELECT_10: /* bad-field-in-cdb */ 3067 ata_scsi_invalid_field(cmd, done); 3068 break; 3069 3070 case READ_CAPACITY: 3071 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 3072 break; 3073 3074 case SERVICE_ACTION_IN: 3075 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 3076 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 3077 else 3078 ata_scsi_invalid_field(cmd, done); 3079 break; 3080 3081 case REPORT_LUNS: 3082 ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); 3083 break; 3084 3085 case REQUEST_SENSE: 3086 ata_scsi_set_sense(cmd, 0, 0, 0); 3087 cmd->result = (DRIVER_SENSE << 24); 3088 done(cmd); 3089 break; 3090 3091 /* if we reach this, then writeback caching is disabled, 3092 * turning this into a no-op. 
3093 */ 3094 case SYNCHRONIZE_CACHE: 3095 /* fall through */ 3096 3097 /* no-op's, complete with success */ 3098 case REZERO_UNIT: 3099 case SEEK_6: 3100 case SEEK_10: 3101 case TEST_UNIT_READY: 3102 ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 3103 break; 3104 3105 case SEND_DIAGNOSTIC: 3106 tmp8 = scsicmd[1] & ~(1 << 3); 3107 if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4])) 3108 ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 3109 else 3110 ata_scsi_invalid_field(cmd, done); 3111 break; 3112 3113 /* all other commands */ 3114 default: 3115 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); 3116 /* "Invalid command operation code" */ 3117 done(cmd); 3118 break; 3119 } 3120 } 3121 3122 int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) 3123 { 3124 int i, rc; 3125 3126 for (i = 0; i < host->n_ports; i++) { 3127 struct ata_port *ap = host->ports[i]; 3128 struct Scsi_Host *shost; 3129 3130 rc = -ENOMEM; 3131 shost = scsi_host_alloc(sht, sizeof(struct ata_port *)); 3132 if (!shost) 3133 goto err_alloc; 3134 3135 *(struct ata_port **)&shost->hostdata[0] = ap; 3136 ap->scsi_host = shost; 3137 3138 shost->transportt = &ata_scsi_transport_template; 3139 shost->unique_id = ap->print_id; 3140 shost->max_id = 16; 3141 shost->max_lun = 1; 3142 shost->max_channel = 1; 3143 shost->max_cmd_len = 16; 3144 3145 /* Schedule policy is determined by ->qc_defer() 3146 * callback and it needs to see every deferred qc. 3147 * Set host_blocked to 1 to prevent SCSI midlayer from 3148 * automatically deferring requests. 3149 */ 3150 shost->max_host_blocked = 1; 3151 3152 rc = scsi_add_host(ap->scsi_host, ap->host->dev); 3153 if (rc) 3154 goto err_add; 3155 } 3156 3157 return 0; 3158 3159 err_add: 3160 scsi_host_put(host->ports[i]->scsi_host); 3161 err_alloc: 3162 while (--i >= 0) { 3163 struct Scsi_Host *shost = host->ports[i]->scsi_host; 3164 3165 scsi_remove_host(shost); 3166 scsi_host_put(shost); 3167 } 3168 return rc; 3169 } 3170 3171 void ata_scsi_scan_host(struct ata_port *ap, int sync) 3172 { 3173 int tries = 5; 3174 struct ata_device *last_failed_dev = NULL; 3175 struct ata_link *link; 3176 struct ata_device *dev; 3177 3178 if (ap->flags & ATA_FLAG_DISABLED) 3179 return; 3180 3181 repeat: 3182 ata_port_for_each_link(link, ap) { 3183 ata_link_for_each_dev(dev, link) { 3184 struct scsi_device *sdev; 3185 int channel = 0, id = 0; 3186 3187 if (!ata_dev_enabled(dev) || dev->sdev) 3188 continue; 3189 3190 if (ata_is_host_link(link)) 3191 id = dev->devno; 3192 else 3193 channel = link->pmp; 3194 3195 sdev = __scsi_add_device(ap->scsi_host, channel, id, 0, 3196 NULL); 3197 if (!IS_ERR(sdev)) { 3198 dev->sdev = sdev; 3199 scsi_device_put(sdev); 3200 } 3201 } 3202 } 3203 3204 /* If we scanned while EH was in progress or allocation 3205 * failure occurred, scan would have failed silently. Check 3206 * whether all devices are attached. 3207 */ 3208 ata_port_for_each_link(link, ap) { 3209 ata_link_for_each_dev(dev, link) { 3210 if (ata_dev_enabled(dev) && !dev->sdev) 3211 goto exit_loop; 3212 } 3213 } 3214 exit_loop: 3215 if (!link) 3216 return; 3217 3218 /* we're missing some SCSI devices */ 3219 if (sync) { 3220 /* If caller requested synchrnous scan && we've made 3221 * any progress, sleep briefly and repeat. 3222 */ 3223 if (dev != last_failed_dev) { 3224 msleep(100); 3225 last_failed_dev = dev; 3226 goto repeat; 3227 } 3228 3229 /* We might be failing to detect boot device, give it 3230 * a few more chances. 
3231 */ 3232 if (--tries) { 3233 msleep(100); 3234 goto repeat; 3235 } 3236 3237 ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan " 3238 "failed without making any progress,\n" 3239 " switching to async\n"); 3240 } 3241 3242 queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 3243 round_jiffies_relative(HZ)); 3244 } 3245 3246 /** 3247 * ata_scsi_offline_dev - offline attached SCSI device 3248 * @dev: ATA device to offline attached SCSI device for 3249 * 3250 * This function is called from ata_eh_hotplug() and responsible 3251 * for taking the SCSI device attached to @dev offline. This 3252 * function is called with host lock which protects dev->sdev 3253 * against clearing. 3254 * 3255 * LOCKING: 3256 * spin_lock_irqsave(host lock) 3257 * 3258 * RETURNS: 3259 * 1 if attached SCSI device exists, 0 otherwise. 3260 */ 3261 int ata_scsi_offline_dev(struct ata_device *dev) 3262 { 3263 if (dev->sdev) { 3264 scsi_device_set_state(dev->sdev, SDEV_OFFLINE); 3265 return 1; 3266 } 3267 return 0; 3268 } 3269 3270 /** 3271 * ata_scsi_remove_dev - remove attached SCSI device 3272 * @dev: ATA device to remove attached SCSI device for 3273 * 3274 * This function is called from ata_eh_scsi_hotplug() and 3275 * responsible for removing the SCSI device attached to @dev. 3276 * 3277 * LOCKING: 3278 * Kernel thread context (may sleep). 3279 */ 3280 static void ata_scsi_remove_dev(struct ata_device *dev) 3281 { 3282 struct ata_port *ap = dev->link->ap; 3283 struct scsi_device *sdev; 3284 unsigned long flags; 3285 3286 /* Alas, we need to grab scan_mutex to ensure SCSI device 3287 * state doesn't change underneath us and thus 3288 * scsi_device_get() always succeeds. The mutex locking can 3289 * be removed if there is __scsi_device_get() interface which 3290 * increments reference counts regardless of device state. 3291 */ 3292 mutex_lock(&ap->scsi_host->scan_mutex); 3293 spin_lock_irqsave(ap->lock, flags); 3294 3295 /* clearing dev->sdev is protected by host lock */ 3296 sdev = dev->sdev; 3297 dev->sdev = NULL; 3298 3299 if (sdev) { 3300 /* If user initiated unplug races with us, sdev can go 3301 * away underneath us after the host lock and 3302 * scan_mutex are released. Hold onto it. 3303 */ 3304 if (scsi_device_get(sdev) == 0) { 3305 /* The following ensures the attached sdev is 3306 * offline on return from ata_scsi_offline_dev() 3307 * regardless it wins or loses the race 3308 * against this function. 3309 */ 3310 scsi_device_set_state(sdev, SDEV_OFFLINE); 3311 } else { 3312 WARN_ON(1); 3313 sdev = NULL; 3314 } 3315 } 3316 3317 spin_unlock_irqrestore(ap->lock, flags); 3318 mutex_unlock(&ap->scsi_host->scan_mutex); 3319 3320 if (sdev) { 3321 ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n", 3322 sdev->sdev_gendev.bus_id); 3323 3324 scsi_remove_device(sdev); 3325 scsi_device_put(sdev); 3326 } 3327 } 3328 3329 static void ata_scsi_handle_link_detach(struct ata_link *link) 3330 { 3331 struct ata_port *ap = link->ap; 3332 struct ata_device *dev; 3333 3334 ata_link_for_each_dev(dev, link) { 3335 unsigned long flags; 3336 3337 if (!(dev->flags & ATA_DFLAG_DETACHED)) 3338 continue; 3339 3340 spin_lock_irqsave(ap->lock, flags); 3341 dev->flags &= ~ATA_DFLAG_DETACHED; 3342 spin_unlock_irqrestore(ap->lock, flags); 3343 3344 ata_scsi_remove_dev(dev); 3345 } 3346 } 3347 3348 /** 3349 * ata_scsi_media_change_notify - send media change event 3350 * @dev: Pointer to the disk device with media change event 3351 * 3352 * Tell the block layer to send a media change notification 3353 * event. 
3354 *
3355 * LOCKING:
3356 * spin_lock_irqsave(host lock)
3357 */
3358 void ata_scsi_media_change_notify(struct ata_device *dev)
3359 {
3360 if (dev->sdev)
3361 sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
3362 GFP_ATOMIC);
3363 }
3364
3365 /**
3366 * ata_scsi_hotplug - SCSI part of hotplug
3367 * @work: Pointer to ATA port to perform SCSI hotplug on
3368 *
3369 * Perform SCSI part of hotplug. It's executed from a separate
3370 * workqueue after EH completes. This is necessary because SCSI
3371 * hot plugging requires working EH and hot unplugging is
3372 * synchronized with hot plugging with a mutex.
3373 *
3374 * LOCKING:
3375 * Kernel thread context (may sleep).
3376 */
3377 void ata_scsi_hotplug(struct work_struct *work)
3378 {
3379 struct ata_port *ap =
3380 container_of(work, struct ata_port, hotplug_task.work);
3381 int i;
3382
3383 if (ap->pflags & ATA_PFLAG_UNLOADING) {
3384 DPRINTK("ENTER/EXIT - unloading\n");
3385 return;
3386 }
3387
3388 DPRINTK("ENTER\n");
3389
3390 /* Unplug detached devices. We cannot use link iterator here
3391 * because PMP links have to be scanned even if PMP is
3392 * currently not attached. Iterate manually.
3393 */
3394 ata_scsi_handle_link_detach(&ap->link);
3395 if (ap->pmp_link)
3396 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
3397 ata_scsi_handle_link_detach(&ap->pmp_link[i]);
3398
3399 /* scan for new ones */
3400 ata_scsi_scan_host(ap, 0);
3401
3402 DPRINTK("EXIT\n");
3403 }
3404
3405 /**
3406 * ata_scsi_user_scan - indication for user-initiated bus scan
3407 * @shost: SCSI host to scan
3408 * @channel: Channel to scan
3409 * @id: ID to scan
3410 * @lun: LUN to scan
3411 *
3412 * This function is called when user explicitly requests bus
3413 * scan. Set probe pending flag and invoke EH.
3414 *
3415 * LOCKING:
3416 * SCSI layer (we don't care)
3417 *
3418 * RETURNS:
3419 * Zero on success, negative errno on error.
3420 */ 3421 static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, 3422 unsigned int id, unsigned int lun) 3423 { 3424 struct ata_port *ap = ata_shost_to_port(shost); 3425 unsigned long flags; 3426 int devno, rc = 0; 3427 3428 if (!ap->ops->error_handler) 3429 return -EOPNOTSUPP; 3430 3431 if (lun != SCAN_WILD_CARD && lun) 3432 return -EINVAL; 3433 3434 if (ap->nr_pmp_links == 0) { 3435 if (channel != SCAN_WILD_CARD && channel) 3436 return -EINVAL; 3437 devno = id; 3438 } else { 3439 if (id != SCAN_WILD_CARD && id) 3440 return -EINVAL; 3441 devno = channel; 3442 } 3443 3444 spin_lock_irqsave(ap->lock, flags); 3445 3446 if (devno == SCAN_WILD_CARD) { 3447 struct ata_link *link; 3448 3449 ata_port_for_each_link(link, ap) { 3450 struct ata_eh_info *ehi = &link->eh_info; 3451 ehi->probe_mask |= (1 << ata_link_max_devices(link)) - 1; 3452 ehi->action |= ATA_EH_SOFTRESET; 3453 } 3454 } else { 3455 struct ata_device *dev = ata_find_dev(ap, devno); 3456 3457 if (dev) { 3458 struct ata_eh_info *ehi = &dev->link->eh_info; 3459 ehi->probe_mask |= 1 << dev->devno; 3460 ehi->action |= ATA_EH_SOFTRESET; 3461 ehi->flags |= ATA_EHI_RESUME_LINK; 3462 } else 3463 rc = -EINVAL; 3464 } 3465 3466 if (rc == 0) { 3467 ata_port_schedule_eh(ap); 3468 spin_unlock_irqrestore(ap->lock, flags); 3469 ata_port_wait_eh(ap); 3470 } else 3471 spin_unlock_irqrestore(ap->lock, flags); 3472 3473 return rc; 3474 } 3475 3476 /** 3477 * ata_scsi_dev_rescan - initiate scsi_rescan_device() 3478 * @work: Pointer to ATA port to perform scsi_rescan_device() 3479 * 3480 * After ATA pass thru (SAT) commands are executed successfully, 3481 * libata need to propagate the changes to SCSI layer. This 3482 * function must be executed from ata_aux_wq such that sdev 3483 * attach/detach don't race with rescan. 3484 * 3485 * LOCKING: 3486 * Kernel thread context (may sleep). 3487 */ 3488 void ata_scsi_dev_rescan(struct work_struct *work) 3489 { 3490 struct ata_port *ap = 3491 container_of(work, struct ata_port, scsi_rescan_task); 3492 struct ata_link *link; 3493 struct ata_device *dev; 3494 unsigned long flags; 3495 3496 spin_lock_irqsave(ap->lock, flags); 3497 3498 ata_port_for_each_link(link, ap) { 3499 ata_link_for_each_dev(dev, link) { 3500 struct scsi_device *sdev = dev->sdev; 3501 3502 if (!ata_dev_enabled(dev) || !sdev) 3503 continue; 3504 if (scsi_device_get(sdev)) 3505 continue; 3506 3507 spin_unlock_irqrestore(ap->lock, flags); 3508 scsi_rescan_device(&(sdev->sdev_gendev)); 3509 scsi_device_put(sdev); 3510 spin_lock_irqsave(ap->lock, flags); 3511 } 3512 } 3513 3514 spin_unlock_irqrestore(ap->lock, flags); 3515 } 3516 3517 /** 3518 * ata_sas_port_alloc - Allocate port for a SAS attached SATA device 3519 * @host: ATA host container for all SAS ports 3520 * @port_info: Information from low-level host driver 3521 * @shost: SCSI host that the scsi device is attached to 3522 * 3523 * LOCKING: 3524 * PCI/etc. bus probe sem. 3525 * 3526 * RETURNS: 3527 * ata_port pointer on success / NULL on failure. 
3528 */
3529
3530 struct ata_port *ata_sas_port_alloc(struct ata_host *host,
3531 struct ata_port_info *port_info,
3532 struct Scsi_Host *shost)
3533 {
3534 struct ata_port *ap;
3535
3536 ap = ata_port_alloc(host);
3537 if (!ap)
3538 return NULL;
3539
3540 ap->port_no = 0;
3541 ap->lock = shost->host_lock;
3542 ap->pio_mask = port_info->pio_mask;
3543 ap->mwdma_mask = port_info->mwdma_mask;
3544 ap->udma_mask = port_info->udma_mask;
3545 ap->flags |= port_info->flags;
3546 ap->ops = port_info->port_ops;
3547 ap->cbl = ATA_CBL_SATA;
3548
3549 return ap;
3550 }
3551 EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
3552
3553 /**
3554 * ata_sas_port_start - Set port up for DMA.
3555 * @ap: Port to initialize
3556 *
3557 * Called just after data structures for each port are
3558 * initialized. Allocates DMA pad.
3559 *
3560 * May be used as the port_start() entry in ata_port_operations.
3561 *
3562 * LOCKING:
3563 * Inherited from caller.
3564 */
3565 int ata_sas_port_start(struct ata_port *ap)
3566 {
3567 return ata_pad_alloc(ap, ap->dev);
3568 }
3569 EXPORT_SYMBOL_GPL(ata_sas_port_start);
3570
3571 /**
3572 * ata_sas_port_stop - Undo ata_sas_port_start()
3573 * @ap: Port to shut down
3574 *
3575 * Frees the DMA pad.
3576 *
3577 * May be used as the port_stop() entry in ata_port_operations.
3578 *
3579 * LOCKING:
3580 * Inherited from caller.
3581 */
3582
3583 void ata_sas_port_stop(struct ata_port *ap)
3584 {
3585 ata_pad_free(ap, ap->dev);
3586 }
3587 EXPORT_SYMBOL_GPL(ata_sas_port_stop);
3588
3589 /**
3590 * ata_sas_port_init - Initialize a SATA device
3591 * @ap: SATA port to initialize
3592 *
3593 * LOCKING:
3594 * PCI/etc. bus probe sem.
3595 *
3596 * RETURNS:
3597 * Zero on success, non-zero on error.
3598 */
3599
3600 int ata_sas_port_init(struct ata_port *ap)
3601 {
3602 int rc = ap->ops->port_start(ap);
3603
3604 if (!rc) {
3605 ap->print_id = ata_print_id++;
3606 rc = ata_bus_probe(ap);
3607 }
3608
3609 return rc;
3610 }
3611 EXPORT_SYMBOL_GPL(ata_sas_port_init);
3612
3613 /**
3614 * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
3615 * @ap: SATA port to destroy
3616 *
3617 */
3618
3619 void ata_sas_port_destroy(struct ata_port *ap)
3620 {
3621 if (ap->ops->port_stop)
3622 ap->ops->port_stop(ap);
3623 kfree(ap);
3624 }
3625 EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
3626
3627 /**
3628 * ata_sas_slave_configure - Default slave_config routine for libata devices
3629 * @sdev: SCSI device to configure
3630 * @ap: ATA port to which SCSI device is attached
3631 *
3632 * RETURNS:
3633 * Zero.
3634 */
3635
3636 int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
3637 {
3638 ata_scsi_sdev_config(sdev);
3639 ata_scsi_dev_config(sdev, ap->link.device);
3640 return 0;
3641 }
3642 EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
3643
3644 /**
3645 * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
3646 * @cmd: SCSI command to be sent
3647 * @done: Completion function, called when command is complete
3648 * @ap: ATA port to which the command is being sent
3649 *
3650 * RETURNS:
3651 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
3652 * 0 otherwise.
3653 */ 3654 3655 int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), 3656 struct ata_port *ap) 3657 { 3658 int rc = 0; 3659 3660 ata_scsi_dump_cdb(ap, cmd); 3661 3662 if (likely(ata_scsi_dev_enabled(ap->link.device))) 3663 rc = __ata_scsi_queuecmd(cmd, done, ap->link.device); 3664 else { 3665 cmd->result = (DID_BAD_TARGET << 16); 3666 done(cmd); 3667 } 3668 return rc; 3669 } 3670 EXPORT_SYMBOL_GPL(ata_sas_queuecmd); 3671