1 /* 2 * libata-core.c - helper library for ATA 3 * 4 * Maintained by: Tejun Heo <tj@kernel.org> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails. 7 * 8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved. 9 * Copyright 2003-2004 Jeff Garzik 10 * 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2, or (at your option) 15 * any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; see the file COPYING. If not, write to 24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 25 * 26 * 27 * libata documentation is available via 'make {ps|pdf}docs', 28 * as Documentation/DocBook/libata.* 29 * 30 * Hardware documentation available from http://www.t13.org/ and 31 * http://www.sata-io.org/ 32 * 33 * Standards documents from: 34 * http://www.t13.org (ATA standards, PCI DMA IDE spec) 35 * http://www.t10.org (SCSI MMC - for ATAPI MMC) 36 * http://www.sata-io.org (SATA) 37 * http://www.compactflash.org (CF) 38 * http://www.qic.org (QIC157 - Tape and DSC) 39 * http://www.ce-ata.org (CE-ATA: not supported) 40 * 41 */ 42 43 #include <linux/kernel.h> 44 #include <linux/module.h> 45 #include <linux/pci.h> 46 #include <linux/init.h> 47 #include <linux/list.h> 48 #include <linux/mm.h> 49 #include <linux/spinlock.h> 50 #include <linux/blkdev.h> 51 #include <linux/delay.h> 52 #include <linux/timer.h> 53 #include <linux/interrupt.h> 54 #include <linux/completion.h> 55 #include <linux/suspend.h> 56 #include <linux/workqueue.h> 57 #include <linux/scatterlist.h> 58 #include <linux/io.h> 59 #include <linux/async.h> 60 #include <linux/log2.h> 61 #include <linux/slab.h> 62 #include <linux/glob.h> 63 #include <scsi/scsi.h> 64 #include <scsi/scsi_cmnd.h> 65 #include <scsi/scsi_host.h> 66 #include <linux/libata.h> 67 #include <asm/byteorder.h> 68 #include <linux/cdrom.h> 69 #include <linux/ratelimit.h> 70 #include <linux/pm_runtime.h> 71 #include <linux/platform_device.h> 72 73 #include "libata.h" 74 #include "libata-transport.h" 75 76 /* debounce timing parameters in msecs { interval, duration, timeout } */ 77 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; 78 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; 79 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; 80 81 const struct ata_port_operations ata_base_port_ops = { 82 .prereset = ata_std_prereset, 83 .postreset = ata_std_postreset, 84 .error_handler = ata_std_error_handler, 85 .sched_eh = ata_std_sched_eh, 86 .end_eh = ata_std_end_eh, 87 }; 88 89 const struct ata_port_operations sata_port_ops = { 90 .inherits = &ata_base_port_ops, 91 92 .qc_defer = ata_std_qc_defer, 93 .hardreset = sata_std_hardreset, 94 }; 95 96 static unsigned int ata_dev_init_params(struct ata_device *dev, 97 u16 heads, u16 sectors); 98 static unsigned int ata_dev_set_xfermode(struct ata_device *dev); 99 static void ata_dev_xfermask(struct ata_device *dev); 100 static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 101 102 atomic_t ata_print_id = ATOMIC_INIT(0); 103 104 struct ata_force_param { 105 const char *name; 106 
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}

/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link.
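 *
 *	Illustrative usage: callers normally reach this helper through
 *	the ata_for_each_link() wrapper rather than calling it directly.
 *	That iterator is roughly equivalent to:
 *
 *		struct ata_link *link = NULL;
 *
 *		while ((link = ata_link_next(link, ap, ATA_LITER_EDGE)))
 *			handle_link(link);	(handle_link() is hypothetical)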
190 */ 191 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap, 192 enum ata_link_iter_mode mode) 193 { 194 BUG_ON(mode != ATA_LITER_EDGE && 195 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST); 196 197 /* NULL link indicates start of iteration */ 198 if (!link) 199 switch (mode) { 200 case ATA_LITER_EDGE: 201 case ATA_LITER_PMP_FIRST: 202 if (sata_pmp_attached(ap)) 203 return ap->pmp_link; 204 /* fall through */ 205 case ATA_LITER_HOST_FIRST: 206 return &ap->link; 207 } 208 209 /* we just iterated over the host link, what's next? */ 210 if (link == &ap->link) 211 switch (mode) { 212 case ATA_LITER_HOST_FIRST: 213 if (sata_pmp_attached(ap)) 214 return ap->pmp_link; 215 /* fall through */ 216 case ATA_LITER_PMP_FIRST: 217 if (unlikely(ap->slave_link)) 218 return ap->slave_link; 219 /* fall through */ 220 case ATA_LITER_EDGE: 221 return NULL; 222 } 223 224 /* slave_link excludes PMP */ 225 if (unlikely(link == ap->slave_link)) 226 return NULL; 227 228 /* we were over a PMP link */ 229 if (++link < ap->pmp_link + ap->nr_pmp_links) 230 return link; 231 232 if (mode == ATA_LITER_PMP_FIRST) 233 return &ap->link; 234 235 return NULL; 236 } 237 238 /** 239 * ata_dev_next - device iteration helper 240 * @dev: the previous device, NULL to start 241 * @link: ATA link containing devices to iterate 242 * @mode: iteration mode, one of ATA_DITER_* 243 * 244 * LOCKING: 245 * Host lock or EH context. 246 * 247 * RETURNS: 248 * Pointer to the next device. 249 */ 250 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link, 251 enum ata_dev_iter_mode mode) 252 { 253 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE && 254 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE); 255 256 /* NULL dev indicates start of iteration */ 257 if (!dev) 258 switch (mode) { 259 case ATA_DITER_ENABLED: 260 case ATA_DITER_ALL: 261 dev = link->device; 262 goto check; 263 case ATA_DITER_ENABLED_REVERSE: 264 case ATA_DITER_ALL_REVERSE: 265 dev = link->device + ata_link_max_devices(link) - 1; 266 goto check; 267 } 268 269 next: 270 /* move to the next one */ 271 switch (mode) { 272 case ATA_DITER_ENABLED: 273 case ATA_DITER_ALL: 274 if (++dev < link->device + ata_link_max_devices(link)) 275 goto check; 276 return NULL; 277 case ATA_DITER_ENABLED_REVERSE: 278 case ATA_DITER_ALL_REVERSE: 279 if (--dev >= link->device) 280 goto check; 281 return NULL; 282 } 283 284 check: 285 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) && 286 !ata_dev_enabled(dev)) 287 goto next; 288 return dev; 289 } 290 291 /** 292 * ata_dev_phys_link - find physical link for a device 293 * @dev: ATA device to look up physical link for 294 * 295 * Look up physical link which @dev is attached to. Note that 296 * this is different from @dev->link only when @dev is on slave 297 * link. For all other cases, it's the same as @dev->link. 298 * 299 * LOCKING: 300 * Don't care. 301 * 302 * RETURNS: 303 * Pointer to the found physical link. 304 */ 305 struct ata_link *ata_dev_phys_link(struct ata_device *dev) 306 { 307 struct ata_port *ap = dev->link->ap; 308 309 if (!ap->slave_link) 310 return dev->link; 311 if (!dev->devno) 312 return &ap->link; 313 return ap->slave_link; 314 } 315 316 /** 317 * ata_force_cbl - force cable type according to libata.force 318 * @ap: ATA port of interest 319 * 320 * Force cable type according to libata.force and whine about it. 
321 * The last entry which has matching port number is used, so it 322 * can be specified as part of device force parameters. For 323 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the 324 * same effect. 325 * 326 * LOCKING: 327 * EH context. 328 */ 329 void ata_force_cbl(struct ata_port *ap) 330 { 331 int i; 332 333 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 334 const struct ata_force_ent *fe = &ata_force_tbl[i]; 335 336 if (fe->port != -1 && fe->port != ap->print_id) 337 continue; 338 339 if (fe->param.cbl == ATA_CBL_NONE) 340 continue; 341 342 ap->cbl = fe->param.cbl; 343 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name); 344 return; 345 } 346 } 347 348 /** 349 * ata_force_link_limits - force link limits according to libata.force 350 * @link: ATA link of interest 351 * 352 * Force link flags and SATA spd limit according to libata.force 353 * and whine about it. When only the port part is specified 354 * (e.g. 1:), the limit applies to all links connected to both 355 * the host link and all fan-out ports connected via PMP. If the 356 * device part is specified as 0 (e.g. 1.00:), it specifies the 357 * first fan-out link not the host link. Device number 15 always 358 * points to the host link whether PMP is attached or not. If the 359 * controller has slave link, device number 16 points to it. 360 * 361 * LOCKING: 362 * EH context. 363 */ 364 static void ata_force_link_limits(struct ata_link *link) 365 { 366 bool did_spd = false; 367 int linkno = link->pmp; 368 int i; 369 370 if (ata_is_host_link(link)) 371 linkno += 15; 372 373 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 374 const struct ata_force_ent *fe = &ata_force_tbl[i]; 375 376 if (fe->port != -1 && fe->port != link->ap->print_id) 377 continue; 378 379 if (fe->device != -1 && fe->device != linkno) 380 continue; 381 382 /* only honor the first spd limit */ 383 if (!did_spd && fe->param.spd_limit) { 384 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1; 385 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n", 386 fe->param.name); 387 did_spd = true; 388 } 389 390 /* let lflags stack */ 391 if (fe->param.lflags) { 392 link->flags |= fe->param.lflags; 393 ata_link_notice(link, 394 "FORCE: link flag 0x%x forced -> 0x%x\n", 395 fe->param.lflags, link->flags); 396 } 397 } 398 } 399 400 /** 401 * ata_force_xfermask - force xfermask according to libata.force 402 * @dev: ATA device of interest 403 * 404 * Force xfer_mask according to libata.force and whine about it. 405 * For consistency with link selection, device number 15 selects 406 * the first device connected to the host link. 407 * 408 * LOCKING: 409 * EH context. 
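 *
 *	For example (illustrative; see Documentation/kernel-parameters.txt
 *	for the authoritative syntax), booting with "libata.force=1.00:udma4"
 *	caps the first device on port 1 at UDMA/66 by rewriting its
 *	udma_mask here.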
410 */ 411 static void ata_force_xfermask(struct ata_device *dev) 412 { 413 int devno = dev->link->pmp + dev->devno; 414 int alt_devno = devno; 415 int i; 416 417 /* allow n.15/16 for devices attached to host port */ 418 if (ata_is_host_link(dev->link)) 419 alt_devno += 15; 420 421 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 422 const struct ata_force_ent *fe = &ata_force_tbl[i]; 423 unsigned long pio_mask, mwdma_mask, udma_mask; 424 425 if (fe->port != -1 && fe->port != dev->link->ap->print_id) 426 continue; 427 428 if (fe->device != -1 && fe->device != devno && 429 fe->device != alt_devno) 430 continue; 431 432 if (!fe->param.xfer_mask) 433 continue; 434 435 ata_unpack_xfermask(fe->param.xfer_mask, 436 &pio_mask, &mwdma_mask, &udma_mask); 437 if (udma_mask) 438 dev->udma_mask = udma_mask; 439 else if (mwdma_mask) { 440 dev->udma_mask = 0; 441 dev->mwdma_mask = mwdma_mask; 442 } else { 443 dev->udma_mask = 0; 444 dev->mwdma_mask = 0; 445 dev->pio_mask = pio_mask; 446 } 447 448 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n", 449 fe->param.name); 450 return; 451 } 452 } 453 454 /** 455 * ata_force_horkage - force horkage according to libata.force 456 * @dev: ATA device of interest 457 * 458 * Force horkage according to libata.force and whine about it. 459 * For consistency with link selection, device number 15 selects 460 * the first device connected to the host link. 461 * 462 * LOCKING: 463 * EH context. 464 */ 465 static void ata_force_horkage(struct ata_device *dev) 466 { 467 int devno = dev->link->pmp + dev->devno; 468 int alt_devno = devno; 469 int i; 470 471 /* allow n.15/16 for devices attached to host port */ 472 if (ata_is_host_link(dev->link)) 473 alt_devno += 15; 474 475 for (i = 0; i < ata_force_tbl_size; i++) { 476 const struct ata_force_ent *fe = &ata_force_tbl[i]; 477 478 if (fe->port != -1 && fe->port != dev->link->ap->print_id) 479 continue; 480 481 if (fe->device != -1 && fe->device != devno && 482 fe->device != alt_devno) 483 continue; 484 485 if (!(~dev->horkage & fe->param.horkage_on) && 486 !(dev->horkage & fe->param.horkage_off)) 487 continue; 488 489 dev->horkage |= fe->param.horkage_on; 490 dev->horkage &= ~fe->param.horkage_off; 491 492 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n", 493 fe->param.name); 494 } 495 } 496 497 /** 498 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode 499 * @opcode: SCSI opcode 500 * 501 * Determine ATAPI command type from @opcode. 502 * 503 * LOCKING: 504 * None. 505 * 506 * RETURNS: 507 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC} 508 */ 509 int atapi_cmd_type(u8 opcode) 510 { 511 switch (opcode) { 512 case GPCMD_READ_10: 513 case GPCMD_READ_12: 514 return ATAPI_READ; 515 516 case GPCMD_WRITE_10: 517 case GPCMD_WRITE_12: 518 case GPCMD_WRITE_AND_VERIFY_10: 519 return ATAPI_WRITE; 520 521 case GPCMD_READ_CD: 522 case GPCMD_READ_CD_MSF: 523 return ATAPI_READ_CD; 524 525 case ATA_16: 526 case ATA_12: 527 if (atapi_passthru16) 528 return ATAPI_PASS_THRU; 529 /* fall thru */ 530 default: 531 return ATAPI_MISC; 532 } 533 } 534 535 /** 536 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 537 * @tf: Taskfile to convert 538 * @pmp: Port multiplier port 539 * @is_cmd: This FIS is for command 540 * @fis: Buffer into which data will output 541 * 542 * Converts a standard ATA taskfile to a Serial ATA 543 * FIS structure (Register - Host to Device). 544 * 545 * LOCKING: 546 * Inherited from caller. 
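 *
 *	Resulting 20-byte FIS layout, as produced by the assignments
 *	below: byte 0 is the FIS type (0x27, Register - Host to Device),
 *	byte 1 carries the PMP port in bits 0-3 and the Command bit in
 *	bit 7, bytes 2-3 hold command/feature, bytes 4-7 the LBA
 *	low/mid/high and device registers, bytes 8-11 the HOB LBA and
 *	feature values, bytes 12-13 the sector counts, byte 14 is
 *	reserved (zero), byte 15 the control register, and bytes 16-19
 *	the auxiliary field in little-endian order.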
547 */ 548 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) 549 { 550 fis[0] = 0x27; /* Register - Host to Device FIS */ 551 fis[1] = pmp & 0xf; /* Port multiplier number*/ 552 if (is_cmd) 553 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ 554 555 fis[2] = tf->command; 556 fis[3] = tf->feature; 557 558 fis[4] = tf->lbal; 559 fis[5] = tf->lbam; 560 fis[6] = tf->lbah; 561 fis[7] = tf->device; 562 563 fis[8] = tf->hob_lbal; 564 fis[9] = tf->hob_lbam; 565 fis[10] = tf->hob_lbah; 566 fis[11] = tf->hob_feature; 567 568 fis[12] = tf->nsect; 569 fis[13] = tf->hob_nsect; 570 fis[14] = 0; 571 fis[15] = tf->ctl; 572 573 fis[16] = tf->auxiliary & 0xff; 574 fis[17] = (tf->auxiliary >> 8) & 0xff; 575 fis[18] = (tf->auxiliary >> 16) & 0xff; 576 fis[19] = (tf->auxiliary >> 24) & 0xff; 577 } 578 579 /** 580 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile 581 * @fis: Buffer from which data will be input 582 * @tf: Taskfile to output 583 * 584 * Converts a serial ATA FIS structure to a standard ATA taskfile. 585 * 586 * LOCKING: 587 * Inherited from caller. 588 */ 589 590 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) 591 { 592 tf->command = fis[2]; /* status */ 593 tf->feature = fis[3]; /* error */ 594 595 tf->lbal = fis[4]; 596 tf->lbam = fis[5]; 597 tf->lbah = fis[6]; 598 tf->device = fis[7]; 599 600 tf->hob_lbal = fis[8]; 601 tf->hob_lbam = fis[9]; 602 tf->hob_lbah = fis[10]; 603 604 tf->nsect = fis[12]; 605 tf->hob_nsect = fis[13]; 606 } 607 608 static const u8 ata_rw_cmds[] = { 609 /* pio multi */ 610 ATA_CMD_READ_MULTI, 611 ATA_CMD_WRITE_MULTI, 612 ATA_CMD_READ_MULTI_EXT, 613 ATA_CMD_WRITE_MULTI_EXT, 614 0, 615 0, 616 0, 617 ATA_CMD_WRITE_MULTI_FUA_EXT, 618 /* pio */ 619 ATA_CMD_PIO_READ, 620 ATA_CMD_PIO_WRITE, 621 ATA_CMD_PIO_READ_EXT, 622 ATA_CMD_PIO_WRITE_EXT, 623 0, 624 0, 625 0, 626 0, 627 /* dma */ 628 ATA_CMD_READ, 629 ATA_CMD_WRITE, 630 ATA_CMD_READ_EXT, 631 ATA_CMD_WRITE_EXT, 632 0, 633 0, 634 0, 635 ATA_CMD_WRITE_FUA_EXT 636 }; 637 638 /** 639 * ata_rwcmd_protocol - set taskfile r/w commands and protocol 640 * @tf: command to examine and configure 641 * @dev: device tf belongs to 642 * 643 * Examine the device configuration and tf->flags to calculate 644 * the proper read/write commands and protocol to use. 645 * 646 * LOCKING: 647 * caller. 648 */ 649 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) 650 { 651 u8 cmd; 652 653 int index, fua, lba48, write; 654 655 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; 656 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; 657 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; 658 659 if (dev->flags & ATA_DFLAG_PIO) { 660 tf->protocol = ATA_PROT_PIO; 661 index = dev->multi_count ? 0 : 8; 662 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { 663 /* Unable to use DMA due to host limitation */ 664 tf->protocol = ATA_PROT_PIO; 665 index = dev->multi_count ? 0 : 8; 666 } else { 667 tf->protocol = ATA_PROT_DMA; 668 index = 16; 669 } 670 671 cmd = ata_rw_cmds[index + fua + lba48 + write]; 672 if (cmd) { 673 tf->command = cmd; 674 return 0; 675 } 676 return -1; 677 } 678 679 /** 680 * ata_tf_read_block - Read block address from ATA taskfile 681 * @tf: ATA taskfile of interest 682 * @dev: ATA device @tf belongs to 683 * 684 * LOCKING: 685 * None. 686 * 687 * Read block address from @tf. This function can handle all 688 * three address formats - LBA, LBA48 and CHS. tf->protocol and 689 * flags select the address format to use. 
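 *
 *	For the CHS case the conversion below is
 *	block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
 *	e.g. with 16 heads and 63 sectors per track (illustrative values),
 *	C/H/S 1/0/1 decodes to LBA 1008.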
690 * 691 * RETURNS: 692 * Block address read from @tf. 693 */ 694 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) 695 { 696 u64 block = 0; 697 698 if (tf->flags & ATA_TFLAG_LBA) { 699 if (tf->flags & ATA_TFLAG_LBA48) { 700 block |= (u64)tf->hob_lbah << 40; 701 block |= (u64)tf->hob_lbam << 32; 702 block |= (u64)tf->hob_lbal << 24; 703 } else 704 block |= (tf->device & 0xf) << 24; 705 706 block |= tf->lbah << 16; 707 block |= tf->lbam << 8; 708 block |= tf->lbal; 709 } else { 710 u32 cyl, head, sect; 711 712 cyl = tf->lbam | (tf->lbah << 8); 713 head = tf->device & 0xf; 714 sect = tf->lbal; 715 716 if (!sect) { 717 ata_dev_warn(dev, 718 "device reported invalid CHS sector 0\n"); 719 sect = 1; /* oh well */ 720 } 721 722 block = (cyl * dev->heads + head) * dev->sectors + sect - 1; 723 } 724 725 return block; 726 } 727 728 /** 729 * ata_build_rw_tf - Build ATA taskfile for given read/write request 730 * @tf: Target ATA taskfile 731 * @dev: ATA device @tf belongs to 732 * @block: Block address 733 * @n_block: Number of blocks 734 * @tf_flags: RW/FUA etc... 735 * @tag: tag 736 * 737 * LOCKING: 738 * None. 739 * 740 * Build ATA taskfile @tf for read/write request described by 741 * @block, @n_block, @tf_flags and @tag on @dev. 742 * 743 * RETURNS: 744 * 745 * 0 on success, -ERANGE if the request is too large for @dev, 746 * -EINVAL if the request is invalid. 747 */ 748 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 749 u64 block, u32 n_block, unsigned int tf_flags, 750 unsigned int tag) 751 { 752 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 753 tf->flags |= tf_flags; 754 755 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) { 756 /* yay, NCQ */ 757 if (!lba_48_ok(block, n_block)) 758 return -ERANGE; 759 760 tf->protocol = ATA_PROT_NCQ; 761 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 762 763 if (tf->flags & ATA_TFLAG_WRITE) 764 tf->command = ATA_CMD_FPDMA_WRITE; 765 else 766 tf->command = ATA_CMD_FPDMA_READ; 767 768 tf->nsect = tag << 3; 769 tf->hob_feature = (n_block >> 8) & 0xff; 770 tf->feature = n_block & 0xff; 771 772 tf->hob_lbah = (block >> 40) & 0xff; 773 tf->hob_lbam = (block >> 32) & 0xff; 774 tf->hob_lbal = (block >> 24) & 0xff; 775 tf->lbah = (block >> 16) & 0xff; 776 tf->lbam = (block >> 8) & 0xff; 777 tf->lbal = block & 0xff; 778 779 tf->device = ATA_LBA; 780 if (tf->flags & ATA_TFLAG_FUA) 781 tf->device |= 1 << 7; 782 } else if (dev->flags & ATA_DFLAG_LBA) { 783 tf->flags |= ATA_TFLAG_LBA; 784 785 if (lba_28_ok(block, n_block)) { 786 /* use LBA28 */ 787 tf->device |= (block >> 24) & 0xf; 788 } else if (lba_48_ok(block, n_block)) { 789 if (!(dev->flags & ATA_DFLAG_LBA48)) 790 return -ERANGE; 791 792 /* use LBA48 */ 793 tf->flags |= ATA_TFLAG_LBA48; 794 795 tf->hob_nsect = (n_block >> 8) & 0xff; 796 797 tf->hob_lbah = (block >> 40) & 0xff; 798 tf->hob_lbam = (block >> 32) & 0xff; 799 tf->hob_lbal = (block >> 24) & 0xff; 800 } else 801 /* request too large even for LBA48 */ 802 return -ERANGE; 803 804 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 805 return -EINVAL; 806 807 tf->nsect = n_block & 0xff; 808 809 tf->lbah = (block >> 16) & 0xff; 810 tf->lbam = (block >> 8) & 0xff; 811 tf->lbal = block & 0xff; 812 813 tf->device |= ATA_LBA; 814 } else { 815 /* CHS */ 816 u32 sect, head, cyl, track; 817 818 /* The request -may- be too large for CHS addressing. 
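		   (The lba_28_ok() check below bounds the request: roughly,
		   the starting block must fit in 28 bits and the length in
		   the 8-bit sector count, where 0 encodes 256.)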
*/ 819 if (!lba_28_ok(block, n_block)) 820 return -ERANGE; 821 822 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 823 return -EINVAL; 824 825 /* Convert LBA to CHS */ 826 track = (u32)block / dev->sectors; 827 cyl = track / dev->heads; 828 head = track % dev->heads; 829 sect = (u32)block % dev->sectors + 1; 830 831 DPRINTK("block %u track %u cyl %u head %u sect %u\n", 832 (u32)block, track, cyl, head, sect); 833 834 /* Check whether the converted CHS can fit. 835 Cylinder: 0-65535 836 Head: 0-15 837 Sector: 1-255*/ 838 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) 839 return -ERANGE; 840 841 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ 842 tf->lbal = sect; 843 tf->lbam = cyl; 844 tf->lbah = cyl >> 8; 845 tf->device |= head; 846 } 847 848 return 0; 849 } 850 851 /** 852 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask 853 * @pio_mask: pio_mask 854 * @mwdma_mask: mwdma_mask 855 * @udma_mask: udma_mask 856 * 857 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single 858 * unsigned int xfer_mask. 859 * 860 * LOCKING: 861 * None. 862 * 863 * RETURNS: 864 * Packed xfer_mask. 865 */ 866 unsigned long ata_pack_xfermask(unsigned long pio_mask, 867 unsigned long mwdma_mask, 868 unsigned long udma_mask) 869 { 870 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) | 871 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) | 872 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); 873 } 874 875 /** 876 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks 877 * @xfer_mask: xfer_mask to unpack 878 * @pio_mask: resulting pio_mask 879 * @mwdma_mask: resulting mwdma_mask 880 * @udma_mask: resulting udma_mask 881 * 882 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask. 883 * Any NULL distination masks will be ignored. 884 */ 885 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask, 886 unsigned long *mwdma_mask, unsigned long *udma_mask) 887 { 888 if (pio_mask) 889 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO; 890 if (mwdma_mask) 891 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA; 892 if (udma_mask) 893 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA; 894 } 895 896 static const struct ata_xfer_ent { 897 int shift, bits; 898 u8 base; 899 } ata_xfer_tbl[] = { 900 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 }, 901 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 }, 902 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 }, 903 { -1, }, 904 }; 905 906 /** 907 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask 908 * @xfer_mask: xfer_mask of interest 909 * 910 * Return matching XFER_* value for @xfer_mask. Only the highest 911 * bit of @xfer_mask is considered. 912 * 913 * LOCKING: 914 * None. 915 * 916 * RETURNS: 917 * Matching XFER_* value, 0xff if no match found. 918 */ 919 u8 ata_xfer_mask2mode(unsigned long xfer_mask) 920 { 921 int highbit = fls(xfer_mask) - 1; 922 const struct ata_xfer_ent *ent; 923 924 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 925 if (highbit >= ent->shift && highbit < ent->shift + ent->bits) 926 return ent->base + highbit - ent->shift; 927 return 0xff; 928 } 929 930 /** 931 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_* 932 * @xfer_mode: XFER_* of interest 933 * 934 * Return matching xfer_mask for @xfer_mode. 935 * 936 * LOCKING: 937 * None. 938 * 939 * RETURNS: 940 * Matching xfer_mask, 0 if no match found. 
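 *	The returned mask covers every mode up to and including
 *	@xfer_mode within its class; e.g. XFER_MW_DMA_2 yields a mask
 *	with the MWDMA0, MWDMA1 and MWDMA2 bits set.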
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify a SEMB device.
1062 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and 1063 * 0x69/0x96 shortly and described them as reserved for 1064 * SerialATA. 1065 * 1066 * We follow the current spec and consider that 0x69/0x96 1067 * identifies a port multiplier and 0x3c/0xc3 a SEMB device. 1068 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports 1069 * SEMB signature. This is worked around in 1070 * ata_dev_read_id(). 1071 */ 1072 if ((tf->lbam == 0) && (tf->lbah == 0)) { 1073 DPRINTK("found ATA device by sig\n"); 1074 return ATA_DEV_ATA; 1075 } 1076 1077 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) { 1078 DPRINTK("found ATAPI device by sig\n"); 1079 return ATA_DEV_ATAPI; 1080 } 1081 1082 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) { 1083 DPRINTK("found PMP device by sig\n"); 1084 return ATA_DEV_PMP; 1085 } 1086 1087 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { 1088 DPRINTK("found SEMB device by sig (could be ATA device)\n"); 1089 return ATA_DEV_SEMB; 1090 } 1091 1092 if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) { 1093 DPRINTK("found ZAC device by sig\n"); 1094 return ATA_DEV_ZAC; 1095 } 1096 1097 DPRINTK("unknown device\n"); 1098 return ATA_DEV_UNKNOWN; 1099 } 1100 1101 /** 1102 * ata_id_string - Convert IDENTIFY DEVICE page into string 1103 * @id: IDENTIFY DEVICE results we will examine 1104 * @s: string into which data is output 1105 * @ofs: offset into identify device page 1106 * @len: length of string to return. must be an even number. 1107 * 1108 * The strings in the IDENTIFY DEVICE page are broken up into 1109 * 16-bit chunks. Run through the string, and output each 1110 * 8-bit chunk linearly, regardless of platform. 1111 * 1112 * LOCKING: 1113 * caller. 1114 */ 1115 1116 void ata_id_string(const u16 *id, unsigned char *s, 1117 unsigned int ofs, unsigned int len) 1118 { 1119 unsigned int c; 1120 1121 BUG_ON(len & 1); 1122 1123 while (len > 0) { 1124 c = id[ofs] >> 8; 1125 *s = c; 1126 s++; 1127 1128 c = id[ofs] & 0xff; 1129 *s = c; 1130 s++; 1131 1132 ofs++; 1133 len -= 2; 1134 } 1135 } 1136 1137 /** 1138 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string 1139 * @id: IDENTIFY DEVICE results we will examine 1140 * @s: string into which data is output 1141 * @ofs: offset into identify device page 1142 * @len: length of string to return. must be an odd number. 1143 * 1144 * This function is identical to ata_id_string except that it 1145 * trims trailing spaces and terminates the resulting string with 1146 * null. @len must be actual maximum length (even number) + 1. 1147 * 1148 * LOCKING: 1149 * caller. 
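 *
 *	Typical use (as in ata_dev_configure() later in this file):
 *
 *		char modelbuf[ATA_ID_PROD_LEN + 1];
 *
 *		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
 *				sizeof(modelbuf));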
1150 */ 1151 void ata_id_c_string(const u16 *id, unsigned char *s, 1152 unsigned int ofs, unsigned int len) 1153 { 1154 unsigned char *p; 1155 1156 ata_id_string(id, s, ofs, len - 1); 1157 1158 p = s + strnlen(s, len - 1); 1159 while (p > s && p[-1] == ' ') 1160 p--; 1161 *p = '\0'; 1162 } 1163 1164 static u64 ata_id_n_sectors(const u16 *id) 1165 { 1166 if (ata_id_has_lba(id)) { 1167 if (ata_id_has_lba48(id)) 1168 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); 1169 else 1170 return ata_id_u32(id, ATA_ID_LBA_CAPACITY); 1171 } else { 1172 if (ata_id_current_chs_valid(id)) 1173 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] * 1174 id[ATA_ID_CUR_SECTORS]; 1175 else 1176 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * 1177 id[ATA_ID_SECTORS]; 1178 } 1179 } 1180 1181 u64 ata_tf_to_lba48(const struct ata_taskfile *tf) 1182 { 1183 u64 sectors = 0; 1184 1185 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; 1186 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; 1187 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24; 1188 sectors |= (tf->lbah & 0xff) << 16; 1189 sectors |= (tf->lbam & 0xff) << 8; 1190 sectors |= (tf->lbal & 0xff); 1191 1192 return sectors; 1193 } 1194 1195 u64 ata_tf_to_lba(const struct ata_taskfile *tf) 1196 { 1197 u64 sectors = 0; 1198 1199 sectors |= (tf->device & 0x0f) << 24; 1200 sectors |= (tf->lbah & 0xff) << 16; 1201 sectors |= (tf->lbam & 0xff) << 8; 1202 sectors |= (tf->lbal & 0xff); 1203 1204 return sectors; 1205 } 1206 1207 /** 1208 * ata_read_native_max_address - Read native max address 1209 * @dev: target device 1210 * @max_sectors: out parameter for the result native max address 1211 * 1212 * Perform an LBA48 or LBA28 native size query upon the device in 1213 * question. 1214 * 1215 * RETURNS: 1216 * 0 on success, -EACCES if command is aborted by the drive. 1217 * -EIO on other errors. 1218 */ 1219 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) 1220 { 1221 unsigned int err_mask; 1222 struct ata_taskfile tf; 1223 int lba48 = ata_id_has_lba48(dev->id); 1224 1225 ata_tf_init(dev, &tf); 1226 1227 /* always clear all address registers */ 1228 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 1229 1230 if (lba48) { 1231 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT; 1232 tf.flags |= ATA_TFLAG_LBA48; 1233 } else 1234 tf.command = ATA_CMD_READ_NATIVE_MAX; 1235 1236 tf.protocol |= ATA_PROT_NODATA; 1237 tf.device |= ATA_LBA; 1238 1239 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1240 if (err_mask) { 1241 ata_dev_warn(dev, 1242 "failed to read native max address (err_mask=0x%x)\n", 1243 err_mask); 1244 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 1245 return -EACCES; 1246 return -EIO; 1247 } 1248 1249 if (lba48) 1250 *max_sectors = ata_tf_to_lba48(&tf) + 1; 1251 else 1252 *max_sectors = ata_tf_to_lba(&tf) + 1; 1253 if (dev->horkage & ATA_HORKAGE_HPA_SIZE) 1254 (*max_sectors)--; 1255 return 0; 1256 } 1257 1258 /** 1259 * ata_set_max_sectors - Set max sectors 1260 * @dev: target device 1261 * @new_sectors: new max sectors value to set for the device 1262 * 1263 * Set max sectors of @dev to @new_sectors. 1264 * 1265 * RETURNS: 1266 * 0 on success, -EACCES if command is aborted or denied (due to 1267 * previous non-volatile SET_MAX) by the drive. -EIO on other 1268 * errors. 
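 *
 *	Note that @new_sectors is a sector count; the code below stores
 *	@new_sectors - 1, i.e. the highest addressable LBA, in the
 *	SET MAX (EXT) taskfile registers.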
1269 */ 1270 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors) 1271 { 1272 unsigned int err_mask; 1273 struct ata_taskfile tf; 1274 int lba48 = ata_id_has_lba48(dev->id); 1275 1276 new_sectors--; 1277 1278 ata_tf_init(dev, &tf); 1279 1280 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 1281 1282 if (lba48) { 1283 tf.command = ATA_CMD_SET_MAX_EXT; 1284 tf.flags |= ATA_TFLAG_LBA48; 1285 1286 tf.hob_lbal = (new_sectors >> 24) & 0xff; 1287 tf.hob_lbam = (new_sectors >> 32) & 0xff; 1288 tf.hob_lbah = (new_sectors >> 40) & 0xff; 1289 } else { 1290 tf.command = ATA_CMD_SET_MAX; 1291 1292 tf.device |= (new_sectors >> 24) & 0xf; 1293 } 1294 1295 tf.protocol |= ATA_PROT_NODATA; 1296 tf.device |= ATA_LBA; 1297 1298 tf.lbal = (new_sectors >> 0) & 0xff; 1299 tf.lbam = (new_sectors >> 8) & 0xff; 1300 tf.lbah = (new_sectors >> 16) & 0xff; 1301 1302 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1303 if (err_mask) { 1304 ata_dev_warn(dev, 1305 "failed to set max address (err_mask=0x%x)\n", 1306 err_mask); 1307 if (err_mask == AC_ERR_DEV && 1308 (tf.feature & (ATA_ABORTED | ATA_IDNF))) 1309 return -EACCES; 1310 return -EIO; 1311 } 1312 1313 return 0; 1314 } 1315 1316 /** 1317 * ata_hpa_resize - Resize a device with an HPA set 1318 * @dev: Device to resize 1319 * 1320 * Read the size of an LBA28 or LBA48 disk with HPA features and resize 1321 * it if required to the full size of the media. The caller must check 1322 * the drive has the HPA feature set enabled. 1323 * 1324 * RETURNS: 1325 * 0 on success, -errno on failure. 1326 */ 1327 static int ata_hpa_resize(struct ata_device *dev) 1328 { 1329 struct ata_eh_context *ehc = &dev->link->eh_context; 1330 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 1331 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA; 1332 u64 sectors = ata_id_n_sectors(dev->id); 1333 u64 native_sectors; 1334 int rc; 1335 1336 /* do we need to do it? */ 1337 if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) || 1338 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) || 1339 (dev->horkage & ATA_HORKAGE_BROKEN_HPA)) 1340 return 0; 1341 1342 /* read native max address */ 1343 rc = ata_read_native_max_address(dev, &native_sectors); 1344 if (rc) { 1345 /* If device aborted the command or HPA isn't going to 1346 * be unlocked, skip HPA resizing. 1347 */ 1348 if (rc == -EACCES || !unlock_hpa) { 1349 ata_dev_warn(dev, 1350 "HPA support seems broken, skipping HPA handling\n"); 1351 dev->horkage |= ATA_HORKAGE_BROKEN_HPA; 1352 1353 /* we can continue if device aborted the command */ 1354 if (rc == -EACCES) 1355 rc = 0; 1356 } 1357 1358 return rc; 1359 } 1360 dev->n_native_sectors = native_sectors; 1361 1362 /* nothing to do? 
*/ 1363 if (native_sectors <= sectors || !unlock_hpa) { 1364 if (!print_info || native_sectors == sectors) 1365 return 0; 1366 1367 if (native_sectors > sectors) 1368 ata_dev_info(dev, 1369 "HPA detected: current %llu, native %llu\n", 1370 (unsigned long long)sectors, 1371 (unsigned long long)native_sectors); 1372 else if (native_sectors < sectors) 1373 ata_dev_warn(dev, 1374 "native sectors (%llu) is smaller than sectors (%llu)\n", 1375 (unsigned long long)native_sectors, 1376 (unsigned long long)sectors); 1377 return 0; 1378 } 1379 1380 /* let's unlock HPA */ 1381 rc = ata_set_max_sectors(dev, native_sectors); 1382 if (rc == -EACCES) { 1383 /* if device aborted the command, skip HPA resizing */ 1384 ata_dev_warn(dev, 1385 "device aborted resize (%llu -> %llu), skipping HPA handling\n", 1386 (unsigned long long)sectors, 1387 (unsigned long long)native_sectors); 1388 dev->horkage |= ATA_HORKAGE_BROKEN_HPA; 1389 return 0; 1390 } else if (rc) 1391 return rc; 1392 1393 /* re-read IDENTIFY data */ 1394 rc = ata_dev_reread_id(dev, 0); 1395 if (rc) { 1396 ata_dev_err(dev, 1397 "failed to re-read IDENTIFY data after HPA resizing\n"); 1398 return rc; 1399 } 1400 1401 if (print_info) { 1402 u64 new_sectors = ata_id_n_sectors(dev->id); 1403 ata_dev_info(dev, 1404 "HPA unlocked: %llu -> %llu, native %llu\n", 1405 (unsigned long long)sectors, 1406 (unsigned long long)new_sectors, 1407 (unsigned long long)native_sectors); 1408 } 1409 1410 return 0; 1411 } 1412 1413 /** 1414 * ata_dump_id - IDENTIFY DEVICE info debugging output 1415 * @id: IDENTIFY DEVICE page to dump 1416 * 1417 * Dump selected 16-bit words from the given IDENTIFY DEVICE 1418 * page. 1419 * 1420 * LOCKING: 1421 * caller. 1422 */ 1423 1424 static inline void ata_dump_id(const u16 *id) 1425 { 1426 DPRINTK("49==0x%04x " 1427 "53==0x%04x " 1428 "63==0x%04x " 1429 "64==0x%04x " 1430 "75==0x%04x \n", 1431 id[49], 1432 id[53], 1433 id[63], 1434 id[64], 1435 id[75]); 1436 DPRINTK("80==0x%04x " 1437 "81==0x%04x " 1438 "82==0x%04x " 1439 "83==0x%04x " 1440 "84==0x%04x \n", 1441 id[80], 1442 id[81], 1443 id[82], 1444 id[83], 1445 id[84]); 1446 DPRINTK("88==0x%04x " 1447 "93==0x%04x\n", 1448 id[88], 1449 id[93]); 1450 } 1451 1452 /** 1453 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data 1454 * @id: IDENTIFY data to compute xfer mask from 1455 * 1456 * Compute the xfermask for this device. This is not as trivial 1457 * as it seems if we must consider early devices correctly. 1458 * 1459 * FIXME: pre IDE drive timing (do we care ?). 1460 * 1461 * LOCKING: 1462 * None. 1463 * 1464 * RETURNS: 1465 * Computed xfermask 1466 */ 1467 unsigned long ata_id_xfermask(const u16 *id) 1468 { 1469 unsigned long pio_mask, mwdma_mask, udma_mask; 1470 1471 /* Usual case. Word 53 indicates word 64 is valid */ 1472 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) { 1473 pio_mask = id[ATA_ID_PIO_MODES] & 0x03; 1474 pio_mask <<= 3; 1475 pio_mask |= 0x7; 1476 } else { 1477 /* If word 64 isn't valid then Word 51 high byte holds 1478 * the PIO timing number for the maximum. Turn it into 1479 * a mask. 1480 */ 1481 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF; 1482 if (mode < 5) /* Valid PIO range */ 1483 pio_mask = (2 << mode) - 1; 1484 else 1485 pio_mask = 1; 1486 1487 /* But wait.. there's more. Design your standards by 1488 * committee and you too can get a free iordy field to 1489 * process. However its the speeds not the modes that 1490 * are supported... 
Note drivers using the timing API 1491 * will get this right anyway 1492 */ 1493 } 1494 1495 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07; 1496 1497 if (ata_id_is_cfa(id)) { 1498 /* 1499 * Process compact flash extended modes 1500 */ 1501 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7; 1502 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7; 1503 1504 if (pio) 1505 pio_mask |= (1 << 5); 1506 if (pio > 1) 1507 pio_mask |= (1 << 6); 1508 if (dma) 1509 mwdma_mask |= (1 << 3); 1510 if (dma > 1) 1511 mwdma_mask |= (1 << 4); 1512 } 1513 1514 udma_mask = 0; 1515 if (id[ATA_ID_FIELD_VALID] & (1 << 2)) 1516 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff; 1517 1518 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 1519 } 1520 1521 static void ata_qc_complete_internal(struct ata_queued_cmd *qc) 1522 { 1523 struct completion *waiting = qc->private_data; 1524 1525 complete(waiting); 1526 } 1527 1528 /** 1529 * ata_exec_internal_sg - execute libata internal command 1530 * @dev: Device to which the command is sent 1531 * @tf: Taskfile registers for the command and the result 1532 * @cdb: CDB for packet command 1533 * @dma_dir: Data transfer direction of the command 1534 * @sgl: sg list for the data buffer of the command 1535 * @n_elem: Number of sg entries 1536 * @timeout: Timeout in msecs (0 for default) 1537 * 1538 * Executes libata internal command with timeout. @tf contains 1539 * command on entry and result on return. Timeout and error 1540 * conditions are reported via return value. No recovery action 1541 * is taken after a command times out. It's caller's duty to 1542 * clean up after timeout. 1543 * 1544 * LOCKING: 1545 * None. Should be called with kernel context, might sleep. 1546 * 1547 * RETURNS: 1548 * Zero on success, AC_ERR_* mask on failure 1549 */ 1550 unsigned ata_exec_internal_sg(struct ata_device *dev, 1551 struct ata_taskfile *tf, const u8 *cdb, 1552 int dma_dir, struct scatterlist *sgl, 1553 unsigned int n_elem, unsigned long timeout) 1554 { 1555 struct ata_link *link = dev->link; 1556 struct ata_port *ap = link->ap; 1557 u8 command = tf->command; 1558 int auto_timeout = 0; 1559 struct ata_queued_cmd *qc; 1560 unsigned int tag, preempted_tag; 1561 u32 preempted_sactive, preempted_qc_active; 1562 int preempted_nr_active_links; 1563 DECLARE_COMPLETION_ONSTACK(wait); 1564 unsigned long flags; 1565 unsigned int err_mask; 1566 int rc; 1567 1568 spin_lock_irqsave(ap->lock, flags); 1569 1570 /* no internal command while frozen */ 1571 if (ap->pflags & ATA_PFLAG_FROZEN) { 1572 spin_unlock_irqrestore(ap->lock, flags); 1573 return AC_ERR_SYSTEM; 1574 } 1575 1576 /* initialize internal qc */ 1577 1578 /* XXX: Tag 0 is used for drivers with legacy EH as some 1579 * drivers choke if any other tag is given. This breaks 1580 * ata_tag_internal() test for those drivers. Don't use new 1581 * EH stuff without converting to it. 
1582 */ 1583 if (ap->ops->error_handler) 1584 tag = ATA_TAG_INTERNAL; 1585 else 1586 tag = 0; 1587 1588 if (test_and_set_bit(tag, &ap->qc_allocated)) 1589 BUG(); 1590 qc = __ata_qc_from_tag(ap, tag); 1591 1592 qc->tag = tag; 1593 qc->scsicmd = NULL; 1594 qc->ap = ap; 1595 qc->dev = dev; 1596 ata_qc_reinit(qc); 1597 1598 preempted_tag = link->active_tag; 1599 preempted_sactive = link->sactive; 1600 preempted_qc_active = ap->qc_active; 1601 preempted_nr_active_links = ap->nr_active_links; 1602 link->active_tag = ATA_TAG_POISON; 1603 link->sactive = 0; 1604 ap->qc_active = 0; 1605 ap->nr_active_links = 0; 1606 1607 /* prepare & issue qc */ 1608 qc->tf = *tf; 1609 if (cdb) 1610 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); 1611 1612 /* some SATA bridges need us to indicate data xfer direction */ 1613 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) && 1614 dma_dir == DMA_FROM_DEVICE) 1615 qc->tf.feature |= ATAPI_DMADIR; 1616 1617 qc->flags |= ATA_QCFLAG_RESULT_TF; 1618 qc->dma_dir = dma_dir; 1619 if (dma_dir != DMA_NONE) { 1620 unsigned int i, buflen = 0; 1621 struct scatterlist *sg; 1622 1623 for_each_sg(sgl, sg, n_elem, i) 1624 buflen += sg->length; 1625 1626 ata_sg_init(qc, sgl, n_elem); 1627 qc->nbytes = buflen; 1628 } 1629 1630 qc->private_data = &wait; 1631 qc->complete_fn = ata_qc_complete_internal; 1632 1633 ata_qc_issue(qc); 1634 1635 spin_unlock_irqrestore(ap->lock, flags); 1636 1637 if (!timeout) { 1638 if (ata_probe_timeout) 1639 timeout = ata_probe_timeout * 1000; 1640 else { 1641 timeout = ata_internal_cmd_timeout(dev, command); 1642 auto_timeout = 1; 1643 } 1644 } 1645 1646 if (ap->ops->error_handler) 1647 ata_eh_release(ap); 1648 1649 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); 1650 1651 if (ap->ops->error_handler) 1652 ata_eh_acquire(ap); 1653 1654 ata_sff_flush_pio_task(ap); 1655 1656 if (!rc) { 1657 spin_lock_irqsave(ap->lock, flags); 1658 1659 /* We're racing with irq here. If we lose, the 1660 * following test prevents us from completing the qc 1661 * twice. If we win, the port is frozen and will be 1662 * cleaned up by ->post_internal_cmd(). 
1663 */ 1664 if (qc->flags & ATA_QCFLAG_ACTIVE) { 1665 qc->err_mask |= AC_ERR_TIMEOUT; 1666 1667 if (ap->ops->error_handler) 1668 ata_port_freeze(ap); 1669 else 1670 ata_qc_complete(qc); 1671 1672 if (ata_msg_warn(ap)) 1673 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n", 1674 command); 1675 } 1676 1677 spin_unlock_irqrestore(ap->lock, flags); 1678 } 1679 1680 /* do post_internal_cmd */ 1681 if (ap->ops->post_internal_cmd) 1682 ap->ops->post_internal_cmd(qc); 1683 1684 /* perform minimal error analysis */ 1685 if (qc->flags & ATA_QCFLAG_FAILED) { 1686 if (qc->result_tf.command & (ATA_ERR | ATA_DF)) 1687 qc->err_mask |= AC_ERR_DEV; 1688 1689 if (!qc->err_mask) 1690 qc->err_mask |= AC_ERR_OTHER; 1691 1692 if (qc->err_mask & ~AC_ERR_OTHER) 1693 qc->err_mask &= ~AC_ERR_OTHER; 1694 } 1695 1696 /* finish up */ 1697 spin_lock_irqsave(ap->lock, flags); 1698 1699 *tf = qc->result_tf; 1700 err_mask = qc->err_mask; 1701 1702 ata_qc_free(qc); 1703 link->active_tag = preempted_tag; 1704 link->sactive = preempted_sactive; 1705 ap->qc_active = preempted_qc_active; 1706 ap->nr_active_links = preempted_nr_active_links; 1707 1708 spin_unlock_irqrestore(ap->lock, flags); 1709 1710 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout) 1711 ata_internal_cmd_timed_out(dev, command); 1712 1713 return err_mask; 1714 } 1715 1716 /** 1717 * ata_exec_internal - execute libata internal command 1718 * @dev: Device to which the command is sent 1719 * @tf: Taskfile registers for the command and the result 1720 * @cdb: CDB for packet command 1721 * @dma_dir: Data transfer direction of the command 1722 * @buf: Data buffer of the command 1723 * @buflen: Length of data buffer 1724 * @timeout: Timeout in msecs (0 for default) 1725 * 1726 * Wrapper around ata_exec_internal_sg() which takes simple 1727 * buffer instead of sg list. 1728 * 1729 * LOCKING: 1730 * None. Should be called with kernel context, might sleep. 1731 * 1732 * RETURNS: 1733 * Zero on success, AC_ERR_* mask on failure 1734 */ 1735 unsigned ata_exec_internal(struct ata_device *dev, 1736 struct ata_taskfile *tf, const u8 *cdb, 1737 int dma_dir, void *buf, unsigned int buflen, 1738 unsigned long timeout) 1739 { 1740 struct scatterlist *psg = NULL, sg; 1741 unsigned int n_elem = 0; 1742 1743 if (dma_dir != DMA_NONE) { 1744 WARN_ON(!buf); 1745 sg_init_one(&sg, buf, buflen); 1746 psg = &sg; 1747 n_elem++; 1748 } 1749 1750 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem, 1751 timeout); 1752 } 1753 1754 /** 1755 * ata_do_simple_cmd - execute simple internal command 1756 * @dev: Device to which the command is sent 1757 * @cmd: Opcode to execute 1758 * 1759 * Execute a 'simple' command, that only consists of the opcode 1760 * 'cmd' itself, without filling any other registers 1761 * 1762 * LOCKING: 1763 * Kernel thread context (may sleep). 1764 * 1765 * RETURNS: 1766 * Zero on success, AC_ERR_* mask on failure 1767 */ 1768 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) 1769 { 1770 struct ata_taskfile tf; 1771 1772 ata_tf_init(dev, &tf); 1773 1774 tf.command = cmd; 1775 tf.flags |= ATA_TFLAG_DEVICE; 1776 tf.protocol = ATA_PROT_NODATA; 1777 1778 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1779 } 1780 1781 /** 1782 * ata_pio_need_iordy - check if iordy needed 1783 * @adev: ATA device 1784 * 1785 * Check if the current speed of the device requires IORDY. Used 1786 * by various controllers for chip configuration. 
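 *
 *	Illustrative usage (not tied to any particular driver): a PATA
 *	controller driver would typically call this from its
 *	->set_piomode() hook and enable IORDY sampling in its timing
 *	registers only when it returns nonzero.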
1787 */ 1788 unsigned int ata_pio_need_iordy(const struct ata_device *adev) 1789 { 1790 /* Don't set IORDY if we're preparing for reset. IORDY may 1791 * lead to controller lock up on certain controllers if the 1792 * port is not occupied. See bko#11703 for details. 1793 */ 1794 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING) 1795 return 0; 1796 /* Controller doesn't support IORDY. Probably a pointless 1797 * check as the caller should know this. 1798 */ 1799 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) 1800 return 0; 1801 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */ 1802 if (ata_id_is_cfa(adev->id) 1803 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6)) 1804 return 0; 1805 /* PIO3 and higher it is mandatory */ 1806 if (adev->pio_mode > XFER_PIO_2) 1807 return 1; 1808 /* We turn it on when possible */ 1809 if (ata_id_has_iordy(adev->id)) 1810 return 1; 1811 return 0; 1812 } 1813 1814 /** 1815 * ata_pio_mask_no_iordy - Return the non IORDY mask 1816 * @adev: ATA device 1817 * 1818 * Compute the highest mode possible if we are not using iordy. Return 1819 * -1 if no iordy mode is available. 1820 */ 1821 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) 1822 { 1823 /* If we have no drive specific rule, then PIO 2 is non IORDY */ 1824 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */ 1825 u16 pio = adev->id[ATA_ID_EIDE_PIO]; 1826 /* Is the speed faster than the drive allows non IORDY ? */ 1827 if (pio) { 1828 /* This is cycle times not frequency - watch the logic! */ 1829 if (pio > 240) /* PIO2 is 240nS per cycle */ 1830 return 3 << ATA_SHIFT_PIO; 1831 return 7 << ATA_SHIFT_PIO; 1832 } 1833 } 1834 return 3 << ATA_SHIFT_PIO; 1835 } 1836 1837 /** 1838 * ata_do_dev_read_id - default ID read method 1839 * @dev: device 1840 * @tf: proposed taskfile 1841 * @id: data buffer 1842 * 1843 * Issue the identify taskfile and hand back the buffer containing 1844 * identify data. For some RAID controllers and for pre ATA devices 1845 * this function is wrapped or replaced by the driver 1846 */ 1847 unsigned int ata_do_dev_read_id(struct ata_device *dev, 1848 struct ata_taskfile *tf, u16 *id) 1849 { 1850 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE, 1851 id, sizeof(id[0]) * ATA_ID_WORDS, 0); 1852 } 1853 1854 /** 1855 * ata_dev_read_id - Read ID data from the specified device 1856 * @dev: target device 1857 * @p_class: pointer to class of the target device (may be changed) 1858 * @flags: ATA_READID_* flags 1859 * @id: buffer to read IDENTIFY data into 1860 * 1861 * Read ID data from the specified device. ATA_CMD_ID_ATA is 1862 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI 1863 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS 1864 * for pre-ATA4 drives. 1865 * 1866 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right 1867 * now we abort if we hit that case. 1868 * 1869 * LOCKING: 1870 * Kernel thread context (may sleep) 1871 * 1872 * RETURNS: 1873 * 0 on success, -errno otherwise. 
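 *
 *	On success the 256-word IDENTIFY data in @id has already been
 *	byte-swapped to CPU order (see the swap_buf_le16() call below),
 *	so callers may index it directly with the ATA_ID_* word offsets.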
1874 */ 1875 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 1876 unsigned int flags, u16 *id) 1877 { 1878 struct ata_port *ap = dev->link->ap; 1879 unsigned int class = *p_class; 1880 struct ata_taskfile tf; 1881 unsigned int err_mask = 0; 1882 const char *reason; 1883 bool is_semb = class == ATA_DEV_SEMB; 1884 int may_fallback = 1, tried_spinup = 0; 1885 int rc; 1886 1887 if (ata_msg_ctl(ap)) 1888 ata_dev_dbg(dev, "%s: ENTER\n", __func__); 1889 1890 retry: 1891 ata_tf_init(dev, &tf); 1892 1893 switch (class) { 1894 case ATA_DEV_SEMB: 1895 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */ 1896 case ATA_DEV_ATA: 1897 case ATA_DEV_ZAC: 1898 tf.command = ATA_CMD_ID_ATA; 1899 break; 1900 case ATA_DEV_ATAPI: 1901 tf.command = ATA_CMD_ID_ATAPI; 1902 break; 1903 default: 1904 rc = -ENODEV; 1905 reason = "unsupported class"; 1906 goto err_out; 1907 } 1908 1909 tf.protocol = ATA_PROT_PIO; 1910 1911 /* Some devices choke if TF registers contain garbage. Make 1912 * sure those are properly initialized. 1913 */ 1914 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1915 1916 /* Device presence detection is unreliable on some 1917 * controllers. Always poll IDENTIFY if available. 1918 */ 1919 tf.flags |= ATA_TFLAG_POLLING; 1920 1921 if (ap->ops->read_id) 1922 err_mask = ap->ops->read_id(dev, &tf, id); 1923 else 1924 err_mask = ata_do_dev_read_id(dev, &tf, id); 1925 1926 if (err_mask) { 1927 if (err_mask & AC_ERR_NODEV_HINT) { 1928 ata_dev_dbg(dev, "NODEV after polling detection\n"); 1929 return -ENOENT; 1930 } 1931 1932 if (is_semb) { 1933 ata_dev_info(dev, 1934 "IDENTIFY failed on device w/ SEMB sig, disabled\n"); 1935 /* SEMB is not supported yet */ 1936 *p_class = ATA_DEV_SEMB_UNSUP; 1937 return 0; 1938 } 1939 1940 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { 1941 /* Device or controller might have reported 1942 * the wrong device class. Give a shot at the 1943 * other IDENTIFY if the current one is 1944 * aborted by the device. 1945 */ 1946 if (may_fallback) { 1947 may_fallback = 0; 1948 1949 if (class == ATA_DEV_ATA) 1950 class = ATA_DEV_ATAPI; 1951 else 1952 class = ATA_DEV_ATA; 1953 goto retry; 1954 } 1955 1956 /* Control reaches here iff the device aborted 1957 * both flavors of IDENTIFYs which happens 1958 * sometimes with phantom devices. 1959 */ 1960 ata_dev_dbg(dev, 1961 "both IDENTIFYs aborted, assuming NODEV\n"); 1962 return -ENOENT; 1963 } 1964 1965 rc = -EIO; 1966 reason = "I/O error"; 1967 goto err_out; 1968 } 1969 1970 if (dev->horkage & ATA_HORKAGE_DUMP_ID) { 1971 ata_dev_dbg(dev, "dumping IDENTIFY data, " 1972 "class=%d may_fallback=%d tried_spinup=%d\n", 1973 class, may_fallback, tried_spinup); 1974 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 1975 16, 2, id, ATA_ID_WORDS * sizeof(*id), true); 1976 } 1977 1978 /* Falling back doesn't make sense if ID data was read 1979 * successfully at least once. 
1980 */ 1981 may_fallback = 0; 1982 1983 swap_buf_le16(id, ATA_ID_WORDS); 1984 1985 /* sanity check */ 1986 rc = -EINVAL; 1987 reason = "device reports invalid type"; 1988 1989 if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) { 1990 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) 1991 goto err_out; 1992 if (ap->host->flags & ATA_HOST_IGNORE_ATA && 1993 ata_id_is_ata(id)) { 1994 ata_dev_dbg(dev, 1995 "host indicates ignore ATA devices, ignored\n"); 1996 return -ENOENT; 1997 } 1998 } else { 1999 if (ata_id_is_ata(id)) 2000 goto err_out; 2001 } 2002 2003 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) { 2004 tried_spinup = 1; 2005 /* 2006 * Drive powered-up in standby mode, and requires a specific 2007 * SET_FEATURES spin-up subcommand before it will accept 2008 * anything other than the original IDENTIFY command. 2009 */ 2010 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0); 2011 if (err_mask && id[2] != 0x738c) { 2012 rc = -EIO; 2013 reason = "SPINUP failed"; 2014 goto err_out; 2015 } 2016 /* 2017 * If the drive initially returned incomplete IDENTIFY info, 2018 * we now must reissue the IDENTIFY command. 2019 */ 2020 if (id[2] == 0x37c8) 2021 goto retry; 2022 } 2023 2024 if ((flags & ATA_READID_POSTRESET) && 2025 (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) { 2026 /* 2027 * The exact sequence expected by certain pre-ATA4 drives is: 2028 * SRST RESET 2029 * IDENTIFY (optional in early ATA) 2030 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA) 2031 * anything else.. 2032 * Some drives were very specific about that exact sequence. 2033 * 2034 * Note that ATA4 says lba is mandatory so the second check 2035 * should never trigger. 2036 */ 2037 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 2038 err_mask = ata_dev_init_params(dev, id[3], id[6]); 2039 if (err_mask) { 2040 rc = -EIO; 2041 reason = "INIT_DEV_PARAMS failed"; 2042 goto err_out; 2043 } 2044 2045 /* current CHS translation info (id[53-58]) might be 2046 * changed. reread the identify device info. 2047 */ 2048 flags &= ~ATA_READID_POSTRESET; 2049 goto retry; 2050 } 2051 } 2052 2053 *p_class = class; 2054 2055 return 0; 2056 2057 err_out: 2058 if (ata_msg_warn(ap)) 2059 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n", 2060 reason, err_mask); 2061 return rc; 2062 } 2063 2064 static int ata_do_link_spd_horkage(struct ata_device *dev) 2065 { 2066 struct ata_link *plink = ata_dev_phys_link(dev); 2067 u32 target, target_limit; 2068 2069 if (!sata_scr_valid(plink)) 2070 return 0; 2071 2072 if (dev->horkage & ATA_HORKAGE_1_5_GBPS) 2073 target = 1; 2074 else 2075 return 0; 2076 2077 target_limit = (1 << target) - 1; 2078 2079 /* if already on stricter limit, no need to push further */ 2080 if (plink->sata_spd_limit <= target_limit) 2081 return 0; 2082 2083 plink->sata_spd_limit = target_limit; 2084 2085 /* Request another EH round by returning -EAGAIN if link is 2086 * going faster than the target speed. Forward progress is 2087 * guaranteed by setting sata_spd_limit to target_limit above. 
2088 */ 2089 if (plink->sata_spd > target) { 2090 ata_dev_info(dev, "applying link speed limit horkage to %s\n", 2091 sata_spd_string(target)); 2092 return -EAGAIN; 2093 } 2094 return 0; 2095 } 2096 2097 static inline u8 ata_dev_knobble(struct ata_device *dev) 2098 { 2099 struct ata_port *ap = dev->link->ap; 2100 2101 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) 2102 return 0; 2103 2104 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2105 } 2106 2107 static int ata_dev_config_ncq(struct ata_device *dev, 2108 char *desc, size_t desc_sz) 2109 { 2110 struct ata_port *ap = dev->link->ap; 2111 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 2112 unsigned int err_mask; 2113 char *aa_desc = ""; 2114 2115 if (!ata_id_has_ncq(dev->id)) { 2116 desc[0] = '\0'; 2117 return 0; 2118 } 2119 if (dev->horkage & ATA_HORKAGE_NONCQ) { 2120 snprintf(desc, desc_sz, "NCQ (not used)"); 2121 return 0; 2122 } 2123 if (ap->flags & ATA_FLAG_NCQ) { 2124 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 2125 dev->flags |= ATA_DFLAG_NCQ; 2126 } 2127 2128 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && 2129 (ap->flags & ATA_FLAG_FPDMA_AA) && 2130 ata_id_has_fpdma_aa(dev->id)) { 2131 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, 2132 SATA_FPDMA_AA); 2133 if (err_mask) { 2134 ata_dev_err(dev, 2135 "failed to enable AA (error_mask=0x%x)\n", 2136 err_mask); 2137 if (err_mask != AC_ERR_DEV) { 2138 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; 2139 return -EIO; 2140 } 2141 } else 2142 aa_desc = ", AA"; 2143 } 2144 2145 if (hdepth >= ddepth) 2146 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); 2147 else 2148 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, 2149 ddepth, aa_desc); 2150 2151 if ((ap->flags & ATA_FLAG_FPDMA_AUX) && 2152 ata_id_has_ncq_send_and_recv(dev->id)) { 2153 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV, 2154 0, ap->sector_buf, 1); 2155 if (err_mask) { 2156 ata_dev_dbg(dev, 2157 "failed to get NCQ Send/Recv Log Emask 0x%x\n", 2158 err_mask); 2159 } else { 2160 u8 *cmds = dev->ncq_send_recv_cmds; 2161 2162 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; 2163 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); 2164 2165 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { 2166 ata_dev_dbg(dev, "disabling queued TRIM support\n"); 2167 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= 2168 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; 2169 } 2170 } 2171 } 2172 2173 return 0; 2174 } 2175 2176 /** 2177 * ata_dev_configure - Configure the specified ATA/ATAPI device 2178 * @dev: Target device to configure 2179 * 2180 * Configure @dev according to @dev->id. Generic and low-level 2181 * driver specific fixups are also applied. 
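 *
 * A typical calling pattern (sketch; this mirrors how ata_bus_probe()
 * below drives it, EH paths look similar):
 *
 *	ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
 *	rc = ata_dev_configure(dev);
 *	ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;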
2182 * 2183 * LOCKING: 2184 * Kernel thread context (may sleep) 2185 * 2186 * RETURNS: 2187 * 0 on success, -errno otherwise 2188 */ 2189 int ata_dev_configure(struct ata_device *dev) 2190 { 2191 struct ata_port *ap = dev->link->ap; 2192 struct ata_eh_context *ehc = &dev->link->eh_context; 2193 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2194 const u16 *id = dev->id; 2195 unsigned long xfer_mask; 2196 unsigned int err_mask; 2197 char revbuf[7]; /* XYZ-99\0 */ 2198 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2199 char modelbuf[ATA_ID_PROD_LEN+1]; 2200 int rc; 2201 2202 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2203 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__); 2204 return 0; 2205 } 2206 2207 if (ata_msg_probe(ap)) 2208 ata_dev_dbg(dev, "%s: ENTER\n", __func__); 2209 2210 /* set horkage */ 2211 dev->horkage |= ata_dev_blacklisted(dev); 2212 ata_force_horkage(dev); 2213 2214 if (dev->horkage & ATA_HORKAGE_DISABLE) { 2215 ata_dev_info(dev, "unsupported device, disabling\n"); 2216 ata_dev_disable(dev); 2217 return 0; 2218 } 2219 2220 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && 2221 dev->class == ATA_DEV_ATAPI) { 2222 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n", 2223 atapi_enabled ? "not supported with this driver" 2224 : "disabled"); 2225 ata_dev_disable(dev); 2226 return 0; 2227 } 2228 2229 rc = ata_do_link_spd_horkage(dev); 2230 if (rc) 2231 return rc; 2232 2233 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */ 2234 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) && 2235 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) 2236 dev->horkage |= ATA_HORKAGE_NOLPM; 2237 2238 if (dev->horkage & ATA_HORKAGE_NOLPM) { 2239 ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); 2240 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; 2241 } 2242 2243 /* let ACPI work its magic */ 2244 rc = ata_acpi_on_devcfg(dev); 2245 if (rc) 2246 return rc; 2247 2248 /* massage HPA, do it early as it might change IDENTIFY data */ 2249 rc = ata_hpa_resize(dev); 2250 if (rc) 2251 return rc; 2252 2253 /* print device capabilities */ 2254 if (ata_msg_probe(ap)) 2255 ata_dev_dbg(dev, 2256 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2257 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2258 __func__, 2259 id[49], id[82], id[83], id[84], 2260 id[85], id[86], id[87], id[88]); 2261 2262 /* initialize to-be-configured parameters */ 2263 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2264 dev->max_sectors = 0; 2265 dev->cdb_len = 0; 2266 dev->n_sectors = 0; 2267 dev->cylinders = 0; 2268 dev->heads = 0; 2269 dev->sectors = 0; 2270 dev->multi_count = 0; 2271 2272 /* 2273 * common ATA, ATAPI feature tests 2274 */ 2275 2276 /* find max transfer mode; for printk only */ 2277 xfer_mask = ata_id_xfermask(id); 2278 2279 if (ata_msg_probe(ap)) 2280 ata_dump_id(id); 2281 2282 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2283 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2284 sizeof(fwrevbuf)); 2285 2286 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2287 sizeof(modelbuf)); 2288 2289 /* ATA-specific feature tests */ 2290 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { 2291 if (ata_id_is_cfa(id)) { 2292 /* CPRM may make this media unusable */ 2293 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2294 ata_dev_warn(dev, 2295 "supports DRM functions and may not be fully accessible\n"); 2296 snprintf(revbuf, 7, "CFA"); 2297 } else { 2298 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2299 /* Warn the user if the device has TPM extensions */ 2300 if 
(ata_id_has_tpm(id)) 2301 ata_dev_warn(dev, 2302 "supports DRM functions and may not be fully accessible\n"); 2303 } 2304 2305 dev->n_sectors = ata_id_n_sectors(id); 2306 2307 /* get current R/W Multiple count setting */ 2308 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { 2309 unsigned int max = dev->id[47] & 0xff; 2310 unsigned int cnt = dev->id[59] & 0xff; 2311 /* only recognize/allow powers of two here */ 2312 if (is_power_of_2(max) && is_power_of_2(cnt)) 2313 if (cnt <= max) 2314 dev->multi_count = cnt; 2315 } 2316 2317 if (ata_id_has_lba(id)) { 2318 const char *lba_desc; 2319 char ncq_desc[24]; 2320 2321 lba_desc = "LBA"; 2322 dev->flags |= ATA_DFLAG_LBA; 2323 if (ata_id_has_lba48(id)) { 2324 dev->flags |= ATA_DFLAG_LBA48; 2325 lba_desc = "LBA48"; 2326 2327 if (dev->n_sectors >= (1UL << 28) && 2328 ata_id_has_flush_ext(id)) 2329 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2330 } 2331 2332 /* config NCQ */ 2333 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2334 if (rc) 2335 return rc; 2336 2337 /* print device info to dmesg */ 2338 if (ata_msg_drv(ap) && print_info) { 2339 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2340 revbuf, modelbuf, fwrevbuf, 2341 ata_mode_string(xfer_mask)); 2342 ata_dev_info(dev, 2343 "%llu sectors, multi %u: %s %s\n", 2344 (unsigned long long)dev->n_sectors, 2345 dev->multi_count, lba_desc, ncq_desc); 2346 } 2347 } else { 2348 /* CHS */ 2349 2350 /* Default translation */ 2351 dev->cylinders = id[1]; 2352 dev->heads = id[3]; 2353 dev->sectors = id[6]; 2354 2355 if (ata_id_current_chs_valid(id)) { 2356 /* Current CHS translation is valid. */ 2357 dev->cylinders = id[54]; 2358 dev->heads = id[55]; 2359 dev->sectors = id[56]; 2360 } 2361 2362 /* print device info to dmesg */ 2363 if (ata_msg_drv(ap) && print_info) { 2364 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2365 revbuf, modelbuf, fwrevbuf, 2366 ata_mode_string(xfer_mask)); 2367 ata_dev_info(dev, 2368 "%llu sectors, multi %u, CHS %u/%u/%u\n", 2369 (unsigned long long)dev->n_sectors, 2370 dev->multi_count, dev->cylinders, 2371 dev->heads, dev->sectors); 2372 } 2373 } 2374 2375 /* Check and mark DevSlp capability. Get DevSlp timing variables 2376 * from SATA Settings page of Identify Device Data Log. 2377 */ 2378 if (ata_id_has_devslp(dev->id)) { 2379 u8 *sata_setting = ap->sector_buf; 2380 int i, j; 2381 2382 dev->flags |= ATA_DFLAG_DEVSLP; 2383 err_mask = ata_read_log_page(dev, 2384 ATA_LOG_SATA_ID_DEV_DATA, 2385 ATA_LOG_SATA_SETTINGS, 2386 sata_setting, 2387 1); 2388 if (err_mask) 2389 ata_dev_dbg(dev, 2390 "failed to get Identify Device Data, Emask 0x%x\n", 2391 err_mask); 2392 else 2393 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { 2394 j = ATA_LOG_DEVSLP_OFFSET + i; 2395 dev->devslp_timing[i] = sata_setting[j]; 2396 } 2397 } 2398 2399 dev->cdb_len = 16; 2400 } 2401 2402 /* ATAPI-specific feature tests */ 2403 else if (dev->class == ATA_DEV_ATAPI) { 2404 const char *cdb_intr_string = ""; 2405 const char *atapi_an_string = ""; 2406 const char *dma_dir_string = ""; 2407 u32 sntf; 2408 2409 rc = atapi_cdb_len(id); 2410 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2411 if (ata_msg_warn(ap)) 2412 ata_dev_warn(dev, "unsupported CDB len\n"); 2413 rc = -EINVAL; 2414 goto err_out_nosup; 2415 } 2416 dev->cdb_len = (unsigned int) rc; 2417 2418 /* Enable ATAPI AN if both the host and device have 2419 * the support. If PMP is attached, SNTF is required 2420 * to enable ATAPI AN to discern between PHY status 2421 * changed notifications and ATAPI ANs. 
2422 */ 2423 if (atapi_an && 2424 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2425 (!sata_pmp_attached(ap) || 2426 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2427 /* issue SET feature command to turn this on */ 2428 err_mask = ata_dev_set_feature(dev, 2429 SETFEATURES_SATA_ENABLE, SATA_AN); 2430 if (err_mask) 2431 ata_dev_err(dev, 2432 "failed to enable ATAPI AN (err_mask=0x%x)\n", 2433 err_mask); 2434 else { 2435 dev->flags |= ATA_DFLAG_AN; 2436 atapi_an_string = ", ATAPI AN"; 2437 } 2438 } 2439 2440 if (ata_id_cdb_intr(dev->id)) { 2441 dev->flags |= ATA_DFLAG_CDB_INTR; 2442 cdb_intr_string = ", CDB intr"; 2443 } 2444 2445 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) { 2446 dev->flags |= ATA_DFLAG_DMADIR; 2447 dma_dir_string = ", DMADIR"; 2448 } 2449 2450 if (ata_id_has_da(dev->id)) { 2451 dev->flags |= ATA_DFLAG_DA; 2452 zpodd_init(dev); 2453 } 2454 2455 /* print device info to dmesg */ 2456 if (ata_msg_drv(ap) && print_info) 2457 ata_dev_info(dev, 2458 "ATAPI: %s, %s, max %s%s%s%s\n", 2459 modelbuf, fwrevbuf, 2460 ata_mode_string(xfer_mask), 2461 cdb_intr_string, atapi_an_string, 2462 dma_dir_string); 2463 } 2464 2465 /* determine max_sectors */ 2466 dev->max_sectors = ATA_MAX_SECTORS; 2467 if (dev->flags & ATA_DFLAG_LBA48) 2468 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2469 2470 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2471 200 sectors */ 2472 if (ata_dev_knobble(dev)) { 2473 if (ata_msg_drv(ap) && print_info) 2474 ata_dev_info(dev, "applying bridge limits\n"); 2475 dev->udma_mask &= ATA_UDMA5; 2476 dev->max_sectors = ATA_MAX_SECTORS; 2477 } 2478 2479 if ((dev->class == ATA_DEV_ATAPI) && 2480 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2481 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2482 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2483 } 2484 2485 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2486 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2487 dev->max_sectors); 2488 2489 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) 2490 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2491 2492 if (ap->ops->dev_config) 2493 ap->ops->dev_config(dev); 2494 2495 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2496 /* Let the user know. We don't want to disallow opens for 2497 rescue purposes, or in case the vendor is just a blithering 2498 idiot. Do this after the dev_config call as some controllers 2499 with buggy firmware may want to avoid reporting false device 2500 bugs */ 2501 2502 if (print_info) { 2503 ata_dev_warn(dev, 2504 "Drive reports diagnostics failure. This may indicate a drive\n"); 2505 ata_dev_warn(dev, 2506 "fault or invalid emulation. Contact drive vendor for information.\n"); 2507 } 2508 } 2509 2510 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { 2511 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n"); 2512 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n"); 2513 } 2514 2515 return 0; 2516 2517 err_out_nosup: 2518 if (ata_msg_probe(ap)) 2519 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__); 2520 return rc; 2521 } 2522 2523 /** 2524 * ata_cable_40wire - return 40 wire cable type 2525 * @ap: port 2526 * 2527 * Helper method for drivers which want to hardwire 40 wire cable 2528 * detection. 
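 *
 * Usage sketch (hypothetical LLD, shown for illustration only): a driver
 * that knows it only ever sees a 40-wire cable simply points its
 * cable_detect hook at this helper, e.g.
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 *
 * ata_bus_probe()/EH then store the returned type in ap->cbl.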
2529 */ 2530 2531 int ata_cable_40wire(struct ata_port *ap) 2532 { 2533 return ATA_CBL_PATA40; 2534 } 2535 2536 /** 2537 * ata_cable_80wire - return 80 wire cable type 2538 * @ap: port 2539 * 2540 * Helper method for drivers which want to hardwire 80 wire cable 2541 * detection. 2542 */ 2543 2544 int ata_cable_80wire(struct ata_port *ap) 2545 { 2546 return ATA_CBL_PATA80; 2547 } 2548 2549 /** 2550 * ata_cable_unknown - return unknown PATA cable. 2551 * @ap: port 2552 * 2553 * Helper method for drivers which have no PATA cable detection. 2554 */ 2555 2556 int ata_cable_unknown(struct ata_port *ap) 2557 { 2558 return ATA_CBL_PATA_UNK; 2559 } 2560 2561 /** 2562 * ata_cable_ignore - return ignored PATA cable. 2563 * @ap: port 2564 * 2565 * Helper method for drivers which don't use cable type to limit 2566 * transfer mode. 2567 */ 2568 int ata_cable_ignore(struct ata_port *ap) 2569 { 2570 return ATA_CBL_PATA_IGN; 2571 } 2572 2573 /** 2574 * ata_cable_sata - return SATA cable type 2575 * @ap: port 2576 * 2577 * Helper method for drivers which have SATA cables 2578 */ 2579 2580 int ata_cable_sata(struct ata_port *ap) 2581 { 2582 return ATA_CBL_SATA; 2583 } 2584 2585 /** 2586 * ata_bus_probe - Reset and probe ATA bus 2587 * @ap: Bus to probe 2588 * 2589 * Master ATA bus probing function. Initiates a hardware-dependent 2590 * bus reset, then attempts to identify any devices found on 2591 * the bus. 2592 * 2593 * LOCKING: 2594 * PCI/etc. bus probe sem. 2595 * 2596 * RETURNS: 2597 * Zero on success, negative errno otherwise. 2598 */ 2599 2600 int ata_bus_probe(struct ata_port *ap) 2601 { 2602 unsigned int classes[ATA_MAX_DEVICES]; 2603 int tries[ATA_MAX_DEVICES]; 2604 int rc; 2605 struct ata_device *dev; 2606 2607 ata_for_each_dev(dev, &ap->link, ALL) 2608 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2609 2610 retry: 2611 ata_for_each_dev(dev, &ap->link, ALL) { 2612 /* If we issue an SRST then an ATA drive (not ATAPI) 2613 * may change configuration and be in PIO0 timing. If 2614 * we do a hard reset (or are coming from power on) 2615 * this is true for ATA or ATAPI. Until we've set a 2616 * suitable controller mode we should not touch the 2617 * bus as we may be talking too fast. 2618 */ 2619 dev->pio_mode = XFER_PIO_0; 2620 dev->dma_mode = 0xff; 2621 2622 /* If the controller has a pio mode setup function 2623 * then use it to set the chipset to rights. Don't 2624 * touch the DMA setup as that will be dealt with when 2625 * configuring devices. 2626 */ 2627 if (ap->ops->set_piomode) 2628 ap->ops->set_piomode(ap, dev); 2629 } 2630 2631 /* reset and determine device classes */ 2632 ap->ops->phy_reset(ap); 2633 2634 ata_for_each_dev(dev, &ap->link, ALL) { 2635 if (dev->class != ATA_DEV_UNKNOWN) 2636 classes[dev->devno] = dev->class; 2637 else 2638 classes[dev->devno] = ATA_DEV_NONE; 2639 2640 dev->class = ATA_DEV_UNKNOWN; 2641 } 2642 2643 /* read IDENTIFY page and configure devices. 
We have to do the identify 2644 specific sequence bass-ackwards so that PDIAG- is released by 2645 the slave device */ 2646 2647 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { 2648 if (tries[dev->devno]) 2649 dev->class = classes[dev->devno]; 2650 2651 if (!ata_dev_enabled(dev)) 2652 continue; 2653 2654 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2655 dev->id); 2656 if (rc) 2657 goto fail; 2658 } 2659 2660 /* Now ask for the cable type as PDIAG- should have been released */ 2661 if (ap->ops->cable_detect) 2662 ap->cbl = ap->ops->cable_detect(ap); 2663 2664 /* We may have SATA bridge glue hiding here irrespective of 2665 * the reported cable types and sensed types. When SATA 2666 * drives indicate we have a bridge, we don't know which end 2667 * of the link the bridge is which is a problem. 2668 */ 2669 ata_for_each_dev(dev, &ap->link, ENABLED) 2670 if (ata_id_is_sata(dev->id)) 2671 ap->cbl = ATA_CBL_SATA; 2672 2673 /* After the identify sequence we can now set up the devices. We do 2674 this in the normal order so that the user doesn't get confused */ 2675 2676 ata_for_each_dev(dev, &ap->link, ENABLED) { 2677 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2678 rc = ata_dev_configure(dev); 2679 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2680 if (rc) 2681 goto fail; 2682 } 2683 2684 /* configure transfer mode */ 2685 rc = ata_set_mode(&ap->link, &dev); 2686 if (rc) 2687 goto fail; 2688 2689 ata_for_each_dev(dev, &ap->link, ENABLED) 2690 return 0; 2691 2692 return -ENODEV; 2693 2694 fail: 2695 tries[dev->devno]--; 2696 2697 switch (rc) { 2698 case -EINVAL: 2699 /* eeek, something went very wrong, give up */ 2700 tries[dev->devno] = 0; 2701 break; 2702 2703 case -ENODEV: 2704 /* give it just one more chance */ 2705 tries[dev->devno] = min(tries[dev->devno], 1); 2706 case -EIO: 2707 if (tries[dev->devno] == 1) { 2708 /* This is the last chance, better to slow 2709 * down than lose it. 2710 */ 2711 sata_down_spd_limit(&ap->link, 0); 2712 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2713 } 2714 } 2715 2716 if (!tries[dev->devno]) 2717 ata_dev_disable(dev); 2718 2719 goto retry; 2720 } 2721 2722 /** 2723 * sata_print_link_status - Print SATA link status 2724 * @link: SATA link to printk link status about 2725 * 2726 * This function prints link speed and status of a SATA link. 2727 * 2728 * LOCKING: 2729 * None. 
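 *
 * Example of the resulting log line (values are illustrative only):
 *
 *	ata1: SATA link up 3.0 Gbps (SStatus 123 SControl 300)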
2730 */ 2731 static void sata_print_link_status(struct ata_link *link) 2732 { 2733 u32 sstatus, scontrol, tmp; 2734 2735 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2736 return; 2737 sata_scr_read(link, SCR_CONTROL, &scontrol); 2738 2739 if (ata_phys_link_online(link)) { 2740 tmp = (sstatus >> 4) & 0xf; 2741 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n", 2742 sata_spd_string(tmp), sstatus, scontrol); 2743 } else { 2744 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n", 2745 sstatus, scontrol); 2746 } 2747 } 2748 2749 /** 2750 * ata_dev_pair - return other device on cable 2751 * @adev: device 2752 * 2753 * Obtain the other device on the same cable, or if none is 2754 * present NULL is returned 2755 */ 2756 2757 struct ata_device *ata_dev_pair(struct ata_device *adev) 2758 { 2759 struct ata_link *link = adev->link; 2760 struct ata_device *pair = &link->device[1 - adev->devno]; 2761 if (!ata_dev_enabled(pair)) 2762 return NULL; 2763 return pair; 2764 } 2765 2766 /** 2767 * sata_down_spd_limit - adjust SATA spd limit downward 2768 * @link: Link to adjust SATA spd limit for 2769 * @spd_limit: Additional limit 2770 * 2771 * Adjust SATA spd limit of @link downward. Note that this 2772 * function only adjusts the limit. The change must be applied 2773 * using sata_set_spd(). 2774 * 2775 * If @spd_limit is non-zero, the speed is limited to equal to or 2776 * lower than @spd_limit if such speed is supported. If 2777 * @spd_limit is slower than any supported speed, only the lowest 2778 * supported speed is allowed. 2779 * 2780 * LOCKING: 2781 * Inherited from caller. 2782 * 2783 * RETURNS: 2784 * 0 on success, negative errno on failure 2785 */ 2786 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) 2787 { 2788 u32 sstatus, spd, mask; 2789 int rc, bit; 2790 2791 if (!sata_scr_valid(link)) 2792 return -EOPNOTSUPP; 2793 2794 /* If SCR can be read, use it to determine the current SPD. 2795 * If not, use cached value in link->sata_spd. 2796 */ 2797 rc = sata_scr_read(link, SCR_STATUS, &sstatus); 2798 if (rc == 0 && ata_sstatus_online(sstatus)) 2799 spd = (sstatus >> 4) & 0xf; 2800 else 2801 spd = link->sata_spd; 2802 2803 mask = link->sata_spd_limit; 2804 if (mask <= 1) 2805 return -EINVAL; 2806 2807 /* unconditionally mask off the highest bit */ 2808 bit = fls(mask) - 1; 2809 mask &= ~(1 << bit); 2810 2811 /* Mask off all speeds higher than or equal to the current 2812 * one. Force 1.5Gbps if current SPD is not available. 2813 */ 2814 if (spd > 1) 2815 mask &= (1 << (spd - 1)) - 1; 2816 else 2817 mask &= 1; 2818 2819 /* were we already at the bottom? */ 2820 if (!mask) 2821 return -EINVAL; 2822 2823 if (spd_limit) { 2824 if (mask & ((1 << spd_limit) - 1)) 2825 mask &= (1 << spd_limit) - 1; 2826 else { 2827 bit = ffs(mask) - 1; 2828 mask = 1 << bit; 2829 } 2830 } 2831 2832 link->sata_spd_limit = mask; 2833 2834 ata_link_warn(link, "limiting SATA link speed to %s\n", 2835 sata_spd_string(fls(mask))); 2836 2837 return 0; 2838 } 2839 2840 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 2841 { 2842 struct ata_link *host_link = &link->ap->link; 2843 u32 limit, target, spd; 2844 2845 limit = link->sata_spd_limit; 2846 2847 /* Don't configure downstream link faster than upstream link. 2848 * It doesn't speed up anything and some PMPs choke on such 2849 * configuration. 
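 * For example, a host link running at sata_spd == 1 (1.5 Gbps) reduces
 * the limit below to (1 << 1) - 1 == 0x1, pinning the downstream link
 * to 1.5 Gbps as well.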
2850 */ 2851 if (!ata_is_host_link(link) && host_link->sata_spd) 2852 limit &= (1 << host_link->sata_spd) - 1; 2853 2854 if (limit == UINT_MAX) 2855 target = 0; 2856 else 2857 target = fls(limit); 2858 2859 spd = (*scontrol >> 4) & 0xf; 2860 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 2861 2862 return spd != target; 2863 } 2864 2865 /** 2866 * sata_set_spd_needed - is SATA spd configuration needed 2867 * @link: Link in question 2868 * 2869 * Test whether the spd limit in SControl matches 2870 * @link->sata_spd_limit. This function is used to determine 2871 * whether hardreset is necessary to apply SATA spd 2872 * configuration. 2873 * 2874 * LOCKING: 2875 * Inherited from caller. 2876 * 2877 * RETURNS: 2878 * 1 if SATA spd configuration is needed, 0 otherwise. 2879 */ 2880 static int sata_set_spd_needed(struct ata_link *link) 2881 { 2882 u32 scontrol; 2883 2884 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 2885 return 1; 2886 2887 return __sata_set_spd_needed(link, &scontrol); 2888 } 2889 2890 /** 2891 * sata_set_spd - set SATA spd according to spd limit 2892 * @link: Link to set SATA spd for 2893 * 2894 * Set SATA spd of @link according to sata_spd_limit. 2895 * 2896 * LOCKING: 2897 * Inherited from caller. 2898 * 2899 * RETURNS: 2900 * 0 if spd doesn't need to be changed, 1 if spd has been 2901 * changed. Negative errno if SCR registers are inaccessible. 2902 */ 2903 int sata_set_spd(struct ata_link *link) 2904 { 2905 u32 scontrol; 2906 int rc; 2907 2908 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 2909 return rc; 2910 2911 if (!__sata_set_spd_needed(link, &scontrol)) 2912 return 0; 2913 2914 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 2915 return rc; 2916 2917 return 1; 2918 } 2919 2920 /* 2921 * This mode timing computation functionality is ported over from 2922 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 2923 */ 2924 /* 2925 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 2926 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 2927 * for UDMA6, which is currently supported only by Maxtor drives. 2928 * 2929 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
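 *
 * Worked example of how these entries are consumed (the 33 MHz figure is
 * an assumption for illustration, not taken from this file): a PATA LLD
 * calls ata_timing_compute() with T in picoseconds per clock, e.g.
 * T = 1000000000 / 33333 ~= 30000 for a 33 MHz bus.  The 120 ns cycle of
 * XFER_MW_DMA_2 below then quantizes to ENOUGH(120 * 1000, 30000) == 4
 * clocks in ata_timing_quantize().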
2930 */ 2931 2932 static const struct ata_timing ata_timing[] = { 2933 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ 2934 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, 2935 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, 2936 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, 2937 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, 2938 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, 2939 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, 2940 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, 2941 2942 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, 2943 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, 2944 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, 2945 2946 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, 2947 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, 2948 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, 2949 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, 2950 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, 2951 2952 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 2953 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, 2954 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, 2955 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, 2956 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, 2957 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, 2958 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, 2959 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, 2960 2961 { 0xFF } 2962 }; 2963 2964 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 2965 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 2966 2967 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 2968 { 2969 q->setup = EZ(t->setup * 1000, T); 2970 q->act8b = EZ(t->act8b * 1000, T); 2971 q->rec8b = EZ(t->rec8b * 1000, T); 2972 q->cyc8b = EZ(t->cyc8b * 1000, T); 2973 q->active = EZ(t->active * 1000, T); 2974 q->recover = EZ(t->recover * 1000, T); 2975 q->dmack_hold = EZ(t->dmack_hold * 1000, T); 2976 q->cycle = EZ(t->cycle * 1000, T); 2977 q->udma = EZ(t->udma * 1000, UT); 2978 } 2979 2980 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 2981 struct ata_timing *m, unsigned int what) 2982 { 2983 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 2984 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 2985 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 2986 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 2987 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 2988 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 2989 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); 2990 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 2991 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 2992 } 2993 2994 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 2995 { 2996 const struct ata_timing *t = ata_timing; 2997 2998 while (xfer_mode > t->mode) 2999 t++; 3000 3001 if (xfer_mode == t->mode) 3002 return t; 3003 3004 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", 3005 __func__, xfer_mode); 3006 3007 return NULL; 3008 } 3009 3010 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 3011 struct ata_timing *t, int T, int UT) 3012 { 3013 const u16 *id = adev->id; 3014 const struct ata_timing *s; 3015 struct ata_timing p; 3016 3017 /* 3018 * Find the mode. 
3019 */ 3020 3021 if (!(s = ata_timing_find_mode(speed))) 3022 return -EINVAL; 3023 3024 memcpy(t, s, sizeof(*s)); 3025 3026 /* 3027 * If the drive is an EIDE drive, it can tell us it needs extended 3028 * PIO/MW_DMA cycle timing. 3029 */ 3030 3031 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 3032 memset(&p, 0, sizeof(p)); 3033 3034 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) { 3035 if (speed <= XFER_PIO_2) 3036 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; 3037 else if ((speed <= XFER_PIO_4) || 3038 (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) 3039 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; 3040 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) 3041 p.cycle = id[ATA_ID_EIDE_DMA_MIN]; 3042 3043 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 3044 } 3045 3046 /* 3047 * Convert the timing to bus clock counts. 3048 */ 3049 3050 ata_timing_quantize(t, t, T, UT); 3051 3052 /* 3053 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 3054 * S.M.A.R.T. and some other commands. We have to ensure that the 3055 * DMA cycle timing is slower than or equal to the fastest PIO timing. 3056 */ 3057 3058 if (speed > XFER_PIO_6) { 3059 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 3060 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 3061 } 3062 3063 /* 3064 * Lengthen active & recovery time so that cycle time is correct. 3065 */ 3066 3067 if (t->act8b + t->rec8b < t->cyc8b) { 3068 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 3069 t->rec8b = t->cyc8b - t->act8b; 3070 } 3071 3072 if (t->active + t->recover < t->cycle) { 3073 t->active += (t->cycle - (t->active + t->recover)) / 2; 3074 t->recover = t->cycle - t->active; 3075 } 3076 3077 /* In a few cases quantisation may produce enough errors to 3078 leave t->cycle too low for the sum of active and recovery; 3079 if so, we must correct this */ 3080 if (t->active + t->recover > t->cycle) 3081 t->cycle = t->active + t->recover; 3082 3083 return 0; 3084 } 3085 3086 /** 3087 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration 3088 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. 3089 * @cycle: cycle duration in ns 3090 * 3091 * Return matching xfer mode for @cycle. The returned mode is of 3092 * the transfer type specified by @xfer_shift. If @cycle is too 3093 * slow for @xfer_shift, 0xff is returned. If @cycle is faster 3094 * than the fastest known mode, the fastest mode is returned. 3095 * 3096 * LOCKING: 3097 * None. 3098 * 3099 * RETURNS: 3100 * Matching xfer_mode, 0xff if no match found.
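 *
 * For instance, given the timing table above,
 *
 *	ata_timing_cycle2mode(ATA_SHIFT_UDMA, 25)
 *
 * returns XFER_UDMA_4 (30 ns cycle), the fastest UDMA mode whose cycle
 * is not shorter than the requested 25 ns.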
3101 */ 3102 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3103 { 3104 u8 base_mode = 0xff, last_mode = 0xff; 3105 const struct ata_xfer_ent *ent; 3106 const struct ata_timing *t; 3107 3108 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3109 if (ent->shift == xfer_shift) 3110 base_mode = ent->base; 3111 3112 for (t = ata_timing_find_mode(base_mode); 3113 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3114 unsigned short this_cycle; 3115 3116 switch (xfer_shift) { 3117 case ATA_SHIFT_PIO: 3118 case ATA_SHIFT_MWDMA: 3119 this_cycle = t->cycle; 3120 break; 3121 case ATA_SHIFT_UDMA: 3122 this_cycle = t->udma; 3123 break; 3124 default: 3125 return 0xff; 3126 } 3127 3128 if (cycle > this_cycle) 3129 break; 3130 3131 last_mode = t->mode; 3132 } 3133 3134 return last_mode; 3135 } 3136 3137 /** 3138 * ata_down_xfermask_limit - adjust dev xfer masks downward 3139 * @dev: Device to adjust xfer masks 3140 * @sel: ATA_DNXFER_* selector 3141 * 3142 * Adjust xfer masks of @dev downward. Note that this function 3143 * does not apply the change. Invoking ata_set_mode() afterwards 3144 * will apply the limit. 3145 * 3146 * LOCKING: 3147 * Inherited from caller. 3148 * 3149 * RETURNS: 3150 * 0 on success, negative errno on failure 3151 */ 3152 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3153 { 3154 char buf[32]; 3155 unsigned long orig_mask, xfer_mask; 3156 unsigned long pio_mask, mwdma_mask, udma_mask; 3157 int quiet, highbit; 3158 3159 quiet = !!(sel & ATA_DNXFER_QUIET); 3160 sel &= ~ATA_DNXFER_QUIET; 3161 3162 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3163 dev->mwdma_mask, 3164 dev->udma_mask); 3165 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3166 3167 switch (sel) { 3168 case ATA_DNXFER_PIO: 3169 highbit = fls(pio_mask) - 1; 3170 pio_mask &= ~(1 << highbit); 3171 break; 3172 3173 case ATA_DNXFER_DMA: 3174 if (udma_mask) { 3175 highbit = fls(udma_mask) - 1; 3176 udma_mask &= ~(1 << highbit); 3177 if (!udma_mask) 3178 return -ENOENT; 3179 } else if (mwdma_mask) { 3180 highbit = fls(mwdma_mask) - 1; 3181 mwdma_mask &= ~(1 << highbit); 3182 if (!mwdma_mask) 3183 return -ENOENT; 3184 } 3185 break; 3186 3187 case ATA_DNXFER_40C: 3188 udma_mask &= ATA_UDMA_MASK_40C; 3189 break; 3190 3191 case ATA_DNXFER_FORCE_PIO0: 3192 pio_mask &= 1; 3193 case ATA_DNXFER_FORCE_PIO: 3194 mwdma_mask = 0; 3195 udma_mask = 0; 3196 break; 3197 3198 default: 3199 BUG(); 3200 } 3201 3202 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3203 3204 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3205 return -ENOENT; 3206 3207 if (!quiet) { 3208 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3209 snprintf(buf, sizeof(buf), "%s:%s", 3210 ata_mode_string(xfer_mask), 3211 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3212 else 3213 snprintf(buf, sizeof(buf), "%s", 3214 ata_mode_string(xfer_mask)); 3215 3216 ata_dev_warn(dev, "limiting speed to %s\n", buf); 3217 } 3218 3219 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3220 &dev->udma_mask); 3221 3222 return 0; 3223 } 3224 3225 static int ata_dev_set_mode(struct ata_device *dev) 3226 { 3227 struct ata_port *ap = dev->link->ap; 3228 struct ata_eh_context *ehc = &dev->link->eh_context; 3229 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; 3230 const char *dev_err_whine = ""; 3231 int ign_dev_err = 0; 3232 unsigned int err_mask = 0; 3233 int rc; 3234 3235 dev->flags &= ~ATA_DFLAG_PIO; 3236 if (dev->xfer_shift == ATA_SHIFT_PIO) 3237 dev->flags |= 
ATA_DFLAG_PIO; 3238 3239 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) 3240 dev_err_whine = " (SET_XFERMODE skipped)"; 3241 else { 3242 if (nosetxfer) 3243 ata_dev_warn(dev, 3244 "NOSETXFER but PATA detected - can't " 3245 "skip SETXFER, might malfunction\n"); 3246 err_mask = ata_dev_set_xfermode(dev); 3247 } 3248 3249 if (err_mask & ~AC_ERR_DEV) 3250 goto fail; 3251 3252 /* revalidate */ 3253 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3254 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3255 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3256 if (rc) 3257 return rc; 3258 3259 if (dev->xfer_shift == ATA_SHIFT_PIO) { 3260 /* Old CFA may refuse this command, which is just fine */ 3261 if (ata_id_is_cfa(dev->id)) 3262 ign_dev_err = 1; 3263 /* Catch several broken garbage emulations plus some pre 3264 ATA devices */ 3265 if (ata_id_major_version(dev->id) == 0 && 3266 dev->pio_mode <= XFER_PIO_2) 3267 ign_dev_err = 1; 3268 /* Some very old devices and some bad newer ones fail 3269 any kind of SET_XFERMODE request but support PIO0-2 3270 timings and no IORDY */ 3271 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) 3272 ign_dev_err = 1; 3273 } 3274 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3275 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3276 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3277 dev->dma_mode == XFER_MW_DMA_0 && 3278 (dev->id[63] >> 8) & 1) 3279 ign_dev_err = 1; 3280 3281 /* if the device is actually configured correctly, ignore dev err */ 3282 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3283 ign_dev_err = 1; 3284 3285 if (err_mask & AC_ERR_DEV) { 3286 if (!ign_dev_err) 3287 goto fail; 3288 else 3289 dev_err_whine = " (device error ignored)"; 3290 } 3291 3292 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3293 dev->xfer_shift, (int)dev->xfer_mode); 3294 3295 ata_dev_info(dev, "configured for %s%s\n", 3296 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3297 dev_err_whine); 3298 3299 return 0; 3300 3301 fail: 3302 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask); 3303 return -EIO; 3304 } 3305 3306 /** 3307 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3308 * @link: link on which timings will be programmed 3309 * @r_failed_dev: out parameter for failed device 3310 * 3311 * Standard implementation of the function used to tune and set 3312 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3313 * ata_dev_set_mode() fails, pointer to the failing device is 3314 * returned in @r_failed_dev. 3315 * 3316 * LOCKING: 3317 * PCI/etc. bus probe sem. 
3318 * 3319 * RETURNS: 3320 * 0 on success, negative errno otherwise 3321 */ 3322 3323 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3324 { 3325 struct ata_port *ap = link->ap; 3326 struct ata_device *dev; 3327 int rc = 0, used_dma = 0, found = 0; 3328 3329 /* step 1: calculate xfer_mask */ 3330 ata_for_each_dev(dev, link, ENABLED) { 3331 unsigned long pio_mask, dma_mask; 3332 unsigned int mode_mask; 3333 3334 mode_mask = ATA_DMA_MASK_ATA; 3335 if (dev->class == ATA_DEV_ATAPI) 3336 mode_mask = ATA_DMA_MASK_ATAPI; 3337 else if (ata_id_is_cfa(dev->id)) 3338 mode_mask = ATA_DMA_MASK_CFA; 3339 3340 ata_dev_xfermask(dev); 3341 ata_force_xfermask(dev); 3342 3343 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 3344 3345 if (libata_dma_mask & mode_mask) 3346 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, 3347 dev->udma_mask); 3348 else 3349 dma_mask = 0; 3350 3351 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 3352 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3353 3354 found = 1; 3355 if (ata_dma_enabled(dev)) 3356 used_dma = 1; 3357 } 3358 if (!found) 3359 goto out; 3360 3361 /* step 2: always set host PIO timings */ 3362 ata_for_each_dev(dev, link, ENABLED) { 3363 if (dev->pio_mode == 0xff) { 3364 ata_dev_warn(dev, "no PIO support\n"); 3365 rc = -EINVAL; 3366 goto out; 3367 } 3368 3369 dev->xfer_mode = dev->pio_mode; 3370 dev->xfer_shift = ATA_SHIFT_PIO; 3371 if (ap->ops->set_piomode) 3372 ap->ops->set_piomode(ap, dev); 3373 } 3374 3375 /* step 3: set host DMA timings */ 3376 ata_for_each_dev(dev, link, ENABLED) { 3377 if (!ata_dma_enabled(dev)) 3378 continue; 3379 3380 dev->xfer_mode = dev->dma_mode; 3381 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); 3382 if (ap->ops->set_dmamode) 3383 ap->ops->set_dmamode(ap, dev); 3384 } 3385 3386 /* step 4: update devices' xfer mode */ 3387 ata_for_each_dev(dev, link, ENABLED) { 3388 rc = ata_dev_set_mode(dev); 3389 if (rc) 3390 goto out; 3391 } 3392 3393 /* Record simplex status. If we selected DMA then the other 3394 * host channels are not permitted to do so. 3395 */ 3396 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3397 ap->host->simplex_claimed = ap; 3398 3399 out: 3400 if (rc) 3401 *r_failed_dev = dev; 3402 return rc; 3403 } 3404 3405 /** 3406 * ata_wait_ready - wait for link to become ready 3407 * @link: link to be waited on 3408 * @deadline: deadline jiffies for the operation 3409 * @check_ready: callback to check link readiness 3410 * 3411 * Wait for @link to become ready. @check_ready should return 3412 * positive number if @link is ready, 0 if it isn't, -ENODEV if 3413 * link doesn't seem to be occupied, other errno for other error 3414 * conditions. 3415 * 3416 * Transient -ENODEV conditions are allowed for 3417 * ATA_TMOUT_FF_WAIT. 3418 * 3419 * LOCKING: 3420 * EH context. 3421 * 3422 * RETURNS: 3423 * 0 if @linke is ready before @deadline; otherwise, -errno. 3424 */ 3425 int ata_wait_ready(struct ata_link *link, unsigned long deadline, 3426 int (*check_ready)(struct ata_link *link)) 3427 { 3428 unsigned long start = jiffies; 3429 unsigned long nodev_deadline; 3430 int warned = 0; 3431 3432 /* choose which 0xff timeout to use, read comment in libata.h */ 3433 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) 3434 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); 3435 else 3436 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); 3437 3438 /* Slave readiness can't be tested separately from master. 
On 3439 * M/S emulation configuration, this function should be called 3440 * only on the master and it will handle both master and slave. 3441 */ 3442 WARN_ON(link == link->ap->slave_link); 3443 3444 if (time_after(nodev_deadline, deadline)) 3445 nodev_deadline = deadline; 3446 3447 while (1) { 3448 unsigned long now = jiffies; 3449 int ready, tmp; 3450 3451 ready = tmp = check_ready(link); 3452 if (ready > 0) 3453 return 0; 3454 3455 /* 3456 * -ENODEV could be transient. Ignore -ENODEV if link 3457 * is online. Also, some SATA devices take a long 3458 * time to clear 0xff after reset. Wait for 3459 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't 3460 * offline. 3461 * 3462 * Note that some PATA controllers (pata_ali) explode 3463 * if status register is read more than once when 3464 * there's no device attached. 3465 */ 3466 if (ready == -ENODEV) { 3467 if (ata_link_online(link)) 3468 ready = 0; 3469 else if ((link->ap->flags & ATA_FLAG_SATA) && 3470 !ata_link_offline(link) && 3471 time_before(now, nodev_deadline)) 3472 ready = 0; 3473 } 3474 3475 if (ready) 3476 return ready; 3477 if (time_after(now, deadline)) 3478 return -EBUSY; 3479 3480 if (!warned && time_after(now, start + 5 * HZ) && 3481 (deadline - now > 3 * HZ)) { 3482 ata_link_warn(link, 3483 "link is slow to respond, please be patient " 3484 "(ready=%d)\n", tmp); 3485 warned = 1; 3486 } 3487 3488 ata_msleep(link->ap, 50); 3489 } 3490 } 3491 3492 /** 3493 * ata_wait_after_reset - wait for link to become ready after reset 3494 * @link: link to be waited on 3495 * @deadline: deadline jiffies for the operation 3496 * @check_ready: callback to check link readiness 3497 * 3498 * Wait for @link to become ready after reset. 3499 * 3500 * LOCKING: 3501 * EH context. 3502 * 3503 * RETURNS: 3504 * 0 if @link is ready before @deadline; otherwise, -errno. 3505 */ 3506 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, 3507 int (*check_ready)(struct ata_link *link)) 3508 { 3509 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); 3510 3511 return ata_wait_ready(link, deadline, check_ready); 3512 } 3513 3514 /** 3515 * sata_link_debounce - debounce SATA phy status 3516 * @link: ATA link to debounce SATA phy status for 3517 * @params: timing parameters { interval, duration, timeout } in msec 3518 * @deadline: deadline jiffies for the operation 3519 * 3520 * Make sure SStatus of @link reaches stable state, determined by 3521 * holding the same value where DET is not 1 for @duration polled 3522 * every @interval, before @timeout. Timeout constrains the 3523 * beginning of the stable state. Because DET gets stuck at 1 on 3524 * some controllers after hot unplugging, this function waits 3525 * until timeout then returns 0 if DET is stable at 1. 3526 * 3527 * @timeout is further limited by @deadline. The sooner of the 3528 * two is used. 3529 * 3530 * LOCKING: 3531 * Kernel thread context (may sleep) 3532 * 3533 * RETURNS: 3534 * 0 on success, -errno on failure.
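 *
 * Minimal usage sketch (assuming EH context with a valid @link):
 *
 *	const unsigned long *params = sata_ehc_deb_timing(&link->eh_context);
 *	int rc = sata_link_debounce(link, params, deadline);
 *	if (rc)
 *		return rc;
 *
 * sata_link_resume() below issues exactly this call once SControl has
 * been rewritten.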
3535 */ 3536 int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3537 unsigned long deadline) 3538 { 3539 unsigned long interval = params[0]; 3540 unsigned long duration = params[1]; 3541 unsigned long last_jiffies, t; 3542 u32 last, cur; 3543 int rc; 3544 3545 t = ata_deadline(jiffies, params[2]); 3546 if (time_before(t, deadline)) 3547 deadline = t; 3548 3549 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3550 return rc; 3551 cur &= 0xf; 3552 3553 last = cur; 3554 last_jiffies = jiffies; 3555 3556 while (1) { 3557 ata_msleep(link->ap, interval); 3558 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3559 return rc; 3560 cur &= 0xf; 3561 3562 /* DET stable? */ 3563 if (cur == last) { 3564 if (cur == 1 && time_before(jiffies, deadline)) 3565 continue; 3566 if (time_after(jiffies, 3567 ata_deadline(last_jiffies, duration))) 3568 return 0; 3569 continue; 3570 } 3571 3572 /* unstable, start over */ 3573 last = cur; 3574 last_jiffies = jiffies; 3575 3576 /* Check deadline. If debouncing failed, return 3577 * -EPIPE to tell upper layer to lower link speed. 3578 */ 3579 if (time_after(jiffies, deadline)) 3580 return -EPIPE; 3581 } 3582 } 3583 3584 /** 3585 * sata_link_resume - resume SATA link 3586 * @link: ATA link to resume SATA 3587 * @params: timing parameters { interval, duratinon, timeout } in msec 3588 * @deadline: deadline jiffies for the operation 3589 * 3590 * Resume SATA phy @link and debounce it. 3591 * 3592 * LOCKING: 3593 * Kernel thread context (may sleep) 3594 * 3595 * RETURNS: 3596 * 0 on success, -errno on failure. 3597 */ 3598 int sata_link_resume(struct ata_link *link, const unsigned long *params, 3599 unsigned long deadline) 3600 { 3601 int tries = ATA_LINK_RESUME_TRIES; 3602 u32 scontrol, serror; 3603 int rc; 3604 3605 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3606 return rc; 3607 3608 /* 3609 * Writes to SControl sometimes get ignored under certain 3610 * controllers (ata_piix SIDPR). Make sure DET actually is 3611 * cleared. 3612 */ 3613 do { 3614 scontrol = (scontrol & 0x0f0) | 0x300; 3615 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3616 return rc; 3617 /* 3618 * Some PHYs react badly if SStatus is pounded 3619 * immediately after resuming. Delay 200ms before 3620 * debouncing. 3621 */ 3622 ata_msleep(link->ap, 200); 3623 3624 /* is SControl restored correctly? */ 3625 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3626 return rc; 3627 } while ((scontrol & 0xf0f) != 0x300 && --tries); 3628 3629 if ((scontrol & 0xf0f) != 0x300) { 3630 ata_link_warn(link, "failed to resume link (SControl %X)\n", 3631 scontrol); 3632 return 0; 3633 } 3634 3635 if (tries < ATA_LINK_RESUME_TRIES) 3636 ata_link_warn(link, "link resume succeeded after %d retries\n", 3637 ATA_LINK_RESUME_TRIES - tries); 3638 3639 if ((rc = sata_link_debounce(link, params, deadline))) 3640 return rc; 3641 3642 /* clear SError, some PHYs require this even for SRST to work */ 3643 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) 3644 rc = sata_scr_write(link, SCR_ERROR, serror); 3645 3646 return rc != -EINVAL ? rc : 0; 3647 } 3648 3649 /** 3650 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields 3651 * @link: ATA link to manipulate SControl for 3652 * @policy: LPM policy to configure 3653 * @spm_wakeup: initiate LPM transition to active state 3654 * 3655 * Manipulate the IPM field of the SControl register of @link 3656 * according to @policy. 
If @policy is ATA_LPM_MAX_POWER and 3657 * @spm_wakeup is %true, the SPM field is manipulated to wake up 3658 * the link. This function also clears PHYRDY_CHG before 3659 * returning. 3660 * 3661 * LOCKING: 3662 * EH context. 3663 * 3664 * RETURNS: 3665 * 0 on succes, -errno otherwise. 3666 */ 3667 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, 3668 bool spm_wakeup) 3669 { 3670 struct ata_eh_context *ehc = &link->eh_context; 3671 bool woken_up = false; 3672 u32 scontrol; 3673 int rc; 3674 3675 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 3676 if (rc) 3677 return rc; 3678 3679 switch (policy) { 3680 case ATA_LPM_MAX_POWER: 3681 /* disable all LPM transitions */ 3682 scontrol |= (0x7 << 8); 3683 /* initiate transition to active state */ 3684 if (spm_wakeup) { 3685 scontrol |= (0x4 << 12); 3686 woken_up = true; 3687 } 3688 break; 3689 case ATA_LPM_MED_POWER: 3690 /* allow LPM to PARTIAL */ 3691 scontrol &= ~(0x1 << 8); 3692 scontrol |= (0x6 << 8); 3693 break; 3694 case ATA_LPM_MIN_POWER: 3695 if (ata_link_nr_enabled(link) > 0) 3696 /* no restrictions on LPM transitions */ 3697 scontrol &= ~(0x7 << 8); 3698 else { 3699 /* empty port, power off */ 3700 scontrol &= ~0xf; 3701 scontrol |= (0x1 << 2); 3702 } 3703 break; 3704 default: 3705 WARN_ON(1); 3706 } 3707 3708 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 3709 if (rc) 3710 return rc; 3711 3712 /* give the link time to transit out of LPM state */ 3713 if (woken_up) 3714 msleep(10); 3715 3716 /* clear PHYRDY_CHG from SError */ 3717 ehc->i.serror &= ~SERR_PHYRDY_CHG; 3718 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); 3719 } 3720 3721 /** 3722 * ata_std_prereset - prepare for reset 3723 * @link: ATA link to be reset 3724 * @deadline: deadline jiffies for the operation 3725 * 3726 * @link is about to be reset. Initialize it. Failure from 3727 * prereset makes libata abort whole reset sequence and give up 3728 * that port, so prereset should be best-effort. It does its 3729 * best to prepare for reset sequence but if things go wrong, it 3730 * should just whine, not fail. 3731 * 3732 * LOCKING: 3733 * Kernel thread context (may sleep) 3734 * 3735 * RETURNS: 3736 * 0 on success, -errno otherwise. 3737 */ 3738 int ata_std_prereset(struct ata_link *link, unsigned long deadline) 3739 { 3740 struct ata_port *ap = link->ap; 3741 struct ata_eh_context *ehc = &link->eh_context; 3742 const unsigned long *timing = sata_ehc_deb_timing(ehc); 3743 int rc; 3744 3745 /* if we're about to do hardreset, nothing more to do */ 3746 if (ehc->i.action & ATA_EH_HARDRESET) 3747 return 0; 3748 3749 /* if SATA, resume link */ 3750 if (ap->flags & ATA_FLAG_SATA) { 3751 rc = sata_link_resume(link, timing, deadline); 3752 /* whine about phy resume failure but proceed */ 3753 if (rc && rc != -EOPNOTSUPP) 3754 ata_link_warn(link, 3755 "failed to resume link for reset (errno=%d)\n", 3756 rc); 3757 } 3758 3759 /* no point in trying softreset on offline link */ 3760 if (ata_phys_link_offline(link)) 3761 ehc->i.action &= ~ATA_EH_SOFTRESET; 3762 3763 return 0; 3764 } 3765 3766 /** 3767 * sata_link_hardreset - reset link via SATA phy reset 3768 * @link: link to reset 3769 * @timing: timing parameters { interval, duratinon, timeout } in msec 3770 * @deadline: deadline jiffies for the operation 3771 * @online: optional out parameter indicating link onlineness 3772 * @check_ready: optional callback to check link readiness 3773 * 3774 * SATA phy-reset @link using DET bits of SControl register. 
3775 * After hardreset, link readiness is waited upon using 3776 * ata_wait_ready() if @check_ready is specified. LLDs are 3777 * allowed to not specify @check_ready and wait itself after this 3778 * function returns. Device classification is LLD's 3779 * responsibility. 3780 * 3781 * *@online is set to one iff reset succeeded and @link is online 3782 * after reset. 3783 * 3784 * LOCKING: 3785 * Kernel thread context (may sleep) 3786 * 3787 * RETURNS: 3788 * 0 on success, -errno otherwise. 3789 */ 3790 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 3791 unsigned long deadline, 3792 bool *online, int (*check_ready)(struct ata_link *)) 3793 { 3794 u32 scontrol; 3795 int rc; 3796 3797 DPRINTK("ENTER\n"); 3798 3799 if (online) 3800 *online = false; 3801 3802 if (sata_set_spd_needed(link)) { 3803 /* SATA spec says nothing about how to reconfigure 3804 * spd. To be on the safe side, turn off phy during 3805 * reconfiguration. This works for at least ICH7 AHCI 3806 * and Sil3124. 3807 */ 3808 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3809 goto out; 3810 3811 scontrol = (scontrol & 0x0f0) | 0x304; 3812 3813 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3814 goto out; 3815 3816 sata_set_spd(link); 3817 } 3818 3819 /* issue phy wake/reset */ 3820 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3821 goto out; 3822 3823 scontrol = (scontrol & 0x0f0) | 0x301; 3824 3825 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 3826 goto out; 3827 3828 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3829 * 10.4.2 says at least 1 ms. 3830 */ 3831 ata_msleep(link->ap, 1); 3832 3833 /* bring link back */ 3834 rc = sata_link_resume(link, timing, deadline); 3835 if (rc) 3836 goto out; 3837 /* if link is offline nothing more to do */ 3838 if (ata_phys_link_offline(link)) 3839 goto out; 3840 3841 /* Link is online. From this point, -ENODEV too is an error. */ 3842 if (online) 3843 *online = true; 3844 3845 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { 3846 /* If PMP is supported, we have to do follow-up SRST. 3847 * Some PMPs don't send D2H Reg FIS after hardreset if 3848 * the first port is empty. Wait only for 3849 * ATA_TMOUT_PMP_SRST_WAIT. 3850 */ 3851 if (check_ready) { 3852 unsigned long pmp_deadline; 3853 3854 pmp_deadline = ata_deadline(jiffies, 3855 ATA_TMOUT_PMP_SRST_WAIT); 3856 if (time_after(pmp_deadline, deadline)) 3857 pmp_deadline = deadline; 3858 ata_wait_ready(link, pmp_deadline, check_ready); 3859 } 3860 rc = -EAGAIN; 3861 goto out; 3862 } 3863 3864 rc = 0; 3865 if (check_ready) 3866 rc = ata_wait_ready(link, deadline, check_ready); 3867 out: 3868 if (rc && rc != -EAGAIN) { 3869 /* online is set iff link is online && reset succeeded */ 3870 if (online) 3871 *online = false; 3872 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); 3873 } 3874 DPRINTK("EXIT, rc=%d\n", rc); 3875 return rc; 3876 } 3877 3878 /** 3879 * sata_std_hardreset - COMRESET w/o waiting or classification 3880 * @link: link to reset 3881 * @class: resulting class of attached device 3882 * @deadline: deadline jiffies for the operation 3883 * 3884 * Standard SATA COMRESET w/o waiting or classification. 3885 * 3886 * LOCKING: 3887 * Kernel thread context (may sleep) 3888 * 3889 * RETURNS: 3890 * 0 if link offline, -EAGAIN if link online, -errno on errors. 
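 *
 * Usage sketch (hypothetical LLD, for illustration only): a driver can
 * wire this up directly as its hardreset method, e.g.
 *
 *	static struct ata_port_operations foo_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.hardreset	= sata_std_hardreset,
 *	};
 *
 * The -EAGAIN result for an online link asks EH to follow up (typically
 * with a softreset) to classify the attached device.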
3891 */ 3892 int sata_std_hardreset(struct ata_link *link, unsigned int *class, 3893 unsigned long deadline) 3894 { 3895 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 3896 bool online; 3897 int rc; 3898 3899 /* do hardreset */ 3900 rc = sata_link_hardreset(link, timing, deadline, &online, NULL); 3901 return online ? -EAGAIN : rc; 3902 } 3903 3904 /** 3905 * ata_std_postreset - standard postreset callback 3906 * @link: the target ata_link 3907 * @classes: classes of attached devices 3908 * 3909 * This function is invoked after a successful reset. Note that 3910 * the device might have been reset more than once using 3911 * different reset methods before postreset is invoked. 3912 * 3913 * LOCKING: 3914 * Kernel thread context (may sleep) 3915 */ 3916 void ata_std_postreset(struct ata_link *link, unsigned int *classes) 3917 { 3918 u32 serror; 3919 3920 DPRINTK("ENTER\n"); 3921 3922 /* reset complete, clear SError */ 3923 if (!sata_scr_read(link, SCR_ERROR, &serror)) 3924 sata_scr_write(link, SCR_ERROR, serror); 3925 3926 /* print link status */ 3927 sata_print_link_status(link); 3928 3929 DPRINTK("EXIT\n"); 3930 } 3931 3932 /** 3933 * ata_dev_same_device - Determine whether new ID matches configured device 3934 * @dev: device to compare against 3935 * @new_class: class of the new device 3936 * @new_id: IDENTIFY page of the new device 3937 * 3938 * Compare @new_class and @new_id against @dev and determine 3939 * whether @dev is the device indicated by @new_class and 3940 * @new_id. 3941 * 3942 * LOCKING: 3943 * None. 3944 * 3945 * RETURNS: 3946 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 3947 */ 3948 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 3949 const u16 *new_id) 3950 { 3951 const u16 *old_id = dev->id; 3952 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 3953 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 3954 3955 if (dev->class != new_class) { 3956 ata_dev_info(dev, "class mismatch %d != %d\n", 3957 dev->class, new_class); 3958 return 0; 3959 } 3960 3961 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 3962 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 3963 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 3964 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 3965 3966 if (strcmp(model[0], model[1])) { 3967 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n", 3968 model[0], model[1]); 3969 return 0; 3970 } 3971 3972 if (strcmp(serial[0], serial[1])) { 3973 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n", 3974 serial[0], serial[1]); 3975 return 0; 3976 } 3977 3978 return 1; 3979 } 3980 3981 /** 3982 * ata_dev_reread_id - Re-read IDENTIFY data 3983 * @dev: target ATA device 3984 * @readid_flags: read ID flags 3985 * 3986 * Re-read IDENTIFY page and make sure @dev is still attached to 3987 * the port. 3988 * 3989 * LOCKING: 3990 * Kernel thread context (may sleep) 3991 * 3992 * RETURNS: 3993 * 0 on success, negative errno otherwise 3994 */ 3995 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 3996 { 3997 unsigned int class = dev->class; 3998 u16 *id = (void *)dev->link->ap->sector_buf; 3999 int rc; 4000 4001 /* read ID data */ 4002 rc = ata_dev_read_id(dev, &class, readid_flags, id); 4003 if (rc) 4004 return rc; 4005 4006 /* is the device still there? 
*/ 4007 if (!ata_dev_same_device(dev, class, id)) 4008 return -ENODEV; 4009 4010 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 4011 return 0; 4012 } 4013 4014 /** 4015 * ata_dev_revalidate - Revalidate ATA device 4016 * @dev: device to revalidate 4017 * @new_class: new class code 4018 * @readid_flags: read ID flags 4019 * 4020 * Re-read IDENTIFY page, make sure @dev is still attached to the 4021 * port and reconfigure it according to the new IDENTIFY page. 4022 * 4023 * LOCKING: 4024 * Kernel thread context (may sleep) 4025 * 4026 * RETURNS: 4027 * 0 on success, negative errno otherwise 4028 */ 4029 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 4030 unsigned int readid_flags) 4031 { 4032 u64 n_sectors = dev->n_sectors; 4033 u64 n_native_sectors = dev->n_native_sectors; 4034 int rc; 4035 4036 if (!ata_dev_enabled(dev)) 4037 return -ENODEV; 4038 4039 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 4040 if (ata_class_enabled(new_class) && 4041 new_class != ATA_DEV_ATA && 4042 new_class != ATA_DEV_ATAPI && 4043 new_class != ATA_DEV_ZAC && 4044 new_class != ATA_DEV_SEMB) { 4045 ata_dev_info(dev, "class mismatch %u != %u\n", 4046 dev->class, new_class); 4047 rc = -ENODEV; 4048 goto fail; 4049 } 4050 4051 /* re-read ID */ 4052 rc = ata_dev_reread_id(dev, readid_flags); 4053 if (rc) 4054 goto fail; 4055 4056 /* configure device according to the new ID */ 4057 rc = ata_dev_configure(dev); 4058 if (rc) 4059 goto fail; 4060 4061 /* verify n_sectors hasn't changed */ 4062 if (dev->class != ATA_DEV_ATA || !n_sectors || 4063 dev->n_sectors == n_sectors) 4064 return 0; 4065 4066 /* n_sectors has changed */ 4067 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n", 4068 (unsigned long long)n_sectors, 4069 (unsigned long long)dev->n_sectors); 4070 4071 /* 4072 * Something could have caused HPA to be unlocked 4073 * involuntarily. If n_native_sectors hasn't changed and the 4074 * new size matches it, keep the device. 4075 */ 4076 if (dev->n_native_sectors == n_native_sectors && 4077 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { 4078 ata_dev_warn(dev, 4079 "new n_sectors matches native, probably " 4080 "late HPA unlock, n_sectors updated\n"); 4081 /* use the larger n_sectors */ 4082 return 0; 4083 } 4084 4085 /* 4086 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try 4087 * unlocking HPA in those cases. 
4088 * 4089 * https://bugzilla.kernel.org/show_bug.cgi?id=15396 4090 */ 4091 if (dev->n_native_sectors == n_native_sectors && 4092 dev->n_sectors < n_sectors && n_sectors == n_native_sectors && 4093 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { 4094 ata_dev_warn(dev, 4095 "old n_sectors matches native, probably " 4096 "late HPA lock, will try to unlock HPA\n"); 4097 /* try unlocking HPA */ 4098 dev->flags |= ATA_DFLAG_UNLOCK_HPA; 4099 rc = -EIO; 4100 } else 4101 rc = -ENODEV; 4102 4103 /* restore original n_[native_]sectors and fail */ 4104 dev->n_native_sectors = n_native_sectors; 4105 dev->n_sectors = n_sectors; 4106 fail: 4107 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc); 4108 return rc; 4109 } 4110 4111 struct ata_blacklist_entry { 4112 const char *model_num; 4113 const char *model_rev; 4114 unsigned long horkage; 4115 }; 4116 4117 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4118 /* Devices with DMA related problems under Linux */ 4119 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4120 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4121 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4122 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4123 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4124 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4125 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4126 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4127 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4128 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA }, 4129 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4130 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4131 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4132 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4133 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4134 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA }, 4135 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4136 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4137 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4138 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4139 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4140 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4141 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4142 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4143 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4144 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4145 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4146 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4147 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4148 /* Odd clown on sil3726/4726 PMPs */ 4149 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4150 4151 /* Weird ATAPI devices */ 4152 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4153 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4154 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4155 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4156 4157 /* Devices we expect to fail diagnostics */ 4158 4159 /* Devices where NCQ should be avoided */ 4160 /* NCQ is slow */ 4161 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4162 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4163 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4164 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4165 /* NCQ is broken */ 4166 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4167 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4168 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4169 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4170 { "OCZ 
CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4171 4172 /* Seagate NCQ + FLUSH CACHE firmware bug */ 4173 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4174 ATA_HORKAGE_FIRMWARE_WARN }, 4175 4176 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4177 ATA_HORKAGE_FIRMWARE_WARN }, 4178 4179 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4180 ATA_HORKAGE_FIRMWARE_WARN }, 4181 4182 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4183 ATA_HORKAGE_FIRMWARE_WARN }, 4184 4185 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ 4186 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4187 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4188 4189 /* Blacklist entries taken from Silicon Image 3124/3132 4190 Windows driver .inf file - also several Linux problem reports */ 4191 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4192 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4193 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4194 4195 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ 4196 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, 4197 4198 /* devices which puke on READ_NATIVE_MAX */ 4199 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4200 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4201 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4202 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4203 4204 /* this one allows HPA unlocking but fails IOs on the area */ 4205 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, 4206 4207 /* Devices which report 1 sector over size HPA */ 4208 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4209 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4210 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4211 4212 /* Devices which get the IVB wrong */ 4213 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4214 /* Maybe we should just blacklist TSSTcorp... */ 4215 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, }, 4216 4217 /* Devices that do not need bridging limits applied */ 4218 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4219 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4220 4221 /* Devices which aren't very happy with higher link speeds */ 4222 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4223 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, }, 4224 4225 /* 4226 * Devices which choke on SETXFER. Applies only if both the 4227 * device and controller are SATA. 4228 */ 4229 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, 4230 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER }, 4231 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER }, 4232 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4233 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4234 4235 /* devices that don't properly handle queued TRIM commands */ 4236 { "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4237 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4238 { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4239 4240 /* 4241 * As defined, the DRAT (Deterministic Read After Trim) and RZAT 4242 * (Return Zero After Trim) flags in the ATA Command Set are 4243 * unreliable in the sense that they only define what happens if 4244 * the device successfully executed the DSM TRIM command. TRIM 4245 * is only advisory, however, and the device is free to silently 4246 * ignore all or parts of the request. 
4247 * 4248 * Whitelist drives that are known to reliably return zeroes 4249 * after TRIM. 4250 */ 4251 4252 /* 4253 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude 4254 * that model before whitelisting all other intel SSDs. 4255 */ 4256 { "INTEL*SSDSC2MH*", NULL, 0, }, 4257 4258 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4259 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4260 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4261 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4262 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4263 4264 /* 4265 * Some WD SATA-I drives spin up and down erratically when the link 4266 * is put into the slumber mode. We don't have full list of the 4267 * affected devices. Disable LPM if the device matches one of the 4268 * known prefixes and is SATA-1. As a side effect LPM partial is 4269 * lost too. 4270 * 4271 * https://bugzilla.kernel.org/show_bug.cgi?id=57211 4272 */ 4273 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4274 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4275 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4276 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4277 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4278 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4279 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4280 4281 /* End Marker */ 4282 { } 4283 }; 4284 4285 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4286 { 4287 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4288 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4289 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4290 4291 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4292 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4293 4294 while (ad->model_num) { 4295 if (glob_match(ad->model_num, model_num)) { 4296 if (ad->model_rev == NULL) 4297 return ad->horkage; 4298 if (glob_match(ad->model_rev, model_rev)) 4299 return ad->horkage; 4300 } 4301 ad++; 4302 } 4303 return 0; 4304 } 4305 4306 static int ata_dma_blacklisted(const struct ata_device *dev) 4307 { 4308 /* We don't support polling DMA. 4309 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4310 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4311 */ 4312 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4313 (dev->flags & ATA_DFLAG_CDB_INTR)) 4314 return 1; 4315 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4316 } 4317 4318 /** 4319 * ata_is_40wire - check drive side detection 4320 * @dev: device 4321 * 4322 * Perform drive side detection decoding, allowing for device vendors 4323 * who can't follow the documentation. 4324 */ 4325 4326 static int ata_is_40wire(struct ata_device *dev) 4327 { 4328 if (dev->horkage & ATA_HORKAGE_IVB) 4329 return ata_drive_40wire_relaxed(dev->id); 4330 return ata_drive_40wire(dev->id); 4331 } 4332 4333 /** 4334 * cable_is_40wire - 40/80/SATA decider 4335 * @ap: port to consider 4336 * 4337 * This function encapsulates the policy for speed management 4338 * in one place. At the moment we don't cache the result but 4339 * there is a good case for setting ap->cbl to the result when 4340 * we are called with unknown cables (and figuring out if it 4341 * impacts hotplug at all). 4342 * 4343 * Return 1 if the cable appears to be 40 wire. 
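 *
 * In terms of the cable types used elsewhere in this file, the
 * decision below roughly reads (summary only, see the code):
 *
 *	ATA_CBL_PATA40        -> 1 (40 wire)
 *	ATA_CBL_PATA80/SATA   -> 0
 *	ATA_CBL_PATA40_SHORT  -> 0 (short cable, 80 wire timings OK)
 *	anything else         -> 1 only if every enabled device on the
 *	                         port detects a 40 wire cable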
4344 */ 4345 4346 static int cable_is_40wire(struct ata_port *ap) 4347 { 4348 struct ata_link *link; 4349 struct ata_device *dev; 4350 4351 /* If the controller thinks we are 40 wire, we are. */ 4352 if (ap->cbl == ATA_CBL_PATA40) 4353 return 1; 4354 4355 /* If the controller thinks we are 80 wire, we are. */ 4356 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4357 return 0; 4358 4359 /* If the system is known to be 40 wire short cable (eg 4360 * laptop), then we allow 80 wire modes even if the drive 4361 * isn't sure. 4362 */ 4363 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4364 return 0; 4365 4366 /* If the controller doesn't know, we scan. 4367 * 4368 * Note: We look for all 40 wire detects at this point. Any 4369 * 80 wire detect is taken to be 80 wire cable because 4370 * - in many setups only the one drive (slave if present) will 4371 * give a valid detect 4372 * - if you have a non detect capable drive you don't want it 4373 * to colour the choice 4374 */ 4375 ata_for_each_link(link, ap, EDGE) { 4376 ata_for_each_dev(dev, link, ENABLED) { 4377 if (!ata_is_40wire(dev)) 4378 return 0; 4379 } 4380 } 4381 return 1; 4382 } 4383 4384 /** 4385 * ata_dev_xfermask - Compute supported xfermask of the given device 4386 * @dev: Device to compute xfermask for 4387 * 4388 * Compute supported xfermask of @dev and store it in 4389 * dev->*_mask. This function is responsible for applying all 4390 * known limits including host controller limits, device 4391 * blacklist, etc... 4392 * 4393 * LOCKING: 4394 * None. 4395 */ 4396 static void ata_dev_xfermask(struct ata_device *dev) 4397 { 4398 struct ata_link *link = dev->link; 4399 struct ata_port *ap = link->ap; 4400 struct ata_host *host = ap->host; 4401 unsigned long xfer_mask; 4402 4403 /* controller modes available */ 4404 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4405 ap->mwdma_mask, ap->udma_mask); 4406 4407 /* drive modes available */ 4408 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4409 dev->mwdma_mask, dev->udma_mask); 4410 xfer_mask &= ata_id_xfermask(dev->id); 4411 4412 /* 4413 * CFA Advanced TrueIDE timings are not allowed on a shared 4414 * cable 4415 */ 4416 if (ata_dev_pair(dev)) { 4417 /* No PIO5 or PIO6 */ 4418 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4419 /* No MWDMA3 or MWDMA 4 */ 4420 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4421 } 4422 4423 if (ata_dma_blacklisted(dev)) { 4424 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4425 ata_dev_warn(dev, 4426 "device is on DMA blacklist, disabling DMA\n"); 4427 } 4428 4429 if ((host->flags & ATA_HOST_SIMPLEX) && 4430 host->simplex_claimed && host->simplex_claimed != ap) { 4431 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4432 ata_dev_warn(dev, 4433 "simplex DMA is claimed by other device, disabling DMA\n"); 4434 } 4435 4436 if (ap->flags & ATA_FLAG_NO_IORDY) 4437 xfer_mask &= ata_pio_mask_no_iordy(dev); 4438 4439 if (ap->ops->mode_filter) 4440 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4441 4442 /* Apply cable rule here. Don't apply it early because when 4443 * we handle hot plug the cable type can itself change. 4444 * Check this last so that we know if the transfer rate was 4445 * solely limited by the cable. 4446 * Unknown or 80 wire cables reported host side are checked 4447 * drive side as well. Cases where we know a 40wire cable 4448 * is used safely for 80 are not checked here. 
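 *
 * The arithmetic below works on the packed mask layout used by
 * ata_pack_xfermask()/ata_unpack_xfermask(): 0xF8 << ATA_SHIFT_UDMA
 * covers UDMA3 and above (UDMA/44 and faster), so clearing those
 * bits on a 40-wire cable leaves at most UDMA/33.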
4449 */ 4450 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4451 /* UDMA/44 or higher would be available */ 4452 if (cable_is_40wire(ap)) { 4453 ata_dev_warn(dev, 4454 "limited to UDMA/33 due to 40-wire cable\n"); 4455 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4456 } 4457 4458 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4459 &dev->mwdma_mask, &dev->udma_mask); 4460 } 4461 4462 /** 4463 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4464 * @dev: Device to which command will be sent 4465 * 4466 * Issue SET FEATURES - XFER MODE command to device @dev 4467 * on port @ap. 4468 * 4469 * LOCKING: 4470 * PCI/etc. bus probe sem. 4471 * 4472 * RETURNS: 4473 * 0 on success, AC_ERR_* mask otherwise. 4474 */ 4475 4476 static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4477 { 4478 struct ata_taskfile tf; 4479 unsigned int err_mask; 4480 4481 /* set up set-features taskfile */ 4482 DPRINTK("set features - xfer mode\n"); 4483 4484 /* Some controllers and ATAPI devices show flaky interrupt 4485 * behavior after setting xfer mode. Use polling instead. 4486 */ 4487 ata_tf_init(dev, &tf); 4488 tf.command = ATA_CMD_SET_FEATURES; 4489 tf.feature = SETFEATURES_XFER; 4490 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4491 tf.protocol = ATA_PROT_NODATA; 4492 /* If we are using IORDY we must send the mode setting command */ 4493 if (ata_pio_need_iordy(dev)) 4494 tf.nsect = dev->xfer_mode; 4495 /* If the device has IORDY and the controller does not - turn it off */ 4496 else if (ata_id_has_iordy(dev->id)) 4497 tf.nsect = 0x01; 4498 else /* In the ancient relic department - skip all of this */ 4499 return 0; 4500 4501 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4502 4503 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4504 return err_mask; 4505 } 4506 4507 /** 4508 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 4509 * @dev: Device to which command will be sent 4510 * @enable: Whether to enable or disable the feature 4511 * @feature: The sector count represents the feature to set 4512 * 4513 * Issue SET FEATURES - SATA FEATURES command to device @dev 4514 * on port @ap with sector count 4515 * 4516 * LOCKING: 4517 * PCI/etc. bus probe sem. 4518 * 4519 * RETURNS: 4520 * 0 on success, AC_ERR_* mask otherwise. 4521 */ 4522 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) 4523 { 4524 struct ata_taskfile tf; 4525 unsigned int err_mask; 4526 4527 /* set up set-features taskfile */ 4528 DPRINTK("set features - SATA features\n"); 4529 4530 ata_tf_init(dev, &tf); 4531 tf.command = ATA_CMD_SET_FEATURES; 4532 tf.feature = enable; 4533 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4534 tf.protocol = ATA_PROT_NODATA; 4535 tf.nsect = feature; 4536 4537 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4538 4539 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4540 return err_mask; 4541 } 4542 EXPORT_SYMBOL_GPL(ata_dev_set_feature); 4543 4544 /** 4545 * ata_dev_init_params - Issue INIT DEV PARAMS command 4546 * @dev: Device to which command will be sent 4547 * @heads: Number of heads (taskfile parameter) 4548 * @sectors: Number of sectors (taskfile parameter) 4549 * 4550 * LOCKING: 4551 * Kernel thread context (may sleep) 4552 * 4553 * RETURNS: 4554 * 0 on success, AC_ERR_* mask otherwise. 4555 */ 4556 static unsigned int ata_dev_init_params(struct ata_device *dev, 4557 u16 heads, u16 sectors) 4558 { 4559 struct ata_taskfile tf; 4560 unsigned int err_mask; 4561 4562 /* Number of sectors per track 1-255. 
Number of heads 1-16 */ 4563 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4564 return AC_ERR_INVALID; 4565 4566 /* set up init dev params taskfile */ 4567 DPRINTK("init dev params \n"); 4568 4569 ata_tf_init(dev, &tf); 4570 tf.command = ATA_CMD_INIT_DEV_PARAMS; 4571 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4572 tf.protocol = ATA_PROT_NODATA; 4573 tf.nsect = sectors; 4574 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 4575 4576 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4577 /* A clean abort indicates an original or just out of spec drive 4578 and we should continue as we issue the setup based on the 4579 drive reported working geometry */ 4580 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4581 err_mask = 0; 4582 4583 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4584 return err_mask; 4585 } 4586 4587 /** 4588 * ata_sg_clean - Unmap DMA memory associated with command 4589 * @qc: Command containing DMA memory to be released 4590 * 4591 * Unmap all mapped DMA memory associated with this command. 4592 * 4593 * LOCKING: 4594 * spin_lock_irqsave(host lock) 4595 */ 4596 void ata_sg_clean(struct ata_queued_cmd *qc) 4597 { 4598 struct ata_port *ap = qc->ap; 4599 struct scatterlist *sg = qc->sg; 4600 int dir = qc->dma_dir; 4601 4602 WARN_ON_ONCE(sg == NULL); 4603 4604 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4605 4606 if (qc->n_elem) 4607 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 4608 4609 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4610 qc->sg = NULL; 4611 } 4612 4613 /** 4614 * atapi_check_dma - Check whether ATAPI DMA can be supported 4615 * @qc: Metadata associated with taskfile to check 4616 * 4617 * Allow low-level driver to filter ATA PACKET commands, returning 4618 * a status indicating whether or not it is OK to use DMA for the 4619 * supplied PACKET command. 4620 * 4621 * LOCKING: 4622 * spin_lock_irqsave(host lock) 4623 * 4624 * RETURNS: 0 when ATAPI DMA can be used 4625 * nonzero otherwise 4626 */ 4627 int atapi_check_dma(struct ata_queued_cmd *qc) 4628 { 4629 struct ata_port *ap = qc->ap; 4630 4631 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4632 * few ATAPI devices choke on such DMA requests. 4633 */ 4634 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4635 unlikely(qc->nbytes & 15)) 4636 return 1; 4637 4638 if (ap->ops->check_atapi_dma) 4639 return ap->ops->check_atapi_dma(qc); 4640 4641 return 0; 4642 } 4643 4644 /** 4645 * ata_std_qc_defer - Check whether a qc needs to be deferred 4646 * @qc: ATA command in question 4647 * 4648 * Non-NCQ commands cannot run with any other command, NCQ or 4649 * not. As upper layer only knows the queue depth, we are 4650 * responsible for maintaining exclusion. This function checks 4651 * whether a new command @qc can be issued. 4652 * 4653 * LOCKING: 4654 * spin_lock_irqsave(host lock) 4655 * 4656 * RETURNS: 4657 * ATA_DEFER_* if deferring is needed, 0 otherwise. 4658 */ 4659 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4660 { 4661 struct ata_link *link = qc->dev->link; 4662 4663 if (qc->tf.protocol == ATA_PROT_NCQ) { 4664 if (!ata_tag_valid(link->active_tag)) 4665 return 0; 4666 } else { 4667 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4668 return 0; 4669 } 4670 4671 return ATA_DEFER_LINK; 4672 } 4673 4674 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4675 4676 /** 4677 * ata_sg_init - Associate command with scatter-gather table. 4678 * @qc: Command to be associated 4679 * @sg: Scatter-gather table. 
4680 * @n_elem: Number of elements in s/g table. 4681 * 4682 * Initialize the data-related elements of queued_cmd @qc 4683 * to point to a scatter-gather table @sg, containing @n_elem 4684 * elements. 4685 * 4686 * LOCKING: 4687 * spin_lock_irqsave(host lock) 4688 */ 4689 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4690 unsigned int n_elem) 4691 { 4692 qc->sg = sg; 4693 qc->n_elem = n_elem; 4694 qc->cursg = qc->sg; 4695 } 4696 4697 /** 4698 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4699 * @qc: Command with scatter-gather table to be mapped. 4700 * 4701 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4702 * 4703 * LOCKING: 4704 * spin_lock_irqsave(host lock) 4705 * 4706 * RETURNS: 4707 * Zero on success, negative on error. 4708 * 4709 */ 4710 static int ata_sg_setup(struct ata_queued_cmd *qc) 4711 { 4712 struct ata_port *ap = qc->ap; 4713 unsigned int n_elem; 4714 4715 VPRINTK("ENTER, ata%u\n", ap->print_id); 4716 4717 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 4718 if (n_elem < 1) 4719 return -1; 4720 4721 DPRINTK("%d sg elements mapped\n", n_elem); 4722 qc->orig_n_elem = qc->n_elem; 4723 qc->n_elem = n_elem; 4724 qc->flags |= ATA_QCFLAG_DMAMAP; 4725 4726 return 0; 4727 } 4728 4729 /** 4730 * swap_buf_le16 - swap halves of 16-bit words in place 4731 * @buf: Buffer to swap 4732 * @buf_words: Number of 16-bit words in buffer. 4733 * 4734 * Swap halves of 16-bit words if needed to convert from 4735 * little-endian byte order to native cpu byte order, or 4736 * vice-versa. 4737 * 4738 * LOCKING: 4739 * Inherited from caller. 4740 */ 4741 void swap_buf_le16(u16 *buf, unsigned int buf_words) 4742 { 4743 #ifdef __BIG_ENDIAN 4744 unsigned int i; 4745 4746 for (i = 0; i < buf_words; i++) 4747 buf[i] = le16_to_cpu(buf[i]); 4748 #endif /* __BIG_ENDIAN */ 4749 } 4750 4751 /** 4752 * ata_qc_new - Request an available ATA command, for queueing 4753 * @ap: target port 4754 * 4755 * Some ATA host controllers may implement a queue depth which is less 4756 * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond 4757 * the hardware limitation. 4758 * 4759 * LOCKING: 4760 * None. 4761 */ 4762 4763 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4764 { 4765 struct ata_queued_cmd *qc = NULL; 4766 unsigned int max_queue = ap->host->n_tags; 4767 unsigned int i, tag; 4768 4769 /* no command while frozen */ 4770 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4771 return NULL; 4772 4773 for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) { 4774 if (ap->flags & ATA_FLAG_LOWTAG) 4775 tag = i; 4776 else 4777 tag = tag < max_queue ? tag : 0; 4778 4779 /* the last tag is reserved for internal command. */ 4780 if (tag == ATA_TAG_INTERNAL) 4781 continue; 4782 4783 if (!test_and_set_bit(tag, &ap->qc_allocated)) { 4784 qc = __ata_qc_from_tag(ap, tag); 4785 qc->tag = tag; 4786 ap->last_tag = tag; 4787 break; 4788 } 4789 } 4790 4791 return qc; 4792 } 4793 4794 /** 4795 * ata_qc_new_init - Request an available ATA command, and initialize it 4796 * @dev: Device from whom we request an available command structure 4797 * 4798 * LOCKING: 4799 * None. 
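 *
 * RETURNS:
 *	Pointer to an initialized qc on success, NULL when the port is
 *	frozen or no command tag is currently available.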
4800 */ 4801 4802 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) 4803 { 4804 struct ata_port *ap = dev->link->ap; 4805 struct ata_queued_cmd *qc; 4806 4807 qc = ata_qc_new(ap); 4808 if (qc) { 4809 qc->scsicmd = NULL; 4810 qc->ap = ap; 4811 qc->dev = dev; 4812 4813 ata_qc_reinit(qc); 4814 } 4815 4816 return qc; 4817 } 4818 4819 /** 4820 * ata_qc_free - free unused ata_queued_cmd 4821 * @qc: Command to complete 4822 * 4823 * Designed to free unused ata_queued_cmd object 4824 * in case something prevents using it. 4825 * 4826 * LOCKING: 4827 * spin_lock_irqsave(host lock) 4828 */ 4829 void ata_qc_free(struct ata_queued_cmd *qc) 4830 { 4831 struct ata_port *ap; 4832 unsigned int tag; 4833 4834 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4835 ap = qc->ap; 4836 4837 qc->flags = 0; 4838 tag = qc->tag; 4839 if (likely(ata_tag_valid(tag))) { 4840 qc->tag = ATA_TAG_POISON; 4841 clear_bit(tag, &ap->qc_allocated); 4842 } 4843 } 4844 4845 void __ata_qc_complete(struct ata_queued_cmd *qc) 4846 { 4847 struct ata_port *ap; 4848 struct ata_link *link; 4849 4850 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4851 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4852 ap = qc->ap; 4853 link = qc->dev->link; 4854 4855 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4856 ata_sg_clean(qc); 4857 4858 /* command should be marked inactive atomically with qc completion */ 4859 if (qc->tf.protocol == ATA_PROT_NCQ) { 4860 link->sactive &= ~(1 << qc->tag); 4861 if (!link->sactive) 4862 ap->nr_active_links--; 4863 } else { 4864 link->active_tag = ATA_TAG_POISON; 4865 ap->nr_active_links--; 4866 } 4867 4868 /* clear exclusive status */ 4869 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 4870 ap->excl_link == link)) 4871 ap->excl_link = NULL; 4872 4873 /* atapi: mark qc as inactive to prevent the interrupt handler 4874 * from completing the command twice later, before the error handler 4875 * is called. (when rc != 0 and atapi request sense is needed) 4876 */ 4877 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4878 ap->qc_active &= ~(1 << qc->tag); 4879 4880 /* call completion callback */ 4881 qc->complete_fn(qc); 4882 } 4883 4884 static void fill_result_tf(struct ata_queued_cmd *qc) 4885 { 4886 struct ata_port *ap = qc->ap; 4887 4888 qc->result_tf.flags = qc->tf.flags; 4889 ap->ops->qc_fill_rtf(qc); 4890 } 4891 4892 static void ata_verify_xfer(struct ata_queued_cmd *qc) 4893 { 4894 struct ata_device *dev = qc->dev; 4895 4896 if (ata_is_nodata(qc->tf.protocol)) 4897 return; 4898 4899 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 4900 return; 4901 4902 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 4903 } 4904 4905 /** 4906 * ata_qc_complete - Complete an active ATA command 4907 * @qc: Command to complete 4908 * 4909 * Indicate to the mid and upper layers that an ATA command has 4910 * completed, with either an ok or not-ok status. 4911 * 4912 * Refrain from calling this function multiple times when 4913 * successfully completing multiple NCQ commands. 4914 * ata_qc_complete_multiple() should be used instead, which will 4915 * properly update IRQ expect state. 4916 * 4917 * LOCKING: 4918 * spin_lock_irqsave(host lock) 4919 */ 4920 void ata_qc_complete(struct ata_queued_cmd *qc) 4921 { 4922 struct ata_port *ap = qc->ap; 4923 4924 /* XXX: New EH and old EH use different mechanisms to 4925 * synchronize EH with regular execution path. 4926 * 4927 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 
4928 * Normal execution path is responsible for not accessing a 4929 * failed qc. libata core enforces the rule by returning NULL 4930 * from ata_qc_from_tag() for failed qcs. 4931 * 4932 * Old EH depends on ata_qc_complete() nullifying completion 4933 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 4934 * not synchronize with interrupt handler. Only PIO task is 4935 * taken care of. 4936 */ 4937 if (ap->ops->error_handler) { 4938 struct ata_device *dev = qc->dev; 4939 struct ata_eh_info *ehi = &dev->link->eh_info; 4940 4941 if (unlikely(qc->err_mask)) 4942 qc->flags |= ATA_QCFLAG_FAILED; 4943 4944 /* 4945 * Finish internal commands without any further processing 4946 * and always with the result TF filled. 4947 */ 4948 if (unlikely(ata_tag_internal(qc->tag))) { 4949 fill_result_tf(qc); 4950 __ata_qc_complete(qc); 4951 return; 4952 } 4953 4954 /* 4955 * Non-internal qc has failed. Fill the result TF and 4956 * summon EH. 4957 */ 4958 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4959 fill_result_tf(qc); 4960 ata_qc_schedule_eh(qc); 4961 return; 4962 } 4963 4964 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 4965 4966 /* read result TF if requested */ 4967 if (qc->flags & ATA_QCFLAG_RESULT_TF) 4968 fill_result_tf(qc); 4969 4970 /* Some commands need post-processing after successful 4971 * completion. 4972 */ 4973 switch (qc->tf.command) { 4974 case ATA_CMD_SET_FEATURES: 4975 if (qc->tf.feature != SETFEATURES_WC_ON && 4976 qc->tf.feature != SETFEATURES_WC_OFF) 4977 break; 4978 /* fall through */ 4979 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 4980 case ATA_CMD_SET_MULTI: /* multi_count changed */ 4981 /* revalidate device */ 4982 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 4983 ata_port_schedule_eh(ap); 4984 break; 4985 4986 case ATA_CMD_SLEEP: 4987 dev->flags |= ATA_DFLAG_SLEEPING; 4988 break; 4989 } 4990 4991 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) 4992 ata_verify_xfer(qc); 4993 4994 __ata_qc_complete(qc); 4995 } else { 4996 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 4997 return; 4998 4999 /* read result TF if failed or requested */ 5000 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 5001 fill_result_tf(qc); 5002 5003 __ata_qc_complete(qc); 5004 } 5005 } 5006 5007 /** 5008 * ata_qc_complete_multiple - Complete multiple qcs successfully 5009 * @ap: port in question 5010 * @qc_active: new qc_active mask 5011 * 5012 * Complete in-flight commands. This function is meant to be 5013 * called from low-level driver's interrupt routine to complete 5014 * requests normally. ap->qc_active and @qc_active are compared 5015 * and commands are completed accordingly. 5016 * 5017 * Always use this function when completing multiple NCQ commands 5018 * from IRQ handlers instead of calling ata_qc_complete() 5019 * multiple times to keep IRQ expect status properly in sync. 5020 * 5021 * LOCKING: 5022 * spin_lock_irqsave(host lock) 5023 * 5024 * RETURNS: 5025 * Number of completed commands on success, -errno otherwise.
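 *
 * A minimal usage sketch (hypothetical LLD interrupt handler, the
 * register name is illustrative only):
 *
 *	u32 still_active = readl(mmio + MY_PORT_SACTIVE);
 *	ata_qc_complete_multiple(ap, still_active);
 *
 * i.e. pass the set of tags the hardware still reports as active;
 * every tag set in ap->qc_active but missing from that mask is
 * completed.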
5026 */ 5027 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) 5028 { 5029 int nr_done = 0; 5030 u32 done_mask; 5031 5032 done_mask = ap->qc_active ^ qc_active; 5033 5034 if (unlikely(done_mask & qc_active)) { 5035 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n", 5036 ap->qc_active, qc_active); 5037 return -EINVAL; 5038 } 5039 5040 while (done_mask) { 5041 struct ata_queued_cmd *qc; 5042 unsigned int tag = __ffs(done_mask); 5043 5044 qc = ata_qc_from_tag(ap, tag); 5045 if (qc) { 5046 ata_qc_complete(qc); 5047 nr_done++; 5048 } 5049 done_mask &= ~(1 << tag); 5050 } 5051 5052 return nr_done; 5053 } 5054 5055 /** 5056 * ata_qc_issue - issue taskfile to device 5057 * @qc: command to issue to device 5058 * 5059 * Prepare an ATA command to submission to device. 5060 * This includes mapping the data into a DMA-able 5061 * area, filling in the S/G table, and finally 5062 * writing the taskfile to hardware, starting the command. 5063 * 5064 * LOCKING: 5065 * spin_lock_irqsave(host lock) 5066 */ 5067 void ata_qc_issue(struct ata_queued_cmd *qc) 5068 { 5069 struct ata_port *ap = qc->ap; 5070 struct ata_link *link = qc->dev->link; 5071 u8 prot = qc->tf.protocol; 5072 5073 /* Make sure only one non-NCQ command is outstanding. The 5074 * check is skipped for old EH because it reuses active qc to 5075 * request ATAPI sense. 5076 */ 5077 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5078 5079 if (ata_is_ncq(prot)) { 5080 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); 5081 5082 if (!link->sactive) 5083 ap->nr_active_links++; 5084 link->sactive |= 1 << qc->tag; 5085 } else { 5086 WARN_ON_ONCE(link->sactive); 5087 5088 ap->nr_active_links++; 5089 link->active_tag = qc->tag; 5090 } 5091 5092 qc->flags |= ATA_QCFLAG_ACTIVE; 5093 ap->qc_active |= 1 << qc->tag; 5094 5095 /* 5096 * We guarantee to LLDs that they will have at least one 5097 * non-zero sg if the command is a data command. 5098 */ 5099 if (WARN_ON_ONCE(ata_is_data(prot) && 5100 (!qc->sg || !qc->n_elem || !qc->nbytes))) 5101 goto sys_err; 5102 5103 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5104 (ap->flags & ATA_FLAG_PIO_DMA))) 5105 if (ata_sg_setup(qc)) 5106 goto sys_err; 5107 5108 /* if device is sleeping, schedule reset and abort the link */ 5109 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5110 link->eh_info.action |= ATA_EH_RESET; 5111 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5112 ata_link_abort(link); 5113 return; 5114 } 5115 5116 ap->ops->qc_prep(qc); 5117 5118 qc->err_mask |= ap->ops->qc_issue(qc); 5119 if (unlikely(qc->err_mask)) 5120 goto err; 5121 return; 5122 5123 sys_err: 5124 qc->err_mask |= AC_ERR_SYSTEM; 5125 err: 5126 ata_qc_complete(qc); 5127 } 5128 5129 /** 5130 * sata_scr_valid - test whether SCRs are accessible 5131 * @link: ATA link to test SCR accessibility for 5132 * 5133 * Test whether SCRs are accessible for @link. 5134 * 5135 * LOCKING: 5136 * None. 5137 * 5138 * RETURNS: 5139 * 1 if SCRs are accessible, 0 otherwise. 5140 */ 5141 int sata_scr_valid(struct ata_link *link) 5142 { 5143 struct ata_port *ap = link->ap; 5144 5145 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5146 } 5147 5148 /** 5149 * sata_scr_read - read SCR register of the specified port 5150 * @link: ATA link to read SCR for 5151 * @reg: SCR to read 5152 * @val: Place to store read value 5153 * 5154 * Read SCR register @reg of @link into *@val. 
This function is 5155 * guaranteed to succeed if @link is ap->link, the cable type of 5156 * the port is SATA and the port implements ->scr_read. 5157 * 5158 * LOCKING: 5159 * None if @link is ap->link. Kernel thread context otherwise. 5160 * 5161 * RETURNS: 5162 * 0 on success, negative errno on failure. 5163 */ 5164 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5165 { 5166 if (ata_is_host_link(link)) { 5167 if (sata_scr_valid(link)) 5168 return link->ap->ops->scr_read(link, reg, val); 5169 return -EOPNOTSUPP; 5170 } 5171 5172 return sata_pmp_scr_read(link, reg, val); 5173 } 5174 5175 /** 5176 * sata_scr_write - write SCR register of the specified port 5177 * @link: ATA link to write SCR for 5178 * @reg: SCR to write 5179 * @val: value to write 5180 * 5181 * Write @val to SCR register @reg of @link. This function is 5182 * guaranteed to succeed if @link is ap->link, the cable type of 5183 * the port is SATA and the port implements ->scr_read. 5184 * 5185 * LOCKING: 5186 * None if @link is ap->link. Kernel thread context otherwise. 5187 * 5188 * RETURNS: 5189 * 0 on success, negative errno on failure. 5190 */ 5191 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5192 { 5193 if (ata_is_host_link(link)) { 5194 if (sata_scr_valid(link)) 5195 return link->ap->ops->scr_write(link, reg, val); 5196 return -EOPNOTSUPP; 5197 } 5198 5199 return sata_pmp_scr_write(link, reg, val); 5200 } 5201 5202 /** 5203 * sata_scr_write_flush - write SCR register of the specified port and flush 5204 * @link: ATA link to write SCR for 5205 * @reg: SCR to write 5206 * @val: value to write 5207 * 5208 * This function is identical to sata_scr_write() except that this 5209 * function performs flush after writing to the register. 5210 * 5211 * LOCKING: 5212 * None if @link is ap->link. Kernel thread context otherwise. 5213 * 5214 * RETURNS: 5215 * 0 on success, negative errno on failure. 5216 */ 5217 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5218 { 5219 if (ata_is_host_link(link)) { 5220 int rc; 5221 5222 if (sata_scr_valid(link)) { 5223 rc = link->ap->ops->scr_write(link, reg, val); 5224 if (rc == 0) 5225 rc = link->ap->ops->scr_read(link, reg, &val); 5226 return rc; 5227 } 5228 return -EOPNOTSUPP; 5229 } 5230 5231 return sata_pmp_scr_write(link, reg, val); 5232 } 5233 5234 /** 5235 * ata_phys_link_online - test whether the given link is online 5236 * @link: ATA link to test 5237 * 5238 * Test whether @link is online. Note that this function returns 5239 * 0 if online status of @link cannot be obtained, so 5240 * ata_link_online(link) != !ata_link_offline(link). 5241 * 5242 * LOCKING: 5243 * None. 5244 * 5245 * RETURNS: 5246 * True if the port online status is available and online. 5247 */ 5248 bool ata_phys_link_online(struct ata_link *link) 5249 { 5250 u32 sstatus; 5251 5252 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5253 ata_sstatus_online(sstatus)) 5254 return true; 5255 return false; 5256 } 5257 5258 /** 5259 * ata_phys_link_offline - test whether the given link is offline 5260 * @link: ATA link to test 5261 * 5262 * Test whether @link is offline. Note that this function 5263 * returns 0 if offline status of @link cannot be obtained, so 5264 * ata_link_online(link) != !ata_link_offline(link). 5265 * 5266 * LOCKING: 5267 * None. 5268 * 5269 * RETURNS: 5270 * True if the port offline status is available and offline. 
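 *
 * (Both of these helpers derive "online" from the DET field of
 * SStatus via ata_sstatus_online(); DET == 0x3 means device
 * presence detected and PHY communication established.)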
5271 */ 5272 bool ata_phys_link_offline(struct ata_link *link) 5273 { 5274 u32 sstatus; 5275 5276 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5277 !ata_sstatus_online(sstatus)) 5278 return true; 5279 return false; 5280 } 5281 5282 /** 5283 * ata_link_online - test whether the given link is online 5284 * @link: ATA link to test 5285 * 5286 * Test whether @link is online. This is identical to 5287 * ata_phys_link_online() when there's no slave link. When 5288 * there's a slave link, this function should only be called on 5289 * the master link and will return true if any of M/S links is 5290 * online. 5291 * 5292 * LOCKING: 5293 * None. 5294 * 5295 * RETURNS: 5296 * True if the port online status is available and online. 5297 */ 5298 bool ata_link_online(struct ata_link *link) 5299 { 5300 struct ata_link *slave = link->ap->slave_link; 5301 5302 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5303 5304 return ata_phys_link_online(link) || 5305 (slave && ata_phys_link_online(slave)); 5306 } 5307 5308 /** 5309 * ata_link_offline - test whether the given link is offline 5310 * @link: ATA link to test 5311 * 5312 * Test whether @link is offline. This is identical to 5313 * ata_phys_link_offline() when there's no slave link. When 5314 * there's a slave link, this function should only be called on 5315 * the master link and will return true if both M/S links are 5316 * offline. 5317 * 5318 * LOCKING: 5319 * None. 5320 * 5321 * RETURNS: 5322 * True if the port offline status is available and offline. 5323 */ 5324 bool ata_link_offline(struct ata_link *link) 5325 { 5326 struct ata_link *slave = link->ap->slave_link; 5327 5328 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5329 5330 return ata_phys_link_offline(link) && 5331 (!slave || ata_phys_link_offline(slave)); 5332 } 5333 5334 #ifdef CONFIG_PM 5335 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, 5336 unsigned int action, unsigned int ehi_flags, 5337 bool async) 5338 { 5339 struct ata_link *link; 5340 unsigned long flags; 5341 5342 /* Previous resume operation might still be in 5343 * progress. Wait for PM_PENDING to clear. 5344 */ 5345 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5346 ata_port_wait_eh(ap); 5347 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5348 } 5349 5350 /* request PM ops to EH */ 5351 spin_lock_irqsave(ap->lock, flags); 5352 5353 ap->pm_mesg = mesg; 5354 ap->pflags |= ATA_PFLAG_PM_PENDING; 5355 ata_for_each_link(link, ap, HOST_FIRST) { 5356 link->eh_info.action |= action; 5357 link->eh_info.flags |= ehi_flags; 5358 } 5359 5360 ata_port_schedule_eh(ap); 5361 5362 spin_unlock_irqrestore(ap->lock, flags); 5363 5364 if (!async) { 5365 ata_port_wait_eh(ap); 5366 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5367 } 5368 } 5369 5370 /* 5371 * On some hardware, device fails to respond after spun down for suspend. As 5372 * the device won't be used before being resumed, we don't need to touch the 5373 * device. Ask EH to skip the usual stuff and proceed directly to suspend. 
5374 * 5375 * http://thread.gmane.org/gmane.linux.ide/46764 5376 */ 5377 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET 5378 | ATA_EHI_NO_AUTOPSY 5379 | ATA_EHI_NO_RECOVERY; 5380 5381 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg) 5382 { 5383 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false); 5384 } 5385 5386 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg) 5387 { 5388 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true); 5389 } 5390 5391 static int ata_port_pm_suspend(struct device *dev) 5392 { 5393 struct ata_port *ap = to_ata_port(dev); 5394 5395 if (pm_runtime_suspended(dev)) 5396 return 0; 5397 5398 ata_port_suspend(ap, PMSG_SUSPEND); 5399 return 0; 5400 } 5401 5402 static int ata_port_pm_freeze(struct device *dev) 5403 { 5404 struct ata_port *ap = to_ata_port(dev); 5405 5406 if (pm_runtime_suspended(dev)) 5407 return 0; 5408 5409 ata_port_suspend(ap, PMSG_FREEZE); 5410 return 0; 5411 } 5412 5413 static int ata_port_pm_poweroff(struct device *dev) 5414 { 5415 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE); 5416 return 0; 5417 } 5418 5419 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY 5420 | ATA_EHI_QUIET; 5421 5422 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg) 5423 { 5424 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false); 5425 } 5426 5427 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg) 5428 { 5429 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true); 5430 } 5431 5432 static int ata_port_pm_resume(struct device *dev) 5433 { 5434 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME); 5435 pm_runtime_disable(dev); 5436 pm_runtime_set_active(dev); 5437 pm_runtime_enable(dev); 5438 return 0; 5439 } 5440 5441 /* 5442 * For ODDs, the upper layer will poll for media change every few seconds, 5443 * which will make it enter and leave suspend state every few seconds. And 5444 * as each suspend will cause a hard/soft reset, the gain of runtime suspend 5445 * is very little and the ODD may malfunction after constantly being reset. 5446 * So the idle callback here will not proceed to suspend if a non-ZPODD capable 5447 * ODD is attached to the port. 
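 * (Returning -EBUSY from the runtime_idle callback below is what
 * vetoes runtime suspend in that case.)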
5448 */ 5449 static int ata_port_runtime_idle(struct device *dev) 5450 { 5451 struct ata_port *ap = to_ata_port(dev); 5452 struct ata_link *link; 5453 struct ata_device *adev; 5454 5455 ata_for_each_link(link, ap, HOST_FIRST) { 5456 ata_for_each_dev(adev, link, ENABLED) 5457 if (adev->class == ATA_DEV_ATAPI && 5458 !zpodd_dev_enabled(adev)) 5459 return -EBUSY; 5460 } 5461 5462 return 0; 5463 } 5464 5465 static int ata_port_runtime_suspend(struct device *dev) 5466 { 5467 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND); 5468 return 0; 5469 } 5470 5471 static int ata_port_runtime_resume(struct device *dev) 5472 { 5473 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME); 5474 return 0; 5475 } 5476 5477 static const struct dev_pm_ops ata_port_pm_ops = { 5478 .suspend = ata_port_pm_suspend, 5479 .resume = ata_port_pm_resume, 5480 .freeze = ata_port_pm_freeze, 5481 .thaw = ata_port_pm_resume, 5482 .poweroff = ata_port_pm_poweroff, 5483 .restore = ata_port_pm_resume, 5484 5485 .runtime_suspend = ata_port_runtime_suspend, 5486 .runtime_resume = ata_port_runtime_resume, 5487 .runtime_idle = ata_port_runtime_idle, 5488 }; 5489 5490 /* sas ports don't participate in pm runtime management of ata_ports, 5491 * and need to resume ata devices at the domain level, not the per-port 5492 * level. sas suspend/resume is async to allow parallel port recovery 5493 * since sas has multiple ata_port instances per Scsi_Host. 5494 */ 5495 void ata_sas_port_suspend(struct ata_port *ap) 5496 { 5497 ata_port_suspend_async(ap, PMSG_SUSPEND); 5498 } 5499 EXPORT_SYMBOL_GPL(ata_sas_port_suspend); 5500 5501 void ata_sas_port_resume(struct ata_port *ap) 5502 { 5503 ata_port_resume_async(ap, PMSG_RESUME); 5504 } 5505 EXPORT_SYMBOL_GPL(ata_sas_port_resume); 5506 5507 /** 5508 * ata_host_suspend - suspend host 5509 * @host: host to suspend 5510 * @mesg: PM message 5511 * 5512 * Suspend @host. Actual operation is performed by port suspend. 5513 */ 5514 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5515 { 5516 host->dev->power.power_state = mesg; 5517 return 0; 5518 } 5519 5520 /** 5521 * ata_host_resume - resume host 5522 * @host: host to resume 5523 * 5524 * Resume @host. Actual operation is performed by port resume. 5525 */ 5526 void ata_host_resume(struct ata_host *host) 5527 { 5528 host->dev->power.power_state = PMSG_ON; 5529 } 5530 #endif 5531 5532 struct device_type ata_port_type = { 5533 .name = "ata_port", 5534 #ifdef CONFIG_PM 5535 .pm = &ata_port_pm_ops, 5536 #endif 5537 }; 5538 5539 /** 5540 * ata_dev_init - Initialize an ata_device structure 5541 * @dev: Device structure to initialize 5542 * 5543 * Initialize @dev in preparation for probing. 5544 * 5545 * LOCKING: 5546 * Inherited from caller. 5547 */ 5548 void ata_dev_init(struct ata_device *dev) 5549 { 5550 struct ata_link *link = ata_dev_phys_link(dev); 5551 struct ata_port *ap = link->ap; 5552 unsigned long flags; 5553 5554 /* SATA spd limit is bound to the attached device, reset together */ 5555 link->sata_spd_limit = link->hw_sata_spd_limit; 5556 link->sata_spd = 0; 5557 5558 /* High bits of dev->flags are used to record warm plug 5559 * requests which occur asynchronously. Synchronize using 5560 * host lock. 
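 * Only the flags covered by ATA_DFLAG_INIT_MASK are cleared below;
 * the asynchronously-set high bits survive this reinit.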
5561 */ 5562 spin_lock_irqsave(ap->lock, flags); 5563 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5564 dev->horkage = 0; 5565 spin_unlock_irqrestore(ap->lock, flags); 5566 5567 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5568 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5569 dev->pio_mask = UINT_MAX; 5570 dev->mwdma_mask = UINT_MAX; 5571 dev->udma_mask = UINT_MAX; 5572 } 5573 5574 /** 5575 * ata_link_init - Initialize an ata_link structure 5576 * @ap: ATA port link is attached to 5577 * @link: Link structure to initialize 5578 * @pmp: Port multiplier port number 5579 * 5580 * Initialize @link. 5581 * 5582 * LOCKING: 5583 * Kernel thread context (may sleep) 5584 */ 5585 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5586 { 5587 int i; 5588 5589 /* clear everything except for devices */ 5590 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, 5591 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); 5592 5593 link->ap = ap; 5594 link->pmp = pmp; 5595 link->active_tag = ATA_TAG_POISON; 5596 link->hw_sata_spd_limit = UINT_MAX; 5597 5598 /* can't use iterator, ap isn't initialized yet */ 5599 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5600 struct ata_device *dev = &link->device[i]; 5601 5602 dev->link = link; 5603 dev->devno = dev - link->device; 5604 #ifdef CONFIG_ATA_ACPI 5605 dev->gtf_filter = ata_acpi_gtf_filter; 5606 #endif 5607 ata_dev_init(dev); 5608 } 5609 } 5610 5611 /** 5612 * sata_link_init_spd - Initialize link->sata_spd_limit 5613 * @link: Link to configure sata_spd_limit for 5614 * 5615 * Initialize @link->[hw_]sata_spd_limit to the currently 5616 * configured value. 5617 * 5618 * LOCKING: 5619 * Kernel thread context (may sleep). 5620 * 5621 * RETURNS: 5622 * 0 on success, -errno on failure. 5623 */ 5624 int sata_link_init_spd(struct ata_link *link) 5625 { 5626 u8 spd; 5627 int rc; 5628 5629 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5630 if (rc) 5631 return rc; 5632 5633 spd = (link->saved_scontrol >> 4) & 0xf; 5634 if (spd) 5635 link->hw_sata_spd_limit &= (1 << spd) - 1; 5636 5637 ata_force_link_limits(link); 5638 5639 link->sata_spd_limit = link->hw_sata_spd_limit; 5640 5641 return 0; 5642 } 5643 5644 /** 5645 * ata_port_alloc - allocate and initialize basic ATA port resources 5646 * @host: ATA host this allocated port belongs to 5647 * 5648 * Allocate and initialize basic ATA port resources. 5649 * 5650 * RETURNS: 5651 * Allocate ATA port on success, NULL on failure. 5652 * 5653 * LOCKING: 5654 * Inherited from calling layer (may sleep). 
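 *
 * The returned port is owned by @host; it is freed, together with
 * its links, by ata_host_release() when the host's device resources
 * are torn down, so callers must not kfree() it directly.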
5655 */ 5656 struct ata_port *ata_port_alloc(struct ata_host *host) 5657 { 5658 struct ata_port *ap; 5659 5660 DPRINTK("ENTER\n"); 5661 5662 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5663 if (!ap) 5664 return NULL; 5665 5666 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; 5667 ap->lock = &host->lock; 5668 ap->print_id = -1; 5669 ap->local_port_no = -1; 5670 ap->host = host; 5671 ap->dev = host->dev; 5672 5673 #if defined(ATA_VERBOSE_DEBUG) 5674 /* turn on all debugging levels */ 5675 ap->msg_enable = 0x00FF; 5676 #elif defined(ATA_DEBUG) 5677 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5678 #else 5679 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5680 #endif 5681 5682 mutex_init(&ap->scsi_scan_mutex); 5683 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5684 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5685 INIT_LIST_HEAD(&ap->eh_done_q); 5686 init_waitqueue_head(&ap->eh_wait_q); 5687 init_completion(&ap->park_req_pending); 5688 init_timer_deferrable(&ap->fastdrain_timer); 5689 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 5690 ap->fastdrain_timer.data = (unsigned long)ap; 5691 5692 ap->cbl = ATA_CBL_NONE; 5693 5694 ata_link_init(ap, &ap->link, 0); 5695 5696 #ifdef ATA_IRQ_TRAP 5697 ap->stats.unhandled_irq = 1; 5698 ap->stats.idle_irq = 1; 5699 #endif 5700 ata_sff_port_init(ap); 5701 5702 return ap; 5703 } 5704 5705 static void ata_host_release(struct device *gendev, void *res) 5706 { 5707 struct ata_host *host = dev_get_drvdata(gendev); 5708 int i; 5709 5710 for (i = 0; i < host->n_ports; i++) { 5711 struct ata_port *ap = host->ports[i]; 5712 5713 if (!ap) 5714 continue; 5715 5716 if (ap->scsi_host) 5717 scsi_host_put(ap->scsi_host); 5718 5719 kfree(ap->pmp_link); 5720 kfree(ap->slave_link); 5721 kfree(ap); 5722 host->ports[i] = NULL; 5723 } 5724 5725 dev_set_drvdata(gendev, NULL); 5726 } 5727 5728 /** 5729 * ata_host_alloc - allocate and init basic ATA host resources 5730 * @dev: generic device this host is associated with 5731 * @max_ports: maximum number of ATA ports associated with this host 5732 * 5733 * Allocate and initialize basic ATA host resources. LLD calls 5734 * this function to allocate a host, initializes it fully and 5735 * attaches it using ata_host_register(). 5736 * 5737 * @max_ports ports are allocated and host->n_ports is 5738 * initialized to @max_ports. The caller is allowed to decrease 5739 * host->n_ports before calling ata_host_register(). The unused 5740 * ports will be automatically freed on registration. 5741 * 5742 * RETURNS: 5743 * Allocate ATA host on success, NULL on failure. 5744 * 5745 * LOCKING: 5746 * Inherited from calling layer (may sleep). 
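 *
 * A typical LLD probe path looks roughly like this (sketch only,
 * error handling omitted; pdev and my_sht are placeholders):
 *
 *	host = ata_host_alloc(&pdev->dev, n_ports);
 *	... set up host->ports[i]->ops, transfer masks, iomaps ...
 *	rc = ata_host_start(host);
 *	rc = ata_host_register(host, &my_sht);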
5747 */ 5748 struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 5749 { 5750 struct ata_host *host; 5751 size_t sz; 5752 int i; 5753 5754 DPRINTK("ENTER\n"); 5755 5756 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 5757 return NULL; 5758 5759 /* alloc a container for our list of ATA ports (buses) */ 5760 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 5761 /* alloc a container for our list of ATA ports (buses) */ 5762 host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 5763 if (!host) 5764 goto err_out; 5765 5766 devres_add(dev, host); 5767 dev_set_drvdata(dev, host); 5768 5769 spin_lock_init(&host->lock); 5770 mutex_init(&host->eh_mutex); 5771 host->dev = dev; 5772 host->n_ports = max_ports; 5773 5774 /* allocate ports bound to this host */ 5775 for (i = 0; i < max_ports; i++) { 5776 struct ata_port *ap; 5777 5778 ap = ata_port_alloc(host); 5779 if (!ap) 5780 goto err_out; 5781 5782 ap->port_no = i; 5783 host->ports[i] = ap; 5784 } 5785 5786 devres_remove_group(dev, NULL); 5787 return host; 5788 5789 err_out: 5790 devres_release_group(dev, NULL); 5791 return NULL; 5792 } 5793 5794 /** 5795 * ata_host_alloc_pinfo - alloc host and init with port_info array 5796 * @dev: generic device this host is associated with 5797 * @ppi: array of ATA port_info to initialize host with 5798 * @n_ports: number of ATA ports attached to this host 5799 * 5800 * Allocate ATA host and initialize with info from @ppi. If NULL 5801 * terminated, @ppi may contain fewer entries than @n_ports. The 5802 * last entry will be used for the remaining ports. 5803 * 5804 * RETURNS: 5805 * Allocate ATA host on success, NULL on failure. 5806 * 5807 * LOCKING: 5808 * Inherited from calling layer (may sleep). 5809 */ 5810 struct ata_host *ata_host_alloc_pinfo(struct device *dev, 5811 const struct ata_port_info * const * ppi, 5812 int n_ports) 5813 { 5814 const struct ata_port_info *pi; 5815 struct ata_host *host; 5816 int i, j; 5817 5818 host = ata_host_alloc(dev, n_ports); 5819 if (!host) 5820 return NULL; 5821 5822 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 5823 struct ata_port *ap = host->ports[i]; 5824 5825 if (ppi[j]) 5826 pi = ppi[j++]; 5827 5828 ap->pio_mask = pi->pio_mask; 5829 ap->mwdma_mask = pi->mwdma_mask; 5830 ap->udma_mask = pi->udma_mask; 5831 ap->flags |= pi->flags; 5832 ap->link.flags |= pi->link_flags; 5833 ap->ops = pi->port_ops; 5834 5835 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 5836 host->ops = pi->port_ops; 5837 } 5838 5839 return host; 5840 } 5841 5842 /** 5843 * ata_slave_link_init - initialize slave link 5844 * @ap: port to initialize slave link for 5845 * 5846 * Create and initialize slave link for @ap. This enables slave 5847 * link handling on the port. 5848 * 5849 * In libata, a port contains links and a link contains devices. 5850 * There is single host link but if a PMP is attached to it, 5851 * there can be multiple fan-out links. On SATA, there's usually 5852 * a single device connected to a link but PATA and SATA 5853 * controllers emulating TF based interface can have two - master 5854 * and slave. 5855 * 5856 * However, there are a few controllers which don't fit into this 5857 * abstraction too well - SATA controllers which emulate TF 5858 * interface with both master and slave devices but also have 5859 * separate SCR register sets for each device. These controllers 5860 * need separate links for physical link handling 5861 * (e.g. 
onlineness, link speed) but should be treated like a 5862 * traditional M/S controller for everything else (e.g. command 5863 * issue, softreset). 5864 * 5865 * slave_link is libata's way of handling this class of 5866 * controllers without impacting core layer too much. For 5867 * anything other than physical link handling, the default host 5868 * link is used for both master and slave. For physical link 5869 * handling, separate @ap->slave_link is used. All dirty details 5870 * are implemented inside libata core layer. From LLD's POV, the 5871 * only difference is that prereset, hardreset and postreset are 5872 * called once more for the slave link, so the reset sequence 5873 * looks like the following. 5874 * 5875 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> 5876 * softreset(M) -> postreset(M) -> postreset(S) 5877 * 5878 * Note that softreset is called only for the master. Softreset 5879 * resets both M/S by definition, so SRST on master should handle 5880 * both (the standard method will work just fine). 5881 * 5882 * LOCKING: 5883 * Should be called before host is registered. 5884 * 5885 * RETURNS: 5886 * 0 on success, -errno on failure. 5887 */ 5888 int ata_slave_link_init(struct ata_port *ap) 5889 { 5890 struct ata_link *link; 5891 5892 WARN_ON(ap->slave_link); 5893 WARN_ON(ap->flags & ATA_FLAG_PMP); 5894 5895 link = kzalloc(sizeof(*link), GFP_KERNEL); 5896 if (!link) 5897 return -ENOMEM; 5898 5899 ata_link_init(ap, link, 1); 5900 ap->slave_link = link; 5901 return 0; 5902 } 5903 5904 static void ata_host_stop(struct device *gendev, void *res) 5905 { 5906 struct ata_host *host = dev_get_drvdata(gendev); 5907 int i; 5908 5909 WARN_ON(!(host->flags & ATA_HOST_STARTED)); 5910 5911 for (i = 0; i < host->n_ports; i++) { 5912 struct ata_port *ap = host->ports[i]; 5913 5914 if (ap->ops->port_stop) 5915 ap->ops->port_stop(ap); 5916 } 5917 5918 if (host->ops->host_stop) 5919 host->ops->host_stop(host); 5920 } 5921 5922 /** 5923 * ata_finalize_port_ops - finalize ata_port_operations 5924 * @ops: ata_port_operations to finalize 5925 * 5926 * An ata_port_operations can inherit from another ops and that 5927 * ops can again inherit from another. This can go on as many 5928 * times as necessary as long as there is no loop in the 5929 * inheritance chain. 5930 * 5931 * Ops tables are finalized when the host is started. NULL or 5932 * unspecified entries are inherited from the closest ancestor 5933 * which has the method and the entry is populated with it. 5934 * After finalization, the ops table directly points to all the 5935 * methods and ->inherits is no longer necessary and cleared. 5936 * 5937 * Using ATA_OP_NULL, inheriting ops can force a method to NULL. 5938 * 5939 * LOCKING: 5940 * None.
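 *
 * Example (sketch; my_ops and my_qc_issue are made-up LLD symbols):
 *
 *	struct ata_port_operations my_ops = {
 *		.inherits	= &sata_port_ops,
 *		.qc_issue	= my_qc_issue,
 *	};
 *
 * After finalization every hook my_ops left NULL is filled in from
 * sata_port_ops (and transitively from its ancestors), any entry
 * forced to ATA_OP_NULL becomes a plain NULL, and .inherits is
 * cleared.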
5941 */ 5942 static void ata_finalize_port_ops(struct ata_port_operations *ops) 5943 { 5944 static DEFINE_SPINLOCK(lock); 5945 const struct ata_port_operations *cur; 5946 void **begin = (void **)ops; 5947 void **end = (void **)&ops->inherits; 5948 void **pp; 5949 5950 if (!ops || !ops->inherits) 5951 return; 5952 5953 spin_lock(&lock); 5954 5955 for (cur = ops->inherits; cur; cur = cur->inherits) { 5956 void **inherit = (void **)cur; 5957 5958 for (pp = begin; pp < end; pp++, inherit++) 5959 if (!*pp) 5960 *pp = *inherit; 5961 } 5962 5963 for (pp = begin; pp < end; pp++) 5964 if (IS_ERR(*pp)) 5965 *pp = NULL; 5966 5967 ops->inherits = NULL; 5968 5969 spin_unlock(&lock); 5970 } 5971 5972 /** 5973 * ata_host_start - start and freeze ports of an ATA host 5974 * @host: ATA host to start ports for 5975 * 5976 * Start and then freeze ports of @host. Started status is 5977 * recorded in host->flags, so this function can be called 5978 * multiple times. Ports are guaranteed to get started only 5979 * once. If host->ops isn't initialized yet, it's set to the 5980 * first non-dummy port ops. 5981 * 5982 * LOCKING: 5983 * Inherited from calling layer (may sleep). 5984 * 5985 * RETURNS: 5986 * 0 if all ports are started successfully, -errno otherwise. 5987 */ 5988 int ata_host_start(struct ata_host *host) 5989 { 5990 int have_stop = 0; 5991 void *start_dr = NULL; 5992 int i, rc; 5993 5994 if (host->flags & ATA_HOST_STARTED) 5995 return 0; 5996 5997 ata_finalize_port_ops(host->ops); 5998 5999 for (i = 0; i < host->n_ports; i++) { 6000 struct ata_port *ap = host->ports[i]; 6001 6002 ata_finalize_port_ops(ap->ops); 6003 6004 if (!host->ops && !ata_port_is_dummy(ap)) 6005 host->ops = ap->ops; 6006 6007 if (ap->ops->port_stop) 6008 have_stop = 1; 6009 } 6010 6011 if (host->ops->host_stop) 6012 have_stop = 1; 6013 6014 if (have_stop) { 6015 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 6016 if (!start_dr) 6017 return -ENOMEM; 6018 } 6019 6020 for (i = 0; i < host->n_ports; i++) { 6021 struct ata_port *ap = host->ports[i]; 6022 6023 if (ap->ops->port_start) { 6024 rc = ap->ops->port_start(ap); 6025 if (rc) { 6026 if (rc != -ENODEV) 6027 dev_err(host->dev, 6028 "failed to start port %d (errno=%d)\n", 6029 i, rc); 6030 goto err_out; 6031 } 6032 } 6033 ata_eh_freeze_port(ap); 6034 } 6035 6036 if (start_dr) 6037 devres_add(host->dev, start_dr); 6038 host->flags |= ATA_HOST_STARTED; 6039 return 0; 6040 6041 err_out: 6042 while (--i >= 0) { 6043 struct ata_port *ap = host->ports[i]; 6044 6045 if (ap->ops->port_stop) 6046 ap->ops->port_stop(ap); 6047 } 6048 devres_free(start_dr); 6049 return rc; 6050 } 6051 6052 /** 6053 * ata_host_init - Initialize a host struct for sas (ipr, libsas) 6054 * @host: host to initialize 6055 * @dev: device host is attached to 6056 * @ops: port_ops 6057 * 6058 */ 6059 void ata_host_init(struct ata_host *host, struct device *dev, 6060 struct ata_port_operations *ops) 6061 { 6062 spin_lock_init(&host->lock); 6063 mutex_init(&host->eh_mutex); 6064 host->n_tags = ATA_MAX_QUEUE - 1; 6065 host->dev = dev; 6066 host->ops = ops; 6067 } 6068 6069 void __ata_port_probe(struct ata_port *ap) 6070 { 6071 struct ata_eh_info *ehi = &ap->link.eh_info; 6072 unsigned long flags; 6073 6074 /* kick EH for boot probing */ 6075 spin_lock_irqsave(ap->lock, flags); 6076 6077 ehi->probe_mask |= ATA_ALL_DEVICES; 6078 ehi->action |= ATA_EH_RESET; 6079 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 6080 6081 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 6082 ap->pflags |= ATA_PFLAG_LOADING; 6083
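	/*
	 * Hand the port over to EH while still holding ap->lock; the reset
	 * and probe requested above run later in EH context.
	 */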
ata_port_schedule_eh(ap); 6084 6085 spin_unlock_irqrestore(ap->lock, flags); 6086 } 6087 6088 int ata_port_probe(struct ata_port *ap) 6089 { 6090 int rc = 0; 6091 6092 if (ap->ops->error_handler) { 6093 __ata_port_probe(ap); 6094 ata_port_wait_eh(ap); 6095 } else { 6096 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 6097 rc = ata_bus_probe(ap); 6098 DPRINTK("ata%u: bus probe end\n", ap->print_id); 6099 } 6100 return rc; 6101 } 6102 6103 6104 static void async_port_probe(void *data, async_cookie_t cookie) 6105 { 6106 struct ata_port *ap = data; 6107 6108 /* 6109 * If we're not allowed to scan this host in parallel, 6110 * we need to wait until all previous scans have completed 6111 * before going further. 6112 * Jeff Garzik says this is only within a controller, so we 6113 * don't need to wait for port 0, only for later ports. 6114 */ 6115 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) 6116 async_synchronize_cookie(cookie); 6117 6118 (void)ata_port_probe(ap); 6119 6120 /* in order to keep device order, we need to synchronize at this point */ 6121 async_synchronize_cookie(cookie); 6122 6123 ata_scsi_scan_host(ap, 1); 6124 } 6125 6126 /** 6127 * ata_host_register - register initialized ATA host 6128 * @host: ATA host to register 6129 * @sht: template for SCSI host 6130 * 6131 * Register initialized ATA host. @host is allocated using 6132 * ata_host_alloc() and fully initialized by LLD. This function 6133 * starts ports, registers @host with ATA and SCSI layers and 6134 * probes registered devices. 6135 * 6136 * LOCKING: 6137 * Inherited from calling layer (may sleep). 6138 * 6139 * RETURNS: 6140 * 0 on success, -errno otherwise. 6141 */ 6142 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 6143 { 6144 int i, rc; 6145 6146 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1); 6147 6148 /* host must have been started */ 6149 if (!(host->flags & ATA_HOST_STARTED)) { 6150 dev_err(host->dev, "BUG: trying to register unstarted host\n"); 6151 WARN_ON(1); 6152 return -EINVAL; 6153 } 6154 6155 /* Blow away unused ports. This happens when LLD can't 6156 * determine the exact number of ports to allocate at 6157 * allocation time. 6158 */ 6159 for (i = host->n_ports; host->ports[i]; i++) 6160 kfree(host->ports[i]); 6161 6162 /* give ports names and add SCSI hosts */ 6163 for (i = 0; i < host->n_ports; i++) { 6164 host->ports[i]->print_id = atomic_inc_return(&ata_print_id); 6165 host->ports[i]->local_port_no = i + 1; 6166 } 6167 6168 /* Create associated sysfs transport objects */ 6169 for (i = 0; i < host->n_ports; i++) { 6170 rc = ata_tport_add(host->dev, host->ports[i]); 6171 if (rc) { 6172 goto err_tadd; 6173 } 6174 } 6175 6176 rc = ata_scsi_add_hosts(host, sht); 6177 if (rc) 6178 goto err_tadd; 6179 6180 /* set cable, sata_spd_limit and report */ 6181 for (i = 0; i < host->n_ports; i++) { 6182 struct ata_port *ap = host->ports[i]; 6183 unsigned long xfer_mask; 6184 6185 /* set SATA cable type if still unset */ 6186 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 6187 ap->cbl = ATA_CBL_SATA; 6188 6189 /* init sata_spd_limit to the current value */ 6190 sata_link_init_spd(&ap->link); 6191 if (ap->slave_link) 6192 sata_link_init_spd(ap->slave_link); 6193 6194 /* print per-port info to dmesg */ 6195 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6196 ap->udma_mask); 6197 6198 if (!ata_port_is_dummy(ap)) { 6199 ata_port_info(ap, "%cATA max %s %s\n", 6200 (ap->flags & ATA_FLAG_SATA) ?
'S' : 'P', 6201 ata_mode_string(xfer_mask), 6202 ap->link.eh_info.desc); 6203 ata_ehi_clear_desc(&ap->link.eh_info); 6204 } else 6205 ata_port_info(ap, "DUMMY\n"); 6206 } 6207 6208 /* perform each probe asynchronously */ 6209 for (i = 0; i < host->n_ports; i++) { 6210 struct ata_port *ap = host->ports[i]; 6211 async_schedule(async_port_probe, ap); 6212 } 6213 6214 return 0; 6215 6216 err_tadd: 6217 while (--i >= 0) { 6218 ata_tport_delete(host->ports[i]); 6219 } 6220 return rc; 6221 6222 } 6223 6224 /** 6225 * ata_host_activate - start host, request IRQ and register it 6226 * @host: target ATA host 6227 * @irq: IRQ to request 6228 * @irq_handler: irq_handler used when requesting IRQ 6229 * @irq_flags: irq_flags used when requesting IRQ 6230 * @sht: scsi_host_template to use when registering the host 6231 * 6232 * After allocating an ATA host and initializing it, most libata 6233 * LLDs perform three steps to activate the host - start host, 6234 * request IRQ and register it. This helper takes necessary 6235 * arguments and performs the three steps in one go. 6236 * 6237 * An invalid IRQ skips the IRQ registration and expects the host to 6238 * have set polling mode on the port. In this case, @irq_handler 6239 * should be NULL. 6240 * 6241 * LOCKING: 6242 * Inherited from calling layer (may sleep). 6243 * 6244 * RETURNS: 6245 * 0 on success, -errno otherwise. 6246 */ 6247 int ata_host_activate(struct ata_host *host, int irq, 6248 irq_handler_t irq_handler, unsigned long irq_flags, 6249 struct scsi_host_template *sht) 6250 { 6251 int i, rc; 6252 6253 rc = ata_host_start(host); 6254 if (rc) 6255 return rc; 6256 6257 /* Special case for polling mode */ 6258 if (!irq) { 6259 WARN_ON(irq_handler); 6260 return ata_host_register(host, sht); 6261 } 6262 6263 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 6264 dev_name(host->dev), host); 6265 if (rc) 6266 return rc; 6267 6268 for (i = 0; i < host->n_ports; i++) 6269 ata_port_desc(host->ports[i], "irq %d", irq); 6270 6271 rc = ata_host_register(host, sht); 6272 /* if failed, just free the IRQ and leave ports alone */ 6273 if (rc) 6274 devm_free_irq(host->dev, irq, host); 6275 6276 return rc; 6277 } 6278 6279 /** 6280 * ata_port_detach - Detach ATA port in preparation of device removal 6281 * @ap: ATA port to be detached 6282 * 6283 * Detach all ATA devices and the associated SCSI devices of @ap; 6284 * then, remove the associated SCSI host. @ap is guaranteed to 6285 * be quiescent on return from this function. 6286 * 6287 * LOCKING: 6288 * Kernel thread context (may sleep).
6289 */ 6290 static void ata_port_detach(struct ata_port *ap) 6291 { 6292 unsigned long flags; 6293 struct ata_link *link; 6294 struct ata_device *dev; 6295 6296 if (!ap->ops->error_handler) 6297 goto skip_eh; 6298 6299 /* tell EH we're leaving & flush EH */ 6300 spin_lock_irqsave(ap->lock, flags); 6301 ap->pflags |= ATA_PFLAG_UNLOADING; 6302 ata_port_schedule_eh(ap); 6303 spin_unlock_irqrestore(ap->lock, flags); 6304 6305 /* wait till EH commits suicide */ 6306 ata_port_wait_eh(ap); 6307 6308 /* it better be dead now */ 6309 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); 6310 6311 cancel_delayed_work_sync(&ap->hotplug_task); 6312 6313 skip_eh: 6314 /* clean up zpodd on port removal */ 6315 ata_for_each_link(link, ap, HOST_FIRST) { 6316 ata_for_each_dev(dev, link, ALL) { 6317 if (zpodd_dev_enabled(dev)) 6318 zpodd_exit(dev); 6319 } 6320 } 6321 if (ap->pmp_link) { 6322 int i; 6323 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 6324 ata_tlink_delete(&ap->pmp_link[i]); 6325 } 6326 /* remove the associated SCSI host */ 6327 scsi_remove_host(ap->scsi_host); 6328 ata_tport_delete(ap); 6329 } 6330 6331 /** 6332 * ata_host_detach - Detach all ports of an ATA host 6333 * @host: Host to detach 6334 * 6335 * Detach all ports of @host. 6336 * 6337 * LOCKING: 6338 * Kernel thread context (may sleep). 6339 */ 6340 void ata_host_detach(struct ata_host *host) 6341 { 6342 int i; 6343 6344 for (i = 0; i < host->n_ports; i++) 6345 ata_port_detach(host->ports[i]); 6346 6347 /* the host is dead now, dissociate ACPI */ 6348 ata_acpi_dissociate(host); 6349 } 6350 6351 #ifdef CONFIG_PCI 6352 6353 /** 6354 * ata_pci_remove_one - PCI layer callback for device removal 6355 * @pdev: PCI device that was removed 6356 * 6357 * PCI layer indicates to libata via this hook that hot-unplug or 6358 * module unload event has occurred. Detach all ports. Resource 6359 * release is handled via devres. 6360 * 6361 * LOCKING: 6362 * Inherited from PCI layer (may sleep). 6363 */ 6364 void ata_pci_remove_one(struct pci_dev *pdev) 6365 { 6366 struct ata_host *host = pci_get_drvdata(pdev); 6367 6368 ata_host_detach(host); 6369 } 6370 6371 /* move to PCI subsystem */ 6372 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 6373 { 6374 unsigned long tmp = 0; 6375 6376 switch (bits->width) { 6377 case 1: { 6378 u8 tmp8 = 0; 6379 pci_read_config_byte(pdev, bits->reg, &tmp8); 6380 tmp = tmp8; 6381 break; 6382 } 6383 case 2: { 6384 u16 tmp16 = 0; 6385 pci_read_config_word(pdev, bits->reg, &tmp16); 6386 tmp = tmp16; 6387 break; 6388 } 6389 case 4: { 6390 u32 tmp32 = 0; 6391 pci_read_config_dword(pdev, bits->reg, &tmp32); 6392 tmp = tmp32; 6393 break; 6394 } 6395 6396 default: 6397 return -EINVAL; 6398 } 6399 6400 tmp &= bits->mask; 6401 6402 return (tmp == bits->val) ? 
1 : 0; 6403 } 6404 6405 #ifdef CONFIG_PM 6406 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 6407 { 6408 pci_save_state(pdev); 6409 pci_disable_device(pdev); 6410 6411 if (mesg.event & PM_EVENT_SLEEP) 6412 pci_set_power_state(pdev, PCI_D3hot); 6413 } 6414 6415 int ata_pci_device_do_resume(struct pci_dev *pdev) 6416 { 6417 int rc; 6418 6419 pci_set_power_state(pdev, PCI_D0); 6420 pci_restore_state(pdev); 6421 6422 rc = pcim_enable_device(pdev); 6423 if (rc) { 6424 dev_err(&pdev->dev, 6425 "failed to enable device after resume (%d)\n", rc); 6426 return rc; 6427 } 6428 6429 pci_set_master(pdev); 6430 return 0; 6431 } 6432 6433 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 6434 { 6435 struct ata_host *host = pci_get_drvdata(pdev); 6436 int rc = 0; 6437 6438 rc = ata_host_suspend(host, mesg); 6439 if (rc) 6440 return rc; 6441 6442 ata_pci_device_do_suspend(pdev, mesg); 6443 6444 return 0; 6445 } 6446 6447 int ata_pci_device_resume(struct pci_dev *pdev) 6448 { 6449 struct ata_host *host = pci_get_drvdata(pdev); 6450 int rc; 6451 6452 rc = ata_pci_device_do_resume(pdev); 6453 if (rc == 0) 6454 ata_host_resume(host); 6455 return rc; 6456 } 6457 #endif /* CONFIG_PM */ 6458 6459 #endif /* CONFIG_PCI */ 6460 6461 /** 6462 * ata_platform_remove_one - Platform layer callback for device removal 6463 * @pdev: Platform device that was removed 6464 * 6465 * Platform layer indicates to libata via this hook that hot-unplug or 6466 * module unload event has occurred. Detach all ports. Resource 6467 * release is handled via devres. 6468 * 6469 * LOCKING: 6470 * Inherited from platform layer (may sleep). 6471 */ 6472 int ata_platform_remove_one(struct platform_device *pdev) 6473 { 6474 struct ata_host *host = platform_get_drvdata(pdev); 6475 6476 ata_host_detach(host); 6477 6478 return 0; 6479 } 6480 6481 static int __init ata_parse_force_one(char **cur, 6482 struct ata_force_ent *force_ent, 6483 const char **reason) 6484 { 6485 /* FIXME: Currently, there's no way to tag init const data and 6486 * using __initdata causes build failure on some versions of 6487 * gcc. Once __initdataconst is implemented, add const to the 6488 * following structure. 
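 *
 * Each entry below is matched against the VAL part of a "[ID:]VAL"
 * element of the libata.force= list, where ID is PORT[.DEVICE]; for
 * example (illustrative values only), "libata.force=1.00:noncq" would
 * apply the "noncq" entry to device 0 on port 1.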
6489 */ 6490 static struct ata_force_param force_tbl[] __initdata = { 6491 { "40c", .cbl = ATA_CBL_PATA40 }, 6492 { "80c", .cbl = ATA_CBL_PATA80 }, 6493 { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, 6494 { "unk", .cbl = ATA_CBL_PATA_UNK }, 6495 { "ign", .cbl = ATA_CBL_PATA_IGN }, 6496 { "sata", .cbl = ATA_CBL_SATA }, 6497 { "1.5Gbps", .spd_limit = 1 }, 6498 { "3.0Gbps", .spd_limit = 2 }, 6499 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 6500 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 6501 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, 6502 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 6503 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 6504 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 6505 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, 6506 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, 6507 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, 6508 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, 6509 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, 6510 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, 6511 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, 6512 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, 6513 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, 6514 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6515 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6516 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6517 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6518 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6519 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6520 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6521 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6522 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6523 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6524 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6525 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6526 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6527 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6528 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6529 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6530 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6531 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6532 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6533 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6534 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6535 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, 6536 { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, 6537 { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, 6538 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6539 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, 6540 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, 6541 { "disable", .horkage_on = ATA_HORKAGE_DISABLE }, 6542 }; 6543 char *start = *cur, *p = *cur; 6544 char *id, *val, *endp; 6545 const struct ata_force_param *match_fp = NULL; 6546 int nr_matches = 0, i; 6547 6548 /* find where this param ends and update *cur */ 6549 while (*p != '\0' && *p != ',') 6550 p++; 6551 6552 if (*p == '\0') 6553 *cur = p; 6554 else 6555 *cur = p + 1; 6556 6557 *p = '\0'; 6558 6559 /* parse */ 6560 p = strchr(start, ':'); 6561 if (!p) { 6562 val = strstrip(start); 6563 goto parse_val; 6564 } 6565 *p = '\0'; 6566 6567 id = strstrip(start); 6568 val = strstrip(p + 1); 6569 6570 /* parse id */ 6571 p = strchr(id, '.'); 6572 if (p) { 6573 *p++ = '\0'; 6574 force_ent->device = simple_strtoul(p, &endp, 
10); 6575 if (p == endp || *endp != '\0') { 6576 *reason = "invalid device"; 6577 return -EINVAL; 6578 } 6579 } 6580 6581 force_ent->port = simple_strtoul(id, &endp, 10); 6582 if (id == endp || *endp != '\0') { 6583 *reason = "invalid port/link"; 6584 return -EINVAL; 6585 } 6586 6587 parse_val: 6588 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */ 6589 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { 6590 const struct ata_force_param *fp = &force_tbl[i]; 6591 6592 if (strncasecmp(val, fp->name, strlen(val))) 6593 continue; 6594 6595 nr_matches++; 6596 match_fp = fp; 6597 6598 if (strcasecmp(val, fp->name) == 0) { 6599 nr_matches = 1; 6600 break; 6601 } 6602 } 6603 6604 if (!nr_matches) { 6605 *reason = "unknown value"; 6606 return -EINVAL; 6607 } 6608 if (nr_matches > 1) { 6609 *reason = "ambiguous value"; 6610 return -EINVAL; 6611 } 6612 6613 force_ent->param = *match_fp; 6614 6615 return 0; 6616 } 6617 6618 static void __init ata_parse_force_param(void) 6619 { 6620 int idx = 0, size = 1; 6621 int last_port = -1, last_device = -1; 6622 char *p, *cur, *next; 6623 6624 /* calculate maximum number of params and allocate force_tbl */ 6625 for (p = ata_force_param_buf; *p; p++) 6626 if (*p == ',') 6627 size++; 6628 6629 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL); 6630 if (!ata_force_tbl) { 6631 printk(KERN_WARNING "ata: failed to extend force table, " 6632 "libata.force ignored\n"); 6633 return; 6634 } 6635 6636 /* parse and populate the table */ 6637 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { 6638 const char *reason = ""; 6639 struct ata_force_ent te = { .port = -1, .device = -1 }; 6640 6641 next = cur; 6642 if (ata_parse_force_one(&next, &te, &reason)) { 6643 printk(KERN_WARNING "ata: failed to parse force " 6644 "parameter \"%s\" (%s)\n", 6645 cur, reason); 6646 continue; 6647 } 6648 6649 if (te.port == -1) { 6650 te.port = last_port; 6651 te.device = last_device; 6652 } 6653 6654 ata_force_tbl[idx++] = te; 6655 6656 last_port = te.port; 6657 last_device = te.device; 6658 } 6659 6660 ata_force_tbl_size = idx; 6661 } 6662 6663 static int __init ata_init(void) 6664 { 6665 int rc; 6666 6667 ata_parse_force_param(); 6668 6669 rc = ata_sff_init(); 6670 if (rc) { 6671 kfree(ata_force_tbl); 6672 return rc; 6673 } 6674 6675 libata_transport_init(); 6676 ata_scsi_transport_template = ata_attach_transport(); 6677 if (!ata_scsi_transport_template) { 6678 ata_sff_exit(); 6679 rc = -ENOMEM; 6680 goto err_out; 6681 } 6682 6683 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6684 return 0; 6685 6686 err_out: 6687 return rc; 6688 } 6689 6690 static void __exit ata_exit(void) 6691 { 6692 ata_release_transport(ata_scsi_transport_template); 6693 libata_transport_exit(); 6694 ata_sff_exit(); 6695 kfree(ata_force_tbl); 6696 } 6697 6698 subsys_initcall(ata_init); 6699 module_exit(ata_exit); 6700 6701 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1); 6702 6703 int ata_ratelimit(void) 6704 { 6705 return __ratelimit(&ratelimit); 6706 } 6707 6708 /** 6709 * ata_msleep - ATA EH owner aware msleep 6710 * @ap: ATA port to attribute the sleep to 6711 * @msecs: duration to sleep in milliseconds 6712 * 6713 * Sleeps @msecs. If the current task is the owner of @ap's EH, the 6714 * ownership is released before going to sleep and reacquired 6715 * after the sleep is complete. IOW, other ports sharing the 6716 * @ap->host will be allowed to own the EH while this task is 6717 * sleeping. 6718 * 6719 * LOCKING: 6720 * Might sleep.
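 *
 * For illustration (an assumed call site, not taken from this file),
 * reset and probe paths typically use
 *
 *	ata_msleep(ap, 10);
 *
 * rather than msleep(10) so that sibling ports on the same host can
 * make EH progress while this port waits.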
6721 */ 6722 void ata_msleep(struct ata_port *ap, unsigned int msecs) 6723 { 6724 bool owns_eh = ap && ap->host->eh_owner == current; 6725 6726 if (owns_eh) 6727 ata_eh_release(ap); 6728 6729 msleep(msecs); 6730 6731 if (owns_eh) 6732 ata_eh_acquire(ap); 6733 } 6734 6735 /** 6736 * ata_wait_register - wait until register value changes 6737 * @ap: ATA port to wait register for, can be NULL 6738 * @reg: IO-mapped register 6739 * @mask: Mask to apply to read register value 6740 * @val: Wait condition 6741 * @interval: polling interval in milliseconds 6742 * @timeout: timeout in milliseconds 6743 * 6744 * Waiting for some bits of register to change is a common 6745 * operation for ATA controllers. This function reads 32bit LE 6746 * IO-mapped register @reg and tests for the following condition. 6747 * 6748 * (*@reg & mask) != val 6749 * 6750 * If the condition is met, it returns; otherwise, the process is 6751 * repeated after @interval_msec until timeout. 6752 * 6753 * LOCKING: 6754 * Kernel thread context (may sleep) 6755 * 6756 * RETURNS: 6757 * The final register value. 6758 */ 6759 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, 6760 unsigned long interval, unsigned long timeout) 6761 { 6762 unsigned long deadline; 6763 u32 tmp; 6764 6765 tmp = ioread32(reg); 6766 6767 /* Calculate timeout _after_ the first read to make sure 6768 * preceding writes reach the controller before starting to 6769 * eat away the timeout. 6770 */ 6771 deadline = ata_deadline(jiffies, timeout); 6772 6773 while ((tmp & mask) == val && time_before(jiffies, deadline)) { 6774 ata_msleep(ap, interval); 6775 tmp = ioread32(reg); 6776 } 6777 6778 return tmp; 6779 } 6780 6781 /* 6782 * Dummy port_ops 6783 */ 6784 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 6785 { 6786 return AC_ERR_SYSTEM; 6787 } 6788 6789 static void ata_dummy_error_handler(struct ata_port *ap) 6790 { 6791 /* truly dummy */ 6792 } 6793 6794 struct ata_port_operations ata_dummy_port_ops = { 6795 .qc_prep = ata_noop_qc_prep, 6796 .qc_issue = ata_dummy_qc_issue, 6797 .error_handler = ata_dummy_error_handler, 6798 .sched_eh = ata_std_sched_eh, 6799 .end_eh = ata_std_end_eh, 6800 }; 6801 6802 const struct ata_port_info ata_dummy_port_info = { 6803 .port_ops = &ata_dummy_port_ops, 6804 }; 6805 6806 /* 6807 * Utility print functions 6808 */ 6809 void ata_port_printk(const struct ata_port *ap, const char *level, 6810 const char *fmt, ...) 6811 { 6812 struct va_format vaf; 6813 va_list args; 6814 6815 va_start(args, fmt); 6816 6817 vaf.fmt = fmt; 6818 vaf.va = &args; 6819 6820 printk("%sata%u: %pV", level, ap->print_id, &vaf); 6821 6822 va_end(args); 6823 } 6824 EXPORT_SYMBOL(ata_port_printk); 6825 6826 void ata_link_printk(const struct ata_link *link, const char *level, 6827 const char *fmt, ...) 6828 { 6829 struct va_format vaf; 6830 va_list args; 6831 6832 va_start(args, fmt); 6833 6834 vaf.fmt = fmt; 6835 vaf.va = &args; 6836 6837 if (sata_pmp_attached(link->ap) || link->ap->slave_link) 6838 printk("%sata%u.%02u: %pV", 6839 level, link->ap->print_id, link->pmp, &vaf); 6840 else 6841 printk("%sata%u: %pV", 6842 level, link->ap->print_id, &vaf); 6843 6844 va_end(args); 6845 } 6846 EXPORT_SYMBOL(ata_link_printk); 6847 6848 void ata_dev_printk(const struct ata_device *dev, const char *level, 6849 const char *fmt, ...) 
6850 { 6851 struct va_format vaf; 6852 va_list args; 6853 6854 va_start(args, fmt); 6855 6856 vaf.fmt = fmt; 6857 vaf.va = &args; 6858 6859 printk("%sata%u.%02u: %pV", 6860 level, dev->link->ap->print_id, dev->link->pmp + dev->devno, 6861 &vaf); 6862 6863 va_end(args); 6864 } 6865 EXPORT_SYMBOL(ata_dev_printk); 6866 6867 void ata_print_version(const struct device *dev, const char *version) 6868 { 6869 dev_printk(KERN_DEBUG, dev, "version %s\n", version); 6870 } 6871 EXPORT_SYMBOL(ata_print_version); 6872 6873 /* 6874 * libata is essentially a library of internal helper functions for 6875 * low-level ATA host controller drivers. As such, the API/ABI is 6876 * likely to change as new drivers are added and updated. 6877 * Do not depend on ABI/API stability. 6878 */ 6879 EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 6880 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 6881 EXPORT_SYMBOL_GPL(sata_deb_timing_long); 6882 EXPORT_SYMBOL_GPL(ata_base_port_ops); 6883 EXPORT_SYMBOL_GPL(sata_port_ops); 6884 EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 6885 EXPORT_SYMBOL_GPL(ata_dummy_port_info); 6886 EXPORT_SYMBOL_GPL(ata_link_next); 6887 EXPORT_SYMBOL_GPL(ata_dev_next); 6888 EXPORT_SYMBOL_GPL(ata_std_bios_param); 6889 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); 6890 EXPORT_SYMBOL_GPL(ata_host_init); 6891 EXPORT_SYMBOL_GPL(ata_host_alloc); 6892 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 6893 EXPORT_SYMBOL_GPL(ata_slave_link_init); 6894 EXPORT_SYMBOL_GPL(ata_host_start); 6895 EXPORT_SYMBOL_GPL(ata_host_register); 6896 EXPORT_SYMBOL_GPL(ata_host_activate); 6897 EXPORT_SYMBOL_GPL(ata_host_detach); 6898 EXPORT_SYMBOL_GPL(ata_sg_init); 6899 EXPORT_SYMBOL_GPL(ata_qc_complete); 6900 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 6901 EXPORT_SYMBOL_GPL(atapi_cmd_type); 6902 EXPORT_SYMBOL_GPL(ata_tf_to_fis); 6903 EXPORT_SYMBOL_GPL(ata_tf_from_fis); 6904 EXPORT_SYMBOL_GPL(ata_pack_xfermask); 6905 EXPORT_SYMBOL_GPL(ata_unpack_xfermask); 6906 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); 6907 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); 6908 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 6909 EXPORT_SYMBOL_GPL(ata_mode_string); 6910 EXPORT_SYMBOL_GPL(ata_id_xfermask); 6911 EXPORT_SYMBOL_GPL(ata_do_set_mode); 6912 EXPORT_SYMBOL_GPL(ata_std_qc_defer); 6913 EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 6914 EXPORT_SYMBOL_GPL(ata_dev_disable); 6915 EXPORT_SYMBOL_GPL(sata_set_spd); 6916 EXPORT_SYMBOL_GPL(ata_wait_after_reset); 6917 EXPORT_SYMBOL_GPL(sata_link_debounce); 6918 EXPORT_SYMBOL_GPL(sata_link_resume); 6919 EXPORT_SYMBOL_GPL(sata_link_scr_lpm); 6920 EXPORT_SYMBOL_GPL(ata_std_prereset); 6921 EXPORT_SYMBOL_GPL(sata_link_hardreset); 6922 EXPORT_SYMBOL_GPL(sata_std_hardreset); 6923 EXPORT_SYMBOL_GPL(ata_std_postreset); 6924 EXPORT_SYMBOL_GPL(ata_dev_classify); 6925 EXPORT_SYMBOL_GPL(ata_dev_pair); 6926 EXPORT_SYMBOL_GPL(ata_ratelimit); 6927 EXPORT_SYMBOL_GPL(ata_msleep); 6928 EXPORT_SYMBOL_GPL(ata_wait_register); 6929 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 6930 EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 6931 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 6932 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 6933 EXPORT_SYMBOL_GPL(__ata_change_queue_depth); 6934 EXPORT_SYMBOL_GPL(sata_scr_valid); 6935 EXPORT_SYMBOL_GPL(sata_scr_read); 6936 EXPORT_SYMBOL_GPL(sata_scr_write); 6937 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 6938 EXPORT_SYMBOL_GPL(ata_link_online); 6939 EXPORT_SYMBOL_GPL(ata_link_offline); 6940 #ifdef CONFIG_PM 6941 EXPORT_SYMBOL_GPL(ata_host_suspend); 6942 EXPORT_SYMBOL_GPL(ata_host_resume); 6943 #endif /* CONFIG_PM */ 6944 
EXPORT_SYMBOL_GPL(ata_id_string); 6945 EXPORT_SYMBOL_GPL(ata_id_c_string); 6946 EXPORT_SYMBOL_GPL(ata_do_dev_read_id); 6947 EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6948 6949 EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 6950 EXPORT_SYMBOL_GPL(ata_timing_find_mode); 6951 EXPORT_SYMBOL_GPL(ata_timing_compute); 6952 EXPORT_SYMBOL_GPL(ata_timing_merge); 6953 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); 6954 6955 #ifdef CONFIG_PCI 6956 EXPORT_SYMBOL_GPL(pci_test_config_bits); 6957 EXPORT_SYMBOL_GPL(ata_pci_remove_one); 6958 #ifdef CONFIG_PM 6959 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 6960 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 6961 EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 6962 EXPORT_SYMBOL_GPL(ata_pci_device_resume); 6963 #endif /* CONFIG_PM */ 6964 #endif /* CONFIG_PCI */ 6965 6966 EXPORT_SYMBOL_GPL(ata_platform_remove_one); 6967 6968 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 6969 EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 6970 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 6971 EXPORT_SYMBOL_GPL(ata_port_desc); 6972 #ifdef CONFIG_PCI 6973 EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 6974 #endif /* CONFIG_PCI */ 6975 EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 6976 EXPORT_SYMBOL_GPL(ata_link_abort); 6977 EXPORT_SYMBOL_GPL(ata_port_abort); 6978 EXPORT_SYMBOL_GPL(ata_port_freeze); 6979 EXPORT_SYMBOL_GPL(sata_async_notification); 6980 EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 6981 EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 6982 EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 6983 EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 6984 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); 6985 EXPORT_SYMBOL_GPL(ata_do_eh); 6986 EXPORT_SYMBOL_GPL(ata_std_error_handler); 6987 6988 EXPORT_SYMBOL_GPL(ata_cable_40wire); 6989 EXPORT_SYMBOL_GPL(ata_cable_80wire); 6990 EXPORT_SYMBOL_GPL(ata_cable_unknown); 6991 EXPORT_SYMBOL_GPL(ata_cable_ignore); 6992 EXPORT_SYMBOL_GPL(ata_cable_sata); 6993
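
/*
 * Illustrative sketch of the host bring-up flow implemented above,
 * using a hypothetical PCI LLD (my_pinfo, my_probe, my_interrupt and
 * my_sht are assumed names, not part of libata): allocate a host from
 * a port_info array, then start it, request the IRQ and register it in
 * one go with ata_host_activate().
 *
 *	static const struct ata_port_info my_pinfo = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &sata_port_ops,
 *	};
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_pinfo, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		(map BARs and set up host->iomap / per-port ioaddr here)
 *
 *		return ata_host_activate(host, pdev->irq, my_interrupt,
 *					 IRQF_SHARED, &my_sht);
 *	}
 *
 * The matching .remove callback can simply be ata_pci_remove_one(),
 * which detaches all ports via ata_host_detach(); resource release is
 * handled by devres.
 */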