/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by:  Tejun Heo <tj@kernel.org>
 *		   Please ALWAYS copy linux-ide@vger.kernel.org
 *		   on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 * Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>

#include "libata.h"
#include "libata-transport.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

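/*
 * Editor's note (illustrative, not kernel documentation): these triples
 * are consumed by sata_link_debounce(), which samples SStatus every
 * "interval" msecs and accepts the link state once it has held steady
 * for "duration" msecs, giving up after "timeout" msecs.  E.g. with
 * sata_deb_timing_hotplug a freshly plugged link must report the same
 * DET value for 500ms, polled every 25ms, within a 2 second budget.
 */
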
const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

atomic_t ata_print_id = ATOMIC_INIT(0);

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}

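/*
 * Editor's illustration of the check above: the low nibble of SStatus is
 * the DET field, and 0x3 means "device present and PHY communication
 * established".  So an SStatus of, say, 0x123 (active power state,
 * 3.0 Gbps, DET=3) is online, while 0x0 (no device) and 0x4 (PHY in
 * offline mode) are not.
 */
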
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}

/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}

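/*
 * Editor's usage sketch: callers normally reach the two iterators above
 * through the wrapper macros in <linux/libata.h> rather than calling
 * them directly, e.g.
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			ata_dev_info(dev, "enabled device\n");
 *
 * which expands to loops over ata_link_next()/ata_dev_next() with the
 * ATA_LITER_EDGE and ATA_DITER_ENABLED modes.
 */
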
/**
 *	ata_dev_phys_link - find physical link for a device
 *	@dev: ATA device to look up physical link for
 *
 *	Look up physical link which @dev is attached to.  Note that
 *	this is different from @dev->link only when @dev is on slave
 *	link.  For all other cases, it's the same as @dev->link.
 *
 *	LOCKING:
 *	Don't care.
 *
 *	RETURNS:
 *	Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}

/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has matching port number is used, so it
 *	can be specified as part of device force parameters.  For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link flags and SATA spd limit according to libata.force
 *	and whine about it.  When only the port part is specified
 *	(e.g. 1:), the limit applies to all links connected to both
 *	the host link and all fan-out ports connected via PMP.  If the
 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 *	first fan-out link, not the host link.  Device number 15 always
 *	points to the host link whether PMP is attached or not.  If the
 *	controller has a slave link, device number 16 points to it.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}

/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}

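/*
 * Editor's illustration of the force-table semantics implemented above
 * (syntax per Documentation/kernel-parameters.txt): each entry is
 * "[ID.DEV:]VAL", so e.g.
 *
 *	libata.force=3:noncq,1.00:udma4,1.5Gbps
 *
 * would apply "noncq" to every device on port 3, limit port 1 device 0
 * to UDMA/66, and cap all links to 1.5 Gbps.  Entries later on the
 * command line win for single-valued knobs such as cable type and spd
 * limit, while link flags and horkage bits accumulate across entries.
 */
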
/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}

/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall through */
	default:
		return ATAPI_MISC;
	}
}

/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will be output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = tf->auxiliary & 0xff;
	fis[17] = (tf->auxiliary >> 8) & 0xff;
	fis[18] = (tf->auxiliary >> 16) & 0xff;
	fis[19] = (tf->auxiliary >> 24) & 0xff;
}

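/*
 * Editor's worked example (values illustrative) of the layout built
 * above: a READ DMA EXT (0x25) for 16 sectors at LBA 0x01020304 on PMP
 * port 0 gives fis[0] = 0x27, fis[1] = 0x80 (C bit set), fis[2] = 0x25,
 * fis[4..6] = 04 03 02 (LBA 23:0), fis[7] = 0x40 (ATA_LBA),
 * fis[8] = 0x01 (LBA 31:24) and fis[12] = 0x10 (sector count 16);
 * fis[16..19] carry the auxiliary field used by some NCQ commands.
 */
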
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

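/*
 * Editor's worked example of the table lookup above: a DMA write with
 * LBA48 and FUA computes index = 16 (dma) + 4 (fua) + 2 (lba48) +
 * 1 (write) = 23, i.e. ATA_CMD_WRITE_FUA_EXT.  The zero slots catch
 * impossible combinations such as FUA without LBA48 (16 + 4 + 0 + 1 =
 * 21), for which ata_rwcmd_protocol() returns -1.
 */
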
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			return U64_MAX;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

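/*
 * Editor's worked example of the CHS conversion above: with a legacy
 * geometry of 16 heads and 63 sectors/track, block 1000 gives
 *
 *	track = 1000 / 63 = 15,  sect = 1000 % 63 + 1 = 56,
 *	cyl   = 15 / 16   = 0,   head = 15 % 16       = 15,
 *
 * and ata_tf_read_block() inverts it: (0 * 16 + 15) * 63 + 56 - 1 = 1000.
 */
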
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

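/*
 * Editor's illustration of the mask/mode mapping implemented above
 * (mode values per <linux/ata.h>): a packed xfer_mask whose highest set
 * bit is ATA_SHIFT_UDMA + 5 (i.e. UDMA modes 0-5 supported) makes
 * ata_xfer_mask2mode() return XFER_UDMA_0 + 5 == XFER_UDMA_5
 * (UDMA/100), and ata_xfer_mode2mask(XFER_UDMA_5) reconstructs the six
 * UDMA bits of the packed mask.
 */
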
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we check only those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
		DPRINTK("found ZAC device by sig\n");
		return ATA_DEV_ZAC;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

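/*
 * Editor's illustration of the two helpers above: IDENTIFY strings are
 * stored big-endian within each word, so words { 0x4142, 0x4320, 0x2020 }
 * spell "ABC   ".  ata_id_string() emits those bytes verbatim, while
 * ata_id_c_string() with len = 7 (even maximum + 1) copies six bytes,
 * trims the trailing spaces and yields the C string "ABC".
 */
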
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

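/*
 * Editor's worked example of the HPA mechanics behind the two helpers
 * above (numbers illustrative): a 500GB disk whose BIOS hid 2048 sectors
 * reports 976771120 sectors in IDENTIFY, while READ NATIVE MAX ADDRESS
 * (EXT) returns max address 976773167, i.e. a native size of 976773168.
 * Passing that native size to ata_set_max_sectors() issues SET MAX
 * ADDRESS for address 976773167 and re-exposes the full capacity.
 */
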
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media.  The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum.  Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait... there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However it's the speeds not the modes that
		 * are supported...  Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

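/*
 * Editor's worked example for ata_id_xfermask(): a typical modern drive
 * sets bit 1 of word 53, word 64 = 0x0003 (PIO3+4), word 63 = 0x0007
 * (MWDMA0-2) and word 88 = 0x003f (UDMA0-5), which the code above packs
 * into pio_mask 0x1f (PIO0-4, since (0x03 << 3) | 0x7 = 0x1f),
 * mwdma_mask 0x07 and udma_mask 0x3f.
 */
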
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

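/*
 * Editor's usage sketch (hypothetical caller, mirroring how helpers in
 * this file such as ata_read_native_max_address() drive the interface):
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_FLUSH_EXT;
 *	tf.flags |= ATA_TFLAG_DEVICE;
 *	tf.protocol = ATA_PROT_NODATA;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 *
 * A zero err_mask means success; on failure tf holds the result
 * taskfile for error analysis.
 */
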
/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY.  Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset.  IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied.  See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY.  Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* For PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 *	ata_pio_mask_no_iordy - Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy.  Return
 *	-1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

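/*
 * Editor's illustration of the cycle-time test above: ATA_ID_EIDE_PIO
 * holds the drive's best non-IORDY PIO cycle time in nanoseconds.
 * PIO2 is 240ns per cycle, so a drive reporting 383ns must be held to
 * PIO0-1 (mask 0x3) when IORDY is unavailable, while one reporting
 * 240ns or less may run up to PIO2 (mask 0x7).
 */
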
/**
 *	ata_do_dev_read_id - default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data.  For some RAID controllers and for pre ATA devices
 *	this function is wrapped or replaced by the driver.
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}

/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		/* fall through */
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
							ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				"host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
			     reason, err_mask);
	return rc;
}

2012 */ 2013 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 2014 err_mask = ata_dev_init_params(dev, id[3], id[6]); 2015 if (err_mask) { 2016 rc = -EIO; 2017 reason = "INIT_DEV_PARAMS failed"; 2018 goto err_out; 2019 } 2020 2021 /* current CHS translation info (id[53-58]) might be 2022 * changed. reread the identify device info. 2023 */ 2024 flags &= ~ATA_READID_POSTRESET; 2025 goto retry; 2026 } 2027 } 2028 2029 *p_class = class; 2030 2031 return 0; 2032 2033 err_out: 2034 if (ata_msg_warn(ap)) 2035 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n", 2036 reason, err_mask); 2037 return rc; 2038 } 2039 2040 static int ata_do_link_spd_horkage(struct ata_device *dev) 2041 { 2042 struct ata_link *plink = ata_dev_phys_link(dev); 2043 u32 target, target_limit; 2044 2045 if (!sata_scr_valid(plink)) 2046 return 0; 2047 2048 if (dev->horkage & ATA_HORKAGE_1_5_GBPS) 2049 target = 1; 2050 else 2051 return 0; 2052 2053 target_limit = (1 << target) - 1; 2054 2055 /* if already on stricter limit, no need to push further */ 2056 if (plink->sata_spd_limit <= target_limit) 2057 return 0; 2058 2059 plink->sata_spd_limit = target_limit; 2060 2061 /* Request another EH round by returning -EAGAIN if link is 2062 * going faster than the target speed. Forward progress is 2063 * guaranteed by setting sata_spd_limit to target_limit above. 2064 */ 2065 if (plink->sata_spd > target) { 2066 ata_dev_info(dev, "applying link speed limit horkage to %s\n", 2067 sata_spd_string(target)); 2068 return -EAGAIN; 2069 } 2070 return 0; 2071 } 2072 2073 static inline u8 ata_dev_knobble(struct ata_device *dev) 2074 { 2075 struct ata_port *ap = dev->link->ap; 2076 2077 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) 2078 return 0; 2079 2080 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2081 } 2082 2083 static void ata_dev_config_ncq_send_recv(struct ata_device *dev) 2084 { 2085 struct ata_port *ap = dev->link->ap; 2086 unsigned int err_mask; 2087 int log_index = ATA_LOG_NCQ_SEND_RECV * 2; 2088 u16 log_pages; 2089 2090 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY, 2091 0, ap->sector_buf, 1); 2092 if (err_mask) { 2093 ata_dev_dbg(dev, 2094 "failed to get Log Directory Emask 0x%x\n", 2095 err_mask); 2096 return; 2097 } 2098 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]); 2099 if (!log_pages) { 2100 ata_dev_warn(dev, 2101 "NCQ Send/Recv Log not supported\n"); 2102 return; 2103 } 2104 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV, 2105 0, ap->sector_buf, 1); 2106 if (err_mask) { 2107 ata_dev_dbg(dev, 2108 "failed to get NCQ Send/Recv Log Emask 0x%x\n", 2109 err_mask); 2110 } else { 2111 u8 *cmds = dev->ncq_send_recv_cmds; 2112 2113 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; 2114 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); 2115 2116 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { 2117 ata_dev_dbg(dev, "disabling queued TRIM support\n"); 2118 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= 2119 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; 2120 } 2121 } 2122 } 2123 2124 static void ata_dev_config_ncq_non_data(struct ata_device *dev) 2125 { 2126 struct ata_port *ap = dev->link->ap; 2127 unsigned int err_mask; 2128 int log_index = ATA_LOG_NCQ_NON_DATA * 2; 2129 u16 log_pages; 2130 2131 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY, 2132 0, ap->sector_buf, 1); 2133 if (err_mask) { 2134 ata_dev_dbg(dev, 2135 "failed to get Log Directory Emask 0x%x\n", 2136 err_mask); 2137 return; 2138 } 2139 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]); 2140 if 
(!log_pages) {
		ata_dev_warn(dev,
			     "NCQ Non-Data Log not supported\n");
		return;
	}
	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
				     0, ap->sector_buf, 1);
	if (err_mask) {
		ata_dev_dbg(dev,
			    "failed to get NCQ Non-Data Log Emask 0x%x\n",
			    err_mask);
	} else {
		u8 *cmds = dev->ncq_non_data_cmds;

		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
	}
}

static int ata_dev_config_ncq(struct ata_device *dev,
			      char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
		(ap->flags & ATA_FLAG_FPDMA_AA) &&
		ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
			SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			if (err_mask != AC_ERR_DEV) {
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);

	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
		if (ata_id_has_ncq_send_and_recv(dev->id))
			ata_dev_config_ncq_send_recv(dev);
		if (ata_id_has_ncq_non_data(dev->id))
			ata_dev_config_ncq_non_data(dev);
	}

	return 0;
}

static void ata_dev_config_sense_reporting(struct ata_device *dev)
{
	unsigned int err_mask;

	if (!ata_id_has_sense_reporting(dev->id))
		return;

	if (ata_id_sense_reporting_enabled(dev->id))
		return;

	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
	if (err_mask) {
		ata_dev_dbg(dev,
			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
			    err_mask);
	}
}

static void ata_dev_config_zac(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;
	u8 *identify_buf = ap->sector_buf;
	int log_index = ATA_LOG_SATA_ID_DEV_DATA * 2, i, found = 0;
	u16 log_pages;

	dev->zac_zones_optimal_open = U32_MAX;
	dev->zac_zones_optimal_nonseq = U32_MAX;
	dev->zac_zones_max_open = U32_MAX;

	/*
	 * Always set the 'ZAC' flag for Host-managed devices.
	 */
	if (dev->class == ATA_DEV_ZAC)
		dev->flags |= ATA_DFLAG_ZAC;
	else if (ata_id_zoned_cap(dev->id) == 0x01)
		/*
		 * Check for host-aware devices.
		 */
		dev->flags |= ATA_DFLAG_ZAC;

	if (!(dev->flags & ATA_DFLAG_ZAC))
		return;

	/*
	 * Read Log Directory to figure out if IDENTIFY DEVICE log
	 * is supported.
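	 * Each directory entry is a 16-bit little-endian page count at
	 * byte offset (log address * 2), which is what log_index
	 * computes above; a count of zero means the log is absent.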
2259 */ 2260 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY, 2261 0, ap->sector_buf, 1); 2262 if (err_mask) { 2263 ata_dev_info(dev, 2264 "failed to get Log Directory Emask 0x%x\n", 2265 err_mask); 2266 return; 2267 } 2268 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]); 2269 if (log_pages == 0) { 2270 ata_dev_warn(dev, 2271 "ATA Identify Device Log not supported\n"); 2272 return; 2273 } 2274 /* 2275 * Read IDENTIFY DEVICE data log, page 0, to figure out 2276 * if page 9 is supported. 2277 */ 2278 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, 0, 2279 identify_buf, 1); 2280 if (err_mask) { 2281 ata_dev_info(dev, 2282 "failed to get Device Identify Log Emask 0x%x\n", 2283 err_mask); 2284 return; 2285 } 2286 log_pages = identify_buf[8]; 2287 for (i = 0; i < log_pages; i++) { 2288 if (identify_buf[9 + i] == ATA_LOG_ZONED_INFORMATION) { 2289 found++; 2290 break; 2291 } 2292 } 2293 if (!found) { 2294 ata_dev_warn(dev, 2295 "ATA Zoned Information Log not supported\n"); 2296 return; 2297 } 2298 2299 /* 2300 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information) 2301 */ 2302 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, 2303 ATA_LOG_ZONED_INFORMATION, 2304 identify_buf, 1); 2305 if (!err_mask) { 2306 u64 zoned_cap, opt_open, opt_nonseq, max_open; 2307 2308 zoned_cap = get_unaligned_le64(&identify_buf[8]); 2309 if ((zoned_cap >> 63)) 2310 dev->zac_zoned_cap = (zoned_cap & 1); 2311 opt_open = get_unaligned_le64(&identify_buf[24]); 2312 if ((opt_open >> 63)) 2313 dev->zac_zones_optimal_open = (u32)opt_open; 2314 opt_nonseq = get_unaligned_le64(&identify_buf[32]); 2315 if ((opt_nonseq >> 63)) 2316 dev->zac_zones_optimal_nonseq = (u32)opt_nonseq; 2317 max_open = get_unaligned_le64(&identify_buf[40]); 2318 if ((max_open >> 63)) 2319 dev->zac_zones_max_open = (u32)max_open; 2320 } 2321 } 2322 2323 /** 2324 * ata_dev_configure - Configure the specified ATA/ATAPI device 2325 * @dev: Target device to configure 2326 * 2327 * Configure @dev according to @dev->id. Generic and low-level 2328 * driver specific fixups are also applied. 2329 * 2330 * LOCKING: 2331 * Kernel thread context (may sleep) 2332 * 2333 * RETURNS: 2334 * 0 on success, -errno otherwise 2335 */ 2336 int ata_dev_configure(struct ata_device *dev) 2337 { 2338 struct ata_port *ap = dev->link->ap; 2339 struct ata_eh_context *ehc = &dev->link->eh_context; 2340 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2341 const u16 *id = dev->id; 2342 unsigned long xfer_mask; 2343 unsigned int err_mask; 2344 char revbuf[7]; /* XYZ-99\0 */ 2345 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2346 char modelbuf[ATA_ID_PROD_LEN+1]; 2347 int rc; 2348 2349 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2350 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__); 2351 return 0; 2352 } 2353 2354 if (ata_msg_probe(ap)) 2355 ata_dev_dbg(dev, "%s: ENTER\n", __func__); 2356 2357 /* set horkage */ 2358 dev->horkage |= ata_dev_blacklisted(dev); 2359 ata_force_horkage(dev); 2360 2361 if (dev->horkage & ATA_HORKAGE_DISABLE) { 2362 ata_dev_info(dev, "unsupported device, disabling\n"); 2363 ata_dev_disable(dev); 2364 return 0; 2365 } 2366 2367 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && 2368 dev->class == ATA_DEV_ATAPI) { 2369 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n", 2370 atapi_enabled ? 
"not supported with this driver" 2371 : "disabled"); 2372 ata_dev_disable(dev); 2373 return 0; 2374 } 2375 2376 rc = ata_do_link_spd_horkage(dev); 2377 if (rc) 2378 return rc; 2379 2380 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */ 2381 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) && 2382 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) 2383 dev->horkage |= ATA_HORKAGE_NOLPM; 2384 2385 if (dev->horkage & ATA_HORKAGE_NOLPM) { 2386 ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); 2387 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; 2388 } 2389 2390 /* let ACPI work its magic */ 2391 rc = ata_acpi_on_devcfg(dev); 2392 if (rc) 2393 return rc; 2394 2395 /* massage HPA, do it early as it might change IDENTIFY data */ 2396 rc = ata_hpa_resize(dev); 2397 if (rc) 2398 return rc; 2399 2400 /* print device capabilities */ 2401 if (ata_msg_probe(ap)) 2402 ata_dev_dbg(dev, 2403 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2404 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2405 __func__, 2406 id[49], id[82], id[83], id[84], 2407 id[85], id[86], id[87], id[88]); 2408 2409 /* initialize to-be-configured parameters */ 2410 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2411 dev->max_sectors = 0; 2412 dev->cdb_len = 0; 2413 dev->n_sectors = 0; 2414 dev->cylinders = 0; 2415 dev->heads = 0; 2416 dev->sectors = 0; 2417 dev->multi_count = 0; 2418 2419 /* 2420 * common ATA, ATAPI feature tests 2421 */ 2422 2423 /* find max transfer mode; for printk only */ 2424 xfer_mask = ata_id_xfermask(id); 2425 2426 if (ata_msg_probe(ap)) 2427 ata_dump_id(id); 2428 2429 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2430 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2431 sizeof(fwrevbuf)); 2432 2433 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2434 sizeof(modelbuf)); 2435 2436 /* ATA-specific feature tests */ 2437 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { 2438 if (ata_id_is_cfa(id)) { 2439 /* CPRM may make this media unusable */ 2440 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2441 ata_dev_warn(dev, 2442 "supports DRM functions and may not be fully accessible\n"); 2443 snprintf(revbuf, 7, "CFA"); 2444 } else { 2445 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2446 /* Warn the user if the device has TPM extensions */ 2447 if (ata_id_has_tpm(id)) 2448 ata_dev_warn(dev, 2449 "supports DRM functions and may not be fully accessible\n"); 2450 } 2451 2452 dev->n_sectors = ata_id_n_sectors(id); 2453 2454 /* get current R/W Multiple count setting */ 2455 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { 2456 unsigned int max = dev->id[47] & 0xff; 2457 unsigned int cnt = dev->id[59] & 0xff; 2458 /* only recognize/allow powers of two here */ 2459 if (is_power_of_2(max) && is_power_of_2(cnt)) 2460 if (cnt <= max) 2461 dev->multi_count = cnt; 2462 } 2463 2464 if (ata_id_has_lba(id)) { 2465 const char *lba_desc; 2466 char ncq_desc[24]; 2467 2468 lba_desc = "LBA"; 2469 dev->flags |= ATA_DFLAG_LBA; 2470 if (ata_id_has_lba48(id)) { 2471 dev->flags |= ATA_DFLAG_LBA48; 2472 lba_desc = "LBA48"; 2473 2474 if (dev->n_sectors >= (1UL << 28) && 2475 ata_id_has_flush_ext(id)) 2476 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2477 } 2478 2479 /* config NCQ */ 2480 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2481 if (rc) 2482 return rc; 2483 2484 /* print device info to dmesg */ 2485 if (ata_msg_drv(ap) && print_info) { 2486 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2487 revbuf, modelbuf, fwrevbuf, 2488 ata_mode_string(xfer_mask)); 2489 ata_dev_info(dev, 2490 "%llu sectors, 
multi %u: %s %s\n", 2491 (unsigned long long)dev->n_sectors, 2492 dev->multi_count, lba_desc, ncq_desc); 2493 } 2494 } else { 2495 /* CHS */ 2496 2497 /* Default translation */ 2498 dev->cylinders = id[1]; 2499 dev->heads = id[3]; 2500 dev->sectors = id[6]; 2501 2502 if (ata_id_current_chs_valid(id)) { 2503 /* Current CHS translation is valid. */ 2504 dev->cylinders = id[54]; 2505 dev->heads = id[55]; 2506 dev->sectors = id[56]; 2507 } 2508 2509 /* print device info to dmesg */ 2510 if (ata_msg_drv(ap) && print_info) { 2511 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2512 revbuf, modelbuf, fwrevbuf, 2513 ata_mode_string(xfer_mask)); 2514 ata_dev_info(dev, 2515 "%llu sectors, multi %u, CHS %u/%u/%u\n", 2516 (unsigned long long)dev->n_sectors, 2517 dev->multi_count, dev->cylinders, 2518 dev->heads, dev->sectors); 2519 } 2520 } 2521 2522 /* Check and mark DevSlp capability. Get DevSlp timing variables 2523 * from SATA Settings page of Identify Device Data Log. 2524 */ 2525 if (ata_id_has_devslp(dev->id)) { 2526 u8 *sata_setting = ap->sector_buf; 2527 int i, j; 2528 2529 dev->flags |= ATA_DFLAG_DEVSLP; 2530 err_mask = ata_read_log_page(dev, 2531 ATA_LOG_SATA_ID_DEV_DATA, 2532 ATA_LOG_SATA_SETTINGS, 2533 sata_setting, 2534 1); 2535 if (err_mask) 2536 ata_dev_dbg(dev, 2537 "failed to get Identify Device Data, Emask 0x%x\n", 2538 err_mask); 2539 else 2540 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { 2541 j = ATA_LOG_DEVSLP_OFFSET + i; 2542 dev->devslp_timing[i] = sata_setting[j]; 2543 } 2544 } 2545 ata_dev_config_sense_reporting(dev); 2546 ata_dev_config_zac(dev); 2547 dev->cdb_len = 16; 2548 } 2549 2550 /* ATAPI-specific feature tests */ 2551 else if (dev->class == ATA_DEV_ATAPI) { 2552 const char *cdb_intr_string = ""; 2553 const char *atapi_an_string = ""; 2554 const char *dma_dir_string = ""; 2555 u32 sntf; 2556 2557 rc = atapi_cdb_len(id); 2558 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2559 if (ata_msg_warn(ap)) 2560 ata_dev_warn(dev, "unsupported CDB len\n"); 2561 rc = -EINVAL; 2562 goto err_out_nosup; 2563 } 2564 dev->cdb_len = (unsigned int) rc; 2565 2566 /* Enable ATAPI AN if both the host and device have 2567 * the support. If PMP is attached, SNTF is required 2568 * to enable ATAPI AN to discern between PHY status 2569 * changed notifications and ATAPI ANs. 
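	 * That is why the condition below also checks that
	 * SCR_NOTIFICATION can actually be read when a PMP is
	 * attached; without SNTF, AN is left disabled.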
2570 */ 2571 if (atapi_an && 2572 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2573 (!sata_pmp_attached(ap) || 2574 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2575 /* issue SET feature command to turn this on */ 2576 err_mask = ata_dev_set_feature(dev, 2577 SETFEATURES_SATA_ENABLE, SATA_AN); 2578 if (err_mask) 2579 ata_dev_err(dev, 2580 "failed to enable ATAPI AN (err_mask=0x%x)\n", 2581 err_mask); 2582 else { 2583 dev->flags |= ATA_DFLAG_AN; 2584 atapi_an_string = ", ATAPI AN"; 2585 } 2586 } 2587 2588 if (ata_id_cdb_intr(dev->id)) { 2589 dev->flags |= ATA_DFLAG_CDB_INTR; 2590 cdb_intr_string = ", CDB intr"; 2591 } 2592 2593 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) { 2594 dev->flags |= ATA_DFLAG_DMADIR; 2595 dma_dir_string = ", DMADIR"; 2596 } 2597 2598 if (ata_id_has_da(dev->id)) { 2599 dev->flags |= ATA_DFLAG_DA; 2600 zpodd_init(dev); 2601 } 2602 2603 /* print device info to dmesg */ 2604 if (ata_msg_drv(ap) && print_info) 2605 ata_dev_info(dev, 2606 "ATAPI: %s, %s, max %s%s%s%s\n", 2607 modelbuf, fwrevbuf, 2608 ata_mode_string(xfer_mask), 2609 cdb_intr_string, atapi_an_string, 2610 dma_dir_string); 2611 } 2612 2613 /* determine max_sectors */ 2614 dev->max_sectors = ATA_MAX_SECTORS; 2615 if (dev->flags & ATA_DFLAG_LBA48) 2616 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2617 2618 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2619 200 sectors */ 2620 if (ata_dev_knobble(dev)) { 2621 if (ata_msg_drv(ap) && print_info) 2622 ata_dev_info(dev, "applying bridge limits\n"); 2623 dev->udma_mask &= ATA_UDMA5; 2624 dev->max_sectors = ATA_MAX_SECTORS; 2625 } 2626 2627 if ((dev->class == ATA_DEV_ATAPI) && 2628 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2629 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2630 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2631 } 2632 2633 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2634 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2635 dev->max_sectors); 2636 2637 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024) 2638 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024, 2639 dev->max_sectors); 2640 2641 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) 2642 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2643 2644 if (ap->ops->dev_config) 2645 ap->ops->dev_config(dev); 2646 2647 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2648 /* Let the user know. We don't want to disallow opens for 2649 rescue purposes, or in case the vendor is just a blithering 2650 idiot. Do this after the dev_config call as some controllers 2651 with buggy firmware may want to avoid reporting false device 2652 bugs */ 2653 2654 if (print_info) { 2655 ata_dev_warn(dev, 2656 "Drive reports diagnostics failure. This may indicate a drive\n"); 2657 ata_dev_warn(dev, 2658 "fault or invalid emulation. Contact drive vendor for information.\n"); 2659 } 2660 } 2661 2662 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { 2663 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n"); 2664 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n"); 2665 } 2666 2667 return 0; 2668 2669 err_out_nosup: 2670 if (ata_msg_probe(ap)) 2671 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__); 2672 return rc; 2673 } 2674 2675 /** 2676 * ata_cable_40wire - return 40 wire cable type 2677 * @ap: port 2678 * 2679 * Helper method for drivers which want to hardwire 40 wire cable 2680 * detection. 
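 *	Typically wired up directly as the ->cable_detect method in a
 *	driver's struct ata_port_operations, e.g.
 *	".cable_detect = ata_cable_40wire".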
2681 */ 2682 2683 int ata_cable_40wire(struct ata_port *ap) 2684 { 2685 return ATA_CBL_PATA40; 2686 } 2687 2688 /** 2689 * ata_cable_80wire - return 80 wire cable type 2690 * @ap: port 2691 * 2692 * Helper method for drivers which want to hardwire 80 wire cable 2693 * detection. 2694 */ 2695 2696 int ata_cable_80wire(struct ata_port *ap) 2697 { 2698 return ATA_CBL_PATA80; 2699 } 2700 2701 /** 2702 * ata_cable_unknown - return unknown PATA cable. 2703 * @ap: port 2704 * 2705 * Helper method for drivers which have no PATA cable detection. 2706 */ 2707 2708 int ata_cable_unknown(struct ata_port *ap) 2709 { 2710 return ATA_CBL_PATA_UNK; 2711 } 2712 2713 /** 2714 * ata_cable_ignore - return ignored PATA cable. 2715 * @ap: port 2716 * 2717 * Helper method for drivers which don't use cable type to limit 2718 * transfer mode. 2719 */ 2720 int ata_cable_ignore(struct ata_port *ap) 2721 { 2722 return ATA_CBL_PATA_IGN; 2723 } 2724 2725 /** 2726 * ata_cable_sata - return SATA cable type 2727 * @ap: port 2728 * 2729 * Helper method for drivers which have SATA cables 2730 */ 2731 2732 int ata_cable_sata(struct ata_port *ap) 2733 { 2734 return ATA_CBL_SATA; 2735 } 2736 2737 /** 2738 * ata_bus_probe - Reset and probe ATA bus 2739 * @ap: Bus to probe 2740 * 2741 * Master ATA bus probing function. Initiates a hardware-dependent 2742 * bus reset, then attempts to identify any devices found on 2743 * the bus. 2744 * 2745 * LOCKING: 2746 * PCI/etc. bus probe sem. 2747 * 2748 * RETURNS: 2749 * Zero on success, negative errno otherwise. 2750 */ 2751 2752 int ata_bus_probe(struct ata_port *ap) 2753 { 2754 unsigned int classes[ATA_MAX_DEVICES]; 2755 int tries[ATA_MAX_DEVICES]; 2756 int rc; 2757 struct ata_device *dev; 2758 2759 ata_for_each_dev(dev, &ap->link, ALL) 2760 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2761 2762 retry: 2763 ata_for_each_dev(dev, &ap->link, ALL) { 2764 /* If we issue an SRST then an ATA drive (not ATAPI) 2765 * may change configuration and be in PIO0 timing. If 2766 * we do a hard reset (or are coming from power on) 2767 * this is true for ATA or ATAPI. Until we've set a 2768 * suitable controller mode we should not touch the 2769 * bus as we may be talking too fast. 2770 */ 2771 dev->pio_mode = XFER_PIO_0; 2772 dev->dma_mode = 0xff; 2773 2774 /* If the controller has a pio mode setup function 2775 * then use it to set the chipset to rights. Don't 2776 * touch the DMA setup as that will be dealt with when 2777 * configuring devices. 2778 */ 2779 if (ap->ops->set_piomode) 2780 ap->ops->set_piomode(ap, dev); 2781 } 2782 2783 /* reset and determine device classes */ 2784 ap->ops->phy_reset(ap); 2785 2786 ata_for_each_dev(dev, &ap->link, ALL) { 2787 if (dev->class != ATA_DEV_UNKNOWN) 2788 classes[dev->devno] = dev->class; 2789 else 2790 classes[dev->devno] = ATA_DEV_NONE; 2791 2792 dev->class = ATA_DEV_UNKNOWN; 2793 } 2794 2795 /* read IDENTIFY page and configure devices. 
We have to do the identify 2796 specific sequence bass-ackwards so that PDIAG- is released by 2797 the slave device */ 2798 2799 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { 2800 if (tries[dev->devno]) 2801 dev->class = classes[dev->devno]; 2802 2803 if (!ata_dev_enabled(dev)) 2804 continue; 2805 2806 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2807 dev->id); 2808 if (rc) 2809 goto fail; 2810 } 2811 2812 /* Now ask for the cable type as PDIAG- should have been released */ 2813 if (ap->ops->cable_detect) 2814 ap->cbl = ap->ops->cable_detect(ap); 2815 2816 /* We may have SATA bridge glue hiding here irrespective of 2817 * the reported cable types and sensed types. When SATA 2818 * drives indicate we have a bridge, we don't know which end 2819 * of the link the bridge is which is a problem. 2820 */ 2821 ata_for_each_dev(dev, &ap->link, ENABLED) 2822 if (ata_id_is_sata(dev->id)) 2823 ap->cbl = ATA_CBL_SATA; 2824 2825 /* After the identify sequence we can now set up the devices. We do 2826 this in the normal order so that the user doesn't get confused */ 2827 2828 ata_for_each_dev(dev, &ap->link, ENABLED) { 2829 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2830 rc = ata_dev_configure(dev); 2831 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2832 if (rc) 2833 goto fail; 2834 } 2835 2836 /* configure transfer mode */ 2837 rc = ata_set_mode(&ap->link, &dev); 2838 if (rc) 2839 goto fail; 2840 2841 ata_for_each_dev(dev, &ap->link, ENABLED) 2842 return 0; 2843 2844 return -ENODEV; 2845 2846 fail: 2847 tries[dev->devno]--; 2848 2849 switch (rc) { 2850 case -EINVAL: 2851 /* eeek, something went very wrong, give up */ 2852 tries[dev->devno] = 0; 2853 break; 2854 2855 case -ENODEV: 2856 /* give it just one more chance */ 2857 tries[dev->devno] = min(tries[dev->devno], 1); 2858 case -EIO: 2859 if (tries[dev->devno] == 1) { 2860 /* This is the last chance, better to slow 2861 * down than lose it. 2862 */ 2863 sata_down_spd_limit(&ap->link, 0); 2864 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2865 } 2866 } 2867 2868 if (!tries[dev->devno]) 2869 ata_dev_disable(dev); 2870 2871 goto retry; 2872 } 2873 2874 /** 2875 * sata_print_link_status - Print SATA link status 2876 * @link: SATA link to printk link status about 2877 * 2878 * This function prints link speed and status of a SATA link. 2879 * 2880 * LOCKING: 2881 * None. 
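 *
 *	A typical resulting dmesg line looks like:
 *	"ata1: SATA link up 3.0 Gbps (SStatus 123 SControl 300)"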
2882 */ 2883 static void sata_print_link_status(struct ata_link *link) 2884 { 2885 u32 sstatus, scontrol, tmp; 2886 2887 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2888 return; 2889 sata_scr_read(link, SCR_CONTROL, &scontrol); 2890 2891 if (ata_phys_link_online(link)) { 2892 tmp = (sstatus >> 4) & 0xf; 2893 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n", 2894 sata_spd_string(tmp), sstatus, scontrol); 2895 } else { 2896 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n", 2897 sstatus, scontrol); 2898 } 2899 } 2900 2901 /** 2902 * ata_dev_pair - return other device on cable 2903 * @adev: device 2904 * 2905 * Obtain the other device on the same cable, or if none is 2906 * present NULL is returned 2907 */ 2908 2909 struct ata_device *ata_dev_pair(struct ata_device *adev) 2910 { 2911 struct ata_link *link = adev->link; 2912 struct ata_device *pair = &link->device[1 - adev->devno]; 2913 if (!ata_dev_enabled(pair)) 2914 return NULL; 2915 return pair; 2916 } 2917 2918 /** 2919 * sata_down_spd_limit - adjust SATA spd limit downward 2920 * @link: Link to adjust SATA spd limit for 2921 * @spd_limit: Additional limit 2922 * 2923 * Adjust SATA spd limit of @link downward. Note that this 2924 * function only adjusts the limit. The change must be applied 2925 * using sata_set_spd(). 2926 * 2927 * If @spd_limit is non-zero, the speed is limited to equal to or 2928 * lower than @spd_limit if such speed is supported. If 2929 * @spd_limit is slower than any supported speed, only the lowest 2930 * supported speed is allowed. 2931 * 2932 * LOCKING: 2933 * Inherited from caller. 2934 * 2935 * RETURNS: 2936 * 0 on success, negative errno on failure 2937 */ 2938 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) 2939 { 2940 u32 sstatus, spd, mask; 2941 int rc, bit; 2942 2943 if (!sata_scr_valid(link)) 2944 return -EOPNOTSUPP; 2945 2946 /* If SCR can be read, use it to determine the current SPD. 2947 * If not, use cached value in link->sata_spd. 2948 */ 2949 rc = sata_scr_read(link, SCR_STATUS, &sstatus); 2950 if (rc == 0 && ata_sstatus_online(sstatus)) 2951 spd = (sstatus >> 4) & 0xf; 2952 else 2953 spd = link->sata_spd; 2954 2955 mask = link->sata_spd_limit; 2956 if (mask <= 1) 2957 return -EINVAL; 2958 2959 /* unconditionally mask off the highest bit */ 2960 bit = fls(mask) - 1; 2961 mask &= ~(1 << bit); 2962 2963 /* Mask off all speeds higher than or equal to the current 2964 * one. Force 1.5Gbps if current SPD is not available. 2965 */ 2966 if (spd > 1) 2967 mask &= (1 << (spd - 1)) - 1; 2968 else 2969 mask &= 1; 2970 2971 /* were we already at the bottom? */ 2972 if (!mask) 2973 return -EINVAL; 2974 2975 if (spd_limit) { 2976 if (mask & ((1 << spd_limit) - 1)) 2977 mask &= (1 << spd_limit) - 1; 2978 else { 2979 bit = ffs(mask) - 1; 2980 mask = 1 << bit; 2981 } 2982 } 2983 2984 link->sata_spd_limit = mask; 2985 2986 ata_link_warn(link, "limiting SATA link speed to %s\n", 2987 sata_spd_string(fls(mask))); 2988 2989 return 0; 2990 } 2991 2992 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 2993 { 2994 struct ata_link *host_link = &link->ap->link; 2995 u32 limit, target, spd; 2996 2997 limit = link->sata_spd_limit; 2998 2999 /* Don't configure downstream link faster than upstream link. 3000 * It doesn't speed up anything and some PMPs choke on such 3001 * configuration. 
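	 * For example, with the host link at 3.0 Gbps (sata_spd == 2),
	 * the mask below becomes limit & 0x3, capping the downstream
	 * link at Gen2 no matter what its own limit allows.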
3002 */ 3003 if (!ata_is_host_link(link) && host_link->sata_spd) 3004 limit &= (1 << host_link->sata_spd) - 1; 3005 3006 if (limit == UINT_MAX) 3007 target = 0; 3008 else 3009 target = fls(limit); 3010 3011 spd = (*scontrol >> 4) & 0xf; 3012 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 3013 3014 return spd != target; 3015 } 3016 3017 /** 3018 * sata_set_spd_needed - is SATA spd configuration needed 3019 * @link: Link in question 3020 * 3021 * Test whether the spd limit in SControl matches 3022 * @link->sata_spd_limit. This function is used to determine 3023 * whether hardreset is necessary to apply SATA spd 3024 * configuration. 3025 * 3026 * LOCKING: 3027 * Inherited from caller. 3028 * 3029 * RETURNS: 3030 * 1 if SATA spd configuration is needed, 0 otherwise. 3031 */ 3032 static int sata_set_spd_needed(struct ata_link *link) 3033 { 3034 u32 scontrol; 3035 3036 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 3037 return 1; 3038 3039 return __sata_set_spd_needed(link, &scontrol); 3040 } 3041 3042 /** 3043 * sata_set_spd - set SATA spd according to spd limit 3044 * @link: Link to set SATA spd for 3045 * 3046 * Set SATA spd of @link according to sata_spd_limit. 3047 * 3048 * LOCKING: 3049 * Inherited from caller. 3050 * 3051 * RETURNS: 3052 * 0 if spd doesn't need to be changed, 1 if spd has been 3053 * changed. Negative errno if SCR registers are inaccessible. 3054 */ 3055 int sata_set_spd(struct ata_link *link) 3056 { 3057 u32 scontrol; 3058 int rc; 3059 3060 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3061 return rc; 3062 3063 if (!__sata_set_spd_needed(link, &scontrol)) 3064 return 0; 3065 3066 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3067 return rc; 3068 3069 return 1; 3070 } 3071 3072 /* 3073 * This mode timing computation functionality is ported over from 3074 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 3075 */ 3076 /* 3077 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 3078 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 3079 * for UDMA6, which is currently supported only by Maxtor drives. 3080 * 3081 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
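 *
 * The columns below follow the order used by ata_timing_quantize():
 * { mode, setup, act8b, rec8b, cyc8b, active, recover, dmack_hold,
 * cycle, udma }, all in nanoseconds.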
3082 */ 3083 3084 static const struct ata_timing ata_timing[] = { 3085 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ 3086 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, 3087 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, 3088 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, 3089 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, 3090 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, 3091 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, 3092 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, 3093 3094 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, 3095 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, 3096 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, 3097 3098 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, 3099 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, 3100 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, 3101 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, 3102 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, 3103 3104 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 3105 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, 3106 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, 3107 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, 3108 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, 3109 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, 3110 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, 3111 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, 3112 3113 { 0xFF } 3114 }; 3115 3116 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 3117 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 3118 3119 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 3120 { 3121 q->setup = EZ(t->setup * 1000, T); 3122 q->act8b = EZ(t->act8b * 1000, T); 3123 q->rec8b = EZ(t->rec8b * 1000, T); 3124 q->cyc8b = EZ(t->cyc8b * 1000, T); 3125 q->active = EZ(t->active * 1000, T); 3126 q->recover = EZ(t->recover * 1000, T); 3127 q->dmack_hold = EZ(t->dmack_hold * 1000, T); 3128 q->cycle = EZ(t->cycle * 1000, T); 3129 q->udma = EZ(t->udma * 1000, UT); 3130 } 3131 3132 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 3133 struct ata_timing *m, unsigned int what) 3134 { 3135 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 3136 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 3137 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 3138 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 3139 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 3140 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 3141 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); 3142 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 3143 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 3144 } 3145 3146 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 3147 { 3148 const struct ata_timing *t = ata_timing; 3149 3150 while (xfer_mode > t->mode) 3151 t++; 3152 3153 if (xfer_mode == t->mode) 3154 return t; 3155 3156 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", 3157 __func__, xfer_mode); 3158 3159 return NULL; 3160 } 3161 3162 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 3163 struct ata_timing *t, int T, int UT) 3164 { 3165 const u16 *id = adev->id; 3166 const struct ata_timing *s; 3167 struct ata_timing p; 3168 3169 /* 3170 * Find the mode. 
 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands.  We have to ensure that
	 * the DMA cycle timing is slower than or equal to the fastest
	 * PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery;
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}

/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
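 *
 *	For example, ata_timing_cycle2mode(ATA_SHIFT_UDMA, 25) returns
 *	XFER_UDMA_4: the 20 ns cycle of UDMA5 in ata_timing[] is
 *	already faster than the requested 25 ns, so the scan stops at
 *	the 30 ns UDMA4 entry.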
3253 */ 3254 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3255 { 3256 u8 base_mode = 0xff, last_mode = 0xff; 3257 const struct ata_xfer_ent *ent; 3258 const struct ata_timing *t; 3259 3260 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3261 if (ent->shift == xfer_shift) 3262 base_mode = ent->base; 3263 3264 for (t = ata_timing_find_mode(base_mode); 3265 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3266 unsigned short this_cycle; 3267 3268 switch (xfer_shift) { 3269 case ATA_SHIFT_PIO: 3270 case ATA_SHIFT_MWDMA: 3271 this_cycle = t->cycle; 3272 break; 3273 case ATA_SHIFT_UDMA: 3274 this_cycle = t->udma; 3275 break; 3276 default: 3277 return 0xff; 3278 } 3279 3280 if (cycle > this_cycle) 3281 break; 3282 3283 last_mode = t->mode; 3284 } 3285 3286 return last_mode; 3287 } 3288 3289 /** 3290 * ata_down_xfermask_limit - adjust dev xfer masks downward 3291 * @dev: Device to adjust xfer masks 3292 * @sel: ATA_DNXFER_* selector 3293 * 3294 * Adjust xfer masks of @dev downward. Note that this function 3295 * does not apply the change. Invoking ata_set_mode() afterwards 3296 * will apply the limit. 3297 * 3298 * LOCKING: 3299 * Inherited from caller. 3300 * 3301 * RETURNS: 3302 * 0 on success, negative errno on failure 3303 */ 3304 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3305 { 3306 char buf[32]; 3307 unsigned long orig_mask, xfer_mask; 3308 unsigned long pio_mask, mwdma_mask, udma_mask; 3309 int quiet, highbit; 3310 3311 quiet = !!(sel & ATA_DNXFER_QUIET); 3312 sel &= ~ATA_DNXFER_QUIET; 3313 3314 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3315 dev->mwdma_mask, 3316 dev->udma_mask); 3317 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3318 3319 switch (sel) { 3320 case ATA_DNXFER_PIO: 3321 highbit = fls(pio_mask) - 1; 3322 pio_mask &= ~(1 << highbit); 3323 break; 3324 3325 case ATA_DNXFER_DMA: 3326 if (udma_mask) { 3327 highbit = fls(udma_mask) - 1; 3328 udma_mask &= ~(1 << highbit); 3329 if (!udma_mask) 3330 return -ENOENT; 3331 } else if (mwdma_mask) { 3332 highbit = fls(mwdma_mask) - 1; 3333 mwdma_mask &= ~(1 << highbit); 3334 if (!mwdma_mask) 3335 return -ENOENT; 3336 } 3337 break; 3338 3339 case ATA_DNXFER_40C: 3340 udma_mask &= ATA_UDMA_MASK_40C; 3341 break; 3342 3343 case ATA_DNXFER_FORCE_PIO0: 3344 pio_mask &= 1; 3345 case ATA_DNXFER_FORCE_PIO: 3346 mwdma_mask = 0; 3347 udma_mask = 0; 3348 break; 3349 3350 default: 3351 BUG(); 3352 } 3353 3354 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3355 3356 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3357 return -ENOENT; 3358 3359 if (!quiet) { 3360 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3361 snprintf(buf, sizeof(buf), "%s:%s", 3362 ata_mode_string(xfer_mask), 3363 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3364 else 3365 snprintf(buf, sizeof(buf), "%s", 3366 ata_mode_string(xfer_mask)); 3367 3368 ata_dev_warn(dev, "limiting speed to %s\n", buf); 3369 } 3370 3371 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3372 &dev->udma_mask); 3373 3374 return 0; 3375 } 3376 3377 static int ata_dev_set_mode(struct ata_device *dev) 3378 { 3379 struct ata_port *ap = dev->link->ap; 3380 struct ata_eh_context *ehc = &dev->link->eh_context; 3381 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; 3382 const char *dev_err_whine = ""; 3383 int ign_dev_err = 0; 3384 unsigned int err_mask = 0; 3385 int rc; 3386 3387 dev->flags &= ~ATA_DFLAG_PIO; 3388 if (dev->xfer_shift == ATA_SHIFT_PIO) 3389 dev->flags |= 
ATA_DFLAG_PIO; 3390 3391 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) 3392 dev_err_whine = " (SET_XFERMODE skipped)"; 3393 else { 3394 if (nosetxfer) 3395 ata_dev_warn(dev, 3396 "NOSETXFER but PATA detected - can't " 3397 "skip SETXFER, might malfunction\n"); 3398 err_mask = ata_dev_set_xfermode(dev); 3399 } 3400 3401 if (err_mask & ~AC_ERR_DEV) 3402 goto fail; 3403 3404 /* revalidate */ 3405 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3406 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3407 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3408 if (rc) 3409 return rc; 3410 3411 if (dev->xfer_shift == ATA_SHIFT_PIO) { 3412 /* Old CFA may refuse this command, which is just fine */ 3413 if (ata_id_is_cfa(dev->id)) 3414 ign_dev_err = 1; 3415 /* Catch several broken garbage emulations plus some pre 3416 ATA devices */ 3417 if (ata_id_major_version(dev->id) == 0 && 3418 dev->pio_mode <= XFER_PIO_2) 3419 ign_dev_err = 1; 3420 /* Some very old devices and some bad newer ones fail 3421 any kind of SET_XFERMODE request but support PIO0-2 3422 timings and no IORDY */ 3423 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) 3424 ign_dev_err = 1; 3425 } 3426 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3427 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3428 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3429 dev->dma_mode == XFER_MW_DMA_0 && 3430 (dev->id[63] >> 8) & 1) 3431 ign_dev_err = 1; 3432 3433 /* if the device is actually configured correctly, ignore dev err */ 3434 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3435 ign_dev_err = 1; 3436 3437 if (err_mask & AC_ERR_DEV) { 3438 if (!ign_dev_err) 3439 goto fail; 3440 else 3441 dev_err_whine = " (device error ignored)"; 3442 } 3443 3444 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3445 dev->xfer_shift, (int)dev->xfer_mode); 3446 3447 ata_dev_info(dev, "configured for %s%s\n", 3448 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3449 dev_err_whine); 3450 3451 return 0; 3452 3453 fail: 3454 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask); 3455 return -EIO; 3456 } 3457 3458 /** 3459 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3460 * @link: link on which timings will be programmed 3461 * @r_failed_dev: out parameter for failed device 3462 * 3463 * Standard implementation of the function used to tune and set 3464 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3465 * ata_dev_set_mode() fails, pointer to the failing device is 3466 * returned in @r_failed_dev. 3467 * 3468 * LOCKING: 3469 * PCI/etc. bus probe sem. 
3470 * 3471 * RETURNS: 3472 * 0 on success, negative errno otherwise 3473 */ 3474 3475 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3476 { 3477 struct ata_port *ap = link->ap; 3478 struct ata_device *dev; 3479 int rc = 0, used_dma = 0, found = 0; 3480 3481 /* step 1: calculate xfer_mask */ 3482 ata_for_each_dev(dev, link, ENABLED) { 3483 unsigned long pio_mask, dma_mask; 3484 unsigned int mode_mask; 3485 3486 mode_mask = ATA_DMA_MASK_ATA; 3487 if (dev->class == ATA_DEV_ATAPI) 3488 mode_mask = ATA_DMA_MASK_ATAPI; 3489 else if (ata_id_is_cfa(dev->id)) 3490 mode_mask = ATA_DMA_MASK_CFA; 3491 3492 ata_dev_xfermask(dev); 3493 ata_force_xfermask(dev); 3494 3495 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 3496 3497 if (libata_dma_mask & mode_mask) 3498 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, 3499 dev->udma_mask); 3500 else 3501 dma_mask = 0; 3502 3503 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 3504 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3505 3506 found = 1; 3507 if (ata_dma_enabled(dev)) 3508 used_dma = 1; 3509 } 3510 if (!found) 3511 goto out; 3512 3513 /* step 2: always set host PIO timings */ 3514 ata_for_each_dev(dev, link, ENABLED) { 3515 if (dev->pio_mode == 0xff) { 3516 ata_dev_warn(dev, "no PIO support\n"); 3517 rc = -EINVAL; 3518 goto out; 3519 } 3520 3521 dev->xfer_mode = dev->pio_mode; 3522 dev->xfer_shift = ATA_SHIFT_PIO; 3523 if (ap->ops->set_piomode) 3524 ap->ops->set_piomode(ap, dev); 3525 } 3526 3527 /* step 3: set host DMA timings */ 3528 ata_for_each_dev(dev, link, ENABLED) { 3529 if (!ata_dma_enabled(dev)) 3530 continue; 3531 3532 dev->xfer_mode = dev->dma_mode; 3533 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); 3534 if (ap->ops->set_dmamode) 3535 ap->ops->set_dmamode(ap, dev); 3536 } 3537 3538 /* step 4: update devices' xfer mode */ 3539 ata_for_each_dev(dev, link, ENABLED) { 3540 rc = ata_dev_set_mode(dev); 3541 if (rc) 3542 goto out; 3543 } 3544 3545 /* Record simplex status. If we selected DMA then the other 3546 * host channels are not permitted to do so. 3547 */ 3548 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3549 ap->host->simplex_claimed = ap; 3550 3551 out: 3552 if (rc) 3553 *r_failed_dev = dev; 3554 return rc; 3555 } 3556 3557 /** 3558 * ata_wait_ready - wait for link to become ready 3559 * @link: link to be waited on 3560 * @deadline: deadline jiffies for the operation 3561 * @check_ready: callback to check link readiness 3562 * 3563 * Wait for @link to become ready. @check_ready should return 3564 * positive number if @link is ready, 0 if it isn't, -ENODEV if 3565 * link doesn't seem to be occupied, other errno for other error 3566 * conditions. 3567 * 3568 * Transient -ENODEV conditions are allowed for 3569 * ATA_TMOUT_FF_WAIT. 3570 * 3571 * LOCKING: 3572 * EH context. 3573 * 3574 * RETURNS: 3575 * 0 if @link is ready before @deadline; otherwise, -errno. 3576 */ 3577 int ata_wait_ready(struct ata_link *link, unsigned long deadline, 3578 int (*check_ready)(struct ata_link *link)) 3579 { 3580 unsigned long start = jiffies; 3581 unsigned long nodev_deadline; 3582 int warned = 0; 3583 3584 /* choose which 0xff timeout to use, read comment in libata.h */ 3585 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) 3586 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); 3587 else 3588 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); 3589 3590 /* Slave readiness can't be tested separately from master. 
On
	 * M/S emulation configuration, this function should be called
	 * only on the master and it will handle both master and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Wait for
		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
		 * offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}

/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}

/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  @timeout constrains the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
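 *
 *	For example, with @params = { 25, 500, 2000 }, SStatus is
 *	sampled every 25 ms and DET must hold a stable non-1 value for
 *	500 ms, with the whole wait capped at 2000 ms or at @deadline,
 *	whichever comes first.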
3687 */ 3688 int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3689 unsigned long deadline) 3690 { 3691 unsigned long interval = params[0]; 3692 unsigned long duration = params[1]; 3693 unsigned long last_jiffies, t; 3694 u32 last, cur; 3695 int rc; 3696 3697 t = ata_deadline(jiffies, params[2]); 3698 if (time_before(t, deadline)) 3699 deadline = t; 3700 3701 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3702 return rc; 3703 cur &= 0xf; 3704 3705 last = cur; 3706 last_jiffies = jiffies; 3707 3708 while (1) { 3709 ata_msleep(link->ap, interval); 3710 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3711 return rc; 3712 cur &= 0xf; 3713 3714 /* DET stable? */ 3715 if (cur == last) { 3716 if (cur == 1 && time_before(jiffies, deadline)) 3717 continue; 3718 if (time_after(jiffies, 3719 ata_deadline(last_jiffies, duration))) 3720 return 0; 3721 continue; 3722 } 3723 3724 /* unstable, start over */ 3725 last = cur; 3726 last_jiffies = jiffies; 3727 3728 /* Check deadline. If debouncing failed, return 3729 * -EPIPE to tell upper layer to lower link speed. 3730 */ 3731 if (time_after(jiffies, deadline)) 3732 return -EPIPE; 3733 } 3734 } 3735 3736 /** 3737 * sata_link_resume - resume SATA link 3738 * @link: ATA link to resume SATA 3739 * @params: timing parameters { interval, duration, timeout } in msec 3740 * @deadline: deadline jiffies for the operation 3741 * 3742 * Resume SATA phy @link and debounce it. 3743 * 3744 * LOCKING: 3745 * Kernel thread context (may sleep) 3746 * 3747 * RETURNS: 3748 * 0 on success, -errno on failure. 3749 */ 3750 int sata_link_resume(struct ata_link *link, const unsigned long *params, 3751 unsigned long deadline) 3752 { 3753 int tries = ATA_LINK_RESUME_TRIES; 3754 u32 scontrol, serror; 3755 int rc; 3756 3757 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3758 return rc; 3759 3760 /* 3761 * Writes to SControl sometimes get ignored under certain 3762 * controllers (ata_piix SIDPR). Make sure DET actually is 3763 * cleared. 3764 */ 3765 do { 3766 scontrol = (scontrol & 0x0f0) | 0x300; 3767 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3768 return rc; 3769 /* 3770 * Some PHYs react badly if SStatus is pounded 3771 * immediately after resuming. Delay 200ms before 3772 * debouncing. 3773 */ 3774 if (!(link->flags & ATA_LFLAG_NO_DB_DELAY)) 3775 ata_msleep(link->ap, 200); 3776 3777 /* is SControl restored correctly? */ 3778 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3779 return rc; 3780 } while ((scontrol & 0xf0f) != 0x300 && --tries); 3781 3782 if ((scontrol & 0xf0f) != 0x300) { 3783 ata_link_warn(link, "failed to resume link (SControl %X)\n", 3784 scontrol); 3785 return 0; 3786 } 3787 3788 if (tries < ATA_LINK_RESUME_TRIES) 3789 ata_link_warn(link, "link resume succeeded after %d retries\n", 3790 ATA_LINK_RESUME_TRIES - tries); 3791 3792 if ((rc = sata_link_debounce(link, params, deadline))) 3793 return rc; 3794 3795 /* clear SError, some PHYs require this even for SRST to work */ 3796 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) 3797 rc = sata_scr_write(link, SCR_ERROR, serror); 3798 3799 return rc != -EINVAL ? rc : 0; 3800 } 3801 3802 /** 3803 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields 3804 * @link: ATA link to manipulate SControl for 3805 * @policy: LPM policy to configure 3806 * @spm_wakeup: initiate LPM transition to active state 3807 * 3808 * Manipulate the IPM field of the SControl register of @link 3809 * according to @policy. 
If @policy is ATA_LPM_MAX_POWER and 3810 * @spm_wakeup is %true, the SPM field is manipulated to wake up 3811 * the link. This function also clears PHYRDY_CHG before 3812 * returning. 3813 * 3814 * LOCKING: 3815 * EH context. 3816 * 3817 * RETURNS: 3818 * 0 on success, -errno otherwise. 3819 */ 3820 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, 3821 bool spm_wakeup) 3822 { 3823 struct ata_eh_context *ehc = &link->eh_context; 3824 bool woken_up = false; 3825 u32 scontrol; 3826 int rc; 3827 3828 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 3829 if (rc) 3830 return rc; 3831 3832 switch (policy) { 3833 case ATA_LPM_MAX_POWER: 3834 /* disable all LPM transitions */ 3835 scontrol |= (0x7 << 8); 3836 /* initiate transition to active state */ 3837 if (spm_wakeup) { 3838 scontrol |= (0x4 << 12); 3839 woken_up = true; 3840 } 3841 break; 3842 case ATA_LPM_MED_POWER: 3843 /* allow LPM to PARTIAL */ 3844 scontrol &= ~(0x1 << 8); 3845 scontrol |= (0x6 << 8); 3846 break; 3847 case ATA_LPM_MIN_POWER: 3848 if (ata_link_nr_enabled(link) > 0) 3849 /* no restrictions on LPM transitions */ 3850 scontrol &= ~(0x7 << 8); 3851 else { 3852 /* empty port, power off */ 3853 scontrol &= ~0xf; 3854 scontrol |= (0x1 << 2); 3855 } 3856 break; 3857 default: 3858 WARN_ON(1); 3859 } 3860 3861 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 3862 if (rc) 3863 return rc; 3864 3865 /* give the link time to transit out of LPM state */ 3866 if (woken_up) 3867 msleep(10); 3868 3869 /* clear PHYRDY_CHG from SError */ 3870 ehc->i.serror &= ~SERR_PHYRDY_CHG; 3871 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); 3872 } 3873 3874 /** 3875 * ata_std_prereset - prepare for reset 3876 * @link: ATA link to be reset 3877 * @deadline: deadline jiffies for the operation 3878 * 3879 * @link is about to be reset. Initialize it. Failure from 3880 * prereset makes libata abort whole reset sequence and give up 3881 * that port, so prereset should be best-effort. It does its 3882 * best to prepare for reset sequence but if things go wrong, it 3883 * should just whine, not fail. 3884 * 3885 * LOCKING: 3886 * Kernel thread context (may sleep) 3887 * 3888 * RETURNS: 3889 * 0 on success, -errno otherwise. 3890 */ 3891 int ata_std_prereset(struct ata_link *link, unsigned long deadline) 3892 { 3893 struct ata_port *ap = link->ap; 3894 struct ata_eh_context *ehc = &link->eh_context; 3895 const unsigned long *timing = sata_ehc_deb_timing(ehc); 3896 int rc; 3897 3898 /* if we're about to do hardreset, nothing more to do */ 3899 if (ehc->i.action & ATA_EH_HARDRESET) 3900 return 0; 3901 3902 /* if SATA, resume link */ 3903 if (ap->flags & ATA_FLAG_SATA) { 3904 rc = sata_link_resume(link, timing, deadline); 3905 /* whine about phy resume failure but proceed */ 3906 if (rc && rc != -EOPNOTSUPP) 3907 ata_link_warn(link, 3908 "failed to resume link for reset (errno=%d)\n", 3909 rc); 3910 } 3911 3912 /* no point in trying softreset on offline link */ 3913 if (ata_phys_link_offline(link)) 3914 ehc->i.action &= ~ATA_EH_SOFTRESET; 3915 3916 return 0; 3917 } 3918 3919 /** 3920 * sata_link_hardreset - reset link via SATA phy reset 3921 * @link: link to reset 3922 * @timing: timing parameters { interval, duration, timeout } in msec 3923 * @deadline: deadline jiffies for the operation 3924 * @online: optional out parameter indicating link onlineness 3925 * @check_ready: optional callback to check link readiness 3926 * 3927 * SATA phy-reset @link using DET bits of SControl register. 
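 *	(COMRESET itself is requested by setting DET to 1, the 0x301
 *	write below; sata_link_resume() then clears DET to bring the
 *	PHY back online.)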
3928 * After hardreset, link readiness is waited upon using 3929 * ata_wait_ready() if @check_ready is specified. LLDs are 3930 * allowed to not specify @check_ready and wait itself after this 3931 * function returns. Device classification is LLD's 3932 * responsibility. 3933 * 3934 * *@online is set to one iff reset succeeded and @link is online 3935 * after reset. 3936 * 3937 * LOCKING: 3938 * Kernel thread context (may sleep) 3939 * 3940 * RETURNS: 3941 * 0 on success, -errno otherwise. 3942 */ 3943 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 3944 unsigned long deadline, 3945 bool *online, int (*check_ready)(struct ata_link *)) 3946 { 3947 u32 scontrol; 3948 int rc; 3949 3950 DPRINTK("ENTER\n"); 3951 3952 if (online) 3953 *online = false; 3954 3955 if (sata_set_spd_needed(link)) { 3956 /* SATA spec says nothing about how to reconfigure 3957 * spd. To be on the safe side, turn off phy during 3958 * reconfiguration. This works for at least ICH7 AHCI 3959 * and Sil3124. 3960 */ 3961 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3962 goto out; 3963 3964 scontrol = (scontrol & 0x0f0) | 0x304; 3965 3966 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3967 goto out; 3968 3969 sata_set_spd(link); 3970 } 3971 3972 /* issue phy wake/reset */ 3973 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3974 goto out; 3975 3976 scontrol = (scontrol & 0x0f0) | 0x301; 3977 3978 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 3979 goto out; 3980 3981 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3982 * 10.4.2 says at least 1 ms. 3983 */ 3984 ata_msleep(link->ap, 1); 3985 3986 /* bring link back */ 3987 rc = sata_link_resume(link, timing, deadline); 3988 if (rc) 3989 goto out; 3990 /* if link is offline nothing more to do */ 3991 if (ata_phys_link_offline(link)) 3992 goto out; 3993 3994 /* Link is online. From this point, -ENODEV too is an error. */ 3995 if (online) 3996 *online = true; 3997 3998 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { 3999 /* If PMP is supported, we have to do follow-up SRST. 4000 * Some PMPs don't send D2H Reg FIS after hardreset if 4001 * the first port is empty. Wait only for 4002 * ATA_TMOUT_PMP_SRST_WAIT. 4003 */ 4004 if (check_ready) { 4005 unsigned long pmp_deadline; 4006 4007 pmp_deadline = ata_deadline(jiffies, 4008 ATA_TMOUT_PMP_SRST_WAIT); 4009 if (time_after(pmp_deadline, deadline)) 4010 pmp_deadline = deadline; 4011 ata_wait_ready(link, pmp_deadline, check_ready); 4012 } 4013 rc = -EAGAIN; 4014 goto out; 4015 } 4016 4017 rc = 0; 4018 if (check_ready) 4019 rc = ata_wait_ready(link, deadline, check_ready); 4020 out: 4021 if (rc && rc != -EAGAIN) { 4022 /* online is set iff link is online && reset succeeded */ 4023 if (online) 4024 *online = false; 4025 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); 4026 } 4027 DPRINTK("EXIT, rc=%d\n", rc); 4028 return rc; 4029 } 4030 4031 /** 4032 * sata_std_hardreset - COMRESET w/o waiting or classification 4033 * @link: link to reset 4034 * @class: resulting class of attached device 4035 * @deadline: deadline jiffies for the operation 4036 * 4037 * Standard SATA COMRESET w/o waiting or classification. 4038 * 4039 * LOCKING: 4040 * Kernel thread context (may sleep) 4041 * 4042 * RETURNS: 4043 * 0 if link offline, -EAGAIN if link online, -errno on errors. 
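 *
 *	-EAGAIN for an online link is expected: since this reset
 *	neither waits for readiness nor classifies the device, the
 *	caller still has follow-up work to do before the device can
 *	be used.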
4044 */ 4045 int sata_std_hardreset(struct ata_link *link, unsigned int *class, 4046 unsigned long deadline) 4047 { 4048 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 4049 bool online; 4050 int rc; 4051 4052 /* do hardreset */ 4053 rc = sata_link_hardreset(link, timing, deadline, &online, NULL); 4054 return online ? -EAGAIN : rc; 4055 } 4056 4057 /** 4058 * ata_std_postreset - standard postreset callback 4059 * @link: the target ata_link 4060 * @classes: classes of attached devices 4061 * 4062 * This function is invoked after a successful reset. Note that 4063 * the device might have been reset more than once using 4064 * different reset methods before postreset is invoked. 4065 * 4066 * LOCKING: 4067 * Kernel thread context (may sleep) 4068 */ 4069 void ata_std_postreset(struct ata_link *link, unsigned int *classes) 4070 { 4071 u32 serror; 4072 4073 DPRINTK("ENTER\n"); 4074 4075 /* reset complete, clear SError */ 4076 if (!sata_scr_read(link, SCR_ERROR, &serror)) 4077 sata_scr_write(link, SCR_ERROR, serror); 4078 4079 /* print link status */ 4080 sata_print_link_status(link); 4081 4082 DPRINTK("EXIT\n"); 4083 } 4084 4085 /** 4086 * ata_dev_same_device - Determine whether new ID matches configured device 4087 * @dev: device to compare against 4088 * @new_class: class of the new device 4089 * @new_id: IDENTIFY page of the new device 4090 * 4091 * Compare @new_class and @new_id against @dev and determine 4092 * whether @dev is the device indicated by @new_class and 4093 * @new_id. 4094 * 4095 * LOCKING: 4096 * None. 4097 * 4098 * RETURNS: 4099 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 4100 */ 4101 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 4102 const u16 *new_id) 4103 { 4104 const u16 *old_id = dev->id; 4105 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 4106 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 4107 4108 if (dev->class != new_class) { 4109 ata_dev_info(dev, "class mismatch %d != %d\n", 4110 dev->class, new_class); 4111 return 0; 4112 } 4113 4114 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 4115 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 4116 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 4117 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 4118 4119 if (strcmp(model[0], model[1])) { 4120 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n", 4121 model[0], model[1]); 4122 return 0; 4123 } 4124 4125 if (strcmp(serial[0], serial[1])) { 4126 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n", 4127 serial[0], serial[1]); 4128 return 0; 4129 } 4130 4131 return 1; 4132 } 4133 4134 /** 4135 * ata_dev_reread_id - Re-read IDENTIFY data 4136 * @dev: target ATA device 4137 * @readid_flags: read ID flags 4138 * 4139 * Re-read IDENTIFY page and make sure @dev is still attached to 4140 * the port. 4141 * 4142 * LOCKING: 4143 * Kernel thread context (may sleep) 4144 * 4145 * RETURNS: 4146 * 0 on success, negative errno otherwise 4147 */ 4148 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 4149 { 4150 unsigned int class = dev->class; 4151 u16 *id = (void *)dev->link->ap->sector_buf; 4152 int rc; 4153 4154 /* read ID data */ 4155 rc = ata_dev_read_id(dev, &class, readid_flags, id); 4156 if (rc) 4157 return rc; 4158 4159 /* is the device still there? 
*/ 4160 if (!ata_dev_same_device(dev, class, id)) 4161 return -ENODEV; 4162 4163 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 4164 return 0; 4165 } 4166 4167 /** 4168 * ata_dev_revalidate - Revalidate ATA device 4169 * @dev: device to revalidate 4170 * @new_class: new class code 4171 * @readid_flags: read ID flags 4172 * 4173 * Re-read IDENTIFY page, make sure @dev is still attached to the 4174 * port and reconfigure it according to the new IDENTIFY page. 4175 * 4176 * LOCKING: 4177 * Kernel thread context (may sleep) 4178 * 4179 * RETURNS: 4180 * 0 on success, negative errno otherwise 4181 */ 4182 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 4183 unsigned int readid_flags) 4184 { 4185 u64 n_sectors = dev->n_sectors; 4186 u64 n_native_sectors = dev->n_native_sectors; 4187 int rc; 4188 4189 if (!ata_dev_enabled(dev)) 4190 return -ENODEV; 4191 4192 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 4193 if (ata_class_enabled(new_class) && 4194 new_class != ATA_DEV_ATA && 4195 new_class != ATA_DEV_ATAPI && 4196 new_class != ATA_DEV_ZAC && 4197 new_class != ATA_DEV_SEMB) { 4198 ata_dev_info(dev, "class mismatch %u != %u\n", 4199 dev->class, new_class); 4200 rc = -ENODEV; 4201 goto fail; 4202 } 4203 4204 /* re-read ID */ 4205 rc = ata_dev_reread_id(dev, readid_flags); 4206 if (rc) 4207 goto fail; 4208 4209 /* configure device according to the new ID */ 4210 rc = ata_dev_configure(dev); 4211 if (rc) 4212 goto fail; 4213 4214 /* verify n_sectors hasn't changed */ 4215 if (dev->class != ATA_DEV_ATA || !n_sectors || 4216 dev->n_sectors == n_sectors) 4217 return 0; 4218 4219 /* n_sectors has changed */ 4220 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n", 4221 (unsigned long long)n_sectors, 4222 (unsigned long long)dev->n_sectors); 4223 4224 /* 4225 * Something could have caused HPA to be unlocked 4226 * involuntarily. If n_native_sectors hasn't changed and the 4227 * new size matches it, keep the device. 4228 */ 4229 if (dev->n_native_sectors == n_native_sectors && 4230 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { 4231 ata_dev_warn(dev, 4232 "new n_sectors matches native, probably " 4233 "late HPA unlock, n_sectors updated\n"); 4234 /* use the larger n_sectors */ 4235 return 0; 4236 } 4237 4238 /* 4239 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try 4240 * unlocking HPA in those cases. 
4241 * 4242 * https://bugzilla.kernel.org/show_bug.cgi?id=15396 4243 */ 4244 if (dev->n_native_sectors == n_native_sectors && 4245 dev->n_sectors < n_sectors && n_sectors == n_native_sectors && 4246 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { 4247 ata_dev_warn(dev, 4248 "old n_sectors matches native, probably " 4249 "late HPA lock, will try to unlock HPA\n"); 4250 /* try unlocking HPA */ 4251 dev->flags |= ATA_DFLAG_UNLOCK_HPA; 4252 rc = -EIO; 4253 } else 4254 rc = -ENODEV; 4255 4256 /* restore original n_[native_]sectors and fail */ 4257 dev->n_native_sectors = n_native_sectors; 4258 dev->n_sectors = n_sectors; 4259 fail: 4260 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc); 4261 return rc; 4262 } 4263 4264 struct ata_blacklist_entry { 4265 const char *model_num; 4266 const char *model_rev; 4267 unsigned long horkage; 4268 }; 4269 4270 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4271 /* Devices with DMA related problems under Linux */ 4272 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4273 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4274 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4275 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4276 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4277 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4278 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4279 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4280 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4281 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA }, 4282 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4283 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4284 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4285 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4286 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4287 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA }, 4288 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4289 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4290 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4291 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4292 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4293 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4294 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4295 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4296 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4297 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4298 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4299 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4300 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4301 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA }, 4302 /* Odd clown on sil3726/4726 PMPs */ 4303 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4304 4305 /* Weird ATAPI devices */ 4306 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4307 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4308 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4309 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4310 4311 /* 4312 * Causes silent data corruption with higher max sects. 
4313 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com 4314 */ 4315 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, 4316 4317 /* Devices we expect to fail diagnostics */ 4318 4319 /* Devices where NCQ should be avoided */ 4320 /* NCQ is slow */ 4321 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4322 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4323 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4324 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4325 /* NCQ is broken */ 4326 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4327 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4328 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4329 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4330 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4331 4332 /* Seagate NCQ + FLUSH CACHE firmware bug */ 4333 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4334 ATA_HORKAGE_FIRMWARE_WARN }, 4335 4336 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4337 ATA_HORKAGE_FIRMWARE_WARN }, 4338 4339 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4340 ATA_HORKAGE_FIRMWARE_WARN }, 4341 4342 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4343 ATA_HORKAGE_FIRMWARE_WARN }, 4344 4345 /* drives which fail FPDMA_AA activation (some may freeze afterwards) */ 4346 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4347 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4348 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4349 4350 /* Blacklist entries taken from Silicon Image 3124/3132 4351 Windows driver .inf file - also several Linux problem reports */ 4352 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4353 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4354 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4355 4356 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ 4357 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, 4358 4359 /* devices which puke on READ_NATIVE_MAX */ 4360 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4361 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4362 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4363 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4364 4365 /* this one allows HPA unlocking but fails IOs on the area */ 4366 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, 4367 4368 /* Devices which report 1 sector over size HPA */ 4369 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4370 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4371 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4372 4373 /* Devices which get the IVB wrong */ 4374 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4375 /* Maybe we should just blacklist TSSTcorp... */ 4376 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, }, 4377 4378 /* Devices that do not need bridging limits applied */ 4379 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4380 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4381 4382 /* Devices which aren't very happy with higher link speeds */ 4383 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4384 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, }, 4385 4386 /* 4387 * Devices which choke on SETXFER. Applies only if both the 4388 * device and controller are SATA. 
4389 */ 4390 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, 4391 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER }, 4392 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER }, 4393 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4394 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4395 4396 /* devices that don't properly handle queued TRIM commands */ 4397 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4398 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4399 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4400 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4401 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4402 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4403 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4404 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4405 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4406 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4407 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4408 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4409 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4410 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4411 4412 /* devices that don't properly handle TRIM commands */ 4413 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, 4414 4415 /* 4416 * As defined, the DRAT (Deterministic Read After Trim) and RZAT 4417 * (Return Zero After Trim) flags in the ATA Command Set are 4418 * unreliable in the sense that they only define what happens if 4419 * the device successfully executed the DSM TRIM command. TRIM 4420 * is only advisory, however, and the device is free to silently 4421 * ignore all or parts of the request. 4422 * 4423 * Whitelist drives that are known to reliably return zeroes 4424 * after TRIM. 4425 */ 4426 4427 /* 4428 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude 4429 * that model before whitelisting all other intel SSDs. 4430 */ 4431 { "INTEL*SSDSC2MH*", NULL, 0, }, 4432 4433 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4434 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4435 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4436 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4437 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4438 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4439 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4440 4441 /* 4442 * Some WD SATA-I drives spin up and down erratically when the link 4443 * is put into the slumber mode. We don't have full list of the 4444 * affected devices. Disable LPM if the device matches one of the 4445 * known prefixes and is SATA-1. As a side effect LPM partial is 4446 * lost too. 
4447 * 4448 * https://bugzilla.kernel.org/show_bug.cgi?id=57211 4449 */ 4450 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4451 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4452 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4453 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4454 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4455 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4456 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4457 4458 /* End Marker */ 4459 { } 4460 }; 4461 4462 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4463 { 4464 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4465 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4466 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4467 4468 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4469 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4470 4471 while (ad->model_num) { 4472 if (glob_match(ad->model_num, model_num)) { 4473 if (ad->model_rev == NULL) 4474 return ad->horkage; 4475 if (glob_match(ad->model_rev, model_rev)) 4476 return ad->horkage; 4477 } 4478 ad++; 4479 } 4480 return 0; 4481 } 4482 4483 static int ata_dma_blacklisted(const struct ata_device *dev) 4484 { 4485 /* We don't support polling DMA. 4486 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4487 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4488 */ 4489 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4490 (dev->flags & ATA_DFLAG_CDB_INTR)) 4491 return 1; 4492 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4493 } 4494 4495 /** 4496 * ata_is_40wire - check drive side detection 4497 * @dev: device 4498 * 4499 * Perform drive side detection decoding, allowing for device vendors 4500 * who can't follow the documentation. 4501 */ 4502 4503 static int ata_is_40wire(struct ata_device *dev) 4504 { 4505 if (dev->horkage & ATA_HORKAGE_IVB) 4506 return ata_drive_40wire_relaxed(dev->id); 4507 return ata_drive_40wire(dev->id); 4508 } 4509 4510 /** 4511 * cable_is_40wire - 40/80/SATA decider 4512 * @ap: port to consider 4513 * 4514 * This function encapsulates the policy for speed management 4515 * in one place. At the moment we don't cache the result but 4516 * there is a good case for setting ap->cbl to the result when 4517 * we are called with unknown cables (and figuring out if it 4518 * impacts hotplug at all). 4519 * 4520 * Return 1 if the cable appears to be 40 wire. 4521 */ 4522 4523 static int cable_is_40wire(struct ata_port *ap) 4524 { 4525 struct ata_link *link; 4526 struct ata_device *dev; 4527 4528 /* If the controller thinks we are 40 wire, we are. */ 4529 if (ap->cbl == ATA_CBL_PATA40) 4530 return 1; 4531 4532 /* If the controller thinks we are 80 wire, we are. */ 4533 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4534 return 0; 4535 4536 /* If the system is known to be 40 wire short cable (eg 4537 * laptop), then we allow 80 wire modes even if the drive 4538 * isn't sure. 4539 */ 4540 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4541 return 0; 4542 4543 /* If the controller doesn't know, we scan. 4544 * 4545 * Note: We look for all 40 wire detects at this point. 
Any 4546 * 80 wire detect is taken to be 80 wire cable because 4547 * - in many setups only the one drive (slave if present) will 4548 * give a valid detect 4549 * - if you have a non detect capable drive you don't want it 4550 * to colour the choice 4551 */ 4552 ata_for_each_link(link, ap, EDGE) { 4553 ata_for_each_dev(dev, link, ENABLED) { 4554 if (!ata_is_40wire(dev)) 4555 return 0; 4556 } 4557 } 4558 return 1; 4559 } 4560 4561 /** 4562 * ata_dev_xfermask - Compute supported xfermask of the given device 4563 * @dev: Device to compute xfermask for 4564 * 4565 * Compute supported xfermask of @dev and store it in 4566 * dev->*_mask. This function is responsible for applying all 4567 * known limits including host controller limits, device 4568 * blacklist, etc... 4569 * 4570 * LOCKING: 4571 * None. 4572 */ 4573 static void ata_dev_xfermask(struct ata_device *dev) 4574 { 4575 struct ata_link *link = dev->link; 4576 struct ata_port *ap = link->ap; 4577 struct ata_host *host = ap->host; 4578 unsigned long xfer_mask; 4579 4580 /* controller modes available */ 4581 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4582 ap->mwdma_mask, ap->udma_mask); 4583 4584 /* drive modes available */ 4585 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4586 dev->mwdma_mask, dev->udma_mask); 4587 xfer_mask &= ata_id_xfermask(dev->id); 4588 4589 /* 4590 * CFA Advanced TrueIDE timings are not allowed on a shared 4591 * cable 4592 */ 4593 if (ata_dev_pair(dev)) { 4594 /* No PIO5 or PIO6 */ 4595 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4596 /* No MWDMA3 or MWDMA 4 */ 4597 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4598 } 4599 4600 if (ata_dma_blacklisted(dev)) { 4601 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4602 ata_dev_warn(dev, 4603 "device is on DMA blacklist, disabling DMA\n"); 4604 } 4605 4606 if ((host->flags & ATA_HOST_SIMPLEX) && 4607 host->simplex_claimed && host->simplex_claimed != ap) { 4608 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4609 ata_dev_warn(dev, 4610 "simplex DMA is claimed by other device, disabling DMA\n"); 4611 } 4612 4613 if (ap->flags & ATA_FLAG_NO_IORDY) 4614 xfer_mask &= ata_pio_mask_no_iordy(dev); 4615 4616 if (ap->ops->mode_filter) 4617 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4618 4619 /* Apply cable rule here. Don't apply it early because when 4620 * we handle hot plug the cable type can itself change. 4621 * Check this last so that we know if the transfer rate was 4622 * solely limited by the cable. 4623 * Unknown or 80 wire cables reported host side are checked 4624 * drive side as well. Cases where we know a 40wire cable 4625 * is used safely for 80 are not checked here. 4626 */ 4627 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4628 /* UDMA/44 or higher would be available */ 4629 if (cable_is_40wire(ap)) { 4630 ata_dev_warn(dev, 4631 "limited to UDMA/33 due to 40-wire cable\n"); 4632 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4633 } 4634 4635 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4636 &dev->mwdma_mask, &dev->udma_mask); 4637 } 4638 4639 /** 4640 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4641 * @dev: Device to which command will be sent 4642 * 4643 * Issue SET FEATURES - XFER MODE command to device @dev 4644 * on port @ap. 4645 * 4646 * LOCKING: 4647 * PCI/etc. bus probe sem. 4648 * 4649 * RETURNS: 4650 * 0 on success, AC_ERR_* mask otherwise. 
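 *
 * The requested mode is passed in the sector count register, as the
 * body below shows: dev->xfer_mode carries the ATA mode ID (for
 * UDMA/100 that would be XFER_UDMA_5, 0x45), while the special value
 * 0x01 selects PIO default mode with IORDY disabled.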
4651 */
4652
4653 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4654 {
4655 struct ata_taskfile tf;
4656 unsigned int err_mask;
4657
4658 /* set up set-features taskfile */
4659 DPRINTK("set features - xfer mode\n");
4660
4661 /* Some controllers and ATAPI devices show flaky interrupt
4662 * behavior after setting xfer mode. Use polling instead.
4663 */
4664 ata_tf_init(dev, &tf);
4665 tf.command = ATA_CMD_SET_FEATURES;
4666 tf.feature = SETFEATURES_XFER;
4667 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4668 tf.protocol = ATA_PROT_NODATA;
4669 /* If we are using IORDY we must send the mode setting command */
4670 if (ata_pio_need_iordy(dev))
4671 tf.nsect = dev->xfer_mode;
4672 /* If the device has IORDY and the controller does not - turn it off */
4673 else if (ata_id_has_iordy(dev->id))
4674 tf.nsect = 0x01;
4675 else /* In the ancient relic department - skip all of this */
4676 return 0;
4677
4678 /* On some disks, this command causes spin-up, so we need longer timeout */
4679 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4680
4681 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4682 return err_mask;
4683 }
4684
4685 /**
4686 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4687 * @dev: Device to which command will be sent
4688 * @enable: Whether to enable or disable the feature
4689 * @feature: The sector count represents the feature to set
4690 *
4691 * Issue SET FEATURES - SATA FEATURES command to device @dev
4692 * on port @ap with the sector count set to @feature.
4693 *
4694 * LOCKING:
4695 * PCI/etc. bus probe sem.
4696 *
4697 * RETURNS:
4698 * 0 on success, AC_ERR_* mask otherwise.
4699 */
4700 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4701 {
4702 struct ata_taskfile tf;
4703 unsigned int err_mask;
4704 unsigned long timeout = 0;
4705
4706 /* set up set-features taskfile */
4707 DPRINTK("set features - SATA features\n");
4708
4709 ata_tf_init(dev, &tf);
4710 tf.command = ATA_CMD_SET_FEATURES;
4711 tf.feature = enable;
4712 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4713 tf.protocol = ATA_PROT_NODATA;
4714 tf.nsect = feature;
4715
4716 if (enable == SETFEATURES_SPINUP)
4717 timeout = ata_probe_timeout ?
4718 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4719 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4720
4721 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4722 return err_mask;
4723 }
4724 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4725
4726 /**
4727 * ata_dev_init_params - Issue INIT DEV PARAMS command
4728 * @dev: Device to which command will be sent
4729 * @heads: Number of heads (taskfile parameter)
4730 * @sectors: Number of sectors (taskfile parameter)
4731 *
4732 * LOCKING:
4733 * Kernel thread context (may sleep)
4734 *
4735 * RETURNS:
4736 * 0 on success, AC_ERR_* mask otherwise.
4737 */
4738 static unsigned int ata_dev_init_params(struct ata_device *dev,
4739 u16 heads, u16 sectors)
4740 {
4741 struct ata_taskfile tf;
4742 unsigned int err_mask;
4743
4744 /* Number of sectors per track 1-255. Number of heads 1-16 */
4745 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4746 return AC_ERR_INVALID;
4747
4748 /* set up init dev params taskfile */
4749 DPRINTK("init dev params \n");
4750
4751 ata_tf_init(dev, &tf);
4752 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4753 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4754 tf.protocol = ATA_PROT_NODATA;
4755 tf.nsect = sectors;
4756 tf.device |= (heads - 1) & 0x0f; /* max head = num.
of heads - 1 */ 4757 4758 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4759 /* A clean abort indicates an original or just out of spec drive 4760 and we should continue as we issue the setup based on the 4761 drive reported working geometry */ 4762 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4763 err_mask = 0; 4764 4765 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4766 return err_mask; 4767 } 4768 4769 /** 4770 * ata_sg_clean - Unmap DMA memory associated with command 4771 * @qc: Command containing DMA memory to be released 4772 * 4773 * Unmap all mapped DMA memory associated with this command. 4774 * 4775 * LOCKING: 4776 * spin_lock_irqsave(host lock) 4777 */ 4778 void ata_sg_clean(struct ata_queued_cmd *qc) 4779 { 4780 struct ata_port *ap = qc->ap; 4781 struct scatterlist *sg = qc->sg; 4782 int dir = qc->dma_dir; 4783 4784 WARN_ON_ONCE(sg == NULL); 4785 4786 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4787 4788 if (qc->n_elem) 4789 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 4790 4791 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4792 qc->sg = NULL; 4793 } 4794 4795 /** 4796 * atapi_check_dma - Check whether ATAPI DMA can be supported 4797 * @qc: Metadata associated with taskfile to check 4798 * 4799 * Allow low-level driver to filter ATA PACKET commands, returning 4800 * a status indicating whether or not it is OK to use DMA for the 4801 * supplied PACKET command. 4802 * 4803 * LOCKING: 4804 * spin_lock_irqsave(host lock) 4805 * 4806 * RETURNS: 0 when ATAPI DMA can be used 4807 * nonzero otherwise 4808 */ 4809 int atapi_check_dma(struct ata_queued_cmd *qc) 4810 { 4811 struct ata_port *ap = qc->ap; 4812 4813 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4814 * few ATAPI devices choke on such DMA requests. 4815 */ 4816 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4817 unlikely(qc->nbytes & 15)) 4818 return 1; 4819 4820 if (ap->ops->check_atapi_dma) 4821 return ap->ops->check_atapi_dma(qc); 4822 4823 return 0; 4824 } 4825 4826 /** 4827 * ata_std_qc_defer - Check whether a qc needs to be deferred 4828 * @qc: ATA command in question 4829 * 4830 * Non-NCQ commands cannot run with any other command, NCQ or 4831 * not. As upper layer only knows the queue depth, we are 4832 * responsible for maintaining exclusion. This function checks 4833 * whether a new command @qc can be issued. 4834 * 4835 * LOCKING: 4836 * spin_lock_irqsave(host lock) 4837 * 4838 * RETURNS: 4839 * ATA_DEFER_* if deferring is needed, 0 otherwise. 4840 */ 4841 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4842 { 4843 struct ata_link *link = qc->dev->link; 4844 4845 if (qc->tf.protocol == ATA_PROT_NCQ) { 4846 if (!ata_tag_valid(link->active_tag)) 4847 return 0; 4848 } else { 4849 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4850 return 0; 4851 } 4852 4853 return ATA_DEFER_LINK; 4854 } 4855 4856 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4857 4858 /** 4859 * ata_sg_init - Associate command with scatter-gather table. 4860 * @qc: Command to be associated 4861 * @sg: Scatter-gather table. 4862 * @n_elem: Number of elements in s/g table. 4863 * 4864 * Initialize the data-related elements of queued_cmd @qc 4865 * to point to a scatter-gather table @sg, containing @n_elem 4866 * elements. 
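 *
 * A single-buffer command is typically set up along these lines
 * (illustrative sketch; the scatterlist must stay valid until the
 * command completes):
 *
 *	sg_init_one(&sgl, buf, buflen);
 *	ata_sg_init(qc, &sgl, 1);
 *	qc->nbytes = buflen;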
4867 * 4868 * LOCKING: 4869 * spin_lock_irqsave(host lock) 4870 */ 4871 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4872 unsigned int n_elem) 4873 { 4874 qc->sg = sg; 4875 qc->n_elem = n_elem; 4876 qc->cursg = qc->sg; 4877 } 4878 4879 /** 4880 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4881 * @qc: Command with scatter-gather table to be mapped. 4882 * 4883 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4884 * 4885 * LOCKING: 4886 * spin_lock_irqsave(host lock) 4887 * 4888 * RETURNS: 4889 * Zero on success, negative on error. 4890 * 4891 */ 4892 static int ata_sg_setup(struct ata_queued_cmd *qc) 4893 { 4894 struct ata_port *ap = qc->ap; 4895 unsigned int n_elem; 4896 4897 VPRINTK("ENTER, ata%u\n", ap->print_id); 4898 4899 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 4900 if (n_elem < 1) 4901 return -1; 4902 4903 DPRINTK("%d sg elements mapped\n", n_elem); 4904 qc->orig_n_elem = qc->n_elem; 4905 qc->n_elem = n_elem; 4906 qc->flags |= ATA_QCFLAG_DMAMAP; 4907 4908 return 0; 4909 } 4910 4911 /** 4912 * swap_buf_le16 - swap halves of 16-bit words in place 4913 * @buf: Buffer to swap 4914 * @buf_words: Number of 16-bit words in buffer. 4915 * 4916 * Swap halves of 16-bit words if needed to convert from 4917 * little-endian byte order to native cpu byte order, or 4918 * vice-versa. 4919 * 4920 * LOCKING: 4921 * Inherited from caller. 4922 */ 4923 void swap_buf_le16(u16 *buf, unsigned int buf_words) 4924 { 4925 #ifdef __BIG_ENDIAN 4926 unsigned int i; 4927 4928 for (i = 0; i < buf_words; i++) 4929 buf[i] = le16_to_cpu(buf[i]); 4930 #endif /* __BIG_ENDIAN */ 4931 } 4932 4933 /** 4934 * ata_qc_new_init - Request an available ATA command, and initialize it 4935 * @dev: Device from whom we request an available command structure 4936 * @tag: tag 4937 * 4938 * LOCKING: 4939 * None. 4940 */ 4941 4942 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag) 4943 { 4944 struct ata_port *ap = dev->link->ap; 4945 struct ata_queued_cmd *qc; 4946 4947 /* no command while frozen */ 4948 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4949 return NULL; 4950 4951 /* libsas case */ 4952 if (ap->flags & ATA_FLAG_SAS_HOST) { 4953 tag = ata_sas_allocate_tag(ap); 4954 if (tag < 0) 4955 return NULL; 4956 } 4957 4958 qc = __ata_qc_from_tag(ap, tag); 4959 qc->tag = tag; 4960 qc->scsicmd = NULL; 4961 qc->ap = ap; 4962 qc->dev = dev; 4963 4964 ata_qc_reinit(qc); 4965 4966 return qc; 4967 } 4968 4969 /** 4970 * ata_qc_free - free unused ata_queued_cmd 4971 * @qc: Command to complete 4972 * 4973 * Designed to free unused ata_queued_cmd object 4974 * in case something prevents using it. 
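 * As the body below shows, the tag is poisoned (and, for SAS hosts,
 * returned to the tag allocator) so that later ata_tag_valid() checks
 * catch any stale use.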
4975 * 4976 * LOCKING: 4977 * spin_lock_irqsave(host lock) 4978 */ 4979 void ata_qc_free(struct ata_queued_cmd *qc) 4980 { 4981 struct ata_port *ap; 4982 unsigned int tag; 4983 4984 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4985 ap = qc->ap; 4986 4987 qc->flags = 0; 4988 tag = qc->tag; 4989 if (likely(ata_tag_valid(tag))) { 4990 qc->tag = ATA_TAG_POISON; 4991 if (ap->flags & ATA_FLAG_SAS_HOST) 4992 ata_sas_free_tag(tag, ap); 4993 } 4994 } 4995 4996 void __ata_qc_complete(struct ata_queued_cmd *qc) 4997 { 4998 struct ata_port *ap; 4999 struct ata_link *link; 5000 5001 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 5002 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 5003 ap = qc->ap; 5004 link = qc->dev->link; 5005 5006 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 5007 ata_sg_clean(qc); 5008 5009 /* command should be marked inactive atomically with qc completion */ 5010 if (qc->tf.protocol == ATA_PROT_NCQ) { 5011 link->sactive &= ~(1 << qc->tag); 5012 if (!link->sactive) 5013 ap->nr_active_links--; 5014 } else { 5015 link->active_tag = ATA_TAG_POISON; 5016 ap->nr_active_links--; 5017 } 5018 5019 /* clear exclusive status */ 5020 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 5021 ap->excl_link == link)) 5022 ap->excl_link = NULL; 5023 5024 /* atapi: mark qc as inactive to prevent the interrupt handler 5025 * from completing the command twice later, before the error handler 5026 * is called. (when rc != 0 and atapi request sense is needed) 5027 */ 5028 qc->flags &= ~ATA_QCFLAG_ACTIVE; 5029 ap->qc_active &= ~(1 << qc->tag); 5030 5031 /* call completion callback */ 5032 qc->complete_fn(qc); 5033 } 5034 5035 static void fill_result_tf(struct ata_queued_cmd *qc) 5036 { 5037 struct ata_port *ap = qc->ap; 5038 5039 qc->result_tf.flags = qc->tf.flags; 5040 ap->ops->qc_fill_rtf(qc); 5041 } 5042 5043 static void ata_verify_xfer(struct ata_queued_cmd *qc) 5044 { 5045 struct ata_device *dev = qc->dev; 5046 5047 if (ata_is_nodata(qc->tf.protocol)) 5048 return; 5049 5050 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 5051 return; 5052 5053 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 5054 } 5055 5056 /** 5057 * ata_qc_complete - Complete an active ATA command 5058 * @qc: Command to complete 5059 * 5060 * Indicate to the mid and upper layers that an ATA command has 5061 * completed, with either an ok or not-ok status. 5062 * 5063 * Refrain from calling this function multiple times when 5064 * successfully completing multiple NCQ commands. 5065 * ata_qc_complete_multiple() should be used instead, which will 5066 * properly update IRQ expect state. 5067 * 5068 * LOCKING: 5069 * spin_lock_irqsave(host lock) 5070 */ 5071 void ata_qc_complete(struct ata_queued_cmd *qc) 5072 { 5073 struct ata_port *ap = qc->ap; 5074 5075 /* XXX: New EH and old EH use different mechanisms to 5076 * synchronize EH with regular execution path. 5077 * 5078 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 5079 * Normal execution path is responsible for not accessing a 5080 * failed qc. libata core enforces the rule by returning NULL 5081 * from ata_qc_from_tag() for failed qcs. 5082 * 5083 * Old EH depends on ata_qc_complete() nullifying completion 5084 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 5085 * not synchronize with interrupt handler. Only PIO task is 5086 * taken care of. 
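 *
 * In short, in the new-EH path below: internal commands complete
 * directly with their result TF filled, failed commands are handed to
 * EH, and a few successful commands (SET FEATURES, INIT DEV PARAMS,
 * SET MULTI, SLEEP) get post-processing before normal completion.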
5087 */
5088 if (ap->ops->error_handler) {
5089 struct ata_device *dev = qc->dev;
5090 struct ata_eh_info *ehi = &dev->link->eh_info;
5091
5092 if (unlikely(qc->err_mask))
5093 qc->flags |= ATA_QCFLAG_FAILED;
5094
5095 /*
5096 * Finish internal commands without any further processing
5097 * and always with the result TF filled.
5098 */
5099 if (unlikely(ata_tag_internal(qc->tag))) {
5100 fill_result_tf(qc);
5101 trace_ata_qc_complete_internal(qc);
5102 __ata_qc_complete(qc);
5103 return;
5104 }
5105
5106 /*
5107 * Non-internal qc has failed. Fill the result TF and
5108 * summon EH.
5109 */
5110 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5111 fill_result_tf(qc);
5112 trace_ata_qc_complete_failed(qc);
5113 ata_qc_schedule_eh(qc);
5114 return;
5115 }
5116
5117 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5118
5119 /* read result TF if requested */
5120 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5121 fill_result_tf(qc);
5122
5123 trace_ata_qc_complete_done(qc);
5124 /* Some commands need post-processing after successful
5125 * completion.
5126 */
5127 switch (qc->tf.command) {
5128 case ATA_CMD_SET_FEATURES:
5129 if (qc->tf.feature != SETFEATURES_WC_ON &&
5130 qc->tf.feature != SETFEATURES_WC_OFF)
5131 break;
5132 /* fall through */
5133 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5134 case ATA_CMD_SET_MULTI: /* multi_count changed */
5135 /* revalidate device */
5136 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5137 ata_port_schedule_eh(ap);
5138 break;
5139
5140 case ATA_CMD_SLEEP:
5141 dev->flags |= ATA_DFLAG_SLEEPING;
5142 break;
5143 }
5144
5145 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5146 ata_verify_xfer(qc);
5147
5148 __ata_qc_complete(qc);
5149 } else {
5150 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5151 return;
5152
5153 /* read result TF if failed or requested */
5154 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5155 fill_result_tf(qc);
5156
5157 __ata_qc_complete(qc);
5158 }
5159 }
5160
5161 /**
5162 * ata_qc_complete_multiple - Complete multiple qcs successfully
5163 * @ap: port in question
5164 * @qc_active: new qc_active mask
5165 *
5166 * Complete in-flight commands. This function is meant to be
5167 * called from low-level driver's interrupt routine to complete
5168 * requests normally. ap->qc_active and @qc_active are compared
5169 * and commands are completed accordingly.
5170 *
5171 * Always use this function when completing multiple NCQ commands
5172 * from IRQ handlers instead of calling ata_qc_complete()
5173 * multiple times to keep IRQ expect status properly in sync.
5174 *
5175 * LOCKING:
5176 * spin_lock_irqsave(host lock)
5177 *
5178 * RETURNS:
5179 * Number of completed commands on success, -errno otherwise.
5180 */
5181 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5182 {
5183 int nr_done = 0;
5184 u32 done_mask;
5185
5186 done_mask = ap->qc_active ^ qc_active;
5187
5188 if (unlikely(done_mask & qc_active)) {
5189 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5190 ap->qc_active, qc_active);
5191 return -EINVAL;
5192 }
5193
5194 while (done_mask) {
5195 struct ata_queued_cmd *qc;
5196 unsigned int tag = __ffs(done_mask);
5197
5198 qc = ata_qc_from_tag(ap, tag);
5199 if (qc) {
5200 ata_qc_complete(qc);
5201 nr_done++;
5202 }
5203 done_mask &= ~(1 << tag);
5204 }
5205
5206 return nr_done;
5207 }
5208
5209 /**
5210 * ata_qc_issue - issue taskfile to device
5211 * @qc: command to issue to device
5212 *
5213 * Prepare an ATA command for submission to the device.
5214 * This includes mapping the data into a DMA-able 5215 * area, filling in the S/G table, and finally 5216 * writing the taskfile to hardware, starting the command. 5217 * 5218 * LOCKING: 5219 * spin_lock_irqsave(host lock) 5220 */ 5221 void ata_qc_issue(struct ata_queued_cmd *qc) 5222 { 5223 struct ata_port *ap = qc->ap; 5224 struct ata_link *link = qc->dev->link; 5225 u8 prot = qc->tf.protocol; 5226 5227 /* Make sure only one non-NCQ command is outstanding. The 5228 * check is skipped for old EH because it reuses active qc to 5229 * request ATAPI sense. 5230 */ 5231 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5232 5233 if (ata_is_ncq(prot)) { 5234 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); 5235 5236 if (!link->sactive) 5237 ap->nr_active_links++; 5238 link->sactive |= 1 << qc->tag; 5239 } else { 5240 WARN_ON_ONCE(link->sactive); 5241 5242 ap->nr_active_links++; 5243 link->active_tag = qc->tag; 5244 } 5245 5246 qc->flags |= ATA_QCFLAG_ACTIVE; 5247 ap->qc_active |= 1 << qc->tag; 5248 5249 /* 5250 * We guarantee to LLDs that they will have at least one 5251 * non-zero sg if the command is a data command. 5252 */ 5253 if (WARN_ON_ONCE(ata_is_data(prot) && 5254 (!qc->sg || !qc->n_elem || !qc->nbytes))) 5255 goto sys_err; 5256 5257 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5258 (ap->flags & ATA_FLAG_PIO_DMA))) 5259 if (ata_sg_setup(qc)) 5260 goto sys_err; 5261 5262 /* if device is sleeping, schedule reset and abort the link */ 5263 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5264 link->eh_info.action |= ATA_EH_RESET; 5265 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5266 ata_link_abort(link); 5267 return; 5268 } 5269 5270 ap->ops->qc_prep(qc); 5271 trace_ata_qc_issue(qc); 5272 qc->err_mask |= ap->ops->qc_issue(qc); 5273 if (unlikely(qc->err_mask)) 5274 goto err; 5275 return; 5276 5277 sys_err: 5278 qc->err_mask |= AC_ERR_SYSTEM; 5279 err: 5280 ata_qc_complete(qc); 5281 } 5282 5283 /** 5284 * sata_scr_valid - test whether SCRs are accessible 5285 * @link: ATA link to test SCR accessibility for 5286 * 5287 * Test whether SCRs are accessible for @link. 5288 * 5289 * LOCKING: 5290 * None. 5291 * 5292 * RETURNS: 5293 * 1 if SCRs are accessible, 0 otherwise. 5294 */ 5295 int sata_scr_valid(struct ata_link *link) 5296 { 5297 struct ata_port *ap = link->ap; 5298 5299 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5300 } 5301 5302 /** 5303 * sata_scr_read - read SCR register of the specified port 5304 * @link: ATA link to read SCR for 5305 * @reg: SCR to read 5306 * @val: Place to store read value 5307 * 5308 * Read SCR register @reg of @link into *@val. This function is 5309 * guaranteed to succeed if @link is ap->link, the cable type of 5310 * the port is SATA and the port implements ->scr_read. 5311 * 5312 * LOCKING: 5313 * None if @link is ap->link. Kernel thread context otherwise. 5314 * 5315 * RETURNS: 5316 * 0 on success, negative errno on failure. 5317 */ 5318 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5319 { 5320 if (ata_is_host_link(link)) { 5321 if (sata_scr_valid(link)) 5322 return link->ap->ops->scr_read(link, reg, val); 5323 return -EOPNOTSUPP; 5324 } 5325 5326 return sata_pmp_scr_read(link, reg, val); 5327 } 5328 5329 /** 5330 * sata_scr_write - write SCR register of the specified port 5331 * @link: ATA link to write SCR for 5332 * @reg: SCR to write 5333 * @val: value to write 5334 * 5335 * Write @val to SCR register @reg of @link. 
This function is 5336 * guaranteed to succeed if @link is ap->link, the cable type of 5337 * the port is SATA and the port implements ->scr_read. 5338 * 5339 * LOCKING: 5340 * None if @link is ap->link. Kernel thread context otherwise. 5341 * 5342 * RETURNS: 5343 * 0 on success, negative errno on failure. 5344 */ 5345 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5346 { 5347 if (ata_is_host_link(link)) { 5348 if (sata_scr_valid(link)) 5349 return link->ap->ops->scr_write(link, reg, val); 5350 return -EOPNOTSUPP; 5351 } 5352 5353 return sata_pmp_scr_write(link, reg, val); 5354 } 5355 5356 /** 5357 * sata_scr_write_flush - write SCR register of the specified port and flush 5358 * @link: ATA link to write SCR for 5359 * @reg: SCR to write 5360 * @val: value to write 5361 * 5362 * This function is identical to sata_scr_write() except that this 5363 * function performs flush after writing to the register. 5364 * 5365 * LOCKING: 5366 * None if @link is ap->link. Kernel thread context otherwise. 5367 * 5368 * RETURNS: 5369 * 0 on success, negative errno on failure. 5370 */ 5371 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5372 { 5373 if (ata_is_host_link(link)) { 5374 int rc; 5375 5376 if (sata_scr_valid(link)) { 5377 rc = link->ap->ops->scr_write(link, reg, val); 5378 if (rc == 0) 5379 rc = link->ap->ops->scr_read(link, reg, &val); 5380 return rc; 5381 } 5382 return -EOPNOTSUPP; 5383 } 5384 5385 return sata_pmp_scr_write(link, reg, val); 5386 } 5387 5388 /** 5389 * ata_phys_link_online - test whether the given link is online 5390 * @link: ATA link to test 5391 * 5392 * Test whether @link is online. Note that this function returns 5393 * 0 if online status of @link cannot be obtained, so 5394 * ata_link_online(link) != !ata_link_offline(link). 5395 * 5396 * LOCKING: 5397 * None. 5398 * 5399 * RETURNS: 5400 * True if the port online status is available and online. 5401 */ 5402 bool ata_phys_link_online(struct ata_link *link) 5403 { 5404 u32 sstatus; 5405 5406 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5407 ata_sstatus_online(sstatus)) 5408 return true; 5409 return false; 5410 } 5411 5412 /** 5413 * ata_phys_link_offline - test whether the given link is offline 5414 * @link: ATA link to test 5415 * 5416 * Test whether @link is offline. Note that this function 5417 * returns 0 if offline status of @link cannot be obtained, so 5418 * ata_link_online(link) != !ata_link_offline(link). 5419 * 5420 * LOCKING: 5421 * None. 5422 * 5423 * RETURNS: 5424 * True if the port offline status is available and offline. 5425 */ 5426 bool ata_phys_link_offline(struct ata_link *link) 5427 { 5428 u32 sstatus; 5429 5430 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5431 !ata_sstatus_online(sstatus)) 5432 return true; 5433 return false; 5434 } 5435 5436 /** 5437 * ata_link_online - test whether the given link is online 5438 * @link: ATA link to test 5439 * 5440 * Test whether @link is online. This is identical to 5441 * ata_phys_link_online() when there's no slave link. When 5442 * there's a slave link, this function should only be called on 5443 * the master link and will return true if any of M/S links is 5444 * online. 5445 * 5446 * LOCKING: 5447 * None. 5448 * 5449 * RETURNS: 5450 * True if the port online status is available and online. 
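 *
 * Note that, as with the phys link helpers above, "not online" does
 * not imply "offline": if SCR access fails, both this function and
 * ata_link_offline() return false.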
5451 */ 5452 bool ata_link_online(struct ata_link *link) 5453 { 5454 struct ata_link *slave = link->ap->slave_link; 5455 5456 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5457 5458 return ata_phys_link_online(link) || 5459 (slave && ata_phys_link_online(slave)); 5460 } 5461 5462 /** 5463 * ata_link_offline - test whether the given link is offline 5464 * @link: ATA link to test 5465 * 5466 * Test whether @link is offline. This is identical to 5467 * ata_phys_link_offline() when there's no slave link. When 5468 * there's a slave link, this function should only be called on 5469 * the master link and will return true if both M/S links are 5470 * offline. 5471 * 5472 * LOCKING: 5473 * None. 5474 * 5475 * RETURNS: 5476 * True if the port offline status is available and offline. 5477 */ 5478 bool ata_link_offline(struct ata_link *link) 5479 { 5480 struct ata_link *slave = link->ap->slave_link; 5481 5482 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5483 5484 return ata_phys_link_offline(link) && 5485 (!slave || ata_phys_link_offline(slave)); 5486 } 5487 5488 #ifdef CONFIG_PM 5489 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, 5490 unsigned int action, unsigned int ehi_flags, 5491 bool async) 5492 { 5493 struct ata_link *link; 5494 unsigned long flags; 5495 5496 /* Previous resume operation might still be in 5497 * progress. Wait for PM_PENDING to clear. 5498 */ 5499 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5500 ata_port_wait_eh(ap); 5501 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5502 } 5503 5504 /* request PM ops to EH */ 5505 spin_lock_irqsave(ap->lock, flags); 5506 5507 ap->pm_mesg = mesg; 5508 ap->pflags |= ATA_PFLAG_PM_PENDING; 5509 ata_for_each_link(link, ap, HOST_FIRST) { 5510 link->eh_info.action |= action; 5511 link->eh_info.flags |= ehi_flags; 5512 } 5513 5514 ata_port_schedule_eh(ap); 5515 5516 spin_unlock_irqrestore(ap->lock, flags); 5517 5518 if (!async) { 5519 ata_port_wait_eh(ap); 5520 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5521 } 5522 } 5523 5524 /* 5525 * On some hardware, device fails to respond after spun down for suspend. As 5526 * the device won't be used before being resumed, we don't need to touch the 5527 * device. Ask EH to skip the usual stuff and proceed directly to suspend. 
5528 * 5529 * http://thread.gmane.org/gmane.linux.ide/46764 5530 */ 5531 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET 5532 | ATA_EHI_NO_AUTOPSY 5533 | ATA_EHI_NO_RECOVERY; 5534 5535 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg) 5536 { 5537 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false); 5538 } 5539 5540 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg) 5541 { 5542 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true); 5543 } 5544 5545 static int ata_port_pm_suspend(struct device *dev) 5546 { 5547 struct ata_port *ap = to_ata_port(dev); 5548 5549 if (pm_runtime_suspended(dev)) 5550 return 0; 5551 5552 ata_port_suspend(ap, PMSG_SUSPEND); 5553 return 0; 5554 } 5555 5556 static int ata_port_pm_freeze(struct device *dev) 5557 { 5558 struct ata_port *ap = to_ata_port(dev); 5559 5560 if (pm_runtime_suspended(dev)) 5561 return 0; 5562 5563 ata_port_suspend(ap, PMSG_FREEZE); 5564 return 0; 5565 } 5566 5567 static int ata_port_pm_poweroff(struct device *dev) 5568 { 5569 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE); 5570 return 0; 5571 } 5572 5573 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY 5574 | ATA_EHI_QUIET; 5575 5576 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg) 5577 { 5578 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false); 5579 } 5580 5581 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg) 5582 { 5583 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true); 5584 } 5585 5586 static int ata_port_pm_resume(struct device *dev) 5587 { 5588 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME); 5589 pm_runtime_disable(dev); 5590 pm_runtime_set_active(dev); 5591 pm_runtime_enable(dev); 5592 return 0; 5593 } 5594 5595 /* 5596 * For ODDs, the upper layer will poll for media change every few seconds, 5597 * which will make it enter and leave suspend state every few seconds. And 5598 * as each suspend will cause a hard/soft reset, the gain of runtime suspend 5599 * is very little and the ODD may malfunction after constantly being reset. 5600 * So the idle callback here will not proceed to suspend if a non-ZPODD capable 5601 * ODD is attached to the port. 
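 * (ZPODD-capable drives are the exception: they can be powered off
 * while idle, so devices for which zpodd_dev_enabled() is true do not
 * block runtime suspend.)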
5602 */ 5603 static int ata_port_runtime_idle(struct device *dev) 5604 { 5605 struct ata_port *ap = to_ata_port(dev); 5606 struct ata_link *link; 5607 struct ata_device *adev; 5608 5609 ata_for_each_link(link, ap, HOST_FIRST) { 5610 ata_for_each_dev(adev, link, ENABLED) 5611 if (adev->class == ATA_DEV_ATAPI && 5612 !zpodd_dev_enabled(adev)) 5613 return -EBUSY; 5614 } 5615 5616 return 0; 5617 } 5618 5619 static int ata_port_runtime_suspend(struct device *dev) 5620 { 5621 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND); 5622 return 0; 5623 } 5624 5625 static int ata_port_runtime_resume(struct device *dev) 5626 { 5627 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME); 5628 return 0; 5629 } 5630 5631 static const struct dev_pm_ops ata_port_pm_ops = { 5632 .suspend = ata_port_pm_suspend, 5633 .resume = ata_port_pm_resume, 5634 .freeze = ata_port_pm_freeze, 5635 .thaw = ata_port_pm_resume, 5636 .poweroff = ata_port_pm_poweroff, 5637 .restore = ata_port_pm_resume, 5638 5639 .runtime_suspend = ata_port_runtime_suspend, 5640 .runtime_resume = ata_port_runtime_resume, 5641 .runtime_idle = ata_port_runtime_idle, 5642 }; 5643 5644 /* sas ports don't participate in pm runtime management of ata_ports, 5645 * and need to resume ata devices at the domain level, not the per-port 5646 * level. sas suspend/resume is async to allow parallel port recovery 5647 * since sas has multiple ata_port instances per Scsi_Host. 5648 */ 5649 void ata_sas_port_suspend(struct ata_port *ap) 5650 { 5651 ata_port_suspend_async(ap, PMSG_SUSPEND); 5652 } 5653 EXPORT_SYMBOL_GPL(ata_sas_port_suspend); 5654 5655 void ata_sas_port_resume(struct ata_port *ap) 5656 { 5657 ata_port_resume_async(ap, PMSG_RESUME); 5658 } 5659 EXPORT_SYMBOL_GPL(ata_sas_port_resume); 5660 5661 /** 5662 * ata_host_suspend - suspend host 5663 * @host: host to suspend 5664 * @mesg: PM message 5665 * 5666 * Suspend @host. Actual operation is performed by port suspend. 5667 */ 5668 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5669 { 5670 host->dev->power.power_state = mesg; 5671 return 0; 5672 } 5673 5674 /** 5675 * ata_host_resume - resume host 5676 * @host: host to resume 5677 * 5678 * Resume @host. Actual operation is performed by port resume. 5679 */ 5680 void ata_host_resume(struct ata_host *host) 5681 { 5682 host->dev->power.power_state = PMSG_ON; 5683 } 5684 #endif 5685 5686 struct device_type ata_port_type = { 5687 .name = "ata_port", 5688 #ifdef CONFIG_PM 5689 .pm = &ata_port_pm_ops, 5690 #endif 5691 }; 5692 5693 /** 5694 * ata_dev_init - Initialize an ata_device structure 5695 * @dev: Device structure to initialize 5696 * 5697 * Initialize @dev in preparation for probing. 5698 * 5699 * LOCKING: 5700 * Inherited from caller. 5701 */ 5702 void ata_dev_init(struct ata_device *dev) 5703 { 5704 struct ata_link *link = ata_dev_phys_link(dev); 5705 struct ata_port *ap = link->ap; 5706 unsigned long flags; 5707 5708 /* SATA spd limit is bound to the attached device, reset together */ 5709 link->sata_spd_limit = link->hw_sata_spd_limit; 5710 link->sata_spd = 0; 5711 5712 /* High bits of dev->flags are used to record warm plug 5713 * requests which occur asynchronously. Synchronize using 5714 * host lock. 
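 * Only the ATA_DFLAG_INIT_MASK bits are cleared under the lock below;
 * the rest of the structure is wiped via the CLEAR_BEGIN/CLEAR_END
 * window afterwards, outside the lock.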
5715 */ 5716 spin_lock_irqsave(ap->lock, flags); 5717 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5718 dev->horkage = 0; 5719 spin_unlock_irqrestore(ap->lock, flags); 5720 5721 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5722 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5723 dev->pio_mask = UINT_MAX; 5724 dev->mwdma_mask = UINT_MAX; 5725 dev->udma_mask = UINT_MAX; 5726 } 5727 5728 /** 5729 * ata_link_init - Initialize an ata_link structure 5730 * @ap: ATA port link is attached to 5731 * @link: Link structure to initialize 5732 * @pmp: Port multiplier port number 5733 * 5734 * Initialize @link. 5735 * 5736 * LOCKING: 5737 * Kernel thread context (may sleep) 5738 */ 5739 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5740 { 5741 int i; 5742 5743 /* clear everything except for devices */ 5744 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, 5745 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); 5746 5747 link->ap = ap; 5748 link->pmp = pmp; 5749 link->active_tag = ATA_TAG_POISON; 5750 link->hw_sata_spd_limit = UINT_MAX; 5751 5752 /* can't use iterator, ap isn't initialized yet */ 5753 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5754 struct ata_device *dev = &link->device[i]; 5755 5756 dev->link = link; 5757 dev->devno = dev - link->device; 5758 #ifdef CONFIG_ATA_ACPI 5759 dev->gtf_filter = ata_acpi_gtf_filter; 5760 #endif 5761 ata_dev_init(dev); 5762 } 5763 } 5764 5765 /** 5766 * sata_link_init_spd - Initialize link->sata_spd_limit 5767 * @link: Link to configure sata_spd_limit for 5768 * 5769 * Initialize @link->[hw_]sata_spd_limit to the currently 5770 * configured value. 5771 * 5772 * LOCKING: 5773 * Kernel thread context (may sleep). 5774 * 5775 * RETURNS: 5776 * 0 on success, -errno on failure. 5777 */ 5778 int sata_link_init_spd(struct ata_link *link) 5779 { 5780 u8 spd; 5781 int rc; 5782 5783 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5784 if (rc) 5785 return rc; 5786 5787 spd = (link->saved_scontrol >> 4) & 0xf; 5788 if (spd) 5789 link->hw_sata_spd_limit &= (1 << spd) - 1; 5790 5791 ata_force_link_limits(link); 5792 5793 link->sata_spd_limit = link->hw_sata_spd_limit; 5794 5795 return 0; 5796 } 5797 5798 /** 5799 * ata_port_alloc - allocate and initialize basic ATA port resources 5800 * @host: ATA host this allocated port belongs to 5801 * 5802 * Allocate and initialize basic ATA port resources. 5803 * 5804 * RETURNS: 5805 * Allocate ATA port on success, NULL on failure. 5806 * 5807 * LOCKING: 5808 * Inherited from calling layer (may sleep). 
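 *
 * LLDs normally do not call this directly; ports are allocated as
 * part of ata_host_alloc() / ata_host_alloc_pinfo() below.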
5809 */ 5810 struct ata_port *ata_port_alloc(struct ata_host *host) 5811 { 5812 struct ata_port *ap; 5813 5814 DPRINTK("ENTER\n"); 5815 5816 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5817 if (!ap) 5818 return NULL; 5819 5820 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; 5821 ap->lock = &host->lock; 5822 ap->print_id = -1; 5823 ap->local_port_no = -1; 5824 ap->host = host; 5825 ap->dev = host->dev; 5826 5827 #if defined(ATA_VERBOSE_DEBUG) 5828 /* turn on all debugging levels */ 5829 ap->msg_enable = 0x00FF; 5830 #elif defined(ATA_DEBUG) 5831 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5832 #else 5833 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5834 #endif 5835 5836 mutex_init(&ap->scsi_scan_mutex); 5837 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5838 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5839 INIT_LIST_HEAD(&ap->eh_done_q); 5840 init_waitqueue_head(&ap->eh_wait_q); 5841 init_completion(&ap->park_req_pending); 5842 init_timer_deferrable(&ap->fastdrain_timer); 5843 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 5844 ap->fastdrain_timer.data = (unsigned long)ap; 5845 5846 ap->cbl = ATA_CBL_NONE; 5847 5848 ata_link_init(ap, &ap->link, 0); 5849 5850 #ifdef ATA_IRQ_TRAP 5851 ap->stats.unhandled_irq = 1; 5852 ap->stats.idle_irq = 1; 5853 #endif 5854 ata_sff_port_init(ap); 5855 5856 return ap; 5857 } 5858 5859 static void ata_host_release(struct device *gendev, void *res) 5860 { 5861 struct ata_host *host = dev_get_drvdata(gendev); 5862 int i; 5863 5864 for (i = 0; i < host->n_ports; i++) { 5865 struct ata_port *ap = host->ports[i]; 5866 5867 if (!ap) 5868 continue; 5869 5870 if (ap->scsi_host) 5871 scsi_host_put(ap->scsi_host); 5872 5873 kfree(ap->pmp_link); 5874 kfree(ap->slave_link); 5875 kfree(ap); 5876 host->ports[i] = NULL; 5877 } 5878 5879 dev_set_drvdata(gendev, NULL); 5880 } 5881 5882 /** 5883 * ata_host_alloc - allocate and init basic ATA host resources 5884 * @dev: generic device this host is associated with 5885 * @max_ports: maximum number of ATA ports associated with this host 5886 * 5887 * Allocate and initialize basic ATA host resources. LLD calls 5888 * this function to allocate a host, initializes it fully and 5889 * attaches it using ata_host_register(). 5890 * 5891 * @max_ports ports are allocated and host->n_ports is 5892 * initialized to @max_ports. The caller is allowed to decrease 5893 * host->n_ports before calling ata_host_register(). The unused 5894 * ports will be automatically freed on registration. 5895 * 5896 * RETURNS: 5897 * Allocate ATA host on success, NULL on failure. 5898 * 5899 * LOCKING: 5900 * Inherited from calling layer (may sleep). 
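 *
 * A typical probe sequence built on this helper might look like the
 * following (illustrative sketch; my_sht is a hypothetical
 * scsi_host_template and error handling is omitted):
 *
 *	host = ata_host_alloc(&pdev->dev, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	... initialize host->ports[i] ...
 *	rc = ata_host_start(host);
 *	...
 *	rc = ata_host_register(host, &my_sht);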
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}

/**
 * ata_host_alloc_pinfo - alloc host and init with port_info array
 * @dev: generic device this host is associated with
 * @ppi: array of ATA port_info to initialize host with
 * @n_ports: number of ATA ports attached to this host
 *
 * Allocate ATA host and initialize with info from @ppi. If NULL
 * terminated, @ppi may contain fewer entries than @n_ports. The
 * last entry will be used for the remaining ports.
 *
 * RETURNS:
 * Allocated ATA host on success, NULL on failure.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}

/**
 * ata_slave_link_init - initialize slave link
 * @ap: port to initialize slave link for
 *
 * Create and initialize slave link for @ap. This enables slave
 * link handling on the port.
 *
 * In libata, a port contains links and a link contains devices.
 * There is a single host link but, if a PMP is attached to it,
 * there can be multiple fan-out links. On SATA, there's usually
 * a single device connected to a link but PATA and SATA
 * controllers emulating a TF based interface can have two - master
 * and slave.
 *
 * However, there are a few controllers which don't fit into this
 * abstraction too well - SATA controllers which emulate TF
 * interface with both master and slave devices but also have
 * separate SCR register sets for each device. These controllers
 * need separate links for physical link handling (e.g.
 * onlineness, link speed) but should be treated like a
 * traditional M/S controller for everything else (e.g. command
 * issue, softreset).
 *
 * slave_link is libata's way of handling this class of
 * controllers without impacting core layer too much. For
 * anything other than physical link handling, the default host
 * link is used for both master and slave. For physical link
 * handling, separate @ap->slave_link is used. All dirty details
 * are implemented inside libata core layer. From LLD's POV, the
 * only difference is that prereset, hardreset and postreset are
 * called once more for the slave link, so the reset sequence
 * looks like the following.
 *
 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
 * softreset(M) -> postreset(M) -> postreset(S)
 *
 * Note that softreset is called only for the master. Softreset
 * resets both M/S by definition, so SRST on master should handle
 * both (the standard method will work just fine).
 *
 * LOCKING:
 * Should be called before host is registered.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int ata_slave_link_init(struct ata_port *ap)
{
	struct ata_link *link;

	WARN_ON(ap->slave_link);
	WARN_ON(ap->flags & ATA_FLAG_PMP);

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	ata_link_init(ap, link, 1);
	ap->slave_link = link;
	return 0;
}

static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}

/**
 * ata_finalize_port_ops - finalize ata_port_operations
 * @ops: ata_port_operations to finalize
 *
 * An ata_port_operations can inherit from another ops and that
 * ops can again inherit from another. This can go on as many
 * times as necessary as long as there is no loop in the
 * inheritance chain.
 *
 * Ops tables are finalized when the host is started. NULL or
 * unspecified entries are inherited from the closest ancestor
 * which has the method and the entry is populated with it.
 * After finalization, the ops table directly points to all the
 * methods and ->inherits is no longer necessary and cleared.
 *
 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
 *
 * LOCKING:
 * None.
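 *
 * For example, a hypothetical LLD ops table that only overrides
 * ->hardreset (my_ops and my_hardreset are made-up names) can be
 * declared as
 *
 *	static struct ata_port_operations my_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *	};
 *
 * and every other method is filled in from sata_port_ops and its
 * ancestors when the host is started.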
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}

/**
 * ata_host_start - start and freeze ports of an ATA host
 * @host: ATA host to start ports for
 *
 * Start and then freeze ports of @host. Started status is
 * recorded in host->flags, so this function can be called
 * multiple times. Ports are guaranteed to get started only
 * once. If host->ops isn't initialized yet, it's set to the
 * first non-dummy port ops.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}

/**
 * ata_host_init - Initialize a host struct for sas (ipr, libsas)
 * @host: host to initialize
 * @dev: device host is attached to
 * @ops: port_ops
 *
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->n_tags = ATA_MAX_QUEUE - 1;
	host->dev = dev;
	host->ops = ops;
}

void __ata_port_probe(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags;

	/* kick EH for boot probing */
	spin_lock_irqsave(ap->lock, flags);

	ehi->probe_mask |= ATA_ALL_DEVICES;
	ehi->action |= ATA_EH_RESET;
	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
	ap->pflags |= ATA_PFLAG_LOADING;
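
	/*
	 * Schedule EH below; with probe_mask and ATA_EH_RESET set above,
	 * the EH thread resets the link and probes the attached devices
	 * the next time it runs.
	 */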
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}

int ata_port_probe(struct ata_port *ap)
{
	int rc = 0;

	if (ap->ops->error_handler) {
		__ata_port_probe(ap);
		ata_port_wait_eh(ap);
	} else {
		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->print_id);
	}
	return rc;
}

static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel,
	 * we need to wait until all previous scans have completed
	 * before going further.
	 * Jeff Garzik says this is only within a controller, so we
	 * don't need to wait for port 0, only for later ports.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	(void)ata_port_probe(ap);

	/* in order to keep device order, we need to synchronize at this point */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}

/**
 * ata_host_register - register initialized ATA host
 * @host: ATA host to register
 * @sht: template for SCSI host
 *
 * Register initialized ATA host. @host is allocated using
 * ata_host_alloc() and fully initialized by LLD. This function
 * starts ports, registers @host with ATA and SCSI layers and
 * probes registered devices.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports. This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* Create associated sysfs transport objects */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev, host->ports[i]);
		if (rc)
			goto err_tadd;
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	while (--i >= 0)
		ata_tport_delete(host->ports[i]);
	return rc;
}

/**
 * ata_host_activate - start host, request IRQ and register it
 * @host: target ATA host
 * @irq: IRQ to request
 * @irq_handler: irq_handler used when requesting IRQ
 * @irq_flags: irq_flags used when requesting IRQ
 * @sht: scsi_host_template to use when registering the host
 *
 * After allocating an ATA host and initializing it, most libata
 * LLDs perform three steps to activate the host - start host,
 * request IRQ and register it. This helper takes necessary
 * arguments and performs the three steps in one go.
 *
 * An invalid IRQ skips the IRQ registration and expects the host to
 * have set polling mode on the port. In this case, @irq_handler
 * should be NULL.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;
	char *irq_desc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
				  dev_driver_string(host->dev),
				  dev_name(host->dev));
	if (!irq_desc)
		return -ENOMEM;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      irq_desc, host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}

/**
 * ata_port_detach - Detach ATA port in preparation for device removal
 * @ap: ATA port to be detached
 *
 * Detach all ATA devices and the associated SCSI devices of @ap;
 * then, remove the associated SCSI host. @ap is guaranteed to
 * be quiescent on return from this function.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
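 *
 * The teardown order below matters: EH is told that the port is
 * unloading and is flushed first, so no new commands can be in
 * flight by the time the SCSI host and the transport objects are
 * removed.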
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* clean up zpodd on port removal */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ALL) {
			if (zpodd_dev_enabled(dev))
				zpodd_exit(dev);
		}
	}
	if (ap->pmp_link) {
		int i;

		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
	ata_tport_delete(ap);
}

/**
 * ata_host_detach - Detach all ports of an ATA host
 * @host: Host to detach
 *
 * Detach all ports of @host.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}

#ifdef CONFIG_PCI

/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that hot-unplug or
 * module unload event has occurred. Detach all ports. Resource
 * release is handled via devres.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	ata_host_detach(host);
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}

#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}

int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
#endif /* CONFIG_PM */

#endif /* CONFIG_PCI */

/**
 * ata_platform_remove_one - Platform layer callback for device removal
 * @pdev: Platform device that was removed
 *
 * Platform layer indicates to libata via this hook that hot-unplug or
 * module unload event has occurred. Detach all ports. Resource
 * release is handled via devres.
 *
 * LOCKING:
 * Inherited from platform layer (may sleep).
 */
int ata_platform_remove_one(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);

	ata_host_detach(host);

	return 0;
}

static int __init ata_parse_force_one(char **cur,
				      struct ata_force_ent *force_ent,
				      const char **reason)
{
	static const struct ata_force_param force_tbl[] __initconst = {
		{ "40c", .cbl = ATA_CBL_PATA40 },
		{ "80c", .cbl = ATA_CBL_PATA80 },
		{ "short40c", .cbl = ATA_CBL_PATA40_SHORT },
		{ "unk", .cbl = ATA_CBL_PATA_UNK },
		{ "ign", .cbl = ATA_CBL_PATA_IGN },
		{ "sata", .cbl = ATA_CBL_SATA },
		{ "1.5Gbps", .spd_limit = 1 },
		{ "3.0Gbps", .spd_limit = 2 },
		{ "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
		{ "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
		{ "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM },
		{ "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM },
		{ "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
		{ "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
		{ "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
		{ "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
		{ "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
		{ "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
		{ "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
		{ "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
		{ "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
		{ "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
		{ "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
		{ "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
		{ "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
		{ "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
		{ "nohrst", .lflags = ATA_LFLAG_NO_HRST },
		{ "nosrst", .lflags = ATA_LFLAG_NO_SRST },
		{ "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
		{ "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
		{ "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
		{ "disable", .horkage_on = ATA_HORKAGE_DISABLE },
	};
	char *start = *cur, *p = *cur;
	char *id, *val, *endp;
	const struct ata_force_param *match_fp = NULL;
	int nr_matches = 0, i;

	/* find where this param ends and update *cur */
	while (*p != '\0' && *p != ',')
		p++;

	if (*p == '\0')
		*cur = p;
	else
		*cur = p + 1;

	*p = '\0';

	/* parse */
	p = strchr(start, ':');
	if (!p) {
		val = strstrip(start);
		goto parse_val;
	}
	*p = '\0';

	id = strstrip(start);
	val = strstrip(p + 1);

	/* parse id */
	p = strchr(id, '.');
	if (p) {
		*p++ = '\0';
		force_ent->device = simple_strtoul(p, &endp, 10);
		if (p == endp || *endp != '\0') {
			*reason = "invalid device";
			return -EINVAL;
		}
	}

	force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}

static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* calculate maximum number of params and allocate force_tbl */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
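		/*
		 * Without the table there is nowhere to store the
		 * parsed entries, so the whole libata.force string is
		 * dropped rather than partially applied.
		 */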
		return;
	}

	/* parse and populate the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}

static int __init ata_init(void)
{
	int rc;

	ata_parse_force_param();

	rc = ata_sff_init();
	if (rc) {
		kfree(ata_force_tbl);
		return rc;
	}

	libata_transport_init();
	ata_scsi_transport_template = ata_attach_transport();
	if (!ata_scsi_transport_template) {
		ata_sff_exit();
		rc = -ENOMEM;
		goto err_out;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;

 err_out:
	return rc;
}

static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	kfree(ata_force_tbl);
}

subsys_initcall(ata_init);
module_exit(ata_exit);

static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}

/**
 * ata_msleep - ATA EH owner aware msleep
 * @ap: ATA port to attribute the sleep to
 * @msecs: duration to sleep in milliseconds
 *
 * Sleeps @msecs. If the current task is owner of @ap's EH, the
 * ownership is released before going to sleep and reacquired
 * after the sleep is complete. IOW, other ports sharing the
 * @ap->host will be allowed to own the EH while this task is
 * sleeping.
 *
 * LOCKING:
 * Might sleep.
 */
void ata_msleep(struct ata_port *ap, unsigned int msecs)
{
	bool owns_eh = ap && ap->host->eh_owner == current;

	if (owns_eh)
		ata_eh_release(ap);

	if (msecs < 20) {
		unsigned long usecs = msecs * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	} else {
		msleep(msecs);
	}

	if (owns_eh)
		ata_eh_acquire(ap);
}

/**
 * ata_wait_register - wait until register value changes
 * @ap: ATA port to wait register for, can be NULL
 * @reg: IO-mapped register
 * @mask: Mask to apply to read register value
 * @val: Wait condition
 * @interval: polling interval in milliseconds
 * @timeout: timeout in milliseconds
 *
 * Waiting for some bits of register to change is a common
 * operation for ATA controllers. This function reads 32bit LE
 * IO-mapped register @reg and tests for the following condition.
 *
 * (*@reg & mask) != val
 *
 * If the condition is met, it returns; otherwise, the process is
 * repeated after @interval until timeout.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * The final register value.
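 *
 * For example, a sketch that polls a hypothetical status register
 * (MY_STATUS and MY_BUSY are made-up names) every 10ms for up to
 * 1000ms, waiting for a busy bit to clear:
 *
 *	status = ata_wait_register(ap, mmio + MY_STATUS,
 *				   MY_BUSY, MY_BUSY, 10, 1000);
 *	if (status & MY_BUSY)
 *		return -EBUSY;
 *
 * where a set MY_BUSY bit in the returned value means the wait
 * timed out, so the caller re-checks the final value as above.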
 */
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval, unsigned long timeout)
{
	unsigned long deadline;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	deadline = ata_deadline(jiffies, timeout);

	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		ata_msleep(ap, interval);
		tmp = ioread32(reg);
	}

	return tmp;
}

/**
 * sata_lpm_ignore_phy_events - test if PHY event should be ignored
 * @link: Link receiving the event
 *
 * Test whether the received PHY event has to be ignored or not.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * True if the event has to be ignored.
 */
bool sata_lpm_ignore_phy_events(struct ata_link *link)
{
	unsigned long lpm_timeout = link->last_lpm_change +
				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);

	/* if LPM is enabled, PHYRDY doesn't mean anything */
	if (link->lpm_policy > ATA_LPM_MAX_POWER)
		return true;

	/* ignore the first PHY event after the LPM policy changed
	 * as it might be spurious
	 */
	if ((link->flags & ATA_LFLAG_CHANGED) &&
	    time_before(jiffies, lpm_timeout))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);

/*
 * Dummy port_ops
 */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}

struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.error_handler = ata_dummy_error_handler,
	.sched_eh = ata_std_sched_eh,
	.end_eh = ata_std_end_eh,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};

/*
 * Utility print functions
 */
void ata_port_printk(const struct ata_port *ap, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk("%sata%u: %pV", level, ap->print_id, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_port_printk);

void ata_link_printk(const struct ata_link *link, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
		printk("%sata%u.%02u: %pV",
		       level, link->ap->print_id, link->pmp, &vaf);
	else
		printk("%sata%u: %pV",
		       level, link->ap->print_id, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_link_printk);

void ata_dev_printk(const struct ata_device *dev, const char *level,
		    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk("%sata%u.%02u: %pV",
	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
	       &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_dev_printk);

void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers. As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_base_port_ops);
EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_link_next);
EXPORT_SYMBOL_GPL(ata_dev_next);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_slave_link_init);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_msleep);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_platform_remove_one);

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_std_error_handler);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);