/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>

#include "libata.h"
#include "libata-transport.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

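/*
 * Illustrative note (added comment, not from the original source): entries
 * parsed from the libata.force= module parameter are stored as the
 * ata_force_ent records below.  The parameter is a comma-separated list of
 * [ID:]VALUE items, where ID is a port number optionally followed by
 * ".device" - for example "libata.force=1:40c,1.00:udma4" would force a
 * 40-wire cable on port 1 and limit the first device on that port to
 * UDMA/66.  See Documentation/kernel-parameters.txt for the authoritative
 * syntax.
 */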
struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}

/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}

/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}

/**
 *	ata_dev_phys_link - find physical link for a device
 *	@dev: ATA device to look up physical link for
 *
 *	Look up physical link which @dev is attached to.  Note that
 *	this is different from @dev->link only when @dev is on slave
 *	link.  For all other cases, it's the same as @dev->link.
 *
 *	LOCKING:
 *	Don't care.
 *
 *	RETURNS:
 *	Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}

/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has matching port number is used, so it
 *	can be specified as part of device force parameters.  For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link flags and SATA spd limit according to libata.force
 *	and whine about it.  When only the port part is specified
 *	(e.g. 1:), the limit applies to all links connected to both
 *	the host link and all fan-out ports connected via PMP.  If the
 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 *	first fan-out link not the host link.  Device number 15 always
 *	points to the host link whether PMP is attached or not.  If the
 *	controller has a slave link, device number 16 points to it.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}

/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}

/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}

/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall thru */
	default:
		return ATAPI_MISC;
	}
}

/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will be output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
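/*
 * Illustrative example (added comment, not from the original source): the
 * ata_rw_cmds[] lookup above is indexed as "index + fua + lba48 + write",
 * where index selects the 8-entry group (0 = PIO multi, 8 = PIO, 16 = DMA),
 * lba48 adds 2 and fua adds 4.  A DMA write with LBA48 and FUA therefore
 * resolves to ata_rw_cmds[16 + 4 + 2 + 1] == ATA_CMD_WRITE_FUA_EXT, while a
 * plain PIO read on a device without multi_count uses ata_rw_cmds[8 + 0] ==
 * ATA_CMD_PIO_READ.  Zero entries mean "no such command" and make
 * ata_rwcmd_protocol() return -1.
 */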
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			sect = 1; /* oh well */
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
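/*
 * Illustrative example (added comment, not from the original source): a
 * device advertising PIO0-4, MWDMA0-2 and UDMA0-5 would be described by
 * ata_pack_xfermask(0x1f, 0x07, 0x3f); each class is shifted into its own
 * bit range (ATA_SHIFT_PIO/MWDMA/UDMA) so the whole capability set fits in
 * one unsigned long.  Feeding that packed mask to ata_xfer_mask2mode()
 * returns the highest mode it contains, XFER_UDMA_5, and
 * ata_xfer_mode2mask(XFER_UDMA_5) gives back the UDMA0-5 bits, which is how
 * mode selection narrows a mask to "this mode and everything below it".
 */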
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
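/*
 * Illustrative example (added comment, not from the original source):
 * IDENTIFY strings such as the model and firmware revision are stored two
 * characters per 16-bit word with the first character in the high byte,
 * which is why ata_id_string() emits the high byte before the low byte of
 * each word.  ata_dev_configure() below uses the C-string variant like this:
 *
 *	char modelbuf[ATA_ID_PROD_LEN + 1];
 *	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, sizeof(modelbuf));
 *
 * which also trims trailing spaces and NUL-terminates the result.
 */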
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
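/*
 * Illustrative note (added comment, not from the original source): READ
 * NATIVE MAX ADDRESS returns the last addressable sector, so the helper
 * above adds 1 to turn it into a sector count; devices flagged with
 * ATA_HORKAGE_HPA_SIZE report a count rather than a max address, which is
 * why the result is decremented again for them.  ata_hpa_resize() pairs this
 * helper with ata_set_max_sectors() below to unlock the full capacity when
 * asked to.
 */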
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media.  The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x\n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x\n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum.  Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However, it is the speeds not the modes that
		 * are supported...
		 * Note drivers using the timing API will get this right
		 * anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes a simple
 *	buffer instead of an sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command that consists only of the opcode
 *	'cmd' itself, without filling any other registers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}

/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY.  Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset.  IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied.  See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY.  Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 *	ata_pio_mask_no_iordy - Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy.  Return
 *	-1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 *	ata_do_dev_read_id - default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data.  For some RAID controllers and for pre ATA devices
 *	this function is wrapped or replaced by the driver.
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}
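/*
 * Illustrative note (added comment, not from the original source): the
 * default read method above pulls the 256-word IDENTIFY block straight into
 * @id via ata_exec_internal() with DMA_FROM_DEVICE.  The data arrives
 * little-endian from the wire; ata_dev_read_id() below runs swap_buf_le16()
 * on it so that the id[] words can be examined in CPU byte order on
 * big-endian machines as well.
 */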
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
				     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
			     reason, err_mask);
	return rc;
}

static int ata_do_link_spd_horkage(struct ata_device *dev)
{
	struct ata_link *plink = ata_dev_phys_link(dev);
	u32 target, target_limit;

	if (!sata_scr_valid(plink))
		return 0;

	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
		target = 1;
	else
		return 0;

	target_limit = (1 << target) - 1;

	/* if already on stricter limit, no need to push further */
	if (plink->sata_spd_limit <= target_limit)
		return 0;

	plink->sata_spd_limit = target_limit;

	/* Request another EH round by returning -EAGAIN if link is
	 * going faster than the target speed.  Forward progress is
	 * guaranteed by setting sata_spd_limit to target_limit above.
	 */
2064 */ 2065 if (plink->sata_spd > target) { 2066 ata_dev_info(dev, "applying link speed limit horkage to %s\n", 2067 sata_spd_string(target)); 2068 return -EAGAIN; 2069 } 2070 return 0; 2071 } 2072 2073 static inline u8 ata_dev_knobble(struct ata_device *dev) 2074 { 2075 struct ata_port *ap = dev->link->ap; 2076 2077 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) 2078 return 0; 2079 2080 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2081 } 2082 2083 static int ata_dev_config_ncq(struct ata_device *dev, 2084 char *desc, size_t desc_sz) 2085 { 2086 struct ata_port *ap = dev->link->ap; 2087 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 2088 unsigned int err_mask; 2089 char *aa_desc = ""; 2090 2091 if (!ata_id_has_ncq(dev->id)) { 2092 desc[0] = '\0'; 2093 return 0; 2094 } 2095 if (dev->horkage & ATA_HORKAGE_NONCQ) { 2096 snprintf(desc, desc_sz, "NCQ (not used)"); 2097 return 0; 2098 } 2099 if (ap->flags & ATA_FLAG_NCQ) { 2100 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 2101 dev->flags |= ATA_DFLAG_NCQ; 2102 } 2103 2104 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && 2105 (ap->flags & ATA_FLAG_FPDMA_AA) && 2106 ata_id_has_fpdma_aa(dev->id)) { 2107 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, 2108 SATA_FPDMA_AA); 2109 if (err_mask) { 2110 ata_dev_err(dev, 2111 "failed to enable AA (error_mask=0x%x)\n", 2112 err_mask); 2113 if (err_mask != AC_ERR_DEV) { 2114 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; 2115 return -EIO; 2116 } 2117 } else 2118 aa_desc = ", AA"; 2119 } 2120 2121 if (hdepth >= ddepth) 2122 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); 2123 else 2124 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, 2125 ddepth, aa_desc); 2126 return 0; 2127 } 2128 2129 /** 2130 * ata_dev_configure - Configure the specified ATA/ATAPI device 2131 * @dev: Target device to configure 2132 * 2133 * Configure @dev according to @dev->id. Generic and low-level 2134 * driver specific fixups are also applied. 2135 * 2136 * LOCKING: 2137 * Kernel thread context (may sleep) 2138 * 2139 * RETURNS: 2140 * 0 on success, -errno otherwise 2141 */ 2142 int ata_dev_configure(struct ata_device *dev) 2143 { 2144 struct ata_port *ap = dev->link->ap; 2145 struct ata_eh_context *ehc = &dev->link->eh_context; 2146 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2147 const u16 *id = dev->id; 2148 unsigned long xfer_mask; 2149 char revbuf[7]; /* XYZ-99\0 */ 2150 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2151 char modelbuf[ATA_ID_PROD_LEN+1]; 2152 int rc; 2153 2154 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2155 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__); 2156 return 0; 2157 } 2158 2159 if (ata_msg_probe(ap)) 2160 ata_dev_dbg(dev, "%s: ENTER\n", __func__); 2161 2162 /* set horkage */ 2163 dev->horkage |= ata_dev_blacklisted(dev); 2164 ata_force_horkage(dev); 2165 2166 if (dev->horkage & ATA_HORKAGE_DISABLE) { 2167 ata_dev_info(dev, "unsupported device, disabling\n"); 2168 ata_dev_disable(dev); 2169 return 0; 2170 } 2171 2172 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && 2173 dev->class == ATA_DEV_ATAPI) { 2174 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n", 2175 atapi_enabled ? 
"not supported with this driver" 2176 : "disabled"); 2177 ata_dev_disable(dev); 2178 return 0; 2179 } 2180 2181 rc = ata_do_link_spd_horkage(dev); 2182 if (rc) 2183 return rc; 2184 2185 /* let ACPI work its magic */ 2186 rc = ata_acpi_on_devcfg(dev); 2187 if (rc) 2188 return rc; 2189 2190 /* massage HPA, do it early as it might change IDENTIFY data */ 2191 rc = ata_hpa_resize(dev); 2192 if (rc) 2193 return rc; 2194 2195 /* print device capabilities */ 2196 if (ata_msg_probe(ap)) 2197 ata_dev_dbg(dev, 2198 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2199 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2200 __func__, 2201 id[49], id[82], id[83], id[84], 2202 id[85], id[86], id[87], id[88]); 2203 2204 /* initialize to-be-configured parameters */ 2205 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2206 dev->max_sectors = 0; 2207 dev->cdb_len = 0; 2208 dev->n_sectors = 0; 2209 dev->cylinders = 0; 2210 dev->heads = 0; 2211 dev->sectors = 0; 2212 dev->multi_count = 0; 2213 2214 /* 2215 * common ATA, ATAPI feature tests 2216 */ 2217 2218 /* find max transfer mode; for printk only */ 2219 xfer_mask = ata_id_xfermask(id); 2220 2221 if (ata_msg_probe(ap)) 2222 ata_dump_id(id); 2223 2224 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2225 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2226 sizeof(fwrevbuf)); 2227 2228 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2229 sizeof(modelbuf)); 2230 2231 /* ATA-specific feature tests */ 2232 if (dev->class == ATA_DEV_ATA) { 2233 if (ata_id_is_cfa(id)) { 2234 /* CPRM may make this media unusable */ 2235 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2236 ata_dev_warn(dev, 2237 "supports DRM functions and may not be fully accessible\n"); 2238 snprintf(revbuf, 7, "CFA"); 2239 } else { 2240 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2241 /* Warn the user if the device has TPM extensions */ 2242 if (ata_id_has_tpm(id)) 2243 ata_dev_warn(dev, 2244 "supports DRM functions and may not be fully accessible\n"); 2245 } 2246 2247 dev->n_sectors = ata_id_n_sectors(id); 2248 2249 /* get current R/W Multiple count setting */ 2250 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { 2251 unsigned int max = dev->id[47] & 0xff; 2252 unsigned int cnt = dev->id[59] & 0xff; 2253 /* only recognize/allow powers of two here */ 2254 if (is_power_of_2(max) && is_power_of_2(cnt)) 2255 if (cnt <= max) 2256 dev->multi_count = cnt; 2257 } 2258 2259 if (ata_id_has_lba(id)) { 2260 const char *lba_desc; 2261 char ncq_desc[24]; 2262 2263 lba_desc = "LBA"; 2264 dev->flags |= ATA_DFLAG_LBA; 2265 if (ata_id_has_lba48(id)) { 2266 dev->flags |= ATA_DFLAG_LBA48; 2267 lba_desc = "LBA48"; 2268 2269 if (dev->n_sectors >= (1UL << 28) && 2270 ata_id_has_flush_ext(id)) 2271 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2272 } 2273 2274 /* config NCQ */ 2275 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2276 if (rc) 2277 return rc; 2278 2279 /* print device info to dmesg */ 2280 if (ata_msg_drv(ap) && print_info) { 2281 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2282 revbuf, modelbuf, fwrevbuf, 2283 ata_mode_string(xfer_mask)); 2284 ata_dev_info(dev, 2285 "%llu sectors, multi %u: %s %s\n", 2286 (unsigned long long)dev->n_sectors, 2287 dev->multi_count, lba_desc, ncq_desc); 2288 } 2289 } else { 2290 /* CHS */ 2291 2292 /* Default translation */ 2293 dev->cylinders = id[1]; 2294 dev->heads = id[3]; 2295 dev->sectors = id[6]; 2296 2297 if (ata_id_current_chs_valid(id)) { 2298 /* Current CHS translation is valid. 
*/ 2299 dev->cylinders = id[54]; 2300 dev->heads = id[55]; 2301 dev->sectors = id[56]; 2302 } 2303 2304 /* print device info to dmesg */ 2305 if (ata_msg_drv(ap) && print_info) { 2306 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2307 revbuf, modelbuf, fwrevbuf, 2308 ata_mode_string(xfer_mask)); 2309 ata_dev_info(dev, 2310 "%llu sectors, multi %u, CHS %u/%u/%u\n", 2311 (unsigned long long)dev->n_sectors, 2312 dev->multi_count, dev->cylinders, 2313 dev->heads, dev->sectors); 2314 } 2315 } 2316 2317 dev->cdb_len = 16; 2318 } 2319 2320 /* ATAPI-specific feature tests */ 2321 else if (dev->class == ATA_DEV_ATAPI) { 2322 const char *cdb_intr_string = ""; 2323 const char *atapi_an_string = ""; 2324 const char *dma_dir_string = ""; 2325 u32 sntf; 2326 2327 rc = atapi_cdb_len(id); 2328 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2329 if (ata_msg_warn(ap)) 2330 ata_dev_warn(dev, "unsupported CDB len\n"); 2331 rc = -EINVAL; 2332 goto err_out_nosup; 2333 } 2334 dev->cdb_len = (unsigned int) rc; 2335 2336 /* Enable ATAPI AN if both the host and device have 2337 * the support. If PMP is attached, SNTF is required 2338 * to enable ATAPI AN to discern between PHY status 2339 * changed notifications and ATAPI ANs. 2340 */ 2341 if (atapi_an && 2342 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2343 (!sata_pmp_attached(ap) || 2344 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2345 unsigned int err_mask; 2346 2347 /* issue SET feature command to turn this on */ 2348 err_mask = ata_dev_set_feature(dev, 2349 SETFEATURES_SATA_ENABLE, SATA_AN); 2350 if (err_mask) 2351 ata_dev_err(dev, 2352 "failed to enable ATAPI AN (err_mask=0x%x)\n", 2353 err_mask); 2354 else { 2355 dev->flags |= ATA_DFLAG_AN; 2356 atapi_an_string = ", ATAPI AN"; 2357 } 2358 } 2359 2360 if (ata_id_cdb_intr(dev->id)) { 2361 dev->flags |= ATA_DFLAG_CDB_INTR; 2362 cdb_intr_string = ", CDB intr"; 2363 } 2364 2365 if (atapi_dmadir || atapi_id_dmadir(dev->id)) { 2366 dev->flags |= ATA_DFLAG_DMADIR; 2367 dma_dir_string = ", DMADIR"; 2368 } 2369 2370 /* print device info to dmesg */ 2371 if (ata_msg_drv(ap) && print_info) 2372 ata_dev_info(dev, 2373 "ATAPI: %s, %s, max %s%s%s%s\n", 2374 modelbuf, fwrevbuf, 2375 ata_mode_string(xfer_mask), 2376 cdb_intr_string, atapi_an_string, 2377 dma_dir_string); 2378 } 2379 2380 /* determine max_sectors */ 2381 dev->max_sectors = ATA_MAX_SECTORS; 2382 if (dev->flags & ATA_DFLAG_LBA48) 2383 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2384 2385 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2386 200 sectors */ 2387 if (ata_dev_knobble(dev)) { 2388 if (ata_msg_drv(ap) && print_info) 2389 ata_dev_info(dev, "applying bridge limits\n"); 2390 dev->udma_mask &= ATA_UDMA5; 2391 dev->max_sectors = ATA_MAX_SECTORS; 2392 } 2393 2394 if ((dev->class == ATA_DEV_ATAPI) && 2395 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2396 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2397 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2398 } 2399 2400 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2401 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2402 dev->max_sectors); 2403 2404 if (ap->ops->dev_config) 2405 ap->ops->dev_config(dev); 2406 2407 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2408 /* Let the user know. We don't want to disallow opens for 2409 rescue purposes, or in case the vendor is just a blithering 2410 idiot. 
Do this after the dev_config call as some controllers 2411 with buggy firmware may want to avoid reporting false device 2412 bugs */ 2413 2414 if (print_info) { 2415 ata_dev_warn(dev, 2416 "Drive reports diagnostics failure. This may indicate a drive\n"); 2417 ata_dev_warn(dev, 2418 "fault or invalid emulation. Contact drive vendor for information.\n"); 2419 } 2420 } 2421 2422 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { 2423 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n"); 2424 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n"); 2425 } 2426 2427 return 0; 2428 2429 err_out_nosup: 2430 if (ata_msg_probe(ap)) 2431 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__); 2432 return rc; 2433 } 2434 2435 /** 2436 * ata_cable_40wire - return 40 wire cable type 2437 * @ap: port 2438 * 2439 * Helper method for drivers which want to hardwire 40 wire cable 2440 * detection. 2441 */ 2442 2443 int ata_cable_40wire(struct ata_port *ap) 2444 { 2445 return ATA_CBL_PATA40; 2446 } 2447 2448 /** 2449 * ata_cable_80wire - return 80 wire cable type 2450 * @ap: port 2451 * 2452 * Helper method for drivers which want to hardwire 80 wire cable 2453 * detection. 2454 */ 2455 2456 int ata_cable_80wire(struct ata_port *ap) 2457 { 2458 return ATA_CBL_PATA80; 2459 } 2460 2461 /** 2462 * ata_cable_unknown - return unknown PATA cable. 2463 * @ap: port 2464 * 2465 * Helper method for drivers which have no PATA cable detection. 2466 */ 2467 2468 int ata_cable_unknown(struct ata_port *ap) 2469 { 2470 return ATA_CBL_PATA_UNK; 2471 } 2472 2473 /** 2474 * ata_cable_ignore - return ignored PATA cable. 2475 * @ap: port 2476 * 2477 * Helper method for drivers which don't use cable type to limit 2478 * transfer mode. 2479 */ 2480 int ata_cable_ignore(struct ata_port *ap) 2481 { 2482 return ATA_CBL_PATA_IGN; 2483 } 2484 2485 /** 2486 * ata_cable_sata - return SATA cable type 2487 * @ap: port 2488 * 2489 * Helper method for drivers which have SATA cables 2490 */ 2491 2492 int ata_cable_sata(struct ata_port *ap) 2493 { 2494 return ATA_CBL_SATA; 2495 } 2496 2497 /** 2498 * ata_bus_probe - Reset and probe ATA bus 2499 * @ap: Bus to probe 2500 * 2501 * Master ATA bus probing function. Initiates a hardware-dependent 2502 * bus reset, then attempts to identify any devices found on 2503 * the bus. 2504 * 2505 * LOCKING: 2506 * PCI/etc. bus probe sem. 2507 * 2508 * RETURNS: 2509 * Zero on success, negative errno otherwise. 2510 */ 2511 2512 int ata_bus_probe(struct ata_port *ap) 2513 { 2514 unsigned int classes[ATA_MAX_DEVICES]; 2515 int tries[ATA_MAX_DEVICES]; 2516 int rc; 2517 struct ata_device *dev; 2518 2519 ata_for_each_dev(dev, &ap->link, ALL) 2520 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2521 2522 retry: 2523 ata_for_each_dev(dev, &ap->link, ALL) { 2524 /* If we issue an SRST then an ATA drive (not ATAPI) 2525 * may change configuration and be in PIO0 timing. If 2526 * we do a hard reset (or are coming from power on) 2527 * this is true for ATA or ATAPI. Until we've set a 2528 * suitable controller mode we should not touch the 2529 * bus as we may be talking too fast. 2530 */ 2531 dev->pio_mode = XFER_PIO_0; 2532 2533 /* If the controller has a pio mode setup function 2534 * then use it to set the chipset to rights. Don't 2535 * touch the DMA setup as that will be dealt with when 2536 * configuring devices. 
2537 */ 2538 if (ap->ops->set_piomode) 2539 ap->ops->set_piomode(ap, dev); 2540 } 2541 2542 /* reset and determine device classes */ 2543 ap->ops->phy_reset(ap); 2544 2545 ata_for_each_dev(dev, &ap->link, ALL) { 2546 if (dev->class != ATA_DEV_UNKNOWN) 2547 classes[dev->devno] = dev->class; 2548 else 2549 classes[dev->devno] = ATA_DEV_NONE; 2550 2551 dev->class = ATA_DEV_UNKNOWN; 2552 } 2553 2554 /* read IDENTIFY page and configure devices. We have to do the identify 2555 specific sequence bass-ackwards so that PDIAG- is released by 2556 the slave device */ 2557 2558 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { 2559 if (tries[dev->devno]) 2560 dev->class = classes[dev->devno]; 2561 2562 if (!ata_dev_enabled(dev)) 2563 continue; 2564 2565 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2566 dev->id); 2567 if (rc) 2568 goto fail; 2569 } 2570 2571 /* Now ask for the cable type as PDIAG- should have been released */ 2572 if (ap->ops->cable_detect) 2573 ap->cbl = ap->ops->cable_detect(ap); 2574 2575 /* We may have SATA bridge glue hiding here irrespective of 2576 * the reported cable types and sensed types. When SATA 2577 * drives indicate we have a bridge, we don't know which end 2578 * of the link the bridge is which is a problem. 2579 */ 2580 ata_for_each_dev(dev, &ap->link, ENABLED) 2581 if (ata_id_is_sata(dev->id)) 2582 ap->cbl = ATA_CBL_SATA; 2583 2584 /* After the identify sequence we can now set up the devices. We do 2585 this in the normal order so that the user doesn't get confused */ 2586 2587 ata_for_each_dev(dev, &ap->link, ENABLED) { 2588 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2589 rc = ata_dev_configure(dev); 2590 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2591 if (rc) 2592 goto fail; 2593 } 2594 2595 /* configure transfer mode */ 2596 rc = ata_set_mode(&ap->link, &dev); 2597 if (rc) 2598 goto fail; 2599 2600 ata_for_each_dev(dev, &ap->link, ENABLED) 2601 return 0; 2602 2603 return -ENODEV; 2604 2605 fail: 2606 tries[dev->devno]--; 2607 2608 switch (rc) { 2609 case -EINVAL: 2610 /* eeek, something went very wrong, give up */ 2611 tries[dev->devno] = 0; 2612 break; 2613 2614 case -ENODEV: 2615 /* give it just one more chance */ 2616 tries[dev->devno] = min(tries[dev->devno], 1); 2617 case -EIO: 2618 if (tries[dev->devno] == 1) { 2619 /* This is the last chance, better to slow 2620 * down than lose it. 2621 */ 2622 sata_down_spd_limit(&ap->link, 0); 2623 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2624 } 2625 } 2626 2627 if (!tries[dev->devno]) 2628 ata_dev_disable(dev); 2629 2630 goto retry; 2631 } 2632 2633 /** 2634 * sata_print_link_status - Print SATA link status 2635 * @link: SATA link to printk link status about 2636 * 2637 * This function prints link speed and status of a SATA link. 2638 * 2639 * LOCKING: 2640 * None. 
2641 */ 2642 static void sata_print_link_status(struct ata_link *link) 2643 { 2644 u32 sstatus, scontrol, tmp; 2645 2646 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2647 return; 2648 sata_scr_read(link, SCR_CONTROL, &scontrol); 2649 2650 if (ata_phys_link_online(link)) { 2651 tmp = (sstatus >> 4) & 0xf; 2652 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n", 2653 sata_spd_string(tmp), sstatus, scontrol); 2654 } else { 2655 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n", 2656 sstatus, scontrol); 2657 } 2658 } 2659 2660 /** 2661 * ata_dev_pair - return other device on cable 2662 * @adev: device 2663 * 2664 * Obtain the other device on the same cable, or if none is 2665 * present NULL is returned 2666 */ 2667 2668 struct ata_device *ata_dev_pair(struct ata_device *adev) 2669 { 2670 struct ata_link *link = adev->link; 2671 struct ata_device *pair = &link->device[1 - adev->devno]; 2672 if (!ata_dev_enabled(pair)) 2673 return NULL; 2674 return pair; 2675 } 2676 2677 /** 2678 * sata_down_spd_limit - adjust SATA spd limit downward 2679 * @link: Link to adjust SATA spd limit for 2680 * @spd_limit: Additional limit 2681 * 2682 * Adjust SATA spd limit of @link downward. Note that this 2683 * function only adjusts the limit. The change must be applied 2684 * using sata_set_spd(). 2685 * 2686 * If @spd_limit is non-zero, the speed is limited to equal to or 2687 * lower than @spd_limit if such speed is supported. If 2688 * @spd_limit is slower than any supported speed, only the lowest 2689 * supported speed is allowed. 2690 * 2691 * LOCKING: 2692 * Inherited from caller. 2693 * 2694 * RETURNS: 2695 * 0 on success, negative errno on failure 2696 */ 2697 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) 2698 { 2699 u32 sstatus, spd, mask; 2700 int rc, bit; 2701 2702 if (!sata_scr_valid(link)) 2703 return -EOPNOTSUPP; 2704 2705 /* If SCR can be read, use it to determine the current SPD. 2706 * If not, use cached value in link->sata_spd. 2707 */ 2708 rc = sata_scr_read(link, SCR_STATUS, &sstatus); 2709 if (rc == 0 && ata_sstatus_online(sstatus)) 2710 spd = (sstatus >> 4) & 0xf; 2711 else 2712 spd = link->sata_spd; 2713 2714 mask = link->sata_spd_limit; 2715 if (mask <= 1) 2716 return -EINVAL; 2717 2718 /* unconditionally mask off the highest bit */ 2719 bit = fls(mask) - 1; 2720 mask &= ~(1 << bit); 2721 2722 /* Mask off all speeds higher than or equal to the current 2723 * one. Force 1.5Gbps if current SPD is not available. 2724 */ 2725 if (spd > 1) 2726 mask &= (1 << (spd - 1)) - 1; 2727 else 2728 mask &= 1; 2729 2730 /* were we already at the bottom? */ 2731 if (!mask) 2732 return -EINVAL; 2733 2734 if (spd_limit) { 2735 if (mask & ((1 << spd_limit) - 1)) 2736 mask &= (1 << spd_limit) - 1; 2737 else { 2738 bit = ffs(mask) - 1; 2739 mask = 1 << bit; 2740 } 2741 } 2742 2743 link->sata_spd_limit = mask; 2744 2745 ata_link_warn(link, "limiting SATA link speed to %s\n", 2746 sata_spd_string(fls(mask))); 2747 2748 return 0; 2749 } 2750 2751 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 2752 { 2753 struct ata_link *host_link = &link->ap->link; 2754 u32 limit, target, spd; 2755 2756 limit = link->sata_spd_limit; 2757 2758 /* Don't configure downstream link faster than upstream link. 2759 * It doesn't speed up anything and some PMPs choke on such 2760 * configuration. 
2761 */ 2762 if (!ata_is_host_link(link) && host_link->sata_spd) 2763 limit &= (1 << host_link->sata_spd) - 1; 2764 2765 if (limit == UINT_MAX) 2766 target = 0; 2767 else 2768 target = fls(limit); 2769 2770 spd = (*scontrol >> 4) & 0xf; 2771 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 2772 2773 return spd != target; 2774 } 2775 2776 /** 2777 * sata_set_spd_needed - is SATA spd configuration needed 2778 * @link: Link in question 2779 * 2780 * Test whether the spd limit in SControl matches 2781 * @link->sata_spd_limit. This function is used to determine 2782 * whether hardreset is necessary to apply SATA spd 2783 * configuration. 2784 * 2785 * LOCKING: 2786 * Inherited from caller. 2787 * 2788 * RETURNS: 2789 * 1 if SATA spd configuration is needed, 0 otherwise. 2790 */ 2791 static int sata_set_spd_needed(struct ata_link *link) 2792 { 2793 u32 scontrol; 2794 2795 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 2796 return 1; 2797 2798 return __sata_set_spd_needed(link, &scontrol); 2799 } 2800 2801 /** 2802 * sata_set_spd - set SATA spd according to spd limit 2803 * @link: Link to set SATA spd for 2804 * 2805 * Set SATA spd of @link according to sata_spd_limit. 2806 * 2807 * LOCKING: 2808 * Inherited from caller. 2809 * 2810 * RETURNS: 2811 * 0 if spd doesn't need to be changed, 1 if spd has been 2812 * changed. Negative errno if SCR registers are inaccessible. 2813 */ 2814 int sata_set_spd(struct ata_link *link) 2815 { 2816 u32 scontrol; 2817 int rc; 2818 2819 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 2820 return rc; 2821 2822 if (!__sata_set_spd_needed(link, &scontrol)) 2823 return 0; 2824 2825 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 2826 return rc; 2827 2828 return 1; 2829 } 2830 2831 /* 2832 * This mode timing computation functionality is ported over from 2833 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 2834 */ 2835 /* 2836 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 2837 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 2838 * for UDMA6, which is currently supported only by Maxtor drives. 2839 * 2840 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
2841 */ 2842 2843 static const struct ata_timing ata_timing[] = { 2844 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ 2845 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, 2846 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, 2847 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, 2848 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, 2849 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, 2850 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, 2851 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, 2852 2853 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, 2854 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, 2855 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, 2856 2857 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, 2858 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, 2859 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, 2860 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, 2861 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, 2862 2863 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 2864 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, 2865 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, 2866 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, 2867 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, 2868 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, 2869 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, 2870 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, 2871 2872 { 0xFF } 2873 }; 2874 2875 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 2876 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 2877 2878 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 2879 { 2880 q->setup = EZ(t->setup * 1000, T); 2881 q->act8b = EZ(t->act8b * 1000, T); 2882 q->rec8b = EZ(t->rec8b * 1000, T); 2883 q->cyc8b = EZ(t->cyc8b * 1000, T); 2884 q->active = EZ(t->active * 1000, T); 2885 q->recover = EZ(t->recover * 1000, T); 2886 q->dmack_hold = EZ(t->dmack_hold * 1000, T); 2887 q->cycle = EZ(t->cycle * 1000, T); 2888 q->udma = EZ(t->udma * 1000, UT); 2889 } 2890 2891 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 2892 struct ata_timing *m, unsigned int what) 2893 { 2894 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 2895 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 2896 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 2897 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 2898 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 2899 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 2900 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); 2901 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 2902 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 2903 } 2904 2905 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 2906 { 2907 const struct ata_timing *t = ata_timing; 2908 2909 while (xfer_mode > t->mode) 2910 t++; 2911 2912 if (xfer_mode == t->mode) 2913 return t; 2914 return NULL; 2915 } 2916 2917 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 2918 struct ata_timing *t, int T, int UT) 2919 { 2920 const u16 *id = adev->id; 2921 const struct ata_timing *s; 2922 struct ata_timing p; 2923 2924 /* 2925 * Find the mode. 
2926 */ 2927 2928 if (!(s = ata_timing_find_mode(speed))) 2929 return -EINVAL; 2930 2931 memcpy(t, s, sizeof(*s)); 2932 2933 /* 2934 * If the drive is an EIDE drive, it can tell us it needs extended 2935 * PIO/MW_DMA cycle timing. 2936 */ 2937 2938 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 2939 memset(&p, 0, sizeof(p)); 2940 2941 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { 2942 if (speed <= XFER_PIO_2) 2943 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; 2944 else if ((speed <= XFER_PIO_4) || 2945 (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) 2946 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; 2947 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) 2948 p.cycle = id[ATA_ID_EIDE_DMA_MIN]; 2949 2950 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 2951 } 2952 2953 /* 2954 * Convert the timing to bus clock counts. 2955 */ 2956 2957 ata_timing_quantize(t, t, T, UT); 2958 2959 /* 2960 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 2961 * S.M.A.R.T * and some other commands. We have to ensure that the 2962 * DMA cycle timing is slower/equal than the fastest PIO timing. 2963 */ 2964 2965 if (speed > XFER_PIO_6) { 2966 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 2967 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 2968 } 2969 2970 /* 2971 * Lengthen active & recovery time so that cycle time is correct. 2972 */ 2973 2974 if (t->act8b + t->rec8b < t->cyc8b) { 2975 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 2976 t->rec8b = t->cyc8b - t->act8b; 2977 } 2978 2979 if (t->active + t->recover < t->cycle) { 2980 t->active += (t->cycle - (t->active + t->recover)) / 2; 2981 t->recover = t->cycle - t->active; 2982 } 2983 2984 /* In a few cases quantisation may produce enough errors to 2985 leave t->cycle too low for the sum of active and recovery 2986 if so we must correct this */ 2987 if (t->active + t->recover > t->cycle) 2988 t->cycle = t->active + t->recover; 2989 2990 return 0; 2991 } 2992 2993 /** 2994 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration 2995 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. 2996 * @cycle: cycle duration in ns 2997 * 2998 * Return matching xfer mode for @cycle. The returned mode is of 2999 * the transfer type specified by @xfer_shift. If @cycle is too 3000 * slow for @xfer_shift, 0xff is returned. If @cycle is faster 3001 * than the fastest known mode, the fasted mode is returned. 3002 * 3003 * LOCKING: 3004 * None. 3005 * 3006 * RETURNS: 3007 * Matching xfer_mode, 0xff if no match found. 
3008 */ 3009 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3010 { 3011 u8 base_mode = 0xff, last_mode = 0xff; 3012 const struct ata_xfer_ent *ent; 3013 const struct ata_timing *t; 3014 3015 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3016 if (ent->shift == xfer_shift) 3017 base_mode = ent->base; 3018 3019 for (t = ata_timing_find_mode(base_mode); 3020 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3021 unsigned short this_cycle; 3022 3023 switch (xfer_shift) { 3024 case ATA_SHIFT_PIO: 3025 case ATA_SHIFT_MWDMA: 3026 this_cycle = t->cycle; 3027 break; 3028 case ATA_SHIFT_UDMA: 3029 this_cycle = t->udma; 3030 break; 3031 default: 3032 return 0xff; 3033 } 3034 3035 if (cycle > this_cycle) 3036 break; 3037 3038 last_mode = t->mode; 3039 } 3040 3041 return last_mode; 3042 } 3043 3044 /** 3045 * ata_down_xfermask_limit - adjust dev xfer masks downward 3046 * @dev: Device to adjust xfer masks 3047 * @sel: ATA_DNXFER_* selector 3048 * 3049 * Adjust xfer masks of @dev downward. Note that this function 3050 * does not apply the change. Invoking ata_set_mode() afterwards 3051 * will apply the limit. 3052 * 3053 * LOCKING: 3054 * Inherited from caller. 3055 * 3056 * RETURNS: 3057 * 0 on success, negative errno on failure 3058 */ 3059 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3060 { 3061 char buf[32]; 3062 unsigned long orig_mask, xfer_mask; 3063 unsigned long pio_mask, mwdma_mask, udma_mask; 3064 int quiet, highbit; 3065 3066 quiet = !!(sel & ATA_DNXFER_QUIET); 3067 sel &= ~ATA_DNXFER_QUIET; 3068 3069 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3070 dev->mwdma_mask, 3071 dev->udma_mask); 3072 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3073 3074 switch (sel) { 3075 case ATA_DNXFER_PIO: 3076 highbit = fls(pio_mask) - 1; 3077 pio_mask &= ~(1 << highbit); 3078 break; 3079 3080 case ATA_DNXFER_DMA: 3081 if (udma_mask) { 3082 highbit = fls(udma_mask) - 1; 3083 udma_mask &= ~(1 << highbit); 3084 if (!udma_mask) 3085 return -ENOENT; 3086 } else if (mwdma_mask) { 3087 highbit = fls(mwdma_mask) - 1; 3088 mwdma_mask &= ~(1 << highbit); 3089 if (!mwdma_mask) 3090 return -ENOENT; 3091 } 3092 break; 3093 3094 case ATA_DNXFER_40C: 3095 udma_mask &= ATA_UDMA_MASK_40C; 3096 break; 3097 3098 case ATA_DNXFER_FORCE_PIO0: 3099 pio_mask &= 1; 3100 case ATA_DNXFER_FORCE_PIO: 3101 mwdma_mask = 0; 3102 udma_mask = 0; 3103 break; 3104 3105 default: 3106 BUG(); 3107 } 3108 3109 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3110 3111 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3112 return -ENOENT; 3113 3114 if (!quiet) { 3115 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3116 snprintf(buf, sizeof(buf), "%s:%s", 3117 ata_mode_string(xfer_mask), 3118 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3119 else 3120 snprintf(buf, sizeof(buf), "%s", 3121 ata_mode_string(xfer_mask)); 3122 3123 ata_dev_warn(dev, "limiting speed to %s\n", buf); 3124 } 3125 3126 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3127 &dev->udma_mask); 3128 3129 return 0; 3130 } 3131 3132 static int ata_dev_set_mode(struct ata_device *dev) 3133 { 3134 struct ata_port *ap = dev->link->ap; 3135 struct ata_eh_context *ehc = &dev->link->eh_context; 3136 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; 3137 const char *dev_err_whine = ""; 3138 int ign_dev_err = 0; 3139 unsigned int err_mask = 0; 3140 int rc; 3141 3142 dev->flags &= ~ATA_DFLAG_PIO; 3143 if (dev->xfer_shift == ATA_SHIFT_PIO) 3144 dev->flags |= 
ATA_DFLAG_PIO; 3145 3146 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) 3147 dev_err_whine = " (SET_XFERMODE skipped)"; 3148 else { 3149 if (nosetxfer) 3150 ata_dev_warn(dev, 3151 "NOSETXFER but PATA detected - can't " 3152 "skip SETXFER, might malfunction\n"); 3153 err_mask = ata_dev_set_xfermode(dev); 3154 } 3155 3156 if (err_mask & ~AC_ERR_DEV) 3157 goto fail; 3158 3159 /* revalidate */ 3160 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3161 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3162 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3163 if (rc) 3164 return rc; 3165 3166 if (dev->xfer_shift == ATA_SHIFT_PIO) { 3167 /* Old CFA may refuse this command, which is just fine */ 3168 if (ata_id_is_cfa(dev->id)) 3169 ign_dev_err = 1; 3170 /* Catch several broken garbage emulations plus some pre 3171 ATA devices */ 3172 if (ata_id_major_version(dev->id) == 0 && 3173 dev->pio_mode <= XFER_PIO_2) 3174 ign_dev_err = 1; 3175 /* Some very old devices and some bad newer ones fail 3176 any kind of SET_XFERMODE request but support PIO0-2 3177 timings and no IORDY */ 3178 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) 3179 ign_dev_err = 1; 3180 } 3181 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3182 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3183 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3184 dev->dma_mode == XFER_MW_DMA_0 && 3185 (dev->id[63] >> 8) & 1) 3186 ign_dev_err = 1; 3187 3188 /* if the device is actually configured correctly, ignore dev err */ 3189 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3190 ign_dev_err = 1; 3191 3192 if (err_mask & AC_ERR_DEV) { 3193 if (!ign_dev_err) 3194 goto fail; 3195 else 3196 dev_err_whine = " (device error ignored)"; 3197 } 3198 3199 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3200 dev->xfer_shift, (int)dev->xfer_mode); 3201 3202 ata_dev_info(dev, "configured for %s%s\n", 3203 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3204 dev_err_whine); 3205 3206 return 0; 3207 3208 fail: 3209 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask); 3210 return -EIO; 3211 } 3212 3213 /** 3214 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3215 * @link: link on which timings will be programmed 3216 * @r_failed_dev: out parameter for failed device 3217 * 3218 * Standard implementation of the function used to tune and set 3219 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3220 * ata_dev_set_mode() fails, pointer to the failing device is 3221 * returned in @r_failed_dev. 3222 * 3223 * LOCKING: 3224 * PCI/etc. bus probe sem. 
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}

/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return a
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which 0xff timeout to use, read comment in libata.h */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from master.  On
	 * M/S emulation configuration, this function should be called
	 * only on the master and it will handle both master and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Wait for
		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
		 * offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}

/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}

/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  @timeout constrains the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until @timeout and then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
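 *
 *	Minimal calling sketch (illustrative only; the {interval, duration,
 *	timeout} triple and the 2000 ms deadline below are made-up values,
 *	real callers normally pass the table returned by
 *	sata_ehc_deb_timing()):
 *
 *		static const unsigned long deb[] = { 10, 200, 1000 };
 *		int rc = sata_link_debounce(link, deb,
 *					    ata_deadline(jiffies, 2000));
 *
 *	A return of 0 here means DET held a stable value for the full
 *	200 ms window before the 1000 ms debounce timeout or @deadline
 *	ran out.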
3442 */ 3443 int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3444 unsigned long deadline) 3445 { 3446 unsigned long interval = params[0]; 3447 unsigned long duration = params[1]; 3448 unsigned long last_jiffies, t; 3449 u32 last, cur; 3450 int rc; 3451 3452 t = ata_deadline(jiffies, params[2]); 3453 if (time_before(t, deadline)) 3454 deadline = t; 3455 3456 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3457 return rc; 3458 cur &= 0xf; 3459 3460 last = cur; 3461 last_jiffies = jiffies; 3462 3463 while (1) { 3464 ata_msleep(link->ap, interval); 3465 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3466 return rc; 3467 cur &= 0xf; 3468 3469 /* DET stable? */ 3470 if (cur == last) { 3471 if (cur == 1 && time_before(jiffies, deadline)) 3472 continue; 3473 if (time_after(jiffies, 3474 ata_deadline(last_jiffies, duration))) 3475 return 0; 3476 continue; 3477 } 3478 3479 /* unstable, start over */ 3480 last = cur; 3481 last_jiffies = jiffies; 3482 3483 /* Check deadline. If debouncing failed, return 3484 * -EPIPE to tell upper layer to lower link speed. 3485 */ 3486 if (time_after(jiffies, deadline)) 3487 return -EPIPE; 3488 } 3489 } 3490 3491 /** 3492 * sata_link_resume - resume SATA link 3493 * @link: ATA link to resume SATA 3494 * @params: timing parameters { interval, duratinon, timeout } in msec 3495 * @deadline: deadline jiffies for the operation 3496 * 3497 * Resume SATA phy @link and debounce it. 3498 * 3499 * LOCKING: 3500 * Kernel thread context (may sleep) 3501 * 3502 * RETURNS: 3503 * 0 on success, -errno on failure. 3504 */ 3505 int sata_link_resume(struct ata_link *link, const unsigned long *params, 3506 unsigned long deadline) 3507 { 3508 int tries = ATA_LINK_RESUME_TRIES; 3509 u32 scontrol, serror; 3510 int rc; 3511 3512 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3513 return rc; 3514 3515 /* 3516 * Writes to SControl sometimes get ignored under certain 3517 * controllers (ata_piix SIDPR). Make sure DET actually is 3518 * cleared. 3519 */ 3520 do { 3521 scontrol = (scontrol & 0x0f0) | 0x300; 3522 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3523 return rc; 3524 /* 3525 * Some PHYs react badly if SStatus is pounded 3526 * immediately after resuming. Delay 200ms before 3527 * debouncing. 3528 */ 3529 ata_msleep(link->ap, 200); 3530 3531 /* is SControl restored correctly? */ 3532 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3533 return rc; 3534 } while ((scontrol & 0xf0f) != 0x300 && --tries); 3535 3536 if ((scontrol & 0xf0f) != 0x300) { 3537 ata_link_warn(link, "failed to resume link (SControl %X)\n", 3538 scontrol); 3539 return 0; 3540 } 3541 3542 if (tries < ATA_LINK_RESUME_TRIES) 3543 ata_link_warn(link, "link resume succeeded after %d retries\n", 3544 ATA_LINK_RESUME_TRIES - tries); 3545 3546 if ((rc = sata_link_debounce(link, params, deadline))) 3547 return rc; 3548 3549 /* clear SError, some PHYs require this even for SRST to work */ 3550 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) 3551 rc = sata_scr_write(link, SCR_ERROR, serror); 3552 3553 return rc != -EINVAL ? rc : 0; 3554 } 3555 3556 /** 3557 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields 3558 * @link: ATA link to manipulate SControl for 3559 * @policy: LPM policy to configure 3560 * @spm_wakeup: initiate LPM transition to active state 3561 * 3562 * Manipulate the IPM field of the SControl register of @link 3563 * according to @policy. 
If @policy is ATA_LPM_MAX_POWER and 3564 * @spm_wakeup is %true, the SPM field is manipulated to wake up 3565 * the link. This function also clears PHYRDY_CHG before 3566 * returning. 3567 * 3568 * LOCKING: 3569 * EH context. 3570 * 3571 * RETURNS: 3572 * 0 on succes, -errno otherwise. 3573 */ 3574 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, 3575 bool spm_wakeup) 3576 { 3577 struct ata_eh_context *ehc = &link->eh_context; 3578 bool woken_up = false; 3579 u32 scontrol; 3580 int rc; 3581 3582 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 3583 if (rc) 3584 return rc; 3585 3586 switch (policy) { 3587 case ATA_LPM_MAX_POWER: 3588 /* disable all LPM transitions */ 3589 scontrol |= (0x3 << 8); 3590 /* initiate transition to active state */ 3591 if (spm_wakeup) { 3592 scontrol |= (0x4 << 12); 3593 woken_up = true; 3594 } 3595 break; 3596 case ATA_LPM_MED_POWER: 3597 /* allow LPM to PARTIAL */ 3598 scontrol &= ~(0x1 << 8); 3599 scontrol |= (0x2 << 8); 3600 break; 3601 case ATA_LPM_MIN_POWER: 3602 if (ata_link_nr_enabled(link) > 0) 3603 /* no restrictions on LPM transitions */ 3604 scontrol &= ~(0x3 << 8); 3605 else { 3606 /* empty port, power off */ 3607 scontrol &= ~0xf; 3608 scontrol |= (0x1 << 2); 3609 } 3610 break; 3611 default: 3612 WARN_ON(1); 3613 } 3614 3615 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 3616 if (rc) 3617 return rc; 3618 3619 /* give the link time to transit out of LPM state */ 3620 if (woken_up) 3621 msleep(10); 3622 3623 /* clear PHYRDY_CHG from SError */ 3624 ehc->i.serror &= ~SERR_PHYRDY_CHG; 3625 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); 3626 } 3627 3628 /** 3629 * ata_std_prereset - prepare for reset 3630 * @link: ATA link to be reset 3631 * @deadline: deadline jiffies for the operation 3632 * 3633 * @link is about to be reset. Initialize it. Failure from 3634 * prereset makes libata abort whole reset sequence and give up 3635 * that port, so prereset should be best-effort. It does its 3636 * best to prepare for reset sequence but if things go wrong, it 3637 * should just whine, not fail. 3638 * 3639 * LOCKING: 3640 * Kernel thread context (may sleep) 3641 * 3642 * RETURNS: 3643 * 0 on success, -errno otherwise. 3644 */ 3645 int ata_std_prereset(struct ata_link *link, unsigned long deadline) 3646 { 3647 struct ata_port *ap = link->ap; 3648 struct ata_eh_context *ehc = &link->eh_context; 3649 const unsigned long *timing = sata_ehc_deb_timing(ehc); 3650 int rc; 3651 3652 /* if we're about to do hardreset, nothing more to do */ 3653 if (ehc->i.action & ATA_EH_HARDRESET) 3654 return 0; 3655 3656 /* if SATA, resume link */ 3657 if (ap->flags & ATA_FLAG_SATA) { 3658 rc = sata_link_resume(link, timing, deadline); 3659 /* whine about phy resume failure but proceed */ 3660 if (rc && rc != -EOPNOTSUPP) 3661 ata_link_warn(link, 3662 "failed to resume link for reset (errno=%d)\n", 3663 rc); 3664 } 3665 3666 /* no point in trying softreset on offline link */ 3667 if (ata_phys_link_offline(link)) 3668 ehc->i.action &= ~ATA_EH_SOFTRESET; 3669 3670 return 0; 3671 } 3672 3673 /** 3674 * sata_link_hardreset - reset link via SATA phy reset 3675 * @link: link to reset 3676 * @timing: timing parameters { interval, duratinon, timeout } in msec 3677 * @deadline: deadline jiffies for the operation 3678 * @online: optional out parameter indicating link onlineness 3679 * @check_ready: optional callback to check link readiness 3680 * 3681 * SATA phy-reset @link using DET bits of SControl register. 
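 *	(DET is written to 1 to issue COMRESET, see the 0x301 SControl
 *	write in the body below, and is put back to 0 by sata_link_resume()
 *	above so the PHY can renegotiate.)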
3682 * After hardreset, link readiness is waited upon using 3683 * ata_wait_ready() if @check_ready is specified. LLDs are 3684 * allowed to not specify @check_ready and wait itself after this 3685 * function returns. Device classification is LLD's 3686 * responsibility. 3687 * 3688 * *@online is set to one iff reset succeeded and @link is online 3689 * after reset. 3690 * 3691 * LOCKING: 3692 * Kernel thread context (may sleep) 3693 * 3694 * RETURNS: 3695 * 0 on success, -errno otherwise. 3696 */ 3697 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 3698 unsigned long deadline, 3699 bool *online, int (*check_ready)(struct ata_link *)) 3700 { 3701 u32 scontrol; 3702 int rc; 3703 3704 DPRINTK("ENTER\n"); 3705 3706 if (online) 3707 *online = false; 3708 3709 if (sata_set_spd_needed(link)) { 3710 /* SATA spec says nothing about how to reconfigure 3711 * spd. To be on the safe side, turn off phy during 3712 * reconfiguration. This works for at least ICH7 AHCI 3713 * and Sil3124. 3714 */ 3715 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3716 goto out; 3717 3718 scontrol = (scontrol & 0x0f0) | 0x304; 3719 3720 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3721 goto out; 3722 3723 sata_set_spd(link); 3724 } 3725 3726 /* issue phy wake/reset */ 3727 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3728 goto out; 3729 3730 scontrol = (scontrol & 0x0f0) | 0x301; 3731 3732 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 3733 goto out; 3734 3735 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3736 * 10.4.2 says at least 1 ms. 3737 */ 3738 ata_msleep(link->ap, 1); 3739 3740 /* bring link back */ 3741 rc = sata_link_resume(link, timing, deadline); 3742 if (rc) 3743 goto out; 3744 /* if link is offline nothing more to do */ 3745 if (ata_phys_link_offline(link)) 3746 goto out; 3747 3748 /* Link is online. From this point, -ENODEV too is an error. */ 3749 if (online) 3750 *online = true; 3751 3752 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { 3753 /* If PMP is supported, we have to do follow-up SRST. 3754 * Some PMPs don't send D2H Reg FIS after hardreset if 3755 * the first port is empty. Wait only for 3756 * ATA_TMOUT_PMP_SRST_WAIT. 3757 */ 3758 if (check_ready) { 3759 unsigned long pmp_deadline; 3760 3761 pmp_deadline = ata_deadline(jiffies, 3762 ATA_TMOUT_PMP_SRST_WAIT); 3763 if (time_after(pmp_deadline, deadline)) 3764 pmp_deadline = deadline; 3765 ata_wait_ready(link, pmp_deadline, check_ready); 3766 } 3767 rc = -EAGAIN; 3768 goto out; 3769 } 3770 3771 rc = 0; 3772 if (check_ready) 3773 rc = ata_wait_ready(link, deadline, check_ready); 3774 out: 3775 if (rc && rc != -EAGAIN) { 3776 /* online is set iff link is online && reset succeeded */ 3777 if (online) 3778 *online = false; 3779 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); 3780 } 3781 DPRINTK("EXIT, rc=%d\n", rc); 3782 return rc; 3783 } 3784 3785 /** 3786 * sata_std_hardreset - COMRESET w/o waiting or classification 3787 * @link: link to reset 3788 * @class: resulting class of attached device 3789 * @deadline: deadline jiffies for the operation 3790 * 3791 * Standard SATA COMRESET w/o waiting or classification. 3792 * 3793 * LOCKING: 3794 * Kernel thread context (may sleep) 3795 * 3796 * RETURNS: 3797 * 0 if link offline, -EAGAIN if link online, -errno on errors. 
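 *
 *	Illustrative only: a hypothetical LLD that wants to wait for
 *	readiness itself would typically wrap sata_link_hardreset()
 *	directly instead of using this helper, along the lines of
 *
 *		static int my_hardreset(struct ata_link *link,
 *					unsigned int *class,
 *					unsigned long deadline)
 *		{
 *			const unsigned long *timing =
 *				sata_ehc_deb_timing(&link->eh_context);
 *			bool online;
 *
 *			return sata_link_hardreset(link, timing, deadline,
 *						   &online, my_check_ready);
 *		}
 *
 *	where my_check_ready() is the driver's own readiness test and
 *	device classification remains the driver's job.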
3798 */ 3799 int sata_std_hardreset(struct ata_link *link, unsigned int *class, 3800 unsigned long deadline) 3801 { 3802 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 3803 bool online; 3804 int rc; 3805 3806 /* do hardreset */ 3807 rc = sata_link_hardreset(link, timing, deadline, &online, NULL); 3808 return online ? -EAGAIN : rc; 3809 } 3810 3811 /** 3812 * ata_std_postreset - standard postreset callback 3813 * @link: the target ata_link 3814 * @classes: classes of attached devices 3815 * 3816 * This function is invoked after a successful reset. Note that 3817 * the device might have been reset more than once using 3818 * different reset methods before postreset is invoked. 3819 * 3820 * LOCKING: 3821 * Kernel thread context (may sleep) 3822 */ 3823 void ata_std_postreset(struct ata_link *link, unsigned int *classes) 3824 { 3825 u32 serror; 3826 3827 DPRINTK("ENTER\n"); 3828 3829 /* reset complete, clear SError */ 3830 if (!sata_scr_read(link, SCR_ERROR, &serror)) 3831 sata_scr_write(link, SCR_ERROR, serror); 3832 3833 /* print link status */ 3834 sata_print_link_status(link); 3835 3836 DPRINTK("EXIT\n"); 3837 } 3838 3839 /** 3840 * ata_dev_same_device - Determine whether new ID matches configured device 3841 * @dev: device to compare against 3842 * @new_class: class of the new device 3843 * @new_id: IDENTIFY page of the new device 3844 * 3845 * Compare @new_class and @new_id against @dev and determine 3846 * whether @dev is the device indicated by @new_class and 3847 * @new_id. 3848 * 3849 * LOCKING: 3850 * None. 3851 * 3852 * RETURNS: 3853 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 3854 */ 3855 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 3856 const u16 *new_id) 3857 { 3858 const u16 *old_id = dev->id; 3859 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 3860 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 3861 3862 if (dev->class != new_class) { 3863 ata_dev_info(dev, "class mismatch %d != %d\n", 3864 dev->class, new_class); 3865 return 0; 3866 } 3867 3868 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 3869 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 3870 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 3871 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 3872 3873 if (strcmp(model[0], model[1])) { 3874 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n", 3875 model[0], model[1]); 3876 return 0; 3877 } 3878 3879 if (strcmp(serial[0], serial[1])) { 3880 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n", 3881 serial[0], serial[1]); 3882 return 0; 3883 } 3884 3885 return 1; 3886 } 3887 3888 /** 3889 * ata_dev_reread_id - Re-read IDENTIFY data 3890 * @dev: target ATA device 3891 * @readid_flags: read ID flags 3892 * 3893 * Re-read IDENTIFY page and make sure @dev is still attached to 3894 * the port. 3895 * 3896 * LOCKING: 3897 * Kernel thread context (may sleep) 3898 * 3899 * RETURNS: 3900 * 0 on success, negative errno otherwise 3901 */ 3902 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 3903 { 3904 unsigned int class = dev->class; 3905 u16 *id = (void *)dev->link->ap->sector_buf; 3906 int rc; 3907 3908 /* read ID data */ 3909 rc = ata_dev_read_id(dev, &class, readid_flags, id); 3910 if (rc) 3911 return rc; 3912 3913 /* is the device still there? 
*/ 3914 if (!ata_dev_same_device(dev, class, id)) 3915 return -ENODEV; 3916 3917 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 3918 return 0; 3919 } 3920 3921 /** 3922 * ata_dev_revalidate - Revalidate ATA device 3923 * @dev: device to revalidate 3924 * @new_class: new class code 3925 * @readid_flags: read ID flags 3926 * 3927 * Re-read IDENTIFY page, make sure @dev is still attached to the 3928 * port and reconfigure it according to the new IDENTIFY page. 3929 * 3930 * LOCKING: 3931 * Kernel thread context (may sleep) 3932 * 3933 * RETURNS: 3934 * 0 on success, negative errno otherwise 3935 */ 3936 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 3937 unsigned int readid_flags) 3938 { 3939 u64 n_sectors = dev->n_sectors; 3940 u64 n_native_sectors = dev->n_native_sectors; 3941 int rc; 3942 3943 if (!ata_dev_enabled(dev)) 3944 return -ENODEV; 3945 3946 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 3947 if (ata_class_enabled(new_class) && 3948 new_class != ATA_DEV_ATA && 3949 new_class != ATA_DEV_ATAPI && 3950 new_class != ATA_DEV_SEMB) { 3951 ata_dev_info(dev, "class mismatch %u != %u\n", 3952 dev->class, new_class); 3953 rc = -ENODEV; 3954 goto fail; 3955 } 3956 3957 /* re-read ID */ 3958 rc = ata_dev_reread_id(dev, readid_flags); 3959 if (rc) 3960 goto fail; 3961 3962 /* configure device according to the new ID */ 3963 rc = ata_dev_configure(dev); 3964 if (rc) 3965 goto fail; 3966 3967 /* verify n_sectors hasn't changed */ 3968 if (dev->class != ATA_DEV_ATA || !n_sectors || 3969 dev->n_sectors == n_sectors) 3970 return 0; 3971 3972 /* n_sectors has changed */ 3973 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n", 3974 (unsigned long long)n_sectors, 3975 (unsigned long long)dev->n_sectors); 3976 3977 /* 3978 * Something could have caused HPA to be unlocked 3979 * involuntarily. If n_native_sectors hasn't changed and the 3980 * new size matches it, keep the device. 3981 */ 3982 if (dev->n_native_sectors == n_native_sectors && 3983 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { 3984 ata_dev_warn(dev, 3985 "new n_sectors matches native, probably " 3986 "late HPA unlock, n_sectors updated\n"); 3987 /* use the larger n_sectors */ 3988 return 0; 3989 } 3990 3991 /* 3992 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try 3993 * unlocking HPA in those cases. 
3994 * 3995 * https://bugzilla.kernel.org/show_bug.cgi?id=15396 3996 */ 3997 if (dev->n_native_sectors == n_native_sectors && 3998 dev->n_sectors < n_sectors && n_sectors == n_native_sectors && 3999 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { 4000 ata_dev_warn(dev, 4001 "old n_sectors matches native, probably " 4002 "late HPA lock, will try to unlock HPA\n"); 4003 /* try unlocking HPA */ 4004 dev->flags |= ATA_DFLAG_UNLOCK_HPA; 4005 rc = -EIO; 4006 } else 4007 rc = -ENODEV; 4008 4009 /* restore original n_[native_]sectors and fail */ 4010 dev->n_native_sectors = n_native_sectors; 4011 dev->n_sectors = n_sectors; 4012 fail: 4013 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc); 4014 return rc; 4015 } 4016 4017 struct ata_blacklist_entry { 4018 const char *model_num; 4019 const char *model_rev; 4020 unsigned long horkage; 4021 }; 4022 4023 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4024 /* Devices with DMA related problems under Linux */ 4025 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4026 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4027 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4028 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4029 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4030 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4031 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4032 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4033 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4034 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA }, 4035 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4036 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4037 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4038 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4039 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4040 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA }, 4041 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4042 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4043 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4044 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4045 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4046 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4047 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4048 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4049 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4050 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4051 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4052 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4053 /* Odd clown on sil3726/4726 PMPs */ 4054 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4055 4056 /* Weird ATAPI devices */ 4057 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4058 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4059 4060 /* Devices we expect to fail diagnostics */ 4061 4062 /* Devices where NCQ should be avoided */ 4063 /* NCQ is slow */ 4064 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4065 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4066 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4067 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4068 /* NCQ is broken */ 4069 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4070 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4071 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4072 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4073 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4074 4075 /* Seagate NCQ + FLUSH CACHE firmware bug */ 4076 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4077 ATA_HORKAGE_FIRMWARE_WARN }, 4078 4079 { 
"ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4080 ATA_HORKAGE_FIRMWARE_WARN }, 4081 4082 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4083 ATA_HORKAGE_FIRMWARE_WARN }, 4084 4085 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4086 ATA_HORKAGE_FIRMWARE_WARN }, 4087 4088 /* Blacklist entries taken from Silicon Image 3124/3132 4089 Windows driver .inf file - also several Linux problem reports */ 4090 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4091 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4092 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4093 4094 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ 4095 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, 4096 4097 /* devices which puke on READ_NATIVE_MAX */ 4098 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4099 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4100 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4101 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4102 4103 /* this one allows HPA unlocking but fails IOs on the area */ 4104 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, 4105 4106 /* Devices which report 1 sector over size HPA */ 4107 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4108 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4109 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4110 4111 /* Devices which get the IVB wrong */ 4112 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4113 /* Maybe we should just blacklist TSSTcorp... */ 4114 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, }, 4115 4116 /* Devices that do not need bridging limits applied */ 4117 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4118 4119 /* Devices which aren't very happy with higher link speeds */ 4120 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4121 4122 /* 4123 * Devices which choke on SETXFER. Applies only if both the 4124 * device and controller are SATA. 4125 */ 4126 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, 4127 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4128 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4129 4130 /* End Marker */ 4131 { } 4132 }; 4133 4134 /** 4135 * glob_match - match a text string against a glob-style pattern 4136 * @text: the string to be examined 4137 * @pattern: the glob-style pattern to be matched against 4138 * 4139 * Either/both of text and pattern can be empty strings. 4140 * 4141 * Match text against a glob-style pattern, with wildcards and simple sets: 4142 * 4143 * ? matches any single character. 4144 * * matches any run of characters. 4145 * [xyz] matches a single character from the set: x, y, or z. 4146 * [a-d] matches a single character from the range: a, b, c, or d. 4147 * [a-d0-9] matches a single character from either range. 4148 * 4149 * The special characters ?, [, -, or *, can be matched using a set, eg. [*] 4150 * Behaviour with malformed patterns is undefined, though generally reasonable. 4151 * 4152 * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx" 4153 * 4154 * This function uses one level of recursion per '*' in pattern. 4155 * Since it calls _nothing_ else, and has _no_ explicit local variables, 4156 * this will not cause stack problems for any reasonable use here. 4157 * 4158 * RETURNS: 4159 * 0 on match, 1 otherwise. 4160 */ 4161 static int glob_match (const char *text, const char *pattern) 4162 { 4163 do { 4164 /* Match single character or a '?' 
wildcard */ 4165 if (*text == *pattern || *pattern == '?') { 4166 if (!*pattern++) 4167 return 0; /* End of both strings: match */ 4168 } else { 4169 /* Match single char against a '[' bracketed ']' pattern set */ 4170 if (!*text || *pattern != '[') 4171 break; /* Not a pattern set */ 4172 while (*++pattern && *pattern != ']' && *text != *pattern) { 4173 if (*pattern == '-' && *(pattern - 1) != '[') 4174 if (*text > *(pattern - 1) && *text < *(pattern + 1)) { 4175 ++pattern; 4176 break; 4177 } 4178 } 4179 if (!*pattern || *pattern == ']') 4180 return 1; /* No match */ 4181 while (*pattern && *pattern++ != ']'); 4182 } 4183 } while (*++text && *pattern); 4184 4185 /* Match any run of chars against a '*' wildcard */ 4186 if (*pattern == '*') { 4187 if (!*++pattern) 4188 return 0; /* Match: avoid recursion at end of pattern */ 4189 /* Loop to handle additional pattern chars after the wildcard */ 4190 while (*text) { 4191 if (glob_match(text, pattern) == 0) 4192 return 0; /* Remainder matched */ 4193 ++text; /* Absorb (match) this char and try again */ 4194 } 4195 } 4196 if (!*text && !*pattern) 4197 return 0; /* End of both strings: match */ 4198 return 1; /* No match */ 4199 } 4200 4201 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4202 { 4203 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4204 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4205 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4206 4207 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4208 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4209 4210 while (ad->model_num) { 4211 if (!glob_match(model_num, ad->model_num)) { 4212 if (ad->model_rev == NULL) 4213 return ad->horkage; 4214 if (!glob_match(model_rev, ad->model_rev)) 4215 return ad->horkage; 4216 } 4217 ad++; 4218 } 4219 return 0; 4220 } 4221 4222 static int ata_dma_blacklisted(const struct ata_device *dev) 4223 { 4224 /* We don't support polling DMA. 4225 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4226 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4227 */ 4228 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4229 (dev->flags & ATA_DFLAG_CDB_INTR)) 4230 return 1; 4231 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4232 } 4233 4234 /** 4235 * ata_is_40wire - check drive side detection 4236 * @dev: device 4237 * 4238 * Perform drive side detection decoding, allowing for device vendors 4239 * who can't follow the documentation. 4240 */ 4241 4242 static int ata_is_40wire(struct ata_device *dev) 4243 { 4244 if (dev->horkage & ATA_HORKAGE_IVB) 4245 return ata_drive_40wire_relaxed(dev->id); 4246 return ata_drive_40wire(dev->id); 4247 } 4248 4249 /** 4250 * cable_is_40wire - 40/80/SATA decider 4251 * @ap: port to consider 4252 * 4253 * This function encapsulates the policy for speed management 4254 * in one place. At the moment we don't cache the result but 4255 * there is a good case for setting ap->cbl to the result when 4256 * we are called with unknown cables (and figuring out if it 4257 * impacts hotplug at all). 4258 * 4259 * Return 1 if the cable appears to be 40 wire. 4260 */ 4261 4262 static int cable_is_40wire(struct ata_port *ap) 4263 { 4264 struct ata_link *link; 4265 struct ata_device *dev; 4266 4267 /* If the controller thinks we are 40 wire, we are. */ 4268 if (ap->cbl == ATA_CBL_PATA40) 4269 return 1; 4270 4271 /* If the controller thinks we are 80 wire, we are. 
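 *
 * Illustrative aside: the policy implemented below can be restated as a
 * small standalone sketch (plain C, not kernel code). The enum labels only
 * mirror the ATA_CBL_* names, and drive_is_40wire[] stands in for
 * ata_is_40wire() evaluated on each enabled device.
 *
 *   #include <stdbool.h>
 *
 *   enum cbl { CBL_UNKNOWN, CBL_PATA40, CBL_PATA40_SHORT, CBL_PATA80, CBL_SATA };
 *
 *   static bool port_is_40wire(enum cbl cbl, const bool *drive_is_40wire, int ndrives)
 *   {
 *           if (cbl == CBL_PATA40)
 *                   return true;           // controller is sure: 40 wire
 *           if (cbl == CBL_PATA80 || cbl == CBL_SATA)
 *                   return false;          // controller is sure: 80 wire or SATA
 *           if (cbl == CBL_PATA40_SHORT)
 *                   return false;          // short cable: 80-wire timings are safe
 *           for (int i = 0; i < ndrives; i++)
 *                   if (!drive_is_40wire[i])
 *                           return false;  // any 80-wire detect wins
 *           return true;                   // every enabled drive (if any) says 40 wire
 *   }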
*/ 4272 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4273 return 0; 4274 4275 /* If the system is known to be 40 wire short cable (eg 4276 * laptop), then we allow 80 wire modes even if the drive 4277 * isn't sure. 4278 */ 4279 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4280 return 0; 4281 4282 /* If the controller doesn't know, we scan. 4283 * 4284 * Note: We look for all 40 wire detects at this point. Any 4285 * 80 wire detect is taken to be 80 wire cable because 4286 * - in many setups only the one drive (slave if present) will 4287 * give a valid detect 4288 * - if you have a non detect capable drive you don't want it 4289 * to colour the choice 4290 */ 4291 ata_for_each_link(link, ap, EDGE) { 4292 ata_for_each_dev(dev, link, ENABLED) { 4293 if (!ata_is_40wire(dev)) 4294 return 0; 4295 } 4296 } 4297 return 1; 4298 } 4299 4300 /** 4301 * ata_dev_xfermask - Compute supported xfermask of the given device 4302 * @dev: Device to compute xfermask for 4303 * 4304 * Compute supported xfermask of @dev and store it in 4305 * dev->*_mask. This function is responsible for applying all 4306 * known limits including host controller limits, device 4307 * blacklist, etc... 4308 * 4309 * LOCKING: 4310 * None. 4311 */ 4312 static void ata_dev_xfermask(struct ata_device *dev) 4313 { 4314 struct ata_link *link = dev->link; 4315 struct ata_port *ap = link->ap; 4316 struct ata_host *host = ap->host; 4317 unsigned long xfer_mask; 4318 4319 /* controller modes available */ 4320 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4321 ap->mwdma_mask, ap->udma_mask); 4322 4323 /* drive modes available */ 4324 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4325 dev->mwdma_mask, dev->udma_mask); 4326 xfer_mask &= ata_id_xfermask(dev->id); 4327 4328 /* 4329 * CFA Advanced TrueIDE timings are not allowed on a shared 4330 * cable 4331 */ 4332 if (ata_dev_pair(dev)) { 4333 /* No PIO5 or PIO6 */ 4334 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4335 /* No MWDMA3 or MWDMA 4 */ 4336 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4337 } 4338 4339 if (ata_dma_blacklisted(dev)) { 4340 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4341 ata_dev_warn(dev, 4342 "device is on DMA blacklist, disabling DMA\n"); 4343 } 4344 4345 if ((host->flags & ATA_HOST_SIMPLEX) && 4346 host->simplex_claimed && host->simplex_claimed != ap) { 4347 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4348 ata_dev_warn(dev, 4349 "simplex DMA is claimed by other device, disabling DMA\n"); 4350 } 4351 4352 if (ap->flags & ATA_FLAG_NO_IORDY) 4353 xfer_mask &= ata_pio_mask_no_iordy(dev); 4354 4355 if (ap->ops->mode_filter) 4356 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4357 4358 /* Apply cable rule here. Don't apply it early because when 4359 * we handle hot plug the cable type can itself change. 4360 * Check this last so that we know if the transfer rate was 4361 * solely limited by the cable. 4362 * Unknown or 80 wire cables reported host side are checked 4363 * drive side as well. Cases where we know a 40wire cable 4364 * is used safely for 80 are not checked here. 
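 *
 * Illustrative aside: a standalone sketch (not kernel code) of the packed
 * transfer mask and of why clearing 0xF8 << ATA_SHIFT_UDMA caps the device
 * at UDMA/33. The SHIFT_* values below are stand-ins chosen for the example;
 * the real constants are the ATA_SHIFT_* definitions in <linux/ata.h>.
 *
 *   #include <stdio.h>
 *
 *   enum { SHIFT_PIO = 0, SHIFT_MWDMA = 8, SHIFT_UDMA = 16 };   // example values only
 *
 *   int main(void)
 *   {
 *           // controller and drive both advertise up to UDMA/100 (modes 0-5)
 *           unsigned long mask = (0x1fUL << SHIFT_PIO) |
 *                                (0x07UL << SHIFT_MWDMA) |
 *                                (0x3fUL << SHIFT_UDMA);
 *
 *           // 0xF8 selects UDMA modes 3-7, i.e. everything faster than UDMA/33
 *           if (mask & (0xF8UL << SHIFT_UDMA))
 *                   mask &= ~(0xF8UL << SHIFT_UDMA);            // 40-wire cable found
 *
 *           printf("UDMA bits left: 0x%02lx\n", (mask >> SHIFT_UDMA) & 0xffUL);
 *           return 0;                                           // prints 0x07: UDMA 0-2
 *   }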
4365 */ 4366 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4367 /* UDMA/44 or higher would be available */ 4368 if (cable_is_40wire(ap)) { 4369 ata_dev_warn(dev, 4370 "limited to UDMA/33 due to 40-wire cable\n"); 4371 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4372 } 4373 4374 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4375 &dev->mwdma_mask, &dev->udma_mask); 4376 } 4377 4378 /** 4379 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4380 * @dev: Device to which command will be sent 4381 * 4382 * Issue SET FEATURES - XFER MODE command to device @dev 4383 * on port @ap. 4384 * 4385 * LOCKING: 4386 * PCI/etc. bus probe sem. 4387 * 4388 * RETURNS: 4389 * 0 on success, AC_ERR_* mask otherwise. 4390 */ 4391 4392 static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4393 { 4394 struct ata_taskfile tf; 4395 unsigned int err_mask; 4396 4397 /* set up set-features taskfile */ 4398 DPRINTK("set features - xfer mode\n"); 4399 4400 /* Some controllers and ATAPI devices show flaky interrupt 4401 * behavior after setting xfer mode. Use polling instead. 4402 */ 4403 ata_tf_init(dev, &tf); 4404 tf.command = ATA_CMD_SET_FEATURES; 4405 tf.feature = SETFEATURES_XFER; 4406 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4407 tf.protocol = ATA_PROT_NODATA; 4408 /* If we are using IORDY we must send the mode setting command */ 4409 if (ata_pio_need_iordy(dev)) 4410 tf.nsect = dev->xfer_mode; 4411 /* If the device has IORDY and the controller does not - turn it off */ 4412 else if (ata_id_has_iordy(dev->id)) 4413 tf.nsect = 0x01; 4414 else /* In the ancient relic department - skip all of this */ 4415 return 0; 4416 4417 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4418 4419 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4420 return err_mask; 4421 } 4422 4423 /** 4424 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 4425 * @dev: Device to which command will be sent 4426 * @enable: Whether to enable or disable the feature 4427 * @feature: The sector count represents the feature to set 4428 * 4429 * Issue SET FEATURES - SATA FEATURES command to device @dev 4430 * on port @ap with sector count 4431 * 4432 * LOCKING: 4433 * PCI/etc. bus probe sem. 4434 * 4435 * RETURNS: 4436 * 0 on success, AC_ERR_* mask otherwise. 4437 */ 4438 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) 4439 { 4440 struct ata_taskfile tf; 4441 unsigned int err_mask; 4442 4443 /* set up set-features taskfile */ 4444 DPRINTK("set features - SATA features\n"); 4445 4446 ata_tf_init(dev, &tf); 4447 tf.command = ATA_CMD_SET_FEATURES; 4448 tf.feature = enable; 4449 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4450 tf.protocol = ATA_PROT_NODATA; 4451 tf.nsect = feature; 4452 4453 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4454 4455 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4456 return err_mask; 4457 } 4458 4459 /** 4460 * ata_dev_init_params - Issue INIT DEV PARAMS command 4461 * @dev: Device to which command will be sent 4462 * @heads: Number of heads (taskfile parameter) 4463 * @sectors: Number of sectors (taskfile parameter) 4464 * 4465 * LOCKING: 4466 * Kernel thread context (may sleep) 4467 * 4468 * RETURNS: 4469 * 0 on success, AC_ERR_* mask otherwise. 4470 */ 4471 static unsigned int ata_dev_init_params(struct ata_device *dev, 4472 u16 heads, u16 sectors) 4473 { 4474 struct ata_taskfile tf; 4475 unsigned int err_mask; 4476 4477 /* Number of sectors per track 1-255. 
Number of heads 1-16 */ 4478 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4479 return AC_ERR_INVALID; 4480 4481 /* set up init dev params taskfile */ 4482 DPRINTK("init dev params \n"); 4483 4484 ata_tf_init(dev, &tf); 4485 tf.command = ATA_CMD_INIT_DEV_PARAMS; 4486 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4487 tf.protocol = ATA_PROT_NODATA; 4488 tf.nsect = sectors; 4489 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 4490 4491 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4492 /* A clean abort indicates an original or just out of spec drive 4493 and we should continue as we issue the setup based on the 4494 drive reported working geometry */ 4495 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4496 err_mask = 0; 4497 4498 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4499 return err_mask; 4500 } 4501 4502 /** 4503 * ata_sg_clean - Unmap DMA memory associated with command 4504 * @qc: Command containing DMA memory to be released 4505 * 4506 * Unmap all mapped DMA memory associated with this command. 4507 * 4508 * LOCKING: 4509 * spin_lock_irqsave(host lock) 4510 */ 4511 void ata_sg_clean(struct ata_queued_cmd *qc) 4512 { 4513 struct ata_port *ap = qc->ap; 4514 struct scatterlist *sg = qc->sg; 4515 int dir = qc->dma_dir; 4516 4517 WARN_ON_ONCE(sg == NULL); 4518 4519 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4520 4521 if (qc->n_elem) 4522 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 4523 4524 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4525 qc->sg = NULL; 4526 } 4527 4528 /** 4529 * atapi_check_dma - Check whether ATAPI DMA can be supported 4530 * @qc: Metadata associated with taskfile to check 4531 * 4532 * Allow low-level driver to filter ATA PACKET commands, returning 4533 * a status indicating whether or not it is OK to use DMA for the 4534 * supplied PACKET command. 4535 * 4536 * LOCKING: 4537 * spin_lock_irqsave(host lock) 4538 * 4539 * RETURNS: 0 when ATAPI DMA can be used 4540 * nonzero otherwise 4541 */ 4542 int atapi_check_dma(struct ata_queued_cmd *qc) 4543 { 4544 struct ata_port *ap = qc->ap; 4545 4546 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4547 * few ATAPI devices choke on such DMA requests. 4548 */ 4549 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4550 unlikely(qc->nbytes & 15)) 4551 return 1; 4552 4553 if (ap->ops->check_atapi_dma) 4554 return ap->ops->check_atapi_dma(qc); 4555 4556 return 0; 4557 } 4558 4559 /** 4560 * ata_std_qc_defer - Check whether a qc needs to be deferred 4561 * @qc: ATA command in question 4562 * 4563 * Non-NCQ commands cannot run with any other command, NCQ or 4564 * not. As upper layer only knows the queue depth, we are 4565 * responsible for maintaining exclusion. This function checks 4566 * whether a new command @qc can be issued. 4567 * 4568 * LOCKING: 4569 * spin_lock_irqsave(host lock) 4570 * 4571 * RETURNS: 4572 * ATA_DEFER_* if deferring is needed, 0 otherwise. 4573 */ 4574 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4575 { 4576 struct ata_link *link = qc->dev->link; 4577 4578 if (qc->tf.protocol == ATA_PROT_NCQ) { 4579 if (!ata_tag_valid(link->active_tag)) 4580 return 0; 4581 } else { 4582 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4583 return 0; 4584 } 4585 4586 return ATA_DEFER_LINK; 4587 } 4588 4589 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4590 4591 /** 4592 * ata_sg_init - Associate command with scatter-gather table. 4593 * @qc: Command to be associated 4594 * @sg: Scatter-gather table. 
4595 * @n_elem: Number of elements in s/g table. 4596 * 4597 * Initialize the data-related elements of queued_cmd @qc 4598 * to point to a scatter-gather table @sg, containing @n_elem 4599 * elements. 4600 * 4601 * LOCKING: 4602 * spin_lock_irqsave(host lock) 4603 */ 4604 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4605 unsigned int n_elem) 4606 { 4607 qc->sg = sg; 4608 qc->n_elem = n_elem; 4609 qc->cursg = qc->sg; 4610 } 4611 4612 /** 4613 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4614 * @qc: Command with scatter-gather table to be mapped. 4615 * 4616 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4617 * 4618 * LOCKING: 4619 * spin_lock_irqsave(host lock) 4620 * 4621 * RETURNS: 4622 * Zero on success, negative on error. 4623 * 4624 */ 4625 static int ata_sg_setup(struct ata_queued_cmd *qc) 4626 { 4627 struct ata_port *ap = qc->ap; 4628 unsigned int n_elem; 4629 4630 VPRINTK("ENTER, ata%u\n", ap->print_id); 4631 4632 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 4633 if (n_elem < 1) 4634 return -1; 4635 4636 DPRINTK("%d sg elements mapped\n", n_elem); 4637 qc->orig_n_elem = qc->n_elem; 4638 qc->n_elem = n_elem; 4639 qc->flags |= ATA_QCFLAG_DMAMAP; 4640 4641 return 0; 4642 } 4643 4644 /** 4645 * swap_buf_le16 - swap halves of 16-bit words in place 4646 * @buf: Buffer to swap 4647 * @buf_words: Number of 16-bit words in buffer. 4648 * 4649 * Swap halves of 16-bit words if needed to convert from 4650 * little-endian byte order to native cpu byte order, or 4651 * vice-versa. 4652 * 4653 * LOCKING: 4654 * Inherited from caller. 4655 */ 4656 void swap_buf_le16(u16 *buf, unsigned int buf_words) 4657 { 4658 #ifdef __BIG_ENDIAN 4659 unsigned int i; 4660 4661 for (i = 0; i < buf_words; i++) 4662 buf[i] = le16_to_cpu(buf[i]); 4663 #endif /* __BIG_ENDIAN */ 4664 } 4665 4666 /** 4667 * ata_qc_new - Request an available ATA command, for queueing 4668 * @ap: target port 4669 * 4670 * LOCKING: 4671 * None. 4672 */ 4673 4674 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4675 { 4676 struct ata_queued_cmd *qc = NULL; 4677 unsigned int i; 4678 4679 /* no command while frozen */ 4680 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4681 return NULL; 4682 4683 /* the last tag is reserved for internal command. */ 4684 for (i = 0; i < ATA_MAX_QUEUE - 1; i++) 4685 if (!test_and_set_bit(i, &ap->qc_allocated)) { 4686 qc = __ata_qc_from_tag(ap, i); 4687 break; 4688 } 4689 4690 if (qc) 4691 qc->tag = i; 4692 4693 return qc; 4694 } 4695 4696 /** 4697 * ata_qc_new_init - Request an available ATA command, and initialize it 4698 * @dev: Device from whom we request an available command structure 4699 * 4700 * LOCKING: 4701 * None. 4702 */ 4703 4704 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) 4705 { 4706 struct ata_port *ap = dev->link->ap; 4707 struct ata_queued_cmd *qc; 4708 4709 qc = ata_qc_new(ap); 4710 if (qc) { 4711 qc->scsicmd = NULL; 4712 qc->ap = ap; 4713 qc->dev = dev; 4714 4715 ata_qc_reinit(qc); 4716 } 4717 4718 return qc; 4719 } 4720 4721 /** 4722 * ata_qc_free - free unused ata_queued_cmd 4723 * @qc: Command to complete 4724 * 4725 * Designed to free unused ata_queued_cmd object 4726 * in case something prevents using it. 
4727 * 4728 * LOCKING: 4729 * spin_lock_irqsave(host lock) 4730 */ 4731 void ata_qc_free(struct ata_queued_cmd *qc) 4732 { 4733 struct ata_port *ap; 4734 unsigned int tag; 4735 4736 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4737 ap = qc->ap; 4738 4739 qc->flags = 0; 4740 tag = qc->tag; 4741 if (likely(ata_tag_valid(tag))) { 4742 qc->tag = ATA_TAG_POISON; 4743 clear_bit(tag, &ap->qc_allocated); 4744 } 4745 } 4746 4747 void __ata_qc_complete(struct ata_queued_cmd *qc) 4748 { 4749 struct ata_port *ap; 4750 struct ata_link *link; 4751 4752 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4753 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4754 ap = qc->ap; 4755 link = qc->dev->link; 4756 4757 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4758 ata_sg_clean(qc); 4759 4760 /* command should be marked inactive atomically with qc completion */ 4761 if (qc->tf.protocol == ATA_PROT_NCQ) { 4762 link->sactive &= ~(1 << qc->tag); 4763 if (!link->sactive) 4764 ap->nr_active_links--; 4765 } else { 4766 link->active_tag = ATA_TAG_POISON; 4767 ap->nr_active_links--; 4768 } 4769 4770 /* clear exclusive status */ 4771 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 4772 ap->excl_link == link)) 4773 ap->excl_link = NULL; 4774 4775 /* atapi: mark qc as inactive to prevent the interrupt handler 4776 * from completing the command twice later, before the error handler 4777 * is called. (when rc != 0 and atapi request sense is needed) 4778 */ 4779 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4780 ap->qc_active &= ~(1 << qc->tag); 4781 4782 /* call completion callback */ 4783 qc->complete_fn(qc); 4784 } 4785 4786 static void fill_result_tf(struct ata_queued_cmd *qc) 4787 { 4788 struct ata_port *ap = qc->ap; 4789 4790 qc->result_tf.flags = qc->tf.flags; 4791 ap->ops->qc_fill_rtf(qc); 4792 } 4793 4794 static void ata_verify_xfer(struct ata_queued_cmd *qc) 4795 { 4796 struct ata_device *dev = qc->dev; 4797 4798 if (ata_is_nodata(qc->tf.protocol)) 4799 return; 4800 4801 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 4802 return; 4803 4804 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 4805 } 4806 4807 /** 4808 * ata_qc_complete - Complete an active ATA command 4809 * @qc: Command to complete 4810 * 4811 * Indicate to the mid and upper layers that an ATA command has 4812 * completed, with either an ok or not-ok status. 4813 * 4814 * Refrain from calling this function multiple times when 4815 * successfully completing multiple NCQ commands. 4816 * ata_qc_complete_multiple() should be used instead, which will 4817 * properly update IRQ expect state. 4818 * 4819 * LOCKING: 4820 * spin_lock_irqsave(host lock) 4821 */ 4822 void ata_qc_complete(struct ata_queued_cmd *qc) 4823 { 4824 struct ata_port *ap = qc->ap; 4825 4826 /* XXX: New EH and old EH use different mechanisms to 4827 * synchronize EH with regular execution path. 4828 * 4829 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 4830 * Normal execution path is responsible for not accessing a 4831 * failed qc. libata core enforces the rule by returning NULL 4832 * from ata_qc_from_tag() for failed qcs. 4833 * 4834 * Old EH depends on ata_qc_complete() nullifying completion 4835 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 4836 * not synchronize with interrupt handler. Only PIO task is 4837 * taken care of. 
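 *
 * Illustrative aside: the per-tag state used on this path (link->sactive,
 * ap->qc_active) is ordinary bitmask bookkeeping. A standalone sketch of how
 * ata_qc_complete_multiple() below derives the finished tags from the
 * controller's view, with __ffs() approximated by the GCC builtin:
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           unsigned int qc_active = (1u << 0) | (1u << 3) | (1u << 5); // issued tags
 *           unsigned int hw_active = (1u << 3);       // controller: only tag 3 still busy
 *           unsigned int done_mask = qc_active ^ hw_active;             // finished tags
 *
 *           while (done_mask) {
 *                   unsigned int tag = __builtin_ctz(done_mask); // lowest set bit, like __ffs()
 *                   printf("complete tag %u\n", tag);            // prints 0, then 5
 *                   qc_active &= ~(1u << tag);                   // what __ata_qc_complete() does
 *                   done_mask &= ~(1u << tag);
 *           }
 *           return 0;
 *   }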
4838 */ 4839 if (ap->ops->error_handler) { 4840 struct ata_device *dev = qc->dev; 4841 struct ata_eh_info *ehi = &dev->link->eh_info; 4842 4843 if (unlikely(qc->err_mask)) 4844 qc->flags |= ATA_QCFLAG_FAILED; 4845 4846 /* 4847 * Finish internal commands without any further processing 4848 * and always with the result TF filled. 4849 */ 4850 if (unlikely(ata_tag_internal(qc->tag))) { 4851 fill_result_tf(qc); 4852 __ata_qc_complete(qc); 4853 return; 4854 } 4855 4856 /* 4857 * Non-internal qc has failed. Fill the result TF and 4858 * summon EH. 4859 */ 4860 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4861 fill_result_tf(qc); 4862 ata_qc_schedule_eh(qc); 4863 return; 4864 } 4865 4866 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 4867 4868 /* read result TF if requested */ 4869 if (qc->flags & ATA_QCFLAG_RESULT_TF) 4870 fill_result_tf(qc); 4871 4872 /* Some commands need post-processing after successful 4873 * completion. 4874 */ 4875 switch (qc->tf.command) { 4876 case ATA_CMD_SET_FEATURES: 4877 if (qc->tf.feature != SETFEATURES_WC_ON && 4878 qc->tf.feature != SETFEATURES_WC_OFF) 4879 break; 4880 /* fall through */ 4881 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 4882 case ATA_CMD_SET_MULTI: /* multi_count changed */ 4883 /* revalidate device */ 4884 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 4885 ata_port_schedule_eh(ap); 4886 break; 4887 4888 case ATA_CMD_SLEEP: 4889 dev->flags |= ATA_DFLAG_SLEEPING; 4890 break; 4891 } 4892 4893 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) 4894 ata_verify_xfer(qc); 4895 4896 __ata_qc_complete(qc); 4897 } else { 4898 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 4899 return; 4900 4901 /* read result TF if failed or requested */ 4902 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 4903 fill_result_tf(qc); 4904 4905 __ata_qc_complete(qc); 4906 } 4907 } 4908 4909 /** 4910 * ata_qc_complete_multiple - Complete multiple qcs successfully 4911 * @ap: port in question 4912 * @qc_active: new qc_active mask 4913 * 4914 * Complete in-flight commands. This functions is meant to be 4915 * called from low-level driver's interrupt routine to complete 4916 * requests normally. ap->qc_active and @qc_active is compared 4917 * and commands are completed accordingly. 4918 * 4919 * Always use this function when completing multiple NCQ commands 4920 * from IRQ handlers instead of calling ata_qc_complete() 4921 * multiple times to keep IRQ expect status properly in sync. 4922 * 4923 * LOCKING: 4924 * spin_lock_irqsave(host lock) 4925 * 4926 * RETURNS: 4927 * Number of completed commands on success, -errno otherwise. 4928 */ 4929 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) 4930 { 4931 int nr_done = 0; 4932 u32 done_mask; 4933 4934 done_mask = ap->qc_active ^ qc_active; 4935 4936 if (unlikely(done_mask & qc_active)) { 4937 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n", 4938 ap->qc_active, qc_active); 4939 return -EINVAL; 4940 } 4941 4942 while (done_mask) { 4943 struct ata_queued_cmd *qc; 4944 unsigned int tag = __ffs(done_mask); 4945 4946 qc = ata_qc_from_tag(ap, tag); 4947 if (qc) { 4948 ata_qc_complete(qc); 4949 nr_done++; 4950 } 4951 done_mask &= ~(1 << tag); 4952 } 4953 4954 return nr_done; 4955 } 4956 4957 /** 4958 * ata_qc_issue - issue taskfile to device 4959 * @qc: command to issue to device 4960 * 4961 * Prepare an ATA command to submission to device. 
4962 * This includes mapping the data into a DMA-able 4963 * area, filling in the S/G table, and finally 4964 * writing the taskfile to hardware, starting the command. 4965 * 4966 * LOCKING: 4967 * spin_lock_irqsave(host lock) 4968 */ 4969 void ata_qc_issue(struct ata_queued_cmd *qc) 4970 { 4971 struct ata_port *ap = qc->ap; 4972 struct ata_link *link = qc->dev->link; 4973 u8 prot = qc->tf.protocol; 4974 4975 /* Make sure only one non-NCQ command is outstanding. The 4976 * check is skipped for old EH because it reuses active qc to 4977 * request ATAPI sense. 4978 */ 4979 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 4980 4981 if (ata_is_ncq(prot)) { 4982 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); 4983 4984 if (!link->sactive) 4985 ap->nr_active_links++; 4986 link->sactive |= 1 << qc->tag; 4987 } else { 4988 WARN_ON_ONCE(link->sactive); 4989 4990 ap->nr_active_links++; 4991 link->active_tag = qc->tag; 4992 } 4993 4994 qc->flags |= ATA_QCFLAG_ACTIVE; 4995 ap->qc_active |= 1 << qc->tag; 4996 4997 /* 4998 * We guarantee to LLDs that they will have at least one 4999 * non-zero sg if the command is a data command. 5000 */ 5001 if (WARN_ON_ONCE(ata_is_data(prot) && 5002 (!qc->sg || !qc->n_elem || !qc->nbytes))) 5003 goto sys_err; 5004 5005 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5006 (ap->flags & ATA_FLAG_PIO_DMA))) 5007 if (ata_sg_setup(qc)) 5008 goto sys_err; 5009 5010 /* if device is sleeping, schedule reset and abort the link */ 5011 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5012 link->eh_info.action |= ATA_EH_RESET; 5013 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5014 ata_link_abort(link); 5015 return; 5016 } 5017 5018 ap->ops->qc_prep(qc); 5019 5020 qc->err_mask |= ap->ops->qc_issue(qc); 5021 if (unlikely(qc->err_mask)) 5022 goto err; 5023 return; 5024 5025 sys_err: 5026 qc->err_mask |= AC_ERR_SYSTEM; 5027 err: 5028 ata_qc_complete(qc); 5029 } 5030 5031 /** 5032 * sata_scr_valid - test whether SCRs are accessible 5033 * @link: ATA link to test SCR accessibility for 5034 * 5035 * Test whether SCRs are accessible for @link. 5036 * 5037 * LOCKING: 5038 * None. 5039 * 5040 * RETURNS: 5041 * 1 if SCRs are accessible, 0 otherwise. 5042 */ 5043 int sata_scr_valid(struct ata_link *link) 5044 { 5045 struct ata_port *ap = link->ap; 5046 5047 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5048 } 5049 5050 /** 5051 * sata_scr_read - read SCR register of the specified port 5052 * @link: ATA link to read SCR for 5053 * @reg: SCR to read 5054 * @val: Place to store read value 5055 * 5056 * Read SCR register @reg of @link into *@val. This function is 5057 * guaranteed to succeed if @link is ap->link, the cable type of 5058 * the port is SATA and the port implements ->scr_read. 5059 * 5060 * LOCKING: 5061 * None if @link is ap->link. Kernel thread context otherwise. 5062 * 5063 * RETURNS: 5064 * 0 on success, negative errno on failure. 5065 */ 5066 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5067 { 5068 if (ata_is_host_link(link)) { 5069 if (sata_scr_valid(link)) 5070 return link->ap->ops->scr_read(link, reg, val); 5071 return -EOPNOTSUPP; 5072 } 5073 5074 return sata_pmp_scr_read(link, reg, val); 5075 } 5076 5077 /** 5078 * sata_scr_write - write SCR register of the specified port 5079 * @link: ATA link to write SCR for 5080 * @reg: SCR to write 5081 * @val: value to write 5082 * 5083 * Write @val to SCR register @reg of @link. 
This function is 5084 * guaranteed to succeed if @link is ap->link, the cable type of 5085 * the port is SATA and the port implements ->scr_read. 5086 * 5087 * LOCKING: 5088 * None if @link is ap->link. Kernel thread context otherwise. 5089 * 5090 * RETURNS: 5091 * 0 on success, negative errno on failure. 5092 */ 5093 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5094 { 5095 if (ata_is_host_link(link)) { 5096 if (sata_scr_valid(link)) 5097 return link->ap->ops->scr_write(link, reg, val); 5098 return -EOPNOTSUPP; 5099 } 5100 5101 return sata_pmp_scr_write(link, reg, val); 5102 } 5103 5104 /** 5105 * sata_scr_write_flush - write SCR register of the specified port and flush 5106 * @link: ATA link to write SCR for 5107 * @reg: SCR to write 5108 * @val: value to write 5109 * 5110 * This function is identical to sata_scr_write() except that this 5111 * function performs flush after writing to the register. 5112 * 5113 * LOCKING: 5114 * None if @link is ap->link. Kernel thread context otherwise. 5115 * 5116 * RETURNS: 5117 * 0 on success, negative errno on failure. 5118 */ 5119 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5120 { 5121 if (ata_is_host_link(link)) { 5122 int rc; 5123 5124 if (sata_scr_valid(link)) { 5125 rc = link->ap->ops->scr_write(link, reg, val); 5126 if (rc == 0) 5127 rc = link->ap->ops->scr_read(link, reg, &val); 5128 return rc; 5129 } 5130 return -EOPNOTSUPP; 5131 } 5132 5133 return sata_pmp_scr_write(link, reg, val); 5134 } 5135 5136 /** 5137 * ata_phys_link_online - test whether the given link is online 5138 * @link: ATA link to test 5139 * 5140 * Test whether @link is online. Note that this function returns 5141 * 0 if online status of @link cannot be obtained, so 5142 * ata_link_online(link) != !ata_link_offline(link). 5143 * 5144 * LOCKING: 5145 * None. 5146 * 5147 * RETURNS: 5148 * True if the port online status is available and online. 5149 */ 5150 bool ata_phys_link_online(struct ata_link *link) 5151 { 5152 u32 sstatus; 5153 5154 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5155 ata_sstatus_online(sstatus)) 5156 return true; 5157 return false; 5158 } 5159 5160 /** 5161 * ata_phys_link_offline - test whether the given link is offline 5162 * @link: ATA link to test 5163 * 5164 * Test whether @link is offline. Note that this function 5165 * returns 0 if offline status of @link cannot be obtained, so 5166 * ata_link_online(link) != !ata_link_offline(link). 5167 * 5168 * LOCKING: 5169 * None. 5170 * 5171 * RETURNS: 5172 * True if the port offline status is available and offline. 5173 */ 5174 bool ata_phys_link_offline(struct ata_link *link) 5175 { 5176 u32 sstatus; 5177 5178 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5179 !ata_sstatus_online(sstatus)) 5180 return true; 5181 return false; 5182 } 5183 5184 /** 5185 * ata_link_online - test whether the given link is online 5186 * @link: ATA link to test 5187 * 5188 * Test whether @link is online. This is identical to 5189 * ata_phys_link_online() when there's no slave link. When 5190 * there's a slave link, this function should only be called on 5191 * the master link and will return true if any of M/S links is 5192 * online. 5193 * 5194 * LOCKING: 5195 * None. 5196 * 5197 * RETURNS: 5198 * True if the port online status is available and online. 
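 *
 * Illustrative aside: a minimal sketch (plain C, not kernel code) of the
 * master/slave combination performed below, showing why an unknown PHY state
 * makes ata_link_online() and ata_link_offline() both return false rather
 * than being each other's negation.
 *
 *   #include <stdbool.h>
 *
 *   enum phys { PHYS_UNKNOWN, PHYS_ONLINE, PHYS_OFFLINE };
 *
 *   static bool link_online(enum phys master, enum phys slave, bool has_slave)
 *   {
 *           return master == PHYS_ONLINE ||
 *                  (has_slave && slave == PHYS_ONLINE);    // any of M/S online
 *   }
 *
 *   static bool link_offline(enum phys master, enum phys slave, bool has_slave)
 *   {
 *           return master == PHYS_OFFLINE &&
 *                  (!has_slave || slave == PHYS_OFFLINE);  // all of M/S offline
 *   }
 *   // With master == PHYS_UNKNOWN and no slave link, both helpers return
 *   // false, mirroring ata_link_online(link) != !ata_link_offline(link).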
5199 */ 5200 bool ata_link_online(struct ata_link *link) 5201 { 5202 struct ata_link *slave = link->ap->slave_link; 5203 5204 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5205 5206 return ata_phys_link_online(link) || 5207 (slave && ata_phys_link_online(slave)); 5208 } 5209 5210 /** 5211 * ata_link_offline - test whether the given link is offline 5212 * @link: ATA link to test 5213 * 5214 * Test whether @link is offline. This is identical to 5215 * ata_phys_link_offline() when there's no slave link. When 5216 * there's a slave link, this function should only be called on 5217 * the master link and will return true if both M/S links are 5218 * offline. 5219 * 5220 * LOCKING: 5221 * None. 5222 * 5223 * RETURNS: 5224 * True if the port offline status is available and offline. 5225 */ 5226 bool ata_link_offline(struct ata_link *link) 5227 { 5228 struct ata_link *slave = link->ap->slave_link; 5229 5230 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5231 5232 return ata_phys_link_offline(link) && 5233 (!slave || ata_phys_link_offline(slave)); 5234 } 5235 5236 #ifdef CONFIG_PM 5237 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, 5238 unsigned int action, unsigned int ehi_flags, 5239 int wait) 5240 { 5241 unsigned long flags; 5242 int i, rc; 5243 5244 for (i = 0; i < host->n_ports; i++) { 5245 struct ata_port *ap = host->ports[i]; 5246 struct ata_link *link; 5247 5248 /* Previous resume operation might still be in 5249 * progress. Wait for PM_PENDING to clear. 5250 */ 5251 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5252 ata_port_wait_eh(ap); 5253 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5254 } 5255 5256 /* request PM ops to EH */ 5257 spin_lock_irqsave(ap->lock, flags); 5258 5259 ap->pm_mesg = mesg; 5260 if (wait) { 5261 rc = 0; 5262 ap->pm_result = &rc; 5263 } 5264 5265 ap->pflags |= ATA_PFLAG_PM_PENDING; 5266 ata_for_each_link(link, ap, HOST_FIRST) { 5267 link->eh_info.action |= action; 5268 link->eh_info.flags |= ehi_flags; 5269 } 5270 5271 ata_port_schedule_eh(ap); 5272 5273 spin_unlock_irqrestore(ap->lock, flags); 5274 5275 /* wait and check result */ 5276 if (wait) { 5277 ata_port_wait_eh(ap); 5278 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5279 if (rc) 5280 return rc; 5281 } 5282 } 5283 5284 return 0; 5285 } 5286 5287 /** 5288 * ata_host_suspend - suspend host 5289 * @host: host to suspend 5290 * @mesg: PM message 5291 * 5292 * Suspend @host. Actual operation is performed by EH. This 5293 * function requests EH to perform PM operations and waits for EH 5294 * to finish. 5295 * 5296 * LOCKING: 5297 * Kernel thread context (may sleep). 5298 * 5299 * RETURNS: 5300 * 0 on success, -errno on failure. 5301 */ 5302 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5303 { 5304 unsigned int ehi_flags = ATA_EHI_QUIET; 5305 int rc; 5306 5307 /* 5308 * On some hardware, device fails to respond after spun down 5309 * for suspend. As the device won't be used before being 5310 * resumed, we don't need to touch the device. Ask EH to skip 5311 * the usual stuff and proceed directly to suspend. 5312 * 5313 * http://thread.gmane.org/gmane.linux.ide/46764 5314 */ 5315 if (mesg.event == PM_EVENT_SUSPEND) 5316 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY; 5317 5318 rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1); 5319 if (rc == 0) 5320 host->dev->power.power_state = mesg; 5321 return rc; 5322 } 5323 5324 /** 5325 * ata_host_resume - resume host 5326 * @host: host to resume 5327 * 5328 * Resume @host. 
Actual operation is performed by EH. This 5329 * function requests EH to perform PM operations and returns. 5330 * Note that all resume operations are performed parallelly. 5331 * 5332 * LOCKING: 5333 * Kernel thread context (may sleep). 5334 */ 5335 void ata_host_resume(struct ata_host *host) 5336 { 5337 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET, 5338 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 5339 host->dev->power.power_state = PMSG_ON; 5340 } 5341 #endif 5342 5343 /** 5344 * ata_dev_init - Initialize an ata_device structure 5345 * @dev: Device structure to initialize 5346 * 5347 * Initialize @dev in preparation for probing. 5348 * 5349 * LOCKING: 5350 * Inherited from caller. 5351 */ 5352 void ata_dev_init(struct ata_device *dev) 5353 { 5354 struct ata_link *link = ata_dev_phys_link(dev); 5355 struct ata_port *ap = link->ap; 5356 unsigned long flags; 5357 5358 /* SATA spd limit is bound to the attached device, reset together */ 5359 link->sata_spd_limit = link->hw_sata_spd_limit; 5360 link->sata_spd = 0; 5361 5362 /* High bits of dev->flags are used to record warm plug 5363 * requests which occur asynchronously. Synchronize using 5364 * host lock. 5365 */ 5366 spin_lock_irqsave(ap->lock, flags); 5367 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5368 dev->horkage = 0; 5369 spin_unlock_irqrestore(ap->lock, flags); 5370 5371 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5372 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5373 dev->pio_mask = UINT_MAX; 5374 dev->mwdma_mask = UINT_MAX; 5375 dev->udma_mask = UINT_MAX; 5376 } 5377 5378 /** 5379 * ata_link_init - Initialize an ata_link structure 5380 * @ap: ATA port link is attached to 5381 * @link: Link structure to initialize 5382 * @pmp: Port multiplier port number 5383 * 5384 * Initialize @link. 5385 * 5386 * LOCKING: 5387 * Kernel thread context (may sleep) 5388 */ 5389 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5390 { 5391 int i; 5392 5393 /* clear everything except for devices */ 5394 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, 5395 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); 5396 5397 link->ap = ap; 5398 link->pmp = pmp; 5399 link->active_tag = ATA_TAG_POISON; 5400 link->hw_sata_spd_limit = UINT_MAX; 5401 5402 /* can't use iterator, ap isn't initialized yet */ 5403 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5404 struct ata_device *dev = &link->device[i]; 5405 5406 dev->link = link; 5407 dev->devno = dev - link->device; 5408 #ifdef CONFIG_ATA_ACPI 5409 dev->gtf_filter = ata_acpi_gtf_filter; 5410 #endif 5411 ata_dev_init(dev); 5412 } 5413 } 5414 5415 /** 5416 * sata_link_init_spd - Initialize link->sata_spd_limit 5417 * @link: Link to configure sata_spd_limit for 5418 * 5419 * Initialize @link->[hw_]sata_spd_limit to the currently 5420 * configured value. 5421 * 5422 * LOCKING: 5423 * Kernel thread context (may sleep). 5424 * 5425 * RETURNS: 5426 * 0 on success, -errno on failure. 5427 */ 5428 int sata_link_init_spd(struct ata_link *link) 5429 { 5430 u8 spd; 5431 int rc; 5432 5433 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5434 if (rc) 5435 return rc; 5436 5437 spd = (link->saved_scontrol >> 4) & 0xf; 5438 if (spd) 5439 link->hw_sata_spd_limit &= (1 << spd) - 1; 5440 5441 ata_force_link_limits(link); 5442 5443 link->sata_spd_limit = link->hw_sata_spd_limit; 5444 5445 return 0; 5446 } 5447 5448 /** 5449 * ata_port_alloc - allocate and initialize basic ATA port resources 5450 * @host: ATA host this allocated port belongs to 5451 * 5452 * Allocate and initialize basic ATA port resources. 
5453 * 5454 * RETURNS: 5455 * Allocate ATA port on success, NULL on failure. 5456 * 5457 * LOCKING: 5458 * Inherited from calling layer (may sleep). 5459 */ 5460 struct ata_port *ata_port_alloc(struct ata_host *host) 5461 { 5462 struct ata_port *ap; 5463 5464 DPRINTK("ENTER\n"); 5465 5466 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5467 if (!ap) 5468 return NULL; 5469 5470 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; 5471 ap->lock = &host->lock; 5472 ap->print_id = -1; 5473 ap->host = host; 5474 ap->dev = host->dev; 5475 5476 #if defined(ATA_VERBOSE_DEBUG) 5477 /* turn on all debugging levels */ 5478 ap->msg_enable = 0x00FF; 5479 #elif defined(ATA_DEBUG) 5480 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5481 #else 5482 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5483 #endif 5484 5485 mutex_init(&ap->scsi_scan_mutex); 5486 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5487 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5488 INIT_LIST_HEAD(&ap->eh_done_q); 5489 init_waitqueue_head(&ap->eh_wait_q); 5490 init_completion(&ap->park_req_pending); 5491 init_timer_deferrable(&ap->fastdrain_timer); 5492 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 5493 ap->fastdrain_timer.data = (unsigned long)ap; 5494 5495 ap->cbl = ATA_CBL_NONE; 5496 5497 ata_link_init(ap, &ap->link, 0); 5498 5499 #ifdef ATA_IRQ_TRAP 5500 ap->stats.unhandled_irq = 1; 5501 ap->stats.idle_irq = 1; 5502 #endif 5503 ata_sff_port_init(ap); 5504 5505 return ap; 5506 } 5507 5508 static void ata_host_release(struct device *gendev, void *res) 5509 { 5510 struct ata_host *host = dev_get_drvdata(gendev); 5511 int i; 5512 5513 for (i = 0; i < host->n_ports; i++) { 5514 struct ata_port *ap = host->ports[i]; 5515 5516 if (!ap) 5517 continue; 5518 5519 if (ap->scsi_host) 5520 scsi_host_put(ap->scsi_host); 5521 5522 kfree(ap->pmp_link); 5523 kfree(ap->slave_link); 5524 kfree(ap); 5525 host->ports[i] = NULL; 5526 } 5527 5528 dev_set_drvdata(gendev, NULL); 5529 } 5530 5531 /** 5532 * ata_host_alloc - allocate and init basic ATA host resources 5533 * @dev: generic device this host is associated with 5534 * @max_ports: maximum number of ATA ports associated with this host 5535 * 5536 * Allocate and initialize basic ATA host resources. LLD calls 5537 * this function to allocate a host, initializes it fully and 5538 * attaches it using ata_host_register(). 5539 * 5540 * @max_ports ports are allocated and host->n_ports is 5541 * initialized to @max_ports. The caller is allowed to decrease 5542 * host->n_ports before calling ata_host_register(). The unused 5543 * ports will be automatically freed on registration. 5544 * 5545 * RETURNS: 5546 * Allocate ATA host on success, NULL on failure. 5547 * 5548 * LOCKING: 5549 * Inherited from calling layer (may sleep). 
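 *
 * Illustrative aside: the allocation below reserves a single block holding
 * the host followed by a NULL-terminated array of port pointers. A
 * standalone sketch of the same trailing-array sizing idiom with a toy
 * struct (not the real struct ata_host layout):
 *
 *   #include <stdlib.h>
 *
 *   struct toy_host {
 *           int n_ports;
 *           void *ports[];           // trailing array, same idiom the host uses
 *   };
 *
 *   static struct toy_host *toy_host_alloc(int max_ports)
 *   {
 *           // +1 keeps a NULL terminator after the last port pointer
 *           size_t sz = sizeof(struct toy_host) + (max_ports + 1) * sizeof(void *);
 *           struct toy_host *host = calloc(1, sz);
 *
 *           if (host)
 *                   host->n_ports = max_ports;
 *           return host;
 *   }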
5550 */ 5551 struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 5552 { 5553 struct ata_host *host; 5554 size_t sz; 5555 int i; 5556 5557 DPRINTK("ENTER\n"); 5558 5559 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 5560 return NULL; 5561 5562 /* alloc a container for our list of ATA ports (buses) */ 5563 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 5564 /* alloc a container for our list of ATA ports (buses) */ 5565 host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 5566 if (!host) 5567 goto err_out; 5568 5569 devres_add(dev, host); 5570 dev_set_drvdata(dev, host); 5571 5572 spin_lock_init(&host->lock); 5573 mutex_init(&host->eh_mutex); 5574 host->dev = dev; 5575 host->n_ports = max_ports; 5576 5577 /* allocate ports bound to this host */ 5578 for (i = 0; i < max_ports; i++) { 5579 struct ata_port *ap; 5580 5581 ap = ata_port_alloc(host); 5582 if (!ap) 5583 goto err_out; 5584 5585 ap->port_no = i; 5586 host->ports[i] = ap; 5587 } 5588 5589 devres_remove_group(dev, NULL); 5590 return host; 5591 5592 err_out: 5593 devres_release_group(dev, NULL); 5594 return NULL; 5595 } 5596 5597 /** 5598 * ata_host_alloc_pinfo - alloc host and init with port_info array 5599 * @dev: generic device this host is associated with 5600 * @ppi: array of ATA port_info to initialize host with 5601 * @n_ports: number of ATA ports attached to this host 5602 * 5603 * Allocate ATA host and initialize with info from @ppi. If NULL 5604 * terminated, @ppi may contain fewer entries than @n_ports. The 5605 * last entry will be used for the remaining ports. 5606 * 5607 * RETURNS: 5608 * Allocate ATA host on success, NULL on failure. 5609 * 5610 * LOCKING: 5611 * Inherited from calling layer (may sleep). 5612 */ 5613 struct ata_host *ata_host_alloc_pinfo(struct device *dev, 5614 const struct ata_port_info * const * ppi, 5615 int n_ports) 5616 { 5617 const struct ata_port_info *pi; 5618 struct ata_host *host; 5619 int i, j; 5620 5621 host = ata_host_alloc(dev, n_ports); 5622 if (!host) 5623 return NULL; 5624 5625 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 5626 struct ata_port *ap = host->ports[i]; 5627 5628 if (ppi[j]) 5629 pi = ppi[j++]; 5630 5631 ap->pio_mask = pi->pio_mask; 5632 ap->mwdma_mask = pi->mwdma_mask; 5633 ap->udma_mask = pi->udma_mask; 5634 ap->flags |= pi->flags; 5635 ap->link.flags |= pi->link_flags; 5636 ap->ops = pi->port_ops; 5637 5638 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 5639 host->ops = pi->port_ops; 5640 } 5641 5642 return host; 5643 } 5644 5645 /** 5646 * ata_slave_link_init - initialize slave link 5647 * @ap: port to initialize slave link for 5648 * 5649 * Create and initialize slave link for @ap. This enables slave 5650 * link handling on the port. 5651 * 5652 * In libata, a port contains links and a link contains devices. 5653 * There is single host link but if a PMP is attached to it, 5654 * there can be multiple fan-out links. On SATA, there's usually 5655 * a single device connected to a link but PATA and SATA 5656 * controllers emulating TF based interface can have two - master 5657 * and slave. 5658 * 5659 * However, there are a few controllers which don't fit into this 5660 * abstraction too well - SATA controllers which emulate TF 5661 * interface with both master and slave devices but also have 5662 * separate SCR register sets for each device. These controllers 5663 * need separate links for physical link handling 5664 * (e.g. 
onlineness, link speed) but should be treated like a
5665 * traditional M/S controller for everything else (e.g. command
5666 * issue, softreset).
5667 *
5668 * slave_link is libata's way of handling this class of
5669 * controllers without impacting core layer too much. For
5670 * anything other than physical link handling, the default host
5671 * link is used for both master and slave. For physical link
5672 * handling, separate @ap->slave_link is used. All dirty details
5673 * are implemented inside libata core layer. From LLD's POV, the
5674 * only difference is that prereset, hardreset and postreset are
5675 * called once more for the slave link, so the reset sequence
5676 * looks like the following.
5677 *
5678 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5679 * softreset(M) -> postreset(M) -> postreset(S)
5680 *
5681 * Note that softreset is called only for the master. Softreset
5682 * resets both M/S by definition, so SRST on master should handle
5683 * both (the standard method will work just fine).
5684 *
5685 * LOCKING:
5686 * Should be called before host is registered.
5687 *
5688 * RETURNS:
5689 * 0 on success, -errno on failure.
5690 */
5691 int ata_slave_link_init(struct ata_port *ap)
5692 {
5693 struct ata_link *link;
5694
5695 WARN_ON(ap->slave_link);
5696 WARN_ON(ap->flags & ATA_FLAG_PMP);
5697
5698 link = kzalloc(sizeof(*link), GFP_KERNEL);
5699 if (!link)
5700 return -ENOMEM;
5701
5702 ata_link_init(ap, link, 1);
5703 ap->slave_link = link;
5704 return 0;
5705 }
5706
5707 static void ata_host_stop(struct device *gendev, void *res)
5708 {
5709 struct ata_host *host = dev_get_drvdata(gendev);
5710 int i;
5711
5712 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5713
5714 for (i = 0; i < host->n_ports; i++) {
5715 struct ata_port *ap = host->ports[i];
5716
5717 if (ap->ops->port_stop)
5718 ap->ops->port_stop(ap);
5719 }
5720
5721 if (host->ops->host_stop)
5722 host->ops->host_stop(host);
5723 }
5724
5725 /**
5726 * ata_finalize_port_ops - finalize ata_port_operations
5727 * @ops: ata_port_operations to finalize
5728 *
5729 * An ata_port_operations can inherit from another ops and that
5730 * ops can again inherit from another. This can go on as many
5731 * times as necessary as long as there is no loop in the
5732 * inheritance chain.
5733 *
5734 * Ops tables are finalized when the host is started. NULL or
5735 * unspecified entries are inherited from the closest ancestor
5736 * which has the method and the entry is populated with it.
5737 * After finalization, the ops table directly points to all the
5738 * methods and ->inherits is no longer necessary and cleared.
5739 *
5740 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5741 *
5742 * LOCKING:
5743 * None.
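 *
 * Illustrative aside: finalization treats the ops structure as a flat array
 * of pointer-sized slots, stopping at ->inherits. A standalone sketch of the
 * fill-empty-slots-from-ancestors walk with toy ops tables (ATA_OP_NULL and
 * IS_ERR handling omitted):
 *
 *   struct toy_ops {
 *           void (*reset)(void);
 *           void (*issue)(void);
 *           const struct toy_ops *inherits;
 *   };
 *
 *   static void toy_finalize(struct toy_ops *ops)
 *   {
 *           void **begin = (void **)ops;
 *           void **end = (void **)&ops->inherits;       // slots before ->inherits
 *           const struct toy_ops *cur;
 *
 *           for (cur = ops->inherits; cur; cur = cur->inherits) {
 *                   void **inherit = (void **)cur;
 *                   void **pp;
 *
 *                   for (pp = begin; pp < end; pp++, inherit++)
 *                           if (!*pp)
 *                                   *pp = *inherit;     // closest ancestor wins
 *           }
 *           ops->inherits = 0;                          // chain no longer needed
 *   }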
5744 */ 5745 static void ata_finalize_port_ops(struct ata_port_operations *ops) 5746 { 5747 static DEFINE_SPINLOCK(lock); 5748 const struct ata_port_operations *cur; 5749 void **begin = (void **)ops; 5750 void **end = (void **)&ops->inherits; 5751 void **pp; 5752 5753 if (!ops || !ops->inherits) 5754 return; 5755 5756 spin_lock(&lock); 5757 5758 for (cur = ops->inherits; cur; cur = cur->inherits) { 5759 void **inherit = (void **)cur; 5760 5761 for (pp = begin; pp < end; pp++, inherit++) 5762 if (!*pp) 5763 *pp = *inherit; 5764 } 5765 5766 for (pp = begin; pp < end; pp++) 5767 if (IS_ERR(*pp)) 5768 *pp = NULL; 5769 5770 ops->inherits = NULL; 5771 5772 spin_unlock(&lock); 5773 } 5774 5775 /** 5776 * ata_host_start - start and freeze ports of an ATA host 5777 * @host: ATA host to start ports for 5778 * 5779 * Start and then freeze ports of @host. Started status is 5780 * recorded in host->flags, so this function can be called 5781 * multiple times. Ports are guaranteed to get started only 5782 * once. If host->ops isn't initialized yet, its set to the 5783 * first non-dummy port ops. 5784 * 5785 * LOCKING: 5786 * Inherited from calling layer (may sleep). 5787 * 5788 * RETURNS: 5789 * 0 if all ports are started successfully, -errno otherwise. 5790 */ 5791 int ata_host_start(struct ata_host *host) 5792 { 5793 int have_stop = 0; 5794 void *start_dr = NULL; 5795 int i, rc; 5796 5797 if (host->flags & ATA_HOST_STARTED) 5798 return 0; 5799 5800 ata_finalize_port_ops(host->ops); 5801 5802 for (i = 0; i < host->n_ports; i++) { 5803 struct ata_port *ap = host->ports[i]; 5804 5805 ata_finalize_port_ops(ap->ops); 5806 5807 if (!host->ops && !ata_port_is_dummy(ap)) 5808 host->ops = ap->ops; 5809 5810 if (ap->ops->port_stop) 5811 have_stop = 1; 5812 } 5813 5814 if (host->ops->host_stop) 5815 have_stop = 1; 5816 5817 if (have_stop) { 5818 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 5819 if (!start_dr) 5820 return -ENOMEM; 5821 } 5822 5823 for (i = 0; i < host->n_ports; i++) { 5824 struct ata_port *ap = host->ports[i]; 5825 5826 if (ap->ops->port_start) { 5827 rc = ap->ops->port_start(ap); 5828 if (rc) { 5829 if (rc != -ENODEV) 5830 dev_err(host->dev, 5831 "failed to start port %d (errno=%d)\n", 5832 i, rc); 5833 goto err_out; 5834 } 5835 } 5836 ata_eh_freeze_port(ap); 5837 } 5838 5839 if (start_dr) 5840 devres_add(host->dev, start_dr); 5841 host->flags |= ATA_HOST_STARTED; 5842 return 0; 5843 5844 err_out: 5845 while (--i >= 0) { 5846 struct ata_port *ap = host->ports[i]; 5847 5848 if (ap->ops->port_stop) 5849 ap->ops->port_stop(ap); 5850 } 5851 devres_free(start_dr); 5852 return rc; 5853 } 5854 5855 /** 5856 * ata_sas_host_init - Initialize a host struct 5857 * @host: host to initialize 5858 * @dev: device host is attached to 5859 * @flags: host flags 5860 * @ops: port_ops 5861 * 5862 * LOCKING: 5863 * PCI/etc. bus probe sem. 
5864 * 5865 */ 5866 /* KILLME - the only user left is ipr */ 5867 void ata_host_init(struct ata_host *host, struct device *dev, 5868 unsigned long flags, struct ata_port_operations *ops) 5869 { 5870 spin_lock_init(&host->lock); 5871 mutex_init(&host->eh_mutex); 5872 host->dev = dev; 5873 host->flags = flags; 5874 host->ops = ops; 5875 } 5876 5877 int ata_port_probe(struct ata_port *ap) 5878 { 5879 int rc = 0; 5880 5881 /* probe */ 5882 if (ap->ops->error_handler) { 5883 struct ata_eh_info *ehi = &ap->link.eh_info; 5884 unsigned long flags; 5885 5886 /* kick EH for boot probing */ 5887 spin_lock_irqsave(ap->lock, flags); 5888 5889 ehi->probe_mask |= ATA_ALL_DEVICES; 5890 ehi->action |= ATA_EH_RESET; 5891 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 5892 5893 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 5894 ap->pflags |= ATA_PFLAG_LOADING; 5895 ata_port_schedule_eh(ap); 5896 5897 spin_unlock_irqrestore(ap->lock, flags); 5898 5899 /* wait for EH to finish */ 5900 ata_port_wait_eh(ap); 5901 } else { 5902 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 5903 rc = ata_bus_probe(ap); 5904 DPRINTK("ata%u: bus probe end\n", ap->print_id); 5905 } 5906 return rc; 5907 } 5908 5909 5910 static void async_port_probe(void *data, async_cookie_t cookie) 5911 { 5912 struct ata_port *ap = data; 5913 5914 /* 5915 * If we're not allowed to scan this host in parallel, 5916 * we need to wait until all previous scans have completed 5917 * before going further. 5918 * Jeff Garzik says this is only within a controller, so we 5919 * don't need to wait for port 0, only for later ports. 5920 */ 5921 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) 5922 async_synchronize_cookie(cookie); 5923 5924 (void)ata_port_probe(ap); 5925 5926 /* in order to keep device order, we need to synchronize at this point */ 5927 async_synchronize_cookie(cookie); 5928 5929 ata_scsi_scan_host(ap, 1); 5930 } 5931 5932 /** 5933 * ata_host_register - register initialized ATA host 5934 * @host: ATA host to register 5935 * @sht: template for SCSI host 5936 * 5937 * Register initialized ATA host. @host is allocated using 5938 * ata_host_alloc() and fully initialized by LLD. This function 5939 * starts ports, registers @host with ATA and SCSI layers and 5940 * probe registered devices. 5941 * 5942 * LOCKING: 5943 * Inherited from calling layer (may sleep). 5944 * 5945 * RETURNS: 5946 * 0 on success, -errno otherwise. 5947 */ 5948 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 5949 { 5950 int i, rc; 5951 5952 /* host must have been started */ 5953 if (!(host->flags & ATA_HOST_STARTED)) { 5954 dev_err(host->dev, "BUG: trying to register unstarted host\n"); 5955 WARN_ON(1); 5956 return -EINVAL; 5957 } 5958 5959 /* Blow away unused ports. This happens when LLD can't 5960 * determine the exact number of ports to allocate at 5961 * allocation time. 
5962 */ 5963 for (i = host->n_ports; host->ports[i]; i++) 5964 kfree(host->ports[i]); 5965 5966 /* give ports names and add SCSI hosts */ 5967 for (i = 0; i < host->n_ports; i++) 5968 host->ports[i]->print_id = ata_print_id++; 5969 5970 5971 /* Create associated sysfs transport objects */ 5972 for (i = 0; i < host->n_ports; i++) { 5973 rc = ata_tport_add(host->dev,host->ports[i]); 5974 if (rc) { 5975 goto err_tadd; 5976 } 5977 } 5978 5979 rc = ata_scsi_add_hosts(host, sht); 5980 if (rc) 5981 goto err_tadd; 5982 5983 /* associate with ACPI nodes */ 5984 ata_acpi_associate(host); 5985 5986 /* set cable, sata_spd_limit and report */ 5987 for (i = 0; i < host->n_ports; i++) { 5988 struct ata_port *ap = host->ports[i]; 5989 unsigned long xfer_mask; 5990 5991 /* set SATA cable type if still unset */ 5992 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 5993 ap->cbl = ATA_CBL_SATA; 5994 5995 /* init sata_spd_limit to the current value */ 5996 sata_link_init_spd(&ap->link); 5997 if (ap->slave_link) 5998 sata_link_init_spd(ap->slave_link); 5999 6000 /* print per-port info to dmesg */ 6001 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6002 ap->udma_mask); 6003 6004 if (!ata_port_is_dummy(ap)) { 6005 ata_port_info(ap, "%cATA max %s %s\n", 6006 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P', 6007 ata_mode_string(xfer_mask), 6008 ap->link.eh_info.desc); 6009 ata_ehi_clear_desc(&ap->link.eh_info); 6010 } else 6011 ata_port_info(ap, "DUMMY\n"); 6012 } 6013 6014 /* perform each probe asynchronously */ 6015 for (i = 0; i < host->n_ports; i++) { 6016 struct ata_port *ap = host->ports[i]; 6017 async_schedule(async_port_probe, ap); 6018 } 6019 6020 return 0; 6021 6022 err_tadd: 6023 while (--i >= 0) { 6024 ata_tport_delete(host->ports[i]); 6025 } 6026 return rc; 6027 6028 } 6029 6030 /** 6031 * ata_host_activate - start host, request IRQ and register it 6032 * @host: target ATA host 6033 * @irq: IRQ to request 6034 * @irq_handler: irq_handler used when requesting IRQ 6035 * @irq_flags: irq_flags used when requesting IRQ 6036 * @sht: scsi_host_template to use when registering the host 6037 * 6038 * After allocating an ATA host and initializing it, most libata 6039 * LLDs perform three steps to activate the host - start host, 6040 * request IRQ and register it. This helper takes necessasry 6041 * arguments and performs the three steps in one go. 6042 * 6043 * An invalid IRQ skips the IRQ registration and expects the host to 6044 * have set polling mode on the port. In this case, @irq_handler 6045 * should be NULL. 6046 * 6047 * LOCKING: 6048 * Inherited from calling layer (may sleep). 6049 * 6050 * RETURNS: 6051 * 0 on success, -errno otherwise. 
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}

/**
 *	ata_port_detach - Detach ATA port in preparation for device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	ata_tport_delete(ap);

	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}

/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}

#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
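 *
 *	A minimal sketch (hypothetical "foo" driver, not from this file)
 *	of how a PCI LLD typically wires this callback together with the
 *	suspend/resume helpers defined below:
 *
 *		static struct pci_driver foo_pci_driver = {
 *			.name		= "foo",
 *			.id_table	= foo_pci_ids,
 *			.probe		= foo_init_one,
 *			.remove		= ata_pci_remove_one,
 *		#ifdef CONFIG_PM
 *			.suspend	= ata_pci_device_suspend,
 *			.resume		= ata_pci_device_resume,
 *		#endif
 *		};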
6161 */ 6162 void ata_pci_remove_one(struct pci_dev *pdev) 6163 { 6164 struct device *dev = &pdev->dev; 6165 struct ata_host *host = dev_get_drvdata(dev); 6166 6167 ata_host_detach(host); 6168 } 6169 6170 /* move to PCI subsystem */ 6171 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 6172 { 6173 unsigned long tmp = 0; 6174 6175 switch (bits->width) { 6176 case 1: { 6177 u8 tmp8 = 0; 6178 pci_read_config_byte(pdev, bits->reg, &tmp8); 6179 tmp = tmp8; 6180 break; 6181 } 6182 case 2: { 6183 u16 tmp16 = 0; 6184 pci_read_config_word(pdev, bits->reg, &tmp16); 6185 tmp = tmp16; 6186 break; 6187 } 6188 case 4: { 6189 u32 tmp32 = 0; 6190 pci_read_config_dword(pdev, bits->reg, &tmp32); 6191 tmp = tmp32; 6192 break; 6193 } 6194 6195 default: 6196 return -EINVAL; 6197 } 6198 6199 tmp &= bits->mask; 6200 6201 return (tmp == bits->val) ? 1 : 0; 6202 } 6203 6204 #ifdef CONFIG_PM 6205 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 6206 { 6207 pci_save_state(pdev); 6208 pci_disable_device(pdev); 6209 6210 if (mesg.event & PM_EVENT_SLEEP) 6211 pci_set_power_state(pdev, PCI_D3hot); 6212 } 6213 6214 int ata_pci_device_do_resume(struct pci_dev *pdev) 6215 { 6216 int rc; 6217 6218 pci_set_power_state(pdev, PCI_D0); 6219 pci_restore_state(pdev); 6220 6221 rc = pcim_enable_device(pdev); 6222 if (rc) { 6223 dev_err(&pdev->dev, 6224 "failed to enable device after resume (%d)\n", rc); 6225 return rc; 6226 } 6227 6228 pci_set_master(pdev); 6229 return 0; 6230 } 6231 6232 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 6233 { 6234 struct ata_host *host = dev_get_drvdata(&pdev->dev); 6235 int rc = 0; 6236 6237 rc = ata_host_suspend(host, mesg); 6238 if (rc) 6239 return rc; 6240 6241 ata_pci_device_do_suspend(pdev, mesg); 6242 6243 return 0; 6244 } 6245 6246 int ata_pci_device_resume(struct pci_dev *pdev) 6247 { 6248 struct ata_host *host = dev_get_drvdata(&pdev->dev); 6249 int rc; 6250 6251 rc = ata_pci_device_do_resume(pdev); 6252 if (rc == 0) 6253 ata_host_resume(host); 6254 return rc; 6255 } 6256 #endif /* CONFIG_PM */ 6257 6258 #endif /* CONFIG_PCI */ 6259 6260 static int __init ata_parse_force_one(char **cur, 6261 struct ata_force_ent *force_ent, 6262 const char **reason) 6263 { 6264 /* FIXME: Currently, there's no way to tag init const data and 6265 * using __initdata causes build failure on some versions of 6266 * gcc. Once __initdataconst is implemented, add const to the 6267 * following structure. 
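	 *
	 * The table below also documents the values accepted by the
	 * libata.force boot parameter.  As an illustration of the syntax
	 * implemented by this parser (Documentation/kernel-parameters.txt
	 * is the authoritative reference), a setting such as
	 * "libata.force=1:1.5Gbps,2.00:noncq" would limit port 1 to
	 * 1.5Gbps and turn off NCQ on device 0 of port 2.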
6268 */ 6269 static struct ata_force_param force_tbl[] __initdata = { 6270 { "40c", .cbl = ATA_CBL_PATA40 }, 6271 { "80c", .cbl = ATA_CBL_PATA80 }, 6272 { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, 6273 { "unk", .cbl = ATA_CBL_PATA_UNK }, 6274 { "ign", .cbl = ATA_CBL_PATA_IGN }, 6275 { "sata", .cbl = ATA_CBL_SATA }, 6276 { "1.5Gbps", .spd_limit = 1 }, 6277 { "3.0Gbps", .spd_limit = 2 }, 6278 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 6279 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 6280 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, 6281 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 6282 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 6283 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 6284 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, 6285 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, 6286 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, 6287 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, 6288 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, 6289 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, 6290 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, 6291 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, 6292 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, 6293 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6294 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6295 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6296 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6297 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6298 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6299 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6300 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6301 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6302 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6303 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6304 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6305 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6306 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6307 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6308 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6309 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6310 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6311 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6312 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6313 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6314 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, 6315 { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, 6316 { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, 6317 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6318 }; 6319 char *start = *cur, *p = *cur; 6320 char *id, *val, *endp; 6321 const struct ata_force_param *match_fp = NULL; 6322 int nr_matches = 0, i; 6323 6324 /* find where this param ends and update *cur */ 6325 while (*p != '\0' && *p != ',') 6326 p++; 6327 6328 if (*p == '\0') 6329 *cur = p; 6330 else 6331 *cur = p + 1; 6332 6333 *p = '\0'; 6334 6335 /* parse */ 6336 p = strchr(start, ':'); 6337 if (!p) { 6338 val = strstrip(start); 6339 goto parse_val; 6340 } 6341 *p = '\0'; 6342 6343 id = strstrip(start); 6344 val = strstrip(p + 1); 6345 6346 /* parse id */ 6347 p = strchr(id, '.'); 6348 if (p) { 6349 *p++ = '\0'; 6350 force_ent->device = simple_strtoul(p, &endp, 10); 6351 if (p == endp || *endp != '\0') { 6352 *reason = "invalid device"; 6353 return -EINVAL; 6354 } 6355 } 6356 6357 force_ent->port = simple_strtoul(id, &endp, 10); 
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}

static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* calculate maximum number of params and allocate force_tbl */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to allocate force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse and populate the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}

static int __init ata_init(void)
{
	int rc;

	ata_parse_force_param();

	rc = ata_sff_init();
	if (rc) {
		kfree(ata_force_tbl);
		return rc;
	}

	libata_transport_init();
	ata_scsi_transport_template = ata_attach_transport();
	if (!ata_scsi_transport_template) {
		ata_sff_exit();
		rc = -ENOMEM;
		goto err_out;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;

err_out:
	return rc;
}

static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	kfree(ata_force_tbl);
}

subsys_initcall(ata_init);
module_exit(ata_exit);

static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}

/**
 *	ata_msleep - ATA EH owner aware msleep
 *	@ap: ATA port to attribute the sleep to
 *	@msecs: duration to sleep in milliseconds
 *
 *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
 *	ownership is released before going to sleep and reacquired
 *	after the sleep is complete.  IOW, other ports sharing the
 *	@ap->host will be allowed to own the EH while this task is
 *	sleeping.
 *
 *	LOCKING:
 *	Might sleep.
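 *
 *	Illustrative sketch only (the FOO_* register names are
 *	hypothetical): polling loops that run in EH context should use
 *	this instead of plain msleep() so that sibling ports on the same
 *	host get a chance to run their EH while we wait, e.g.
 *
 *		while (!(readl(port_mmio + FOO_STATUS) & FOO_READY) &&
 *		       time_before(jiffies, deadline))
 *			ata_msleep(ap, 10);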
6497 */ 6498 void ata_msleep(struct ata_port *ap, unsigned int msecs) 6499 { 6500 bool owns_eh = ap && ap->host->eh_owner == current; 6501 6502 if (owns_eh) 6503 ata_eh_release(ap); 6504 6505 msleep(msecs); 6506 6507 if (owns_eh) 6508 ata_eh_acquire(ap); 6509 } 6510 6511 /** 6512 * ata_wait_register - wait until register value changes 6513 * @ap: ATA port to wait register for, can be NULL 6514 * @reg: IO-mapped register 6515 * @mask: Mask to apply to read register value 6516 * @val: Wait condition 6517 * @interval: polling interval in milliseconds 6518 * @timeout: timeout in milliseconds 6519 * 6520 * Waiting for some bits of register to change is a common 6521 * operation for ATA controllers. This function reads 32bit LE 6522 * IO-mapped register @reg and tests for the following condition. 6523 * 6524 * (*@reg & mask) != val 6525 * 6526 * If the condition is met, it returns; otherwise, the process is 6527 * repeated after @interval_msec until timeout. 6528 * 6529 * LOCKING: 6530 * Kernel thread context (may sleep) 6531 * 6532 * RETURNS: 6533 * The final register value. 6534 */ 6535 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, 6536 unsigned long interval, unsigned long timeout) 6537 { 6538 unsigned long deadline; 6539 u32 tmp; 6540 6541 tmp = ioread32(reg); 6542 6543 /* Calculate timeout _after_ the first read to make sure 6544 * preceding writes reach the controller before starting to 6545 * eat away the timeout. 6546 */ 6547 deadline = ata_deadline(jiffies, timeout); 6548 6549 while ((tmp & mask) == val && time_before(jiffies, deadline)) { 6550 ata_msleep(ap, interval); 6551 tmp = ioread32(reg); 6552 } 6553 6554 return tmp; 6555 } 6556 6557 /* 6558 * Dummy port_ops 6559 */ 6560 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 6561 { 6562 return AC_ERR_SYSTEM; 6563 } 6564 6565 static void ata_dummy_error_handler(struct ata_port *ap) 6566 { 6567 /* truly dummy */ 6568 } 6569 6570 struct ata_port_operations ata_dummy_port_ops = { 6571 .qc_prep = ata_noop_qc_prep, 6572 .qc_issue = ata_dummy_qc_issue, 6573 .error_handler = ata_dummy_error_handler, 6574 }; 6575 6576 const struct ata_port_info ata_dummy_port_info = { 6577 .port_ops = &ata_dummy_port_ops, 6578 }; 6579 6580 /* 6581 * Utility print functions 6582 */ 6583 int ata_port_printk(const struct ata_port *ap, const char *level, 6584 const char *fmt, ...) 6585 { 6586 struct va_format vaf; 6587 va_list args; 6588 int r; 6589 6590 va_start(args, fmt); 6591 6592 vaf.fmt = fmt; 6593 vaf.va = &args; 6594 6595 r = printk("%sata%u: %pV", level, ap->print_id, &vaf); 6596 6597 va_end(args); 6598 6599 return r; 6600 } 6601 EXPORT_SYMBOL(ata_port_printk); 6602 6603 int ata_link_printk(const struct ata_link *link, const char *level, 6604 const char *fmt, ...) 6605 { 6606 struct va_format vaf; 6607 va_list args; 6608 int r; 6609 6610 va_start(args, fmt); 6611 6612 vaf.fmt = fmt; 6613 vaf.va = &args; 6614 6615 if (sata_pmp_attached(link->ap) || link->ap->slave_link) 6616 r = printk("%sata%u.%02u: %pV", 6617 level, link->ap->print_id, link->pmp, &vaf); 6618 else 6619 r = printk("%sata%u: %pV", 6620 level, link->ap->print_id, &vaf); 6621 6622 va_end(args); 6623 6624 return r; 6625 } 6626 EXPORT_SYMBOL(ata_link_printk); 6627 6628 int ata_dev_printk(const struct ata_device *dev, const char *level, 6629 const char *fmt, ...) 
6630 { 6631 struct va_format vaf; 6632 va_list args; 6633 int r; 6634 6635 va_start(args, fmt); 6636 6637 vaf.fmt = fmt; 6638 vaf.va = &args; 6639 6640 r = printk("%sata%u.%02u: %pV", 6641 level, dev->link->ap->print_id, dev->link->pmp + dev->devno, 6642 &vaf); 6643 6644 va_end(args); 6645 6646 return r; 6647 } 6648 EXPORT_SYMBOL(ata_dev_printk); 6649 6650 void ata_print_version(const struct device *dev, const char *version) 6651 { 6652 dev_printk(KERN_DEBUG, dev, "version %s\n", version); 6653 } 6654 EXPORT_SYMBOL(ata_print_version); 6655 6656 /* 6657 * libata is essentially a library of internal helper functions for 6658 * low-level ATA host controller drivers. As such, the API/ABI is 6659 * likely to change as new drivers are added and updated. 6660 * Do not depend on ABI/API stability. 6661 */ 6662 EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 6663 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 6664 EXPORT_SYMBOL_GPL(sata_deb_timing_long); 6665 EXPORT_SYMBOL_GPL(ata_base_port_ops); 6666 EXPORT_SYMBOL_GPL(sata_port_ops); 6667 EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 6668 EXPORT_SYMBOL_GPL(ata_dummy_port_info); 6669 EXPORT_SYMBOL_GPL(ata_link_next); 6670 EXPORT_SYMBOL_GPL(ata_dev_next); 6671 EXPORT_SYMBOL_GPL(ata_std_bios_param); 6672 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); 6673 EXPORT_SYMBOL_GPL(ata_host_init); 6674 EXPORT_SYMBOL_GPL(ata_host_alloc); 6675 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 6676 EXPORT_SYMBOL_GPL(ata_slave_link_init); 6677 EXPORT_SYMBOL_GPL(ata_host_start); 6678 EXPORT_SYMBOL_GPL(ata_host_register); 6679 EXPORT_SYMBOL_GPL(ata_host_activate); 6680 EXPORT_SYMBOL_GPL(ata_host_detach); 6681 EXPORT_SYMBOL_GPL(ata_sg_init); 6682 EXPORT_SYMBOL_GPL(ata_qc_complete); 6683 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 6684 EXPORT_SYMBOL_GPL(atapi_cmd_type); 6685 EXPORT_SYMBOL_GPL(ata_tf_to_fis); 6686 EXPORT_SYMBOL_GPL(ata_tf_from_fis); 6687 EXPORT_SYMBOL_GPL(ata_pack_xfermask); 6688 EXPORT_SYMBOL_GPL(ata_unpack_xfermask); 6689 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); 6690 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); 6691 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 6692 EXPORT_SYMBOL_GPL(ata_mode_string); 6693 EXPORT_SYMBOL_GPL(ata_id_xfermask); 6694 EXPORT_SYMBOL_GPL(ata_do_set_mode); 6695 EXPORT_SYMBOL_GPL(ata_std_qc_defer); 6696 EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 6697 EXPORT_SYMBOL_GPL(ata_dev_disable); 6698 EXPORT_SYMBOL_GPL(sata_set_spd); 6699 EXPORT_SYMBOL_GPL(ata_wait_after_reset); 6700 EXPORT_SYMBOL_GPL(sata_link_debounce); 6701 EXPORT_SYMBOL_GPL(sata_link_resume); 6702 EXPORT_SYMBOL_GPL(sata_link_scr_lpm); 6703 EXPORT_SYMBOL_GPL(ata_std_prereset); 6704 EXPORT_SYMBOL_GPL(sata_link_hardreset); 6705 EXPORT_SYMBOL_GPL(sata_std_hardreset); 6706 EXPORT_SYMBOL_GPL(ata_std_postreset); 6707 EXPORT_SYMBOL_GPL(ata_dev_classify); 6708 EXPORT_SYMBOL_GPL(ata_dev_pair); 6709 EXPORT_SYMBOL_GPL(ata_ratelimit); 6710 EXPORT_SYMBOL_GPL(ata_msleep); 6711 EXPORT_SYMBOL_GPL(ata_wait_register); 6712 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 6713 EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 6714 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 6715 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 6716 EXPORT_SYMBOL_GPL(sata_scr_valid); 6717 EXPORT_SYMBOL_GPL(sata_scr_read); 6718 EXPORT_SYMBOL_GPL(sata_scr_write); 6719 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 6720 EXPORT_SYMBOL_GPL(ata_link_online); 6721 EXPORT_SYMBOL_GPL(ata_link_offline); 6722 #ifdef CONFIG_PM 6723 EXPORT_SYMBOL_GPL(ata_host_suspend); 6724 EXPORT_SYMBOL_GPL(ata_host_resume); 6725 #endif /* CONFIG_PM */ 6726 EXPORT_SYMBOL_GPL(ata_id_string); 
6727 EXPORT_SYMBOL_GPL(ata_id_c_string); 6728 EXPORT_SYMBOL_GPL(ata_do_dev_read_id); 6729 EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6730 6731 EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 6732 EXPORT_SYMBOL_GPL(ata_timing_find_mode); 6733 EXPORT_SYMBOL_GPL(ata_timing_compute); 6734 EXPORT_SYMBOL_GPL(ata_timing_merge); 6735 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); 6736 6737 #ifdef CONFIG_PCI 6738 EXPORT_SYMBOL_GPL(pci_test_config_bits); 6739 EXPORT_SYMBOL_GPL(ata_pci_remove_one); 6740 #ifdef CONFIG_PM 6741 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 6742 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 6743 EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 6744 EXPORT_SYMBOL_GPL(ata_pci_device_resume); 6745 #endif /* CONFIG_PM */ 6746 #endif /* CONFIG_PCI */ 6747 6748 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 6749 EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 6750 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 6751 EXPORT_SYMBOL_GPL(ata_port_desc); 6752 #ifdef CONFIG_PCI 6753 EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 6754 #endif /* CONFIG_PCI */ 6755 EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 6756 EXPORT_SYMBOL_GPL(ata_link_abort); 6757 EXPORT_SYMBOL_GPL(ata_port_abort); 6758 EXPORT_SYMBOL_GPL(ata_port_freeze); 6759 EXPORT_SYMBOL_GPL(sata_async_notification); 6760 EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 6761 EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 6762 EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 6763 EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 6764 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); 6765 EXPORT_SYMBOL_GPL(ata_do_eh); 6766 EXPORT_SYMBOL_GPL(ata_std_error_handler); 6767 6768 EXPORT_SYMBOL_GPL(ata_cable_40wire); 6769 EXPORT_SYMBOL_GPL(ata_cable_80wire); 6770 EXPORT_SYMBOL_GPL(ata_cable_unknown); 6771 EXPORT_SYMBOL_GPL(ata_cable_ignore); 6772 EXPORT_SYMBOL_GPL(ata_cable_sata); 6773