/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>

#include "libata.h"
#include "libata-transport.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

atomic_t ata_print_id =
	ATOMIC_INIT(0);

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}

/**
 * ata_link_next - link iteration helper
 * @link: the previous link, NULL to start
 * @ap: ATA port containing links to iterate
 * @mode: iteration mode, one of ATA_LITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next link.
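 *
 * Callers normally do not use this helper directly; the
 * ata_for_each_link() macro in <linux/libata.h> wraps it, roughly:
 *
 *	struct ata_link *link;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_link_notice(link, "visiting link\n");
 *
 * which repeatedly calls ata_link_next() with ATA_LITER_EDGE until
 * it returns NULL.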
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}

/**
 * ata_dev_next - device iteration helper
 * @dev: the previous device, NULL to start
 * @link: ATA link containing devices to iterate
 * @mode: iteration mode, one of ATA_DITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}

/**
 * ata_dev_phys_link - find physical link for a device
 * @dev: ATA device to look up physical link for
 *
 * Look up physical link which @dev is attached to.  Note that
 * this is different from @dev->link only when @dev is on slave
 * link.  For all other cases, it's the same as @dev->link.
 *
 * LOCKING:
 * Don't care.
 *
 * RETURNS:
 * Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}

/**
 * ata_force_cbl - force cable type according to libata.force
 * @ap: ATA port of interest
 *
 * Force cable type according to libata.force and whine about it.
 * The last entry which has matching port number is used, so it
 * can be specified as part of device force parameters.  For
 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 * same effect.
 *
 * LOCKING:
 * EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 * ata_force_link_limits - force link limits according to libata.force
 * @link: ATA link of interest
 *
 * Force link flags and SATA spd limit according to libata.force
 * and whine about it.  When only the port part is specified
 * (e.g. 1:), the limit applies to all links connected to both
 * the host link and all fan-out ports connected via PMP.  If the
 * device part is specified as 0 (e.g. 1.00:), it specifies the
 * first fan-out link not the host link.  Device number 15 always
 * points to the host link whether PMP is attached or not.  If the
 * controller has slave link, device number 16 points to it.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}

/**
 * ata_force_xfermask - force xfermask according to libata.force
 * @dev: ATA device of interest
 *
 * Force xfer_mask according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
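 *
 * For example, booting with "libata.force=1.00:udma4" ends up here
 * and caps the first device on port 1 to UDMA/66; see the
 * ata_force_cbl() comment above for more on the parameter syntax.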
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}

/**
 * ata_force_horkage - force horkage according to libata.force
 * @dev: ATA device of interest
 *
 * Force horkage according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}

/**
 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 * @opcode: SCSI opcode
 *
 * Determine ATAPI command type from @opcode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall thru */
	default:
		return ATAPI_MISC;
	}
}

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for command
 * @fis: Buffer into which data will be output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
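 *
 * Note that @fis must point to a buffer large enough for a full
 * 20-byte Register - Host to Device FIS: bytes 16-19 carry the
 * auxiliary field and are always written below.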
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = tf->auxiliary & 0xff;
	fis[17] = (tf->auxiliary >> 8) & 0xff;
	fis[18] = (tf->auxiliary >> 16) & 0xff;
	fis[19] = (tf->auxiliary >> 24) & 0xff;
}

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
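 *
 * For the CHS case the usual conversion
 * block = (cyl * heads + head) * sectors + sect - 1 is applied, so
 * e.g. cylinder 0, head 0, sector 1 reads back as block 0.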
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (!dev || tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			sect = 1; /* oh well */
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}

/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing.
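		 * Note that lba_28_ok() is only a coarse upper bound
		 * here; the exact cylinder/head/sector limits are
		 * checked again after the conversion below.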
		 */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
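 *
 * The returned mask covers the given mode and every lower mode of
 * the same transfer class, e.g. XFER_UDMA_2 yields a mask with
 * UDMA modes 0-2 set.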
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @modemask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @mode_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
		DPRINTK("found ZAC device by sig\n");
		return ATA_DEV_ZAC;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
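 *
 * A typical use is pulling the model string out of IDENTIFY data
 * into a char buf[ATA_ID_PROD_LEN + 1], passing sizeof(buf) as
 * @len so the result is always NUL terminated.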
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 * ata_read_native_max_address - Read native max address
 * @dev: target device
 * @max_sectors: out parameter for the result native max address
 *
 * Perform an LBA48 or LBA28 native size query upon the device in
 * question.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted by the drive.
 * -EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 * ata_set_max_sectors - Set max sectors
 * @dev: target device
 * @new_sectors: new max sectors value to set for the device
 *
 * Set max sectors of @dev to @new_sectors.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted or denied (due to
 * previous non-volatile SET_MAX) by the drive.  -EIO on other
 * errors.
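 *
 * SET MAX (ADDRESS) takes the address of the last accessible
 * sector rather than a sector count, which is why @new_sectors is
 * decremented by one before being written to the taskfile below.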
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media.  The caller must check
 * the drive has the HPA feature set enabled.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do?
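	 * Either the native size is not bigger than the current size
	 * or HPA unlocking was not requested; at most report the
	 * situation and return.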
	 */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device.  This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported...
		 * Note drivers using the timing API will get this right
		 * anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sgl: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 * @timeout: Timeout in msecs (0 for default)
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
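	 * In short: with a new-style ->error_handler the reserved
	 * ATA_TAG_INTERNAL slot is used, otherwise the internal
	 * command borrows tag 0.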
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 * @timeout: Timeout in msecs (0 for default)
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset.  IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied.  See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY.  Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.
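	 * These CFA-only advanced PIO modes are specified without
	 * IORDY flow control, so never ask for it there.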
	 */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the highest mode possible if we are not using iordy.  Return
 * -1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 * ata_do_dev_read_id - default ID read method
 * @dev: device
 * @tf: proposed taskfile
 * @id: data buffer
 *
 * Issue the identify taskfile and hand back the buffer containing
 * identify data.  For some RAID controllers and for pre ATA devices
 * this function is wrapped or replaced by the driver
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 * now we abort if we hit that case.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
				     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
		    ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				    "host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
2010 */ 2011 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 2012 err_mask = ata_dev_init_params(dev, id[3], id[6]); 2013 if (err_mask) { 2014 rc = -EIO; 2015 reason = "INIT_DEV_PARAMS failed"; 2016 goto err_out; 2017 } 2018 2019 /* current CHS translation info (id[53-58]) might be 2020 * changed. reread the identify device info. 2021 */ 2022 flags &= ~ATA_READID_POSTRESET; 2023 goto retry; 2024 } 2025 } 2026 2027 *p_class = class; 2028 2029 return 0; 2030 2031 err_out: 2032 if (ata_msg_warn(ap)) 2033 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n", 2034 reason, err_mask); 2035 return rc; 2036 } 2037 2038 static int ata_do_link_spd_horkage(struct ata_device *dev) 2039 { 2040 struct ata_link *plink = ata_dev_phys_link(dev); 2041 u32 target, target_limit; 2042 2043 if (!sata_scr_valid(plink)) 2044 return 0; 2045 2046 if (dev->horkage & ATA_HORKAGE_1_5_GBPS) 2047 target = 1; 2048 else 2049 return 0; 2050 2051 target_limit = (1 << target) - 1; 2052 2053 /* if already on stricter limit, no need to push further */ 2054 if (plink->sata_spd_limit <= target_limit) 2055 return 0; 2056 2057 plink->sata_spd_limit = target_limit; 2058 2059 /* Request another EH round by returning -EAGAIN if link is 2060 * going faster than the target speed. Forward progress is 2061 * guaranteed by setting sata_spd_limit to target_limit above. 2062 */ 2063 if (plink->sata_spd > target) { 2064 ata_dev_info(dev, "applying link speed limit horkage to %s\n", 2065 sata_spd_string(target)); 2066 return -EAGAIN; 2067 } 2068 return 0; 2069 } 2070 2071 static inline u8 ata_dev_knobble(struct ata_device *dev) 2072 { 2073 struct ata_port *ap = dev->link->ap; 2074 2075 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) 2076 return 0; 2077 2078 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2079 } 2080 2081 static int ata_dev_config_ncq(struct ata_device *dev, 2082 char *desc, size_t desc_sz) 2083 { 2084 struct ata_port *ap = dev->link->ap; 2085 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 2086 unsigned int err_mask; 2087 char *aa_desc = ""; 2088 2089 if (!ata_id_has_ncq(dev->id)) { 2090 desc[0] = '\0'; 2091 return 0; 2092 } 2093 if (dev->horkage & ATA_HORKAGE_NONCQ) { 2094 snprintf(desc, desc_sz, "NCQ (not used)"); 2095 return 0; 2096 } 2097 if (ap->flags & ATA_FLAG_NCQ) { 2098 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 2099 dev->flags |= ATA_DFLAG_NCQ; 2100 } 2101 2102 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && 2103 (ap->flags & ATA_FLAG_FPDMA_AA) && 2104 ata_id_has_fpdma_aa(dev->id)) { 2105 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, 2106 SATA_FPDMA_AA); 2107 if (err_mask) { 2108 ata_dev_err(dev, 2109 "failed to enable AA (error_mask=0x%x)\n", 2110 err_mask); 2111 if (err_mask != AC_ERR_DEV) { 2112 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; 2113 return -EIO; 2114 } 2115 } else 2116 aa_desc = ", AA"; 2117 } 2118 2119 if (hdepth >= ddepth) 2120 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); 2121 else 2122 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, 2123 ddepth, aa_desc); 2124 2125 if ((ap->flags & ATA_FLAG_FPDMA_AUX) && 2126 ata_id_has_ncq_send_and_recv(dev->id)) { 2127 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV, 2128 0, ap->sector_buf, 1); 2129 if (err_mask) { 2130 ata_dev_dbg(dev, 2131 "failed to get NCQ Send/Recv Log Emask 0x%x\n", 2132 err_mask); 2133 } else { 2134 u8 *cmds = dev->ncq_send_recv_cmds; 2135 2136 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; 2137 memcpy(cmds, ap->sector_buf, 
ATA_LOG_NCQ_SEND_RECV_SIZE); 2138 2139 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { 2140 ata_dev_dbg(dev, "disabling queued TRIM support\n"); 2141 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= 2142 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; 2143 } 2144 } 2145 } 2146 2147 return 0; 2148 } 2149 2150 static void ata_dev_config_sense_reporting(struct ata_device *dev) 2151 { 2152 unsigned int err_mask; 2153 2154 if (!ata_id_has_sense_reporting(dev->id)) 2155 return; 2156 2157 if (ata_id_sense_reporting_enabled(dev->id)) 2158 return; 2159 2160 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1); 2161 if (err_mask) { 2162 ata_dev_dbg(dev, 2163 "failed to enable Sense Data Reporting, Emask 0x%x\n", 2164 err_mask); 2165 } 2166 } 2167 2168 /** 2169 * ata_dev_configure - Configure the specified ATA/ATAPI device 2170 * @dev: Target device to configure 2171 * 2172 * Configure @dev according to @dev->id. Generic and low-level 2173 * driver specific fixups are also applied. 2174 * 2175 * LOCKING: 2176 * Kernel thread context (may sleep) 2177 * 2178 * RETURNS: 2179 * 0 on success, -errno otherwise 2180 */ 2181 int ata_dev_configure(struct ata_device *dev) 2182 { 2183 struct ata_port *ap = dev->link->ap; 2184 struct ata_eh_context *ehc = &dev->link->eh_context; 2185 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2186 const u16 *id = dev->id; 2187 unsigned long xfer_mask; 2188 unsigned int err_mask; 2189 char revbuf[7]; /* XYZ-99\0 */ 2190 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2191 char modelbuf[ATA_ID_PROD_LEN+1]; 2192 int rc; 2193 2194 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2195 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__); 2196 return 0; 2197 } 2198 2199 if (ata_msg_probe(ap)) 2200 ata_dev_dbg(dev, "%s: ENTER\n", __func__); 2201 2202 /* set horkage */ 2203 dev->horkage |= ata_dev_blacklisted(dev); 2204 ata_force_horkage(dev); 2205 2206 if (dev->horkage & ATA_HORKAGE_DISABLE) { 2207 ata_dev_info(dev, "unsupported device, disabling\n"); 2208 ata_dev_disable(dev); 2209 return 0; 2210 } 2211 2212 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && 2213 dev->class == ATA_DEV_ATAPI) { 2214 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n", 2215 atapi_enabled ? 
"not supported with this driver" 2216 : "disabled"); 2217 ata_dev_disable(dev); 2218 return 0; 2219 } 2220 2221 rc = ata_do_link_spd_horkage(dev); 2222 if (rc) 2223 return rc; 2224 2225 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */ 2226 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) && 2227 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) 2228 dev->horkage |= ATA_HORKAGE_NOLPM; 2229 2230 if (dev->horkage & ATA_HORKAGE_NOLPM) { 2231 ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); 2232 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; 2233 } 2234 2235 /* let ACPI work its magic */ 2236 rc = ata_acpi_on_devcfg(dev); 2237 if (rc) 2238 return rc; 2239 2240 /* massage HPA, do it early as it might change IDENTIFY data */ 2241 rc = ata_hpa_resize(dev); 2242 if (rc) 2243 return rc; 2244 2245 /* print device capabilities */ 2246 if (ata_msg_probe(ap)) 2247 ata_dev_dbg(dev, 2248 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2249 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2250 __func__, 2251 id[49], id[82], id[83], id[84], 2252 id[85], id[86], id[87], id[88]); 2253 2254 /* initialize to-be-configured parameters */ 2255 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2256 dev->max_sectors = 0; 2257 dev->cdb_len = 0; 2258 dev->n_sectors = 0; 2259 dev->cylinders = 0; 2260 dev->heads = 0; 2261 dev->sectors = 0; 2262 dev->multi_count = 0; 2263 2264 /* 2265 * common ATA, ATAPI feature tests 2266 */ 2267 2268 /* find max transfer mode; for printk only */ 2269 xfer_mask = ata_id_xfermask(id); 2270 2271 if (ata_msg_probe(ap)) 2272 ata_dump_id(id); 2273 2274 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2275 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2276 sizeof(fwrevbuf)); 2277 2278 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2279 sizeof(modelbuf)); 2280 2281 /* ATA-specific feature tests */ 2282 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { 2283 if (ata_id_is_cfa(id)) { 2284 /* CPRM may make this media unusable */ 2285 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2286 ata_dev_warn(dev, 2287 "supports DRM functions and may not be fully accessible\n"); 2288 snprintf(revbuf, 7, "CFA"); 2289 } else { 2290 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2291 /* Warn the user if the device has TPM extensions */ 2292 if (ata_id_has_tpm(id)) 2293 ata_dev_warn(dev, 2294 "supports DRM functions and may not be fully accessible\n"); 2295 } 2296 2297 dev->n_sectors = ata_id_n_sectors(id); 2298 2299 /* get current R/W Multiple count setting */ 2300 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { 2301 unsigned int max = dev->id[47] & 0xff; 2302 unsigned int cnt = dev->id[59] & 0xff; 2303 /* only recognize/allow powers of two here */ 2304 if (is_power_of_2(max) && is_power_of_2(cnt)) 2305 if (cnt <= max) 2306 dev->multi_count = cnt; 2307 } 2308 2309 if (ata_id_has_lba(id)) { 2310 const char *lba_desc; 2311 char ncq_desc[24]; 2312 2313 lba_desc = "LBA"; 2314 dev->flags |= ATA_DFLAG_LBA; 2315 if (ata_id_has_lba48(id)) { 2316 dev->flags |= ATA_DFLAG_LBA48; 2317 lba_desc = "LBA48"; 2318 2319 if (dev->n_sectors >= (1UL << 28) && 2320 ata_id_has_flush_ext(id)) 2321 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2322 } 2323 2324 /* config NCQ */ 2325 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2326 if (rc) 2327 return rc; 2328 2329 /* print device info to dmesg */ 2330 if (ata_msg_drv(ap) && print_info) { 2331 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2332 revbuf, modelbuf, fwrevbuf, 2333 ata_mode_string(xfer_mask)); 2334 ata_dev_info(dev, 2335 "%llu sectors, 
multi %u: %s %s\n", 2336 (unsigned long long)dev->n_sectors, 2337 dev->multi_count, lba_desc, ncq_desc); 2338 } 2339 } else { 2340 /* CHS */ 2341 2342 /* Default translation */ 2343 dev->cylinders = id[1]; 2344 dev->heads = id[3]; 2345 dev->sectors = id[6]; 2346 2347 if (ata_id_current_chs_valid(id)) { 2348 /* Current CHS translation is valid. */ 2349 dev->cylinders = id[54]; 2350 dev->heads = id[55]; 2351 dev->sectors = id[56]; 2352 } 2353 2354 /* print device info to dmesg */ 2355 if (ata_msg_drv(ap) && print_info) { 2356 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2357 revbuf, modelbuf, fwrevbuf, 2358 ata_mode_string(xfer_mask)); 2359 ata_dev_info(dev, 2360 "%llu sectors, multi %u, CHS %u/%u/%u\n", 2361 (unsigned long long)dev->n_sectors, 2362 dev->multi_count, dev->cylinders, 2363 dev->heads, dev->sectors); 2364 } 2365 } 2366 2367 /* Check and mark DevSlp capability. Get DevSlp timing variables 2368 * from SATA Settings page of Identify Device Data Log. 2369 */ 2370 if (ata_id_has_devslp(dev->id)) { 2371 u8 *sata_setting = ap->sector_buf; 2372 int i, j; 2373 2374 dev->flags |= ATA_DFLAG_DEVSLP; 2375 err_mask = ata_read_log_page(dev, 2376 ATA_LOG_SATA_ID_DEV_DATA, 2377 ATA_LOG_SATA_SETTINGS, 2378 sata_setting, 2379 1); 2380 if (err_mask) 2381 ata_dev_dbg(dev, 2382 "failed to get Identify Device Data, Emask 0x%x\n", 2383 err_mask); 2384 else 2385 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { 2386 j = ATA_LOG_DEVSLP_OFFSET + i; 2387 dev->devslp_timing[i] = sata_setting[j]; 2388 } 2389 } 2390 ata_dev_config_sense_reporting(dev); 2391 dev->cdb_len = 16; 2392 } 2393 2394 /* ATAPI-specific feature tests */ 2395 else if (dev->class == ATA_DEV_ATAPI) { 2396 const char *cdb_intr_string = ""; 2397 const char *atapi_an_string = ""; 2398 const char *dma_dir_string = ""; 2399 u32 sntf; 2400 2401 rc = atapi_cdb_len(id); 2402 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2403 if (ata_msg_warn(ap)) 2404 ata_dev_warn(dev, "unsupported CDB len\n"); 2405 rc = -EINVAL; 2406 goto err_out_nosup; 2407 } 2408 dev->cdb_len = (unsigned int) rc; 2409 2410 /* Enable ATAPI AN if both the host and device have 2411 * the support. If PMP is attached, SNTF is required 2412 * to enable ATAPI AN to discern between PHY status 2413 * changed notifications and ATAPI ANs. 
2414 */ 2415 if (atapi_an && 2416 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2417 (!sata_pmp_attached(ap) || 2418 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2419 /* issue SET feature command to turn this on */ 2420 err_mask = ata_dev_set_feature(dev, 2421 SETFEATURES_SATA_ENABLE, SATA_AN); 2422 if (err_mask) 2423 ata_dev_err(dev, 2424 "failed to enable ATAPI AN (err_mask=0x%x)\n", 2425 err_mask); 2426 else { 2427 dev->flags |= ATA_DFLAG_AN; 2428 atapi_an_string = ", ATAPI AN"; 2429 } 2430 } 2431 2432 if (ata_id_cdb_intr(dev->id)) { 2433 dev->flags |= ATA_DFLAG_CDB_INTR; 2434 cdb_intr_string = ", CDB intr"; 2435 } 2436 2437 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) { 2438 dev->flags |= ATA_DFLAG_DMADIR; 2439 dma_dir_string = ", DMADIR"; 2440 } 2441 2442 if (ata_id_has_da(dev->id)) { 2443 dev->flags |= ATA_DFLAG_DA; 2444 zpodd_init(dev); 2445 } 2446 2447 /* print device info to dmesg */ 2448 if (ata_msg_drv(ap) && print_info) 2449 ata_dev_info(dev, 2450 "ATAPI: %s, %s, max %s%s%s%s\n", 2451 modelbuf, fwrevbuf, 2452 ata_mode_string(xfer_mask), 2453 cdb_intr_string, atapi_an_string, 2454 dma_dir_string); 2455 } 2456 2457 /* determine max_sectors */ 2458 dev->max_sectors = ATA_MAX_SECTORS; 2459 if (dev->flags & ATA_DFLAG_LBA48) 2460 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2461 2462 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2463 200 sectors */ 2464 if (ata_dev_knobble(dev)) { 2465 if (ata_msg_drv(ap) && print_info) 2466 ata_dev_info(dev, "applying bridge limits\n"); 2467 dev->udma_mask &= ATA_UDMA5; 2468 dev->max_sectors = ATA_MAX_SECTORS; 2469 } 2470 2471 if ((dev->class == ATA_DEV_ATAPI) && 2472 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2473 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2474 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2475 } 2476 2477 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2478 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2479 dev->max_sectors); 2480 2481 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) 2482 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2483 2484 if (ap->ops->dev_config) 2485 ap->ops->dev_config(dev); 2486 2487 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2488 /* Let the user know. We don't want to disallow opens for 2489 rescue purposes, or in case the vendor is just a blithering 2490 idiot. Do this after the dev_config call as some controllers 2491 with buggy firmware may want to avoid reporting false device 2492 bugs */ 2493 2494 if (print_info) { 2495 ata_dev_warn(dev, 2496 "Drive reports diagnostics failure. This may indicate a drive\n"); 2497 ata_dev_warn(dev, 2498 "fault or invalid emulation. Contact drive vendor for information.\n"); 2499 } 2500 } 2501 2502 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { 2503 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n"); 2504 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n"); 2505 } 2506 2507 return 0; 2508 2509 err_out_nosup: 2510 if (ata_msg_probe(ap)) 2511 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__); 2512 return rc; 2513 } 2514 2515 /** 2516 * ata_cable_40wire - return 40 wire cable type 2517 * @ap: port 2518 * 2519 * Helper method for drivers which want to hardwire 40 wire cable 2520 * detection. 
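 *
 * A minimal sketch of typical usage (illustrative only; "my_pata_ops"
 * is a hypothetical driver structure, not an in-tree symbol):
 *
 *	static struct ata_port_operations my_pata_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 *
 * so every port driven by such a driver is treated as having a
 * 40-wire cable and is limited accordingly when transfer modes are
 * computed.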
2521 */ 2522 2523 int ata_cable_40wire(struct ata_port *ap) 2524 { 2525 return ATA_CBL_PATA40; 2526 } 2527 2528 /** 2529 * ata_cable_80wire - return 80 wire cable type 2530 * @ap: port 2531 * 2532 * Helper method for drivers which want to hardwire 80 wire cable 2533 * detection. 2534 */ 2535 2536 int ata_cable_80wire(struct ata_port *ap) 2537 { 2538 return ATA_CBL_PATA80; 2539 } 2540 2541 /** 2542 * ata_cable_unknown - return unknown PATA cable. 2543 * @ap: port 2544 * 2545 * Helper method for drivers which have no PATA cable detection. 2546 */ 2547 2548 int ata_cable_unknown(struct ata_port *ap) 2549 { 2550 return ATA_CBL_PATA_UNK; 2551 } 2552 2553 /** 2554 * ata_cable_ignore - return ignored PATA cable. 2555 * @ap: port 2556 * 2557 * Helper method for drivers which don't use cable type to limit 2558 * transfer mode. 2559 */ 2560 int ata_cable_ignore(struct ata_port *ap) 2561 { 2562 return ATA_CBL_PATA_IGN; 2563 } 2564 2565 /** 2566 * ata_cable_sata - return SATA cable type 2567 * @ap: port 2568 * 2569 * Helper method for drivers which have SATA cables 2570 */ 2571 2572 int ata_cable_sata(struct ata_port *ap) 2573 { 2574 return ATA_CBL_SATA; 2575 } 2576 2577 /** 2578 * ata_bus_probe - Reset and probe ATA bus 2579 * @ap: Bus to probe 2580 * 2581 * Master ATA bus probing function. Initiates a hardware-dependent 2582 * bus reset, then attempts to identify any devices found on 2583 * the bus. 2584 * 2585 * LOCKING: 2586 * PCI/etc. bus probe sem. 2587 * 2588 * RETURNS: 2589 * Zero on success, negative errno otherwise. 2590 */ 2591 2592 int ata_bus_probe(struct ata_port *ap) 2593 { 2594 unsigned int classes[ATA_MAX_DEVICES]; 2595 int tries[ATA_MAX_DEVICES]; 2596 int rc; 2597 struct ata_device *dev; 2598 2599 ata_for_each_dev(dev, &ap->link, ALL) 2600 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2601 2602 retry: 2603 ata_for_each_dev(dev, &ap->link, ALL) { 2604 /* If we issue an SRST then an ATA drive (not ATAPI) 2605 * may change configuration and be in PIO0 timing. If 2606 * we do a hard reset (or are coming from power on) 2607 * this is true for ATA or ATAPI. Until we've set a 2608 * suitable controller mode we should not touch the 2609 * bus as we may be talking too fast. 2610 */ 2611 dev->pio_mode = XFER_PIO_0; 2612 dev->dma_mode = 0xff; 2613 2614 /* If the controller has a pio mode setup function 2615 * then use it to set the chipset to rights. Don't 2616 * touch the DMA setup as that will be dealt with when 2617 * configuring devices. 2618 */ 2619 if (ap->ops->set_piomode) 2620 ap->ops->set_piomode(ap, dev); 2621 } 2622 2623 /* reset and determine device classes */ 2624 ap->ops->phy_reset(ap); 2625 2626 ata_for_each_dev(dev, &ap->link, ALL) { 2627 if (dev->class != ATA_DEV_UNKNOWN) 2628 classes[dev->devno] = dev->class; 2629 else 2630 classes[dev->devno] = ATA_DEV_NONE; 2631 2632 dev->class = ATA_DEV_UNKNOWN; 2633 } 2634 2635 /* read IDENTIFY page and configure devices. 
We have to do the identify 2636 specific sequence bass-ackwards so that PDIAG- is released by 2637 the slave device */ 2638 2639 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { 2640 if (tries[dev->devno]) 2641 dev->class = classes[dev->devno]; 2642 2643 if (!ata_dev_enabled(dev)) 2644 continue; 2645 2646 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2647 dev->id); 2648 if (rc) 2649 goto fail; 2650 } 2651 2652 /* Now ask for the cable type as PDIAG- should have been released */ 2653 if (ap->ops->cable_detect) 2654 ap->cbl = ap->ops->cable_detect(ap); 2655 2656 /* We may have SATA bridge glue hiding here irrespective of 2657 * the reported cable types and sensed types. When SATA 2658 * drives indicate we have a bridge, we don't know which end 2659 * of the link the bridge is which is a problem. 2660 */ 2661 ata_for_each_dev(dev, &ap->link, ENABLED) 2662 if (ata_id_is_sata(dev->id)) 2663 ap->cbl = ATA_CBL_SATA; 2664 2665 /* After the identify sequence we can now set up the devices. We do 2666 this in the normal order so that the user doesn't get confused */ 2667 2668 ata_for_each_dev(dev, &ap->link, ENABLED) { 2669 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2670 rc = ata_dev_configure(dev); 2671 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2672 if (rc) 2673 goto fail; 2674 } 2675 2676 /* configure transfer mode */ 2677 rc = ata_set_mode(&ap->link, &dev); 2678 if (rc) 2679 goto fail; 2680 2681 ata_for_each_dev(dev, &ap->link, ENABLED) 2682 return 0; 2683 2684 return -ENODEV; 2685 2686 fail: 2687 tries[dev->devno]--; 2688 2689 switch (rc) { 2690 case -EINVAL: 2691 /* eeek, something went very wrong, give up */ 2692 tries[dev->devno] = 0; 2693 break; 2694 2695 case -ENODEV: 2696 /* give it just one more chance */ 2697 tries[dev->devno] = min(tries[dev->devno], 1); 2698 case -EIO: 2699 if (tries[dev->devno] == 1) { 2700 /* This is the last chance, better to slow 2701 * down than lose it. 2702 */ 2703 sata_down_spd_limit(&ap->link, 0); 2704 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2705 } 2706 } 2707 2708 if (!tries[dev->devno]) 2709 ata_dev_disable(dev); 2710 2711 goto retry; 2712 } 2713 2714 /** 2715 * sata_print_link_status - Print SATA link status 2716 * @link: SATA link to printk link status about 2717 * 2718 * This function prints link speed and status of a SATA link. 2719 * 2720 * LOCKING: 2721 * None. 
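 *
 * Typical dmesg lines produced here look like (illustrative values):
 *
 *	ata1: SATA link up 3.0 Gbps (SStatus 123 SControl 300)
 *	ata1: SATA link down (SStatus 0 SControl 300)
 *
 * where the speed string is derived from the SPD field of SStatus.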
2722 */ 2723 static void sata_print_link_status(struct ata_link *link) 2724 { 2725 u32 sstatus, scontrol, tmp; 2726 2727 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2728 return; 2729 sata_scr_read(link, SCR_CONTROL, &scontrol); 2730 2731 if (ata_phys_link_online(link)) { 2732 tmp = (sstatus >> 4) & 0xf; 2733 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n", 2734 sata_spd_string(tmp), sstatus, scontrol); 2735 } else { 2736 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n", 2737 sstatus, scontrol); 2738 } 2739 } 2740 2741 /** 2742 * ata_dev_pair - return other device on cable 2743 * @adev: device 2744 * 2745 * Obtain the other device on the same cable, or if none is 2746 * present NULL is returned 2747 */ 2748 2749 struct ata_device *ata_dev_pair(struct ata_device *adev) 2750 { 2751 struct ata_link *link = adev->link; 2752 struct ata_device *pair = &link->device[1 - adev->devno]; 2753 if (!ata_dev_enabled(pair)) 2754 return NULL; 2755 return pair; 2756 } 2757 2758 /** 2759 * sata_down_spd_limit - adjust SATA spd limit downward 2760 * @link: Link to adjust SATA spd limit for 2761 * @spd_limit: Additional limit 2762 * 2763 * Adjust SATA spd limit of @link downward. Note that this 2764 * function only adjusts the limit. The change must be applied 2765 * using sata_set_spd(). 2766 * 2767 * If @spd_limit is non-zero, the speed is limited to equal to or 2768 * lower than @spd_limit if such speed is supported. If 2769 * @spd_limit is slower than any supported speed, only the lowest 2770 * supported speed is allowed. 2771 * 2772 * LOCKING: 2773 * Inherited from caller. 2774 * 2775 * RETURNS: 2776 * 0 on success, negative errno on failure 2777 */ 2778 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) 2779 { 2780 u32 sstatus, spd, mask; 2781 int rc, bit; 2782 2783 if (!sata_scr_valid(link)) 2784 return -EOPNOTSUPP; 2785 2786 /* If SCR can be read, use it to determine the current SPD. 2787 * If not, use cached value in link->sata_spd. 2788 */ 2789 rc = sata_scr_read(link, SCR_STATUS, &sstatus); 2790 if (rc == 0 && ata_sstatus_online(sstatus)) 2791 spd = (sstatus >> 4) & 0xf; 2792 else 2793 spd = link->sata_spd; 2794 2795 mask = link->sata_spd_limit; 2796 if (mask <= 1) 2797 return -EINVAL; 2798 2799 /* unconditionally mask off the highest bit */ 2800 bit = fls(mask) - 1; 2801 mask &= ~(1 << bit); 2802 2803 /* Mask off all speeds higher than or equal to the current 2804 * one. Force 1.5Gbps if current SPD is not available. 2805 */ 2806 if (spd > 1) 2807 mask &= (1 << (spd - 1)) - 1; 2808 else 2809 mask &= 1; 2810 2811 /* were we already at the bottom? */ 2812 if (!mask) 2813 return -EINVAL; 2814 2815 if (spd_limit) { 2816 if (mask & ((1 << spd_limit) - 1)) 2817 mask &= (1 << spd_limit) - 1; 2818 else { 2819 bit = ffs(mask) - 1; 2820 mask = 1 << bit; 2821 } 2822 } 2823 2824 link->sata_spd_limit = mask; 2825 2826 ata_link_warn(link, "limiting SATA link speed to %s\n", 2827 sata_spd_string(fls(mask))); 2828 2829 return 0; 2830 } 2831 2832 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 2833 { 2834 struct ata_link *host_link = &link->ap->link; 2835 u32 limit, target, spd; 2836 2837 limit = link->sata_spd_limit; 2838 2839 /* Don't configure downstream link faster than upstream link. 2840 * It doesn't speed up anything and some PMPs choke on such 2841 * configuration. 
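 *
 * Worked example (hypothetical numbers): if the host link is running
 * at sata_spd == 1 (1.5 Gbps), the limit below is masked down to bit
 * 0 only, fls() then yields a target of 1, and the SPD field written
 * into SControl clamps the downstream link to 1.5 Gbps regardless of
 * what the device behind the PMP could negotiate.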
2842 */ 2843 if (!ata_is_host_link(link) && host_link->sata_spd) 2844 limit &= (1 << host_link->sata_spd) - 1; 2845 2846 if (limit == UINT_MAX) 2847 target = 0; 2848 else 2849 target = fls(limit); 2850 2851 spd = (*scontrol >> 4) & 0xf; 2852 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 2853 2854 return spd != target; 2855 } 2856 2857 /** 2858 * sata_set_spd_needed - is SATA spd configuration needed 2859 * @link: Link in question 2860 * 2861 * Test whether the spd limit in SControl matches 2862 * @link->sata_spd_limit. This function is used to determine 2863 * whether hardreset is necessary to apply SATA spd 2864 * configuration. 2865 * 2866 * LOCKING: 2867 * Inherited from caller. 2868 * 2869 * RETURNS: 2870 * 1 if SATA spd configuration is needed, 0 otherwise. 2871 */ 2872 static int sata_set_spd_needed(struct ata_link *link) 2873 { 2874 u32 scontrol; 2875 2876 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 2877 return 1; 2878 2879 return __sata_set_spd_needed(link, &scontrol); 2880 } 2881 2882 /** 2883 * sata_set_spd - set SATA spd according to spd limit 2884 * @link: Link to set SATA spd for 2885 * 2886 * Set SATA spd of @link according to sata_spd_limit. 2887 * 2888 * LOCKING: 2889 * Inherited from caller. 2890 * 2891 * RETURNS: 2892 * 0 if spd doesn't need to be changed, 1 if spd has been 2893 * changed. Negative errno if SCR registers are inaccessible. 2894 */ 2895 int sata_set_spd(struct ata_link *link) 2896 { 2897 u32 scontrol; 2898 int rc; 2899 2900 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 2901 return rc; 2902 2903 if (!__sata_set_spd_needed(link, &scontrol)) 2904 return 0; 2905 2906 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 2907 return rc; 2908 2909 return 1; 2910 } 2911 2912 /* 2913 * This mode timing computation functionality is ported over from 2914 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 2915 */ 2916 /* 2917 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 2918 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 2919 * for UDMA6, which is currently supported only by Maxtor drives. 2920 * 2921 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
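 *
 * Each positional initializer below fills the struct ata_timing
 * fields in the same order ata_timing_quantize() reads them: setup,
 * act8b, rec8b, cyc8b, active, recover, dmack_hold, cycle, udma
 * (after the leading mode byte, all values in nanoseconds). Read this
 * way, the PIO-0 row specifies a 70ns setup, 290/240ns 8-bit
 * active/recovery within a 600ns 8-bit cycle, 165/150ns data
 * active/recovery within a 600ns cycle, and no UDMA timing.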
2922 */ 2923 2924 static const struct ata_timing ata_timing[] = { 2925 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ 2926 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, 2927 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, 2928 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, 2929 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, 2930 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, 2931 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, 2932 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, 2933 2934 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, 2935 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, 2936 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, 2937 2938 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, 2939 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, 2940 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, 2941 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, 2942 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, 2943 2944 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 2945 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, 2946 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, 2947 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, 2948 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, 2949 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, 2950 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, 2951 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, 2952 2953 { 0xFF } 2954 }; 2955 2956 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 2957 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 2958 2959 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 2960 { 2961 q->setup = EZ(t->setup * 1000, T); 2962 q->act8b = EZ(t->act8b * 1000, T); 2963 q->rec8b = EZ(t->rec8b * 1000, T); 2964 q->cyc8b = EZ(t->cyc8b * 1000, T); 2965 q->active = EZ(t->active * 1000, T); 2966 q->recover = EZ(t->recover * 1000, T); 2967 q->dmack_hold = EZ(t->dmack_hold * 1000, T); 2968 q->cycle = EZ(t->cycle * 1000, T); 2969 q->udma = EZ(t->udma * 1000, UT); 2970 } 2971 2972 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 2973 struct ata_timing *m, unsigned int what) 2974 { 2975 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 2976 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 2977 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 2978 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 2979 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 2980 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 2981 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); 2982 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 2983 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 2984 } 2985 2986 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 2987 { 2988 const struct ata_timing *t = ata_timing; 2989 2990 while (xfer_mode > t->mode) 2991 t++; 2992 2993 if (xfer_mode == t->mode) 2994 return t; 2995 2996 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", 2997 __func__, xfer_mode); 2998 2999 return NULL; 3000 } 3001 3002 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 3003 struct ata_timing *t, int T, int UT) 3004 { 3005 const u16 *id = adev->id; 3006 const struct ata_timing *s; 3007 struct ata_timing p; 3008 3009 /* 3010 * Find the mode. 
3011 */ 3012 3013 if (!(s = ata_timing_find_mode(speed))) 3014 return -EINVAL; 3015 3016 memcpy(t, s, sizeof(*s)); 3017 3018 /* 3019 * If the drive is an EIDE drive, it can tell us it needs extended 3020 * PIO/MW_DMA cycle timing. 3021 */ 3022 3023 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 3024 memset(&p, 0, sizeof(p)); 3025 3026 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) { 3027 if (speed <= XFER_PIO_2) 3028 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; 3029 else if ((speed <= XFER_PIO_4) || 3030 (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) 3031 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; 3032 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) 3033 p.cycle = id[ATA_ID_EIDE_DMA_MIN]; 3034 3035 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 3036 } 3037 3038 /* 3039 * Convert the timing to bus clock counts. 3040 */ 3041 3042 ata_timing_quantize(t, t, T, UT); 3043 3044 /* 3045 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 3046 * S.M.A.R.T * and some other commands. We have to ensure that the 3047 * DMA cycle timing is slower/equal than the fastest PIO timing. 3048 */ 3049 3050 if (speed > XFER_PIO_6) { 3051 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 3052 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 3053 } 3054 3055 /* 3056 * Lengthen active & recovery time so that cycle time is correct. 3057 */ 3058 3059 if (t->act8b + t->rec8b < t->cyc8b) { 3060 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 3061 t->rec8b = t->cyc8b - t->act8b; 3062 } 3063 3064 if (t->active + t->recover < t->cycle) { 3065 t->active += (t->cycle - (t->active + t->recover)) / 2; 3066 t->recover = t->cycle - t->active; 3067 } 3068 3069 /* In a few cases quantisation may produce enough errors to 3070 leave t->cycle too low for the sum of active and recovery 3071 if so we must correct this */ 3072 if (t->active + t->recover > t->cycle) 3073 t->cycle = t->active + t->recover; 3074 3075 return 0; 3076 } 3077 3078 /** 3079 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration 3080 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. 3081 * @cycle: cycle duration in ns 3082 * 3083 * Return matching xfer mode for @cycle. The returned mode is of 3084 * the transfer type specified by @xfer_shift. If @cycle is too 3085 * slow for @xfer_shift, 0xff is returned. If @cycle is faster 3086 * than the fastest known mode, the fasted mode is returned. 3087 * 3088 * LOCKING: 3089 * None. 3090 * 3091 * RETURNS: 3092 * Matching xfer_mode, 0xff if no match found. 
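 *
 * Worked example: ata_timing_cycle2mode(ATA_SHIFT_UDMA, 30) walks the
 * UDMA rows of the timing table above and returns XFER_UDMA_4, the
 * fastest UDMA mode whose cycle time is still at least 30ns. Asking
 * for something faster than UDMA6 (say 10ns) returns XFER_UDMA_6,
 * and asking for something slower than UDMA0 (say 150ns) returns
 * 0xff.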
3093 */ 3094 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3095 { 3096 u8 base_mode = 0xff, last_mode = 0xff; 3097 const struct ata_xfer_ent *ent; 3098 const struct ata_timing *t; 3099 3100 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3101 if (ent->shift == xfer_shift) 3102 base_mode = ent->base; 3103 3104 for (t = ata_timing_find_mode(base_mode); 3105 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3106 unsigned short this_cycle; 3107 3108 switch (xfer_shift) { 3109 case ATA_SHIFT_PIO: 3110 case ATA_SHIFT_MWDMA: 3111 this_cycle = t->cycle; 3112 break; 3113 case ATA_SHIFT_UDMA: 3114 this_cycle = t->udma; 3115 break; 3116 default: 3117 return 0xff; 3118 } 3119 3120 if (cycle > this_cycle) 3121 break; 3122 3123 last_mode = t->mode; 3124 } 3125 3126 return last_mode; 3127 } 3128 3129 /** 3130 * ata_down_xfermask_limit - adjust dev xfer masks downward 3131 * @dev: Device to adjust xfer masks 3132 * @sel: ATA_DNXFER_* selector 3133 * 3134 * Adjust xfer masks of @dev downward. Note that this function 3135 * does not apply the change. Invoking ata_set_mode() afterwards 3136 * will apply the limit. 3137 * 3138 * LOCKING: 3139 * Inherited from caller. 3140 * 3141 * RETURNS: 3142 * 0 on success, negative errno on failure 3143 */ 3144 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3145 { 3146 char buf[32]; 3147 unsigned long orig_mask, xfer_mask; 3148 unsigned long pio_mask, mwdma_mask, udma_mask; 3149 int quiet, highbit; 3150 3151 quiet = !!(sel & ATA_DNXFER_QUIET); 3152 sel &= ~ATA_DNXFER_QUIET; 3153 3154 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3155 dev->mwdma_mask, 3156 dev->udma_mask); 3157 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3158 3159 switch (sel) { 3160 case ATA_DNXFER_PIO: 3161 highbit = fls(pio_mask) - 1; 3162 pio_mask &= ~(1 << highbit); 3163 break; 3164 3165 case ATA_DNXFER_DMA: 3166 if (udma_mask) { 3167 highbit = fls(udma_mask) - 1; 3168 udma_mask &= ~(1 << highbit); 3169 if (!udma_mask) 3170 return -ENOENT; 3171 } else if (mwdma_mask) { 3172 highbit = fls(mwdma_mask) - 1; 3173 mwdma_mask &= ~(1 << highbit); 3174 if (!mwdma_mask) 3175 return -ENOENT; 3176 } 3177 break; 3178 3179 case ATA_DNXFER_40C: 3180 udma_mask &= ATA_UDMA_MASK_40C; 3181 break; 3182 3183 case ATA_DNXFER_FORCE_PIO0: 3184 pio_mask &= 1; 3185 case ATA_DNXFER_FORCE_PIO: 3186 mwdma_mask = 0; 3187 udma_mask = 0; 3188 break; 3189 3190 default: 3191 BUG(); 3192 } 3193 3194 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3195 3196 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3197 return -ENOENT; 3198 3199 if (!quiet) { 3200 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3201 snprintf(buf, sizeof(buf), "%s:%s", 3202 ata_mode_string(xfer_mask), 3203 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3204 else 3205 snprintf(buf, sizeof(buf), "%s", 3206 ata_mode_string(xfer_mask)); 3207 3208 ata_dev_warn(dev, "limiting speed to %s\n", buf); 3209 } 3210 3211 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3212 &dev->udma_mask); 3213 3214 return 0; 3215 } 3216 3217 static int ata_dev_set_mode(struct ata_device *dev) 3218 { 3219 struct ata_port *ap = dev->link->ap; 3220 struct ata_eh_context *ehc = &dev->link->eh_context; 3221 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; 3222 const char *dev_err_whine = ""; 3223 int ign_dev_err = 0; 3224 unsigned int err_mask = 0; 3225 int rc; 3226 3227 dev->flags &= ~ATA_DFLAG_PIO; 3228 if (dev->xfer_shift == ATA_SHIFT_PIO) 3229 dev->flags |= 
ATA_DFLAG_PIO; 3230 3231 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) 3232 dev_err_whine = " (SET_XFERMODE skipped)"; 3233 else { 3234 if (nosetxfer) 3235 ata_dev_warn(dev, 3236 "NOSETXFER but PATA detected - can't " 3237 "skip SETXFER, might malfunction\n"); 3238 err_mask = ata_dev_set_xfermode(dev); 3239 } 3240 3241 if (err_mask & ~AC_ERR_DEV) 3242 goto fail; 3243 3244 /* revalidate */ 3245 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3246 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3247 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3248 if (rc) 3249 return rc; 3250 3251 if (dev->xfer_shift == ATA_SHIFT_PIO) { 3252 /* Old CFA may refuse this command, which is just fine */ 3253 if (ata_id_is_cfa(dev->id)) 3254 ign_dev_err = 1; 3255 /* Catch several broken garbage emulations plus some pre 3256 ATA devices */ 3257 if (ata_id_major_version(dev->id) == 0 && 3258 dev->pio_mode <= XFER_PIO_2) 3259 ign_dev_err = 1; 3260 /* Some very old devices and some bad newer ones fail 3261 any kind of SET_XFERMODE request but support PIO0-2 3262 timings and no IORDY */ 3263 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) 3264 ign_dev_err = 1; 3265 } 3266 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3267 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3268 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3269 dev->dma_mode == XFER_MW_DMA_0 && 3270 (dev->id[63] >> 8) & 1) 3271 ign_dev_err = 1; 3272 3273 /* if the device is actually configured correctly, ignore dev err */ 3274 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3275 ign_dev_err = 1; 3276 3277 if (err_mask & AC_ERR_DEV) { 3278 if (!ign_dev_err) 3279 goto fail; 3280 else 3281 dev_err_whine = " (device error ignored)"; 3282 } 3283 3284 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3285 dev->xfer_shift, (int)dev->xfer_mode); 3286 3287 ata_dev_info(dev, "configured for %s%s\n", 3288 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3289 dev_err_whine); 3290 3291 return 0; 3292 3293 fail: 3294 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask); 3295 return -EIO; 3296 } 3297 3298 /** 3299 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3300 * @link: link on which timings will be programmed 3301 * @r_failed_dev: out parameter for failed device 3302 * 3303 * Standard implementation of the function used to tune and set 3304 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3305 * ata_dev_set_mode() fails, pointer to the failing device is 3306 * returned in @r_failed_dev. 3307 * 3308 * LOCKING: 3309 * PCI/etc. bus probe sem. 
3310 * 3311 * RETURNS: 3312 * 0 on success, negative errno otherwise 3313 */ 3314 3315 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3316 { 3317 struct ata_port *ap = link->ap; 3318 struct ata_device *dev; 3319 int rc = 0, used_dma = 0, found = 0; 3320 3321 /* step 1: calculate xfer_mask */ 3322 ata_for_each_dev(dev, link, ENABLED) { 3323 unsigned long pio_mask, dma_mask; 3324 unsigned int mode_mask; 3325 3326 mode_mask = ATA_DMA_MASK_ATA; 3327 if (dev->class == ATA_DEV_ATAPI) 3328 mode_mask = ATA_DMA_MASK_ATAPI; 3329 else if (ata_id_is_cfa(dev->id)) 3330 mode_mask = ATA_DMA_MASK_CFA; 3331 3332 ata_dev_xfermask(dev); 3333 ata_force_xfermask(dev); 3334 3335 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 3336 3337 if (libata_dma_mask & mode_mask) 3338 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, 3339 dev->udma_mask); 3340 else 3341 dma_mask = 0; 3342 3343 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 3344 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3345 3346 found = 1; 3347 if (ata_dma_enabled(dev)) 3348 used_dma = 1; 3349 } 3350 if (!found) 3351 goto out; 3352 3353 /* step 2: always set host PIO timings */ 3354 ata_for_each_dev(dev, link, ENABLED) { 3355 if (dev->pio_mode == 0xff) { 3356 ata_dev_warn(dev, "no PIO support\n"); 3357 rc = -EINVAL; 3358 goto out; 3359 } 3360 3361 dev->xfer_mode = dev->pio_mode; 3362 dev->xfer_shift = ATA_SHIFT_PIO; 3363 if (ap->ops->set_piomode) 3364 ap->ops->set_piomode(ap, dev); 3365 } 3366 3367 /* step 3: set host DMA timings */ 3368 ata_for_each_dev(dev, link, ENABLED) { 3369 if (!ata_dma_enabled(dev)) 3370 continue; 3371 3372 dev->xfer_mode = dev->dma_mode; 3373 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); 3374 if (ap->ops->set_dmamode) 3375 ap->ops->set_dmamode(ap, dev); 3376 } 3377 3378 /* step 4: update devices' xfer mode */ 3379 ata_for_each_dev(dev, link, ENABLED) { 3380 rc = ata_dev_set_mode(dev); 3381 if (rc) 3382 goto out; 3383 } 3384 3385 /* Record simplex status. If we selected DMA then the other 3386 * host channels are not permitted to do so. 3387 */ 3388 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3389 ap->host->simplex_claimed = ap; 3390 3391 out: 3392 if (rc) 3393 *r_failed_dev = dev; 3394 return rc; 3395 } 3396 3397 /** 3398 * ata_wait_ready - wait for link to become ready 3399 * @link: link to be waited on 3400 * @deadline: deadline jiffies for the operation 3401 * @check_ready: callback to check link readiness 3402 * 3403 * Wait for @link to become ready. @check_ready should return 3404 * positive number if @link is ready, 0 if it isn't, -ENODEV if 3405 * link doesn't seem to be occupied, other errno for other error 3406 * conditions. 3407 * 3408 * Transient -ENODEV conditions are allowed for 3409 * ATA_TMOUT_FF_WAIT. 3410 * 3411 * LOCKING: 3412 * EH context. 3413 * 3414 * RETURNS: 3415 * 0 if @linke is ready before @deadline; otherwise, -errno. 3416 */ 3417 int ata_wait_ready(struct ata_link *link, unsigned long deadline, 3418 int (*check_ready)(struct ata_link *link)) 3419 { 3420 unsigned long start = jiffies; 3421 unsigned long nodev_deadline; 3422 int warned = 0; 3423 3424 /* choose which 0xff timeout to use, read comment in libata.h */ 3425 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) 3426 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); 3427 else 3428 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); 3429 3430 /* Slave readiness can't be tested separately from master. 
On 3431 * M/S emulation configuration, this function should be called 3432 * only on the master and it will handle both master and slave. 3433 */ 3434 WARN_ON(link == link->ap->slave_link); 3435 3436 if (time_after(nodev_deadline, deadline)) 3437 nodev_deadline = deadline; 3438 3439 while (1) { 3440 unsigned long now = jiffies; 3441 int ready, tmp; 3442 3443 ready = tmp = check_ready(link); 3444 if (ready > 0) 3445 return 0; 3446 3447 /* 3448 * -ENODEV could be transient. Ignore -ENODEV if link 3449 * is online. Also, some SATA devices take a long 3450 * time to clear 0xff after reset. Wait for 3451 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't 3452 * offline. 3453 * 3454 * Note that some PATA controllers (pata_ali) explode 3455 * if status register is read more than once when 3456 * there's no device attached. 3457 */ 3458 if (ready == -ENODEV) { 3459 if (ata_link_online(link)) 3460 ready = 0; 3461 else if ((link->ap->flags & ATA_FLAG_SATA) && 3462 !ata_link_offline(link) && 3463 time_before(now, nodev_deadline)) 3464 ready = 0; 3465 } 3466 3467 if (ready) 3468 return ready; 3469 if (time_after(now, deadline)) 3470 return -EBUSY; 3471 3472 if (!warned && time_after(now, start + 5 * HZ) && 3473 (deadline - now > 3 * HZ)) { 3474 ata_link_warn(link, 3475 "link is slow to respond, please be patient " 3476 "(ready=%d)\n", tmp); 3477 warned = 1; 3478 } 3479 3480 ata_msleep(link->ap, 50); 3481 } 3482 } 3483 3484 /** 3485 * ata_wait_after_reset - wait for link to become ready after reset 3486 * @link: link to be waited on 3487 * @deadline: deadline jiffies for the operation 3488 * @check_ready: callback to check link readiness 3489 * 3490 * Wait for @link to become ready after reset. 3491 * 3492 * LOCKING: 3493 * EH context. 3494 * 3495 * RETURNS: 3496 * 0 if @linke is ready before @deadline; otherwise, -errno. 3497 */ 3498 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, 3499 int (*check_ready)(struct ata_link *link)) 3500 { 3501 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); 3502 3503 return ata_wait_ready(link, deadline, check_ready); 3504 } 3505 3506 /** 3507 * sata_link_debounce - debounce SATA phy status 3508 * @link: ATA link to debounce SATA phy status for 3509 * @params: timing parameters { interval, duratinon, timeout } in msec 3510 * @deadline: deadline jiffies for the operation 3511 * 3512 * Make sure SStatus of @link reaches stable state, determined by 3513 * holding the same value where DET is not 1 for @duration polled 3514 * every @interval, before @timeout. Timeout constraints the 3515 * beginning of the stable state. Because DET gets stuck at 1 on 3516 * some controllers after hot unplugging, this functions waits 3517 * until timeout then returns 0 if DET is stable at 1. 3518 * 3519 * @timeout is further limited by @deadline. The sooner of the 3520 * two is used. 3521 * 3522 * LOCKING: 3523 * Kernel thread context (may sleep) 3524 * 3525 * RETURNS: 3526 * 0 on success, -errno on failure. 
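 *
 * Illustration with hypothetical numbers (not one of the standard
 * libata debounce tables): @params = { 10, 100, 1000 } means SStatus
 * is sampled every 10ms, DET must hold the same non-1 value for 100ms
 * to count as stable, and the wait is capped at 1000ms or at
 * @deadline, whichever comes first.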
3527 */ 3528 int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3529 unsigned long deadline) 3530 { 3531 unsigned long interval = params[0]; 3532 unsigned long duration = params[1]; 3533 unsigned long last_jiffies, t; 3534 u32 last, cur; 3535 int rc; 3536 3537 t = ata_deadline(jiffies, params[2]); 3538 if (time_before(t, deadline)) 3539 deadline = t; 3540 3541 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3542 return rc; 3543 cur &= 0xf; 3544 3545 last = cur; 3546 last_jiffies = jiffies; 3547 3548 while (1) { 3549 ata_msleep(link->ap, interval); 3550 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3551 return rc; 3552 cur &= 0xf; 3553 3554 /* DET stable? */ 3555 if (cur == last) { 3556 if (cur == 1 && time_before(jiffies, deadline)) 3557 continue; 3558 if (time_after(jiffies, 3559 ata_deadline(last_jiffies, duration))) 3560 return 0; 3561 continue; 3562 } 3563 3564 /* unstable, start over */ 3565 last = cur; 3566 last_jiffies = jiffies; 3567 3568 /* Check deadline. If debouncing failed, return 3569 * -EPIPE to tell upper layer to lower link speed. 3570 */ 3571 if (time_after(jiffies, deadline)) 3572 return -EPIPE; 3573 } 3574 } 3575 3576 /** 3577 * sata_link_resume - resume SATA link 3578 * @link: ATA link to resume SATA 3579 * @params: timing parameters { interval, duratinon, timeout } in msec 3580 * @deadline: deadline jiffies for the operation 3581 * 3582 * Resume SATA phy @link and debounce it. 3583 * 3584 * LOCKING: 3585 * Kernel thread context (may sleep) 3586 * 3587 * RETURNS: 3588 * 0 on success, -errno on failure. 3589 */ 3590 int sata_link_resume(struct ata_link *link, const unsigned long *params, 3591 unsigned long deadline) 3592 { 3593 int tries = ATA_LINK_RESUME_TRIES; 3594 u32 scontrol, serror; 3595 int rc; 3596 3597 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3598 return rc; 3599 3600 /* 3601 * Writes to SControl sometimes get ignored under certain 3602 * controllers (ata_piix SIDPR). Make sure DET actually is 3603 * cleared. 3604 */ 3605 do { 3606 scontrol = (scontrol & 0x0f0) | 0x300; 3607 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3608 return rc; 3609 /* 3610 * Some PHYs react badly if SStatus is pounded 3611 * immediately after resuming. Delay 200ms before 3612 * debouncing. 3613 */ 3614 ata_msleep(link->ap, 200); 3615 3616 /* is SControl restored correctly? */ 3617 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3618 return rc; 3619 } while ((scontrol & 0xf0f) != 0x300 && --tries); 3620 3621 if ((scontrol & 0xf0f) != 0x300) { 3622 ata_link_warn(link, "failed to resume link (SControl %X)\n", 3623 scontrol); 3624 return 0; 3625 } 3626 3627 if (tries < ATA_LINK_RESUME_TRIES) 3628 ata_link_warn(link, "link resume succeeded after %d retries\n", 3629 ATA_LINK_RESUME_TRIES - tries); 3630 3631 if ((rc = sata_link_debounce(link, params, deadline))) 3632 return rc; 3633 3634 /* clear SError, some PHYs require this even for SRST to work */ 3635 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) 3636 rc = sata_scr_write(link, SCR_ERROR, serror); 3637 3638 return rc != -EINVAL ? rc : 0; 3639 } 3640 3641 /** 3642 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields 3643 * @link: ATA link to manipulate SControl for 3644 * @policy: LPM policy to configure 3645 * @spm_wakeup: initiate LPM transition to active state 3646 * 3647 * Manipulate the IPM field of the SControl register of @link 3648 * according to @policy. 
If @policy is ATA_LPM_MAX_POWER and 3649 * @spm_wakeup is %true, the SPM field is manipulated to wake up 3650 * the link. This function also clears PHYRDY_CHG before 3651 * returning. 3652 * 3653 * LOCKING: 3654 * EH context. 3655 * 3656 * RETURNS: 3657 * 0 on succes, -errno otherwise. 3658 */ 3659 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, 3660 bool spm_wakeup) 3661 { 3662 struct ata_eh_context *ehc = &link->eh_context; 3663 bool woken_up = false; 3664 u32 scontrol; 3665 int rc; 3666 3667 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 3668 if (rc) 3669 return rc; 3670 3671 switch (policy) { 3672 case ATA_LPM_MAX_POWER: 3673 /* disable all LPM transitions */ 3674 scontrol |= (0x7 << 8); 3675 /* initiate transition to active state */ 3676 if (spm_wakeup) { 3677 scontrol |= (0x4 << 12); 3678 woken_up = true; 3679 } 3680 break; 3681 case ATA_LPM_MED_POWER: 3682 /* allow LPM to PARTIAL */ 3683 scontrol &= ~(0x1 << 8); 3684 scontrol |= (0x6 << 8); 3685 break; 3686 case ATA_LPM_MIN_POWER: 3687 if (ata_link_nr_enabled(link) > 0) 3688 /* no restrictions on LPM transitions */ 3689 scontrol &= ~(0x7 << 8); 3690 else { 3691 /* empty port, power off */ 3692 scontrol &= ~0xf; 3693 scontrol |= (0x1 << 2); 3694 } 3695 break; 3696 default: 3697 WARN_ON(1); 3698 } 3699 3700 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 3701 if (rc) 3702 return rc; 3703 3704 /* give the link time to transit out of LPM state */ 3705 if (woken_up) 3706 msleep(10); 3707 3708 /* clear PHYRDY_CHG from SError */ 3709 ehc->i.serror &= ~SERR_PHYRDY_CHG; 3710 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); 3711 } 3712 3713 /** 3714 * ata_std_prereset - prepare for reset 3715 * @link: ATA link to be reset 3716 * @deadline: deadline jiffies for the operation 3717 * 3718 * @link is about to be reset. Initialize it. Failure from 3719 * prereset makes libata abort whole reset sequence and give up 3720 * that port, so prereset should be best-effort. It does its 3721 * best to prepare for reset sequence but if things go wrong, it 3722 * should just whine, not fail. 3723 * 3724 * LOCKING: 3725 * Kernel thread context (may sleep) 3726 * 3727 * RETURNS: 3728 * 0 on success, -errno otherwise. 3729 */ 3730 int ata_std_prereset(struct ata_link *link, unsigned long deadline) 3731 { 3732 struct ata_port *ap = link->ap; 3733 struct ata_eh_context *ehc = &link->eh_context; 3734 const unsigned long *timing = sata_ehc_deb_timing(ehc); 3735 int rc; 3736 3737 /* if we're about to do hardreset, nothing more to do */ 3738 if (ehc->i.action & ATA_EH_HARDRESET) 3739 return 0; 3740 3741 /* if SATA, resume link */ 3742 if (ap->flags & ATA_FLAG_SATA) { 3743 rc = sata_link_resume(link, timing, deadline); 3744 /* whine about phy resume failure but proceed */ 3745 if (rc && rc != -EOPNOTSUPP) 3746 ata_link_warn(link, 3747 "failed to resume link for reset (errno=%d)\n", 3748 rc); 3749 } 3750 3751 /* no point in trying softreset on offline link */ 3752 if (ata_phys_link_offline(link)) 3753 ehc->i.action &= ~ATA_EH_SOFTRESET; 3754 3755 return 0; 3756 } 3757 3758 /** 3759 * sata_link_hardreset - reset link via SATA phy reset 3760 * @link: link to reset 3761 * @timing: timing parameters { interval, duratinon, timeout } in msec 3762 * @deadline: deadline jiffies for the operation 3763 * @online: optional out parameter indicating link onlineness 3764 * @check_ready: optional callback to check link readiness 3765 * 3766 * SATA phy-reset @link using DET bits of SControl register. 
3767 * After hardreset, link readiness is waited upon using 3768 * ata_wait_ready() if @check_ready is specified. LLDs are 3769 * allowed to not specify @check_ready and wait itself after this 3770 * function returns. Device classification is LLD's 3771 * responsibility. 3772 * 3773 * *@online is set to one iff reset succeeded and @link is online 3774 * after reset. 3775 * 3776 * LOCKING: 3777 * Kernel thread context (may sleep) 3778 * 3779 * RETURNS: 3780 * 0 on success, -errno otherwise. 3781 */ 3782 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 3783 unsigned long deadline, 3784 bool *online, int (*check_ready)(struct ata_link *)) 3785 { 3786 u32 scontrol; 3787 int rc; 3788 3789 DPRINTK("ENTER\n"); 3790 3791 if (online) 3792 *online = false; 3793 3794 if (sata_set_spd_needed(link)) { 3795 /* SATA spec says nothing about how to reconfigure 3796 * spd. To be on the safe side, turn off phy during 3797 * reconfiguration. This works for at least ICH7 AHCI 3798 * and Sil3124. 3799 */ 3800 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3801 goto out; 3802 3803 scontrol = (scontrol & 0x0f0) | 0x304; 3804 3805 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3806 goto out; 3807 3808 sata_set_spd(link); 3809 } 3810 3811 /* issue phy wake/reset */ 3812 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3813 goto out; 3814 3815 scontrol = (scontrol & 0x0f0) | 0x301; 3816 3817 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 3818 goto out; 3819 3820 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3821 * 10.4.2 says at least 1 ms. 3822 */ 3823 ata_msleep(link->ap, 1); 3824 3825 /* bring link back */ 3826 rc = sata_link_resume(link, timing, deadline); 3827 if (rc) 3828 goto out; 3829 /* if link is offline nothing more to do */ 3830 if (ata_phys_link_offline(link)) 3831 goto out; 3832 3833 /* Link is online. From this point, -ENODEV too is an error. */ 3834 if (online) 3835 *online = true; 3836 3837 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { 3838 /* If PMP is supported, we have to do follow-up SRST. 3839 * Some PMPs don't send D2H Reg FIS after hardreset if 3840 * the first port is empty. Wait only for 3841 * ATA_TMOUT_PMP_SRST_WAIT. 3842 */ 3843 if (check_ready) { 3844 unsigned long pmp_deadline; 3845 3846 pmp_deadline = ata_deadline(jiffies, 3847 ATA_TMOUT_PMP_SRST_WAIT); 3848 if (time_after(pmp_deadline, deadline)) 3849 pmp_deadline = deadline; 3850 ata_wait_ready(link, pmp_deadline, check_ready); 3851 } 3852 rc = -EAGAIN; 3853 goto out; 3854 } 3855 3856 rc = 0; 3857 if (check_ready) 3858 rc = ata_wait_ready(link, deadline, check_ready); 3859 out: 3860 if (rc && rc != -EAGAIN) { 3861 /* online is set iff link is online && reset succeeded */ 3862 if (online) 3863 *online = false; 3864 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); 3865 } 3866 DPRINTK("EXIT, rc=%d\n", rc); 3867 return rc; 3868 } 3869 3870 /** 3871 * sata_std_hardreset - COMRESET w/o waiting or classification 3872 * @link: link to reset 3873 * @class: resulting class of attached device 3874 * @deadline: deadline jiffies for the operation 3875 * 3876 * Standard SATA COMRESET w/o waiting or classification. 3877 * 3878 * LOCKING: 3879 * Kernel thread context (may sleep) 3880 * 3881 * RETURNS: 3882 * 0 if link offline, -EAGAIN if link online, -errno on errors. 
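 *
 * A driver that also wants readiness waiting and classification can
 * build its own hardreset on top of sata_link_hardreset(); a rough
 * sketch, where my_check_ready() and my_classify() are hypothetical
 * driver helpers:
 *
 *	static int my_hardreset(struct ata_link *link, unsigned int *class,
 *				unsigned long deadline)
 *	{
 *		const unsigned long *timing =
 *				sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *		int rc;
 *
 *		rc = sata_link_hardreset(link, timing, deadline, &online,
 *					 my_check_ready);
 *		if (online)
 *			*class = my_classify(link);
 *		return rc;
 *	}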
3883 */ 3884 int sata_std_hardreset(struct ata_link *link, unsigned int *class, 3885 unsigned long deadline) 3886 { 3887 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 3888 bool online; 3889 int rc; 3890 3891 /* do hardreset */ 3892 rc = sata_link_hardreset(link, timing, deadline, &online, NULL); 3893 return online ? -EAGAIN : rc; 3894 } 3895 3896 /** 3897 * ata_std_postreset - standard postreset callback 3898 * @link: the target ata_link 3899 * @classes: classes of attached devices 3900 * 3901 * This function is invoked after a successful reset. Note that 3902 * the device might have been reset more than once using 3903 * different reset methods before postreset is invoked. 3904 * 3905 * LOCKING: 3906 * Kernel thread context (may sleep) 3907 */ 3908 void ata_std_postreset(struct ata_link *link, unsigned int *classes) 3909 { 3910 u32 serror; 3911 3912 DPRINTK("ENTER\n"); 3913 3914 /* reset complete, clear SError */ 3915 if (!sata_scr_read(link, SCR_ERROR, &serror)) 3916 sata_scr_write(link, SCR_ERROR, serror); 3917 3918 /* print link status */ 3919 sata_print_link_status(link); 3920 3921 DPRINTK("EXIT\n"); 3922 } 3923 3924 /** 3925 * ata_dev_same_device - Determine whether new ID matches configured device 3926 * @dev: device to compare against 3927 * @new_class: class of the new device 3928 * @new_id: IDENTIFY page of the new device 3929 * 3930 * Compare @new_class and @new_id against @dev and determine 3931 * whether @dev is the device indicated by @new_class and 3932 * @new_id. 3933 * 3934 * LOCKING: 3935 * None. 3936 * 3937 * RETURNS: 3938 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 3939 */ 3940 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 3941 const u16 *new_id) 3942 { 3943 const u16 *old_id = dev->id; 3944 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 3945 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 3946 3947 if (dev->class != new_class) { 3948 ata_dev_info(dev, "class mismatch %d != %d\n", 3949 dev->class, new_class); 3950 return 0; 3951 } 3952 3953 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 3954 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 3955 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 3956 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 3957 3958 if (strcmp(model[0], model[1])) { 3959 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n", 3960 model[0], model[1]); 3961 return 0; 3962 } 3963 3964 if (strcmp(serial[0], serial[1])) { 3965 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n", 3966 serial[0], serial[1]); 3967 return 0; 3968 } 3969 3970 return 1; 3971 } 3972 3973 /** 3974 * ata_dev_reread_id - Re-read IDENTIFY data 3975 * @dev: target ATA device 3976 * @readid_flags: read ID flags 3977 * 3978 * Re-read IDENTIFY page and make sure @dev is still attached to 3979 * the port. 3980 * 3981 * LOCKING: 3982 * Kernel thread context (may sleep) 3983 * 3984 * RETURNS: 3985 * 0 on success, negative errno otherwise 3986 */ 3987 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 3988 { 3989 unsigned int class = dev->class; 3990 u16 *id = (void *)dev->link->ap->sector_buf; 3991 int rc; 3992 3993 /* read ID data */ 3994 rc = ata_dev_read_id(dev, &class, readid_flags, id); 3995 if (rc) 3996 return rc; 3997 3998 /* is the device still there? 
*/ 3999 if (!ata_dev_same_device(dev, class, id)) 4000 return -ENODEV; 4001 4002 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 4003 return 0; 4004 } 4005 4006 /** 4007 * ata_dev_revalidate - Revalidate ATA device 4008 * @dev: device to revalidate 4009 * @new_class: new class code 4010 * @readid_flags: read ID flags 4011 * 4012 * Re-read IDENTIFY page, make sure @dev is still attached to the 4013 * port and reconfigure it according to the new IDENTIFY page. 4014 * 4015 * LOCKING: 4016 * Kernel thread context (may sleep) 4017 * 4018 * RETURNS: 4019 * 0 on success, negative errno otherwise 4020 */ 4021 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 4022 unsigned int readid_flags) 4023 { 4024 u64 n_sectors = dev->n_sectors; 4025 u64 n_native_sectors = dev->n_native_sectors; 4026 int rc; 4027 4028 if (!ata_dev_enabled(dev)) 4029 return -ENODEV; 4030 4031 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 4032 if (ata_class_enabled(new_class) && 4033 new_class != ATA_DEV_ATA && 4034 new_class != ATA_DEV_ATAPI && 4035 new_class != ATA_DEV_ZAC && 4036 new_class != ATA_DEV_SEMB) { 4037 ata_dev_info(dev, "class mismatch %u != %u\n", 4038 dev->class, new_class); 4039 rc = -ENODEV; 4040 goto fail; 4041 } 4042 4043 /* re-read ID */ 4044 rc = ata_dev_reread_id(dev, readid_flags); 4045 if (rc) 4046 goto fail; 4047 4048 /* configure device according to the new ID */ 4049 rc = ata_dev_configure(dev); 4050 if (rc) 4051 goto fail; 4052 4053 /* verify n_sectors hasn't changed */ 4054 if (dev->class != ATA_DEV_ATA || !n_sectors || 4055 dev->n_sectors == n_sectors) 4056 return 0; 4057 4058 /* n_sectors has changed */ 4059 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n", 4060 (unsigned long long)n_sectors, 4061 (unsigned long long)dev->n_sectors); 4062 4063 /* 4064 * Something could have caused HPA to be unlocked 4065 * involuntarily. If n_native_sectors hasn't changed and the 4066 * new size matches it, keep the device. 4067 */ 4068 if (dev->n_native_sectors == n_native_sectors && 4069 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { 4070 ata_dev_warn(dev, 4071 "new n_sectors matches native, probably " 4072 "late HPA unlock, n_sectors updated\n"); 4073 /* use the larger n_sectors */ 4074 return 0; 4075 } 4076 4077 /* 4078 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try 4079 * unlocking HPA in those cases. 
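 *
 * (Reading of the code below: -EIO rather than -ENODEV is returned so
 * that EH retries the device, and with ATA_DFLAG_UNLOCK_HPA set the
 * follow-up configuration can attempt the unlock.)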
4080 * 4081 * https://bugzilla.kernel.org/show_bug.cgi?id=15396 4082 */ 4083 if (dev->n_native_sectors == n_native_sectors && 4084 dev->n_sectors < n_sectors && n_sectors == n_native_sectors && 4085 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { 4086 ata_dev_warn(dev, 4087 "old n_sectors matches native, probably " 4088 "late HPA lock, will try to unlock HPA\n"); 4089 /* try unlocking HPA */ 4090 dev->flags |= ATA_DFLAG_UNLOCK_HPA; 4091 rc = -EIO; 4092 } else 4093 rc = -ENODEV; 4094 4095 /* restore original n_[native_]sectors and fail */ 4096 dev->n_native_sectors = n_native_sectors; 4097 dev->n_sectors = n_sectors; 4098 fail: 4099 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc); 4100 return rc; 4101 } 4102 4103 struct ata_blacklist_entry { 4104 const char *model_num; 4105 const char *model_rev; 4106 unsigned long horkage; 4107 }; 4108 4109 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4110 /* Devices with DMA related problems under Linux */ 4111 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4112 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4113 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4114 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4115 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4116 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4117 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4118 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4119 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4120 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA }, 4121 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4122 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4123 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4124 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4125 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4126 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA }, 4127 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4128 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4129 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4130 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4131 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4132 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4133 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4134 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4135 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4136 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4137 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4138 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4139 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4140 /* Odd clown on sil3726/4726 PMPs */ 4141 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4142 4143 /* Weird ATAPI devices */ 4144 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4145 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4146 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4147 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4148 4149 /* Devices we expect to fail diagnostics */ 4150 4151 /* Devices where NCQ should be avoided */ 4152 /* NCQ is slow */ 4153 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4154 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4155 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4156 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4157 /* NCQ is broken */ 4158 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4159 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4160 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4161 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4162 { "OCZ 
CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4163 4164 /* Seagate NCQ + FLUSH CACHE firmware bug */ 4165 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4166 ATA_HORKAGE_FIRMWARE_WARN }, 4167 4168 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4169 ATA_HORKAGE_FIRMWARE_WARN }, 4170 4171 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4172 ATA_HORKAGE_FIRMWARE_WARN }, 4173 4174 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4175 ATA_HORKAGE_FIRMWARE_WARN }, 4176 4177 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ 4178 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4179 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4180 4181 /* Blacklist entries taken from Silicon Image 3124/3132 4182 Windows driver .inf file - also several Linux problem reports */ 4183 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4184 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4185 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4186 4187 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ 4188 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, 4189 4190 /* devices which puke on READ_NATIVE_MAX */ 4191 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4192 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4193 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4194 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4195 4196 /* this one allows HPA unlocking but fails IOs on the area */ 4197 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, 4198 4199 /* Devices which report 1 sector over size HPA */ 4200 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4201 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4202 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4203 4204 /* Devices which get the IVB wrong */ 4205 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4206 /* Maybe we should just blacklist TSSTcorp... */ 4207 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, }, 4208 4209 /* Devices that do not need bridging limits applied */ 4210 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4211 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4212 4213 /* Devices which aren't very happy with higher link speeds */ 4214 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4215 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, }, 4216 4217 /* 4218 * Devices which choke on SETXFER. Applies only if both the 4219 * device and controller are SATA. 
4220 */ 4221 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, 4222 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER }, 4223 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER }, 4224 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4225 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4226 4227 /* devices that don't properly handle queued TRIM commands */ 4228 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4229 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4230 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4231 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4232 { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4233 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4234 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4235 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4236 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4237 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4238 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4239 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4240 4241 /* 4242 * As defined, the DRAT (Deterministic Read After Trim) and RZAT 4243 * (Return Zero After Trim) flags in the ATA Command Set are 4244 * unreliable in the sense that they only define what happens if 4245 * the device successfully executed the DSM TRIM command. TRIM 4246 * is only advisory, however, and the device is free to silently 4247 * ignore all or parts of the request. 4248 * 4249 * Whitelist drives that are known to reliably return zeroes 4250 * after TRIM. 4251 */ 4252 4253 /* 4254 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude 4255 * that model before whitelisting all other intel SSDs. 4256 */ 4257 { "INTEL*SSDSC2MH*", NULL, 0, }, 4258 4259 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4260 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4261 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4262 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4263 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4264 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4265 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4266 4267 /* 4268 * Some WD SATA-I drives spin up and down erratically when the link 4269 * is put into the slumber mode. We don't have full list of the 4270 * affected devices. Disable LPM if the device matches one of the 4271 * known prefixes and is SATA-1. As a side effect LPM partial is 4272 * lost too. 
4273 * 4274 * https://bugzilla.kernel.org/show_bug.cgi?id=57211 4275 */ 4276 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4277 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4278 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4279 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4280 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4281 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4282 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4283 4284 /* End Marker */ 4285 { } 4286 }; 4287 4288 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4289 { 4290 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4291 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4292 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4293 4294 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4295 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4296 4297 while (ad->model_num) { 4298 if (glob_match(ad->model_num, model_num)) { 4299 if (ad->model_rev == NULL) 4300 return ad->horkage; 4301 if (glob_match(ad->model_rev, model_rev)) 4302 return ad->horkage; 4303 } 4304 ad++; 4305 } 4306 return 0; 4307 } 4308 4309 static int ata_dma_blacklisted(const struct ata_device *dev) 4310 { 4311 /* We don't support polling DMA. 4312 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4313 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4314 */ 4315 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4316 (dev->flags & ATA_DFLAG_CDB_INTR)) 4317 return 1; 4318 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4319 } 4320 4321 /** 4322 * ata_is_40wire - check drive side detection 4323 * @dev: device 4324 * 4325 * Perform drive side detection decoding, allowing for device vendors 4326 * who can't follow the documentation. 4327 */ 4328 4329 static int ata_is_40wire(struct ata_device *dev) 4330 { 4331 if (dev->horkage & ATA_HORKAGE_IVB) 4332 return ata_drive_40wire_relaxed(dev->id); 4333 return ata_drive_40wire(dev->id); 4334 } 4335 4336 /** 4337 * cable_is_40wire - 40/80/SATA decider 4338 * @ap: port to consider 4339 * 4340 * This function encapsulates the policy for speed management 4341 * in one place. At the moment we don't cache the result but 4342 * there is a good case for setting ap->cbl to the result when 4343 * we are called with unknown cables (and figuring out if it 4344 * impacts hotplug at all). 4345 * 4346 * Return 1 if the cable appears to be 40 wire. 4347 */ 4348 4349 static int cable_is_40wire(struct ata_port *ap) 4350 { 4351 struct ata_link *link; 4352 struct ata_device *dev; 4353 4354 /* If the controller thinks we are 40 wire, we are. */ 4355 if (ap->cbl == ATA_CBL_PATA40) 4356 return 1; 4357 4358 /* If the controller thinks we are 80 wire, we are. */ 4359 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4360 return 0; 4361 4362 /* If the system is known to be 40 wire short cable (eg 4363 * laptop), then we allow 80 wire modes even if the drive 4364 * isn't sure. 4365 */ 4366 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4367 return 0; 4368 4369 /* If the controller doesn't know, we scan. 4370 * 4371 * Note: We look for all 40 wire detects at this point. 
Any 4372 * 80 wire detect is taken to be 80 wire cable because 4373 * - in many setups only the one drive (slave if present) will 4374 * give a valid detect 4375 * - if you have a non detect capable drive you don't want it 4376 * to colour the choice 4377 */ 4378 ata_for_each_link(link, ap, EDGE) { 4379 ata_for_each_dev(dev, link, ENABLED) { 4380 if (!ata_is_40wire(dev)) 4381 return 0; 4382 } 4383 } 4384 return 1; 4385 } 4386 4387 /** 4388 * ata_dev_xfermask - Compute supported xfermask of the given device 4389 * @dev: Device to compute xfermask for 4390 * 4391 * Compute supported xfermask of @dev and store it in 4392 * dev->*_mask. This function is responsible for applying all 4393 * known limits including host controller limits, device 4394 * blacklist, etc... 4395 * 4396 * LOCKING: 4397 * None. 4398 */ 4399 static void ata_dev_xfermask(struct ata_device *dev) 4400 { 4401 struct ata_link *link = dev->link; 4402 struct ata_port *ap = link->ap; 4403 struct ata_host *host = ap->host; 4404 unsigned long xfer_mask; 4405 4406 /* controller modes available */ 4407 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4408 ap->mwdma_mask, ap->udma_mask); 4409 4410 /* drive modes available */ 4411 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4412 dev->mwdma_mask, dev->udma_mask); 4413 xfer_mask &= ata_id_xfermask(dev->id); 4414 4415 /* 4416 * CFA Advanced TrueIDE timings are not allowed on a shared 4417 * cable 4418 */ 4419 if (ata_dev_pair(dev)) { 4420 /* No PIO5 or PIO6 */ 4421 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4422 /* No MWDMA3 or MWDMA 4 */ 4423 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4424 } 4425 4426 if (ata_dma_blacklisted(dev)) { 4427 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4428 ata_dev_warn(dev, 4429 "device is on DMA blacklist, disabling DMA\n"); 4430 } 4431 4432 if ((host->flags & ATA_HOST_SIMPLEX) && 4433 host->simplex_claimed && host->simplex_claimed != ap) { 4434 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4435 ata_dev_warn(dev, 4436 "simplex DMA is claimed by other device, disabling DMA\n"); 4437 } 4438 4439 if (ap->flags & ATA_FLAG_NO_IORDY) 4440 xfer_mask &= ata_pio_mask_no_iordy(dev); 4441 4442 if (ap->ops->mode_filter) 4443 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4444 4445 /* Apply cable rule here. Don't apply it early because when 4446 * we handle hot plug the cable type can itself change. 4447 * Check this last so that we know if the transfer rate was 4448 * solely limited by the cable. 4449 * Unknown or 80 wire cables reported host side are checked 4450 * drive side as well. Cases where we know a 40wire cable 4451 * is used safely for 80 are not checked here. 4452 */ 4453 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4454 /* UDMA/44 or higher would be available */ 4455 if (cable_is_40wire(ap)) { 4456 ata_dev_warn(dev, 4457 "limited to UDMA/33 due to 40-wire cable\n"); 4458 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4459 } 4460 4461 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4462 &dev->mwdma_mask, &dev->udma_mask); 4463 } 4464 4465 /** 4466 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4467 * @dev: Device to which command will be sent 4468 * 4469 * Issue SET FEATURES - XFER MODE command to device @dev 4470 * on port @ap. 4471 * 4472 * LOCKING: 4473 * PCI/etc. bus probe sem. 4474 * 4475 * RETURNS: 4476 * 0 on success, AC_ERR_* mask otherwise. 
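 *
 * Usage sketch (assumed, mirroring typical callers): the returned
 * mask is simply checked and reported, e.g.
 *
 *	err_mask = ata_dev_set_xfermode(dev);
 *	if (err_mask)
 *		ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n",
 *			    err_mask);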
4477 */ 4478 4479 static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4480 { 4481 struct ata_taskfile tf; 4482 unsigned int err_mask; 4483 4484 /* set up set-features taskfile */ 4485 DPRINTK("set features - xfer mode\n"); 4486 4487 /* Some controllers and ATAPI devices show flaky interrupt 4488 * behavior after setting xfer mode. Use polling instead. 4489 */ 4490 ata_tf_init(dev, &tf); 4491 tf.command = ATA_CMD_SET_FEATURES; 4492 tf.feature = SETFEATURES_XFER; 4493 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4494 tf.protocol = ATA_PROT_NODATA; 4495 /* If we are using IORDY we must send the mode setting command */ 4496 if (ata_pio_need_iordy(dev)) 4497 tf.nsect = dev->xfer_mode; 4498 /* If the device has IORDY and the controller does not - turn it off */ 4499 else if (ata_id_has_iordy(dev->id)) 4500 tf.nsect = 0x01; 4501 else /* In the ancient relic department - skip all of this */ 4502 return 0; 4503 4504 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4505 4506 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4507 return err_mask; 4508 } 4509 4510 /** 4511 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 4512 * @dev: Device to which command will be sent 4513 * @enable: Whether to enable or disable the feature 4514 * @feature: The sector count represents the feature to set 4515 * 4516 * Issue SET FEATURES - SATA FEATURES command to device @dev 4517 * on port @ap with sector count 4518 * 4519 * LOCKING: 4520 * PCI/etc. bus probe sem. 4521 * 4522 * RETURNS: 4523 * 0 on success, AC_ERR_* mask otherwise. 4524 */ 4525 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) 4526 { 4527 struct ata_taskfile tf; 4528 unsigned int err_mask; 4529 4530 /* set up set-features taskfile */ 4531 DPRINTK("set features - SATA features\n"); 4532 4533 ata_tf_init(dev, &tf); 4534 tf.command = ATA_CMD_SET_FEATURES; 4535 tf.feature = enable; 4536 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4537 tf.protocol = ATA_PROT_NODATA; 4538 tf.nsect = feature; 4539 4540 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4541 4542 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4543 return err_mask; 4544 } 4545 EXPORT_SYMBOL_GPL(ata_dev_set_feature); 4546 4547 /** 4548 * ata_dev_init_params - Issue INIT DEV PARAMS command 4549 * @dev: Device to which command will be sent 4550 * @heads: Number of heads (taskfile parameter) 4551 * @sectors: Number of sectors (taskfile parameter) 4552 * 4553 * LOCKING: 4554 * Kernel thread context (may sleep) 4555 * 4556 * RETURNS: 4557 * 0 on success, AC_ERR_* mask otherwise. 4558 */ 4559 static unsigned int ata_dev_init_params(struct ata_device *dev, 4560 u16 heads, u16 sectors) 4561 { 4562 struct ata_taskfile tf; 4563 unsigned int err_mask; 4564 4565 /* Number of sectors per track 1-255. Number of heads 1-16 */ 4566 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4567 return AC_ERR_INVALID; 4568 4569 /* set up init dev params taskfile */ 4570 DPRINTK("init dev params \n"); 4571 4572 ata_tf_init(dev, &tf); 4573 tf.command = ATA_CMD_INIT_DEV_PARAMS; 4574 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4575 tf.protocol = ATA_PROT_NODATA; 4576 tf.nsect = sectors; 4577 tf.device |= (heads - 1) & 0x0f; /* max head = num. 
of heads - 1 */ 4578 4579 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4580 /* A clean abort indicates an original or just out of spec drive 4581 and we should continue as we issue the setup based on the 4582 drive reported working geometry */ 4583 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4584 err_mask = 0; 4585 4586 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4587 return err_mask; 4588 } 4589 4590 /** 4591 * ata_sg_clean - Unmap DMA memory associated with command 4592 * @qc: Command containing DMA memory to be released 4593 * 4594 * Unmap all mapped DMA memory associated with this command. 4595 * 4596 * LOCKING: 4597 * spin_lock_irqsave(host lock) 4598 */ 4599 void ata_sg_clean(struct ata_queued_cmd *qc) 4600 { 4601 struct ata_port *ap = qc->ap; 4602 struct scatterlist *sg = qc->sg; 4603 int dir = qc->dma_dir; 4604 4605 WARN_ON_ONCE(sg == NULL); 4606 4607 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4608 4609 if (qc->n_elem) 4610 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 4611 4612 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4613 qc->sg = NULL; 4614 } 4615 4616 /** 4617 * atapi_check_dma - Check whether ATAPI DMA can be supported 4618 * @qc: Metadata associated with taskfile to check 4619 * 4620 * Allow low-level driver to filter ATA PACKET commands, returning 4621 * a status indicating whether or not it is OK to use DMA for the 4622 * supplied PACKET command. 4623 * 4624 * LOCKING: 4625 * spin_lock_irqsave(host lock) 4626 * 4627 * RETURNS: 0 when ATAPI DMA can be used 4628 * nonzero otherwise 4629 */ 4630 int atapi_check_dma(struct ata_queued_cmd *qc) 4631 { 4632 struct ata_port *ap = qc->ap; 4633 4634 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4635 * few ATAPI devices choke on such DMA requests. 4636 */ 4637 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4638 unlikely(qc->nbytes & 15)) 4639 return 1; 4640 4641 if (ap->ops->check_atapi_dma) 4642 return ap->ops->check_atapi_dma(qc); 4643 4644 return 0; 4645 } 4646 4647 /** 4648 * ata_std_qc_defer - Check whether a qc needs to be deferred 4649 * @qc: ATA command in question 4650 * 4651 * Non-NCQ commands cannot run with any other command, NCQ or 4652 * not. As upper layer only knows the queue depth, we are 4653 * responsible for maintaining exclusion. This function checks 4654 * whether a new command @qc can be issued. 4655 * 4656 * LOCKING: 4657 * spin_lock_irqsave(host lock) 4658 * 4659 * RETURNS: 4660 * ATA_DEFER_* if deferring is needed, 0 otherwise. 4661 */ 4662 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4663 { 4664 struct ata_link *link = qc->dev->link; 4665 4666 if (qc->tf.protocol == ATA_PROT_NCQ) { 4667 if (!ata_tag_valid(link->active_tag)) 4668 return 0; 4669 } else { 4670 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4671 return 0; 4672 } 4673 4674 return ATA_DEFER_LINK; 4675 } 4676 4677 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4678 4679 /** 4680 * ata_sg_init - Associate command with scatter-gather table. 4681 * @qc: Command to be associated 4682 * @sg: Scatter-gather table. 4683 * @n_elem: Number of elements in s/g table. 4684 * 4685 * Initialize the data-related elements of queued_cmd @qc 4686 * to point to a scatter-gather table @sg, containing @n_elem 4687 * elements. 
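 *
 * Illustrative sketch (my_sglist, my_n_elem and my_xfer_len are
 * placeholders): a caller that already built an sg table wires it up
 * before issuing the qc:
 *
 *	ata_sg_init(qc, my_sglist, my_n_elem);
 *	qc->dma_dir = DMA_FROM_DEVICE;
 *	qc->nbytes = my_xfer_len;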
4688 * 4689 * LOCKING: 4690 * spin_lock_irqsave(host lock) 4691 */ 4692 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4693 unsigned int n_elem) 4694 { 4695 qc->sg = sg; 4696 qc->n_elem = n_elem; 4697 qc->cursg = qc->sg; 4698 } 4699 4700 /** 4701 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4702 * @qc: Command with scatter-gather table to be mapped. 4703 * 4704 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4705 * 4706 * LOCKING: 4707 * spin_lock_irqsave(host lock) 4708 * 4709 * RETURNS: 4710 * Zero on success, negative on error. 4711 * 4712 */ 4713 static int ata_sg_setup(struct ata_queued_cmd *qc) 4714 { 4715 struct ata_port *ap = qc->ap; 4716 unsigned int n_elem; 4717 4718 VPRINTK("ENTER, ata%u\n", ap->print_id); 4719 4720 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 4721 if (n_elem < 1) 4722 return -1; 4723 4724 DPRINTK("%d sg elements mapped\n", n_elem); 4725 qc->orig_n_elem = qc->n_elem; 4726 qc->n_elem = n_elem; 4727 qc->flags |= ATA_QCFLAG_DMAMAP; 4728 4729 return 0; 4730 } 4731 4732 /** 4733 * swap_buf_le16 - swap halves of 16-bit words in place 4734 * @buf: Buffer to swap 4735 * @buf_words: Number of 16-bit words in buffer. 4736 * 4737 * Swap halves of 16-bit words if needed to convert from 4738 * little-endian byte order to native cpu byte order, or 4739 * vice-versa. 4740 * 4741 * LOCKING: 4742 * Inherited from caller. 4743 */ 4744 void swap_buf_le16(u16 *buf, unsigned int buf_words) 4745 { 4746 #ifdef __BIG_ENDIAN 4747 unsigned int i; 4748 4749 for (i = 0; i < buf_words; i++) 4750 buf[i] = le16_to_cpu(buf[i]); 4751 #endif /* __BIG_ENDIAN */ 4752 } 4753 4754 /** 4755 * ata_qc_new_init - Request an available ATA command, and initialize it 4756 * @dev: Device from whom we request an available command structure 4757 * 4758 * LOCKING: 4759 * None. 4760 */ 4761 4762 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag) 4763 { 4764 struct ata_port *ap = dev->link->ap; 4765 struct ata_queued_cmd *qc; 4766 4767 /* no command while frozen */ 4768 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4769 return NULL; 4770 4771 /* libsas case */ 4772 if (ap->flags & ATA_FLAG_SAS_HOST) { 4773 tag = ata_sas_allocate_tag(ap); 4774 if (tag < 0) 4775 return NULL; 4776 } 4777 4778 qc = __ata_qc_from_tag(ap, tag); 4779 qc->tag = tag; 4780 qc->scsicmd = NULL; 4781 qc->ap = ap; 4782 qc->dev = dev; 4783 4784 ata_qc_reinit(qc); 4785 4786 return qc; 4787 } 4788 4789 /** 4790 * ata_qc_free - free unused ata_queued_cmd 4791 * @qc: Command to complete 4792 * 4793 * Designed to free unused ata_queued_cmd object 4794 * in case something prevents using it. 
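 *
 * Typical pattern (sketch only; setup_failed is just a placeholder):
 *
 *	qc = ata_qc_new_init(dev, tag);
 *	if (qc && setup_failed) {
 *		ata_qc_free(qc);
 *		qc = NULL;
 *	}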
4795 * 4796 * LOCKING: 4797 * spin_lock_irqsave(host lock) 4798 */ 4799 void ata_qc_free(struct ata_queued_cmd *qc) 4800 { 4801 struct ata_port *ap; 4802 unsigned int tag; 4803 4804 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4805 ap = qc->ap; 4806 4807 qc->flags = 0; 4808 tag = qc->tag; 4809 if (likely(ata_tag_valid(tag))) { 4810 qc->tag = ATA_TAG_POISON; 4811 if (ap->flags & ATA_FLAG_SAS_HOST) 4812 ata_sas_free_tag(tag, ap); 4813 } 4814 } 4815 4816 void __ata_qc_complete(struct ata_queued_cmd *qc) 4817 { 4818 struct ata_port *ap; 4819 struct ata_link *link; 4820 4821 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4822 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4823 ap = qc->ap; 4824 link = qc->dev->link; 4825 4826 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4827 ata_sg_clean(qc); 4828 4829 /* command should be marked inactive atomically with qc completion */ 4830 if (qc->tf.protocol == ATA_PROT_NCQ) { 4831 link->sactive &= ~(1 << qc->tag); 4832 if (!link->sactive) 4833 ap->nr_active_links--; 4834 } else { 4835 link->active_tag = ATA_TAG_POISON; 4836 ap->nr_active_links--; 4837 } 4838 4839 /* clear exclusive status */ 4840 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 4841 ap->excl_link == link)) 4842 ap->excl_link = NULL; 4843 4844 /* atapi: mark qc as inactive to prevent the interrupt handler 4845 * from completing the command twice later, before the error handler 4846 * is called. (when rc != 0 and atapi request sense is needed) 4847 */ 4848 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4849 ap->qc_active &= ~(1 << qc->tag); 4850 4851 /* call completion callback */ 4852 qc->complete_fn(qc); 4853 } 4854 4855 static void fill_result_tf(struct ata_queued_cmd *qc) 4856 { 4857 struct ata_port *ap = qc->ap; 4858 4859 qc->result_tf.flags = qc->tf.flags; 4860 ap->ops->qc_fill_rtf(qc); 4861 } 4862 4863 static void ata_verify_xfer(struct ata_queued_cmd *qc) 4864 { 4865 struct ata_device *dev = qc->dev; 4866 4867 if (ata_is_nodata(qc->tf.protocol)) 4868 return; 4869 4870 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 4871 return; 4872 4873 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 4874 } 4875 4876 /** 4877 * ata_qc_complete - Complete an active ATA command 4878 * @qc: Command to complete 4879 * 4880 * Indicate to the mid and upper layers that an ATA command has 4881 * completed, with either an ok or not-ok status. 4882 * 4883 * Refrain from calling this function multiple times when 4884 * successfully completing multiple NCQ commands. 4885 * ata_qc_complete_multiple() should be used instead, which will 4886 * properly update IRQ expect state. 4887 * 4888 * LOCKING: 4889 * spin_lock_irqsave(host lock) 4890 */ 4891 void ata_qc_complete(struct ata_queued_cmd *qc) 4892 { 4893 struct ata_port *ap = qc->ap; 4894 4895 /* XXX: New EH and old EH use different mechanisms to 4896 * synchronize EH with regular execution path. 4897 * 4898 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 4899 * Normal execution path is responsible for not accessing a 4900 * failed qc. libata core enforces the rule by returning NULL 4901 * from ata_qc_from_tag() for failed qcs. 4902 * 4903 * Old EH depends on ata_qc_complete() nullifying completion 4904 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 4905 * not synchronize with interrupt handler. Only PIO task is 4906 * taken care of. 
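 *
 * For orientation, a simplified sketch of how a new-EH LLD interrupt
 * handler usually reaches this function (computed_err_mask is only a
 * placeholder for whatever the controller reported):
 *
 *	qc = ata_qc_from_tag(ap, tag);
 *	if (qc) {
 *		qc->err_mask |= computed_err_mask;
 *		ata_qc_complete(qc);
 *	}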
4907 */ 4908 if (ap->ops->error_handler) { 4909 struct ata_device *dev = qc->dev; 4910 struct ata_eh_info *ehi = &dev->link->eh_info; 4911 4912 if (unlikely(qc->err_mask)) 4913 qc->flags |= ATA_QCFLAG_FAILED; 4914 4915 /* 4916 * Finish internal commands without any further processing 4917 * and always with the result TF filled. 4918 */ 4919 if (unlikely(ata_tag_internal(qc->tag))) { 4920 fill_result_tf(qc); 4921 trace_ata_qc_complete_internal(qc); 4922 __ata_qc_complete(qc); 4923 return; 4924 } 4925 4926 /* 4927 * Non-internal qc has failed. Fill the result TF and 4928 * summon EH. 4929 */ 4930 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4931 fill_result_tf(qc); 4932 trace_ata_qc_complete_failed(qc); 4933 ata_qc_schedule_eh(qc); 4934 return; 4935 } 4936 4937 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 4938 4939 /* read result TF if requested */ 4940 if (qc->flags & ATA_QCFLAG_RESULT_TF) 4941 fill_result_tf(qc); 4942 4943 trace_ata_qc_complete_done(qc); 4944 /* Some commands need post-processing after successful 4945 * completion. 4946 */ 4947 switch (qc->tf.command) { 4948 case ATA_CMD_SET_FEATURES: 4949 if (qc->tf.feature != SETFEATURES_WC_ON && 4950 qc->tf.feature != SETFEATURES_WC_OFF) 4951 break; 4952 /* fall through */ 4953 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 4954 case ATA_CMD_SET_MULTI: /* multi_count changed */ 4955 /* revalidate device */ 4956 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 4957 ata_port_schedule_eh(ap); 4958 break; 4959 4960 case ATA_CMD_SLEEP: 4961 dev->flags |= ATA_DFLAG_SLEEPING; 4962 break; 4963 } 4964 4965 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) 4966 ata_verify_xfer(qc); 4967 4968 __ata_qc_complete(qc); 4969 } else { 4970 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 4971 return; 4972 4973 /* read result TF if failed or requested */ 4974 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 4975 fill_result_tf(qc); 4976 4977 __ata_qc_complete(qc); 4978 } 4979 } 4980 4981 /** 4982 * ata_qc_complete_multiple - Complete multiple qcs successfully 4983 * @ap: port in question 4984 * @qc_active: new qc_active mask 4985 * 4986 * Complete in-flight commands. This functions is meant to be 4987 * called from low-level driver's interrupt routine to complete 4988 * requests normally. ap->qc_active and @qc_active is compared 4989 * and commands are completed accordingly. 4990 * 4991 * Always use this function when completing multiple NCQ commands 4992 * from IRQ handlers instead of calling ata_qc_complete() 4993 * multiple times to keep IRQ expect status properly in sync. 4994 * 4995 * LOCKING: 4996 * spin_lock_irqsave(host lock) 4997 * 4998 * RETURNS: 4999 * Number of completed commands on success, -errno otherwise. 5000 */ 5001 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) 5002 { 5003 int nr_done = 0; 5004 u32 done_mask; 5005 5006 done_mask = ap->qc_active ^ qc_active; 5007 5008 if (unlikely(done_mask & qc_active)) { 5009 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n", 5010 ap->qc_active, qc_active); 5011 return -EINVAL; 5012 } 5013 5014 while (done_mask) { 5015 struct ata_queued_cmd *qc; 5016 unsigned int tag = __ffs(done_mask); 5017 5018 qc = ata_qc_from_tag(ap, tag); 5019 if (qc) { 5020 ata_qc_complete(qc); 5021 nr_done++; 5022 } 5023 done_mask &= ~(1 << tag); 5024 } 5025 5026 return nr_done; 5027 } 5028 5029 /** 5030 * ata_qc_issue - issue taskfile to device 5031 * @qc: command to issue to device 5032 * 5033 * Prepare an ATA command to submission to device. 
5034 * This includes mapping the data into a DMA-able 5035 * area, filling in the S/G table, and finally 5036 * writing the taskfile to hardware, starting the command. 5037 * 5038 * LOCKING: 5039 * spin_lock_irqsave(host lock) 5040 */ 5041 void ata_qc_issue(struct ata_queued_cmd *qc) 5042 { 5043 struct ata_port *ap = qc->ap; 5044 struct ata_link *link = qc->dev->link; 5045 u8 prot = qc->tf.protocol; 5046 5047 /* Make sure only one non-NCQ command is outstanding. The 5048 * check is skipped for old EH because it reuses active qc to 5049 * request ATAPI sense. 5050 */ 5051 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5052 5053 if (ata_is_ncq(prot)) { 5054 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); 5055 5056 if (!link->sactive) 5057 ap->nr_active_links++; 5058 link->sactive |= 1 << qc->tag; 5059 } else { 5060 WARN_ON_ONCE(link->sactive); 5061 5062 ap->nr_active_links++; 5063 link->active_tag = qc->tag; 5064 } 5065 5066 qc->flags |= ATA_QCFLAG_ACTIVE; 5067 ap->qc_active |= 1 << qc->tag; 5068 5069 /* 5070 * We guarantee to LLDs that they will have at least one 5071 * non-zero sg if the command is a data command. 5072 */ 5073 if (WARN_ON_ONCE(ata_is_data(prot) && 5074 (!qc->sg || !qc->n_elem || !qc->nbytes))) 5075 goto sys_err; 5076 5077 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5078 (ap->flags & ATA_FLAG_PIO_DMA))) 5079 if (ata_sg_setup(qc)) 5080 goto sys_err; 5081 5082 /* if device is sleeping, schedule reset and abort the link */ 5083 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5084 link->eh_info.action |= ATA_EH_RESET; 5085 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5086 ata_link_abort(link); 5087 return; 5088 } 5089 5090 ap->ops->qc_prep(qc); 5091 trace_ata_qc_issue(qc); 5092 qc->err_mask |= ap->ops->qc_issue(qc); 5093 if (unlikely(qc->err_mask)) 5094 goto err; 5095 return; 5096 5097 sys_err: 5098 qc->err_mask |= AC_ERR_SYSTEM; 5099 err: 5100 ata_qc_complete(qc); 5101 } 5102 5103 /** 5104 * sata_scr_valid - test whether SCRs are accessible 5105 * @link: ATA link to test SCR accessibility for 5106 * 5107 * Test whether SCRs are accessible for @link. 5108 * 5109 * LOCKING: 5110 * None. 5111 * 5112 * RETURNS: 5113 * 1 if SCRs are accessible, 0 otherwise. 5114 */ 5115 int sata_scr_valid(struct ata_link *link) 5116 { 5117 struct ata_port *ap = link->ap; 5118 5119 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5120 } 5121 5122 /** 5123 * sata_scr_read - read SCR register of the specified port 5124 * @link: ATA link to read SCR for 5125 * @reg: SCR to read 5126 * @val: Place to store read value 5127 * 5128 * Read SCR register @reg of @link into *@val. This function is 5129 * guaranteed to succeed if @link is ap->link, the cable type of 5130 * the port is SATA and the port implements ->scr_read. 5131 * 5132 * LOCKING: 5133 * None if @link is ap->link. Kernel thread context otherwise. 5134 * 5135 * RETURNS: 5136 * 0 on success, negative errno on failure. 5137 */ 5138 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5139 { 5140 if (ata_is_host_link(link)) { 5141 if (sata_scr_valid(link)) 5142 return link->ap->ops->scr_read(link, reg, val); 5143 return -EOPNOTSUPP; 5144 } 5145 5146 return sata_pmp_scr_read(link, reg, val); 5147 } 5148 5149 /** 5150 * sata_scr_write - write SCR register of the specified port 5151 * @link: ATA link to write SCR for 5152 * @reg: SCR to write 5153 * @val: value to write 5154 * 5155 * Write @val to SCR register @reg of @link. 
This function is 5156 * guaranteed to succeed if @link is ap->link, the cable type of 5157 * the port is SATA and the port implements ->scr_read. 5158 * 5159 * LOCKING: 5160 * None if @link is ap->link. Kernel thread context otherwise. 5161 * 5162 * RETURNS: 5163 * 0 on success, negative errno on failure. 5164 */ 5165 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5166 { 5167 if (ata_is_host_link(link)) { 5168 if (sata_scr_valid(link)) 5169 return link->ap->ops->scr_write(link, reg, val); 5170 return -EOPNOTSUPP; 5171 } 5172 5173 return sata_pmp_scr_write(link, reg, val); 5174 } 5175 5176 /** 5177 * sata_scr_write_flush - write SCR register of the specified port and flush 5178 * @link: ATA link to write SCR for 5179 * @reg: SCR to write 5180 * @val: value to write 5181 * 5182 * This function is identical to sata_scr_write() except that this 5183 * function performs flush after writing to the register. 5184 * 5185 * LOCKING: 5186 * None if @link is ap->link. Kernel thread context otherwise. 5187 * 5188 * RETURNS: 5189 * 0 on success, negative errno on failure. 5190 */ 5191 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5192 { 5193 if (ata_is_host_link(link)) { 5194 int rc; 5195 5196 if (sata_scr_valid(link)) { 5197 rc = link->ap->ops->scr_write(link, reg, val); 5198 if (rc == 0) 5199 rc = link->ap->ops->scr_read(link, reg, &val); 5200 return rc; 5201 } 5202 return -EOPNOTSUPP; 5203 } 5204 5205 return sata_pmp_scr_write(link, reg, val); 5206 } 5207 5208 /** 5209 * ata_phys_link_online - test whether the given link is online 5210 * @link: ATA link to test 5211 * 5212 * Test whether @link is online. Note that this function returns 5213 * 0 if online status of @link cannot be obtained, so 5214 * ata_link_online(link) != !ata_link_offline(link). 5215 * 5216 * LOCKING: 5217 * None. 5218 * 5219 * RETURNS: 5220 * True if the port online status is available and online. 5221 */ 5222 bool ata_phys_link_online(struct ata_link *link) 5223 { 5224 u32 sstatus; 5225 5226 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5227 ata_sstatus_online(sstatus)) 5228 return true; 5229 return false; 5230 } 5231 5232 /** 5233 * ata_phys_link_offline - test whether the given link is offline 5234 * @link: ATA link to test 5235 * 5236 * Test whether @link is offline. Note that this function 5237 * returns 0 if offline status of @link cannot be obtained, so 5238 * ata_link_online(link) != !ata_link_offline(link). 5239 * 5240 * LOCKING: 5241 * None. 5242 * 5243 * RETURNS: 5244 * True if the port offline status is available and offline. 5245 */ 5246 bool ata_phys_link_offline(struct ata_link *link) 5247 { 5248 u32 sstatus; 5249 5250 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5251 !ata_sstatus_online(sstatus)) 5252 return true; 5253 return false; 5254 } 5255 5256 /** 5257 * ata_link_online - test whether the given link is online 5258 * @link: ATA link to test 5259 * 5260 * Test whether @link is online. This is identical to 5261 * ata_phys_link_online() when there's no slave link. When 5262 * there's a slave link, this function should only be called on 5263 * the master link and will return true if any of M/S links is 5264 * online. 5265 * 5266 * LOCKING: 5267 * None. 5268 * 5269 * RETURNS: 5270 * True if the port online status is available and online. 
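 *
 * Usage sketch (assumed, not taken from a specific caller): probe or
 * EH paths that must not touch an offline link can simply gate on
 * this helper:
 *
 *	if (!ata_link_online(link))
 *		return 0;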
5271 */ 5272 bool ata_link_online(struct ata_link *link) 5273 { 5274 struct ata_link *slave = link->ap->slave_link; 5275 5276 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5277 5278 return ata_phys_link_online(link) || 5279 (slave && ata_phys_link_online(slave)); 5280 } 5281 5282 /** 5283 * ata_link_offline - test whether the given link is offline 5284 * @link: ATA link to test 5285 * 5286 * Test whether @link is offline. This is identical to 5287 * ata_phys_link_offline() when there's no slave link. When 5288 * there's a slave link, this function should only be called on 5289 * the master link and will return true if both M/S links are 5290 * offline. 5291 * 5292 * LOCKING: 5293 * None. 5294 * 5295 * RETURNS: 5296 * True if the port offline status is available and offline. 5297 */ 5298 bool ata_link_offline(struct ata_link *link) 5299 { 5300 struct ata_link *slave = link->ap->slave_link; 5301 5302 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5303 5304 return ata_phys_link_offline(link) && 5305 (!slave || ata_phys_link_offline(slave)); 5306 } 5307 5308 #ifdef CONFIG_PM 5309 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, 5310 unsigned int action, unsigned int ehi_flags, 5311 bool async) 5312 { 5313 struct ata_link *link; 5314 unsigned long flags; 5315 5316 /* Previous resume operation might still be in 5317 * progress. Wait for PM_PENDING to clear. 5318 */ 5319 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5320 ata_port_wait_eh(ap); 5321 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5322 } 5323 5324 /* request PM ops to EH */ 5325 spin_lock_irqsave(ap->lock, flags); 5326 5327 ap->pm_mesg = mesg; 5328 ap->pflags |= ATA_PFLAG_PM_PENDING; 5329 ata_for_each_link(link, ap, HOST_FIRST) { 5330 link->eh_info.action |= action; 5331 link->eh_info.flags |= ehi_flags; 5332 } 5333 5334 ata_port_schedule_eh(ap); 5335 5336 spin_unlock_irqrestore(ap->lock, flags); 5337 5338 if (!async) { 5339 ata_port_wait_eh(ap); 5340 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5341 } 5342 } 5343 5344 /* 5345 * On some hardware, device fails to respond after spun down for suspend. As 5346 * the device won't be used before being resumed, we don't need to touch the 5347 * device. Ask EH to skip the usual stuff and proceed directly to suspend. 
5348 * 5349 * http://thread.gmane.org/gmane.linux.ide/46764 5350 */ 5351 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET 5352 | ATA_EHI_NO_AUTOPSY 5353 | ATA_EHI_NO_RECOVERY; 5354 5355 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg) 5356 { 5357 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false); 5358 } 5359 5360 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg) 5361 { 5362 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true); 5363 } 5364 5365 static int ata_port_pm_suspend(struct device *dev) 5366 { 5367 struct ata_port *ap = to_ata_port(dev); 5368 5369 if (pm_runtime_suspended(dev)) 5370 return 0; 5371 5372 ata_port_suspend(ap, PMSG_SUSPEND); 5373 return 0; 5374 } 5375 5376 static int ata_port_pm_freeze(struct device *dev) 5377 { 5378 struct ata_port *ap = to_ata_port(dev); 5379 5380 if (pm_runtime_suspended(dev)) 5381 return 0; 5382 5383 ata_port_suspend(ap, PMSG_FREEZE); 5384 return 0; 5385 } 5386 5387 static int ata_port_pm_poweroff(struct device *dev) 5388 { 5389 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE); 5390 return 0; 5391 } 5392 5393 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY 5394 | ATA_EHI_QUIET; 5395 5396 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg) 5397 { 5398 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false); 5399 } 5400 5401 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg) 5402 { 5403 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true); 5404 } 5405 5406 static int ata_port_pm_resume(struct device *dev) 5407 { 5408 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME); 5409 pm_runtime_disable(dev); 5410 pm_runtime_set_active(dev); 5411 pm_runtime_enable(dev); 5412 return 0; 5413 } 5414 5415 /* 5416 * For ODDs, the upper layer will poll for media change every few seconds, 5417 * which will make it enter and leave suspend state every few seconds. And 5418 * as each suspend will cause a hard/soft reset, the gain of runtime suspend 5419 * is very little and the ODD may malfunction after constantly being reset. 5420 * So the idle callback here will not proceed to suspend if a non-ZPODD capable 5421 * ODD is attached to the port. 
5422 */ 5423 static int ata_port_runtime_idle(struct device *dev) 5424 { 5425 struct ata_port *ap = to_ata_port(dev); 5426 struct ata_link *link; 5427 struct ata_device *adev; 5428 5429 ata_for_each_link(link, ap, HOST_FIRST) { 5430 ata_for_each_dev(adev, link, ENABLED) 5431 if (adev->class == ATA_DEV_ATAPI && 5432 !zpodd_dev_enabled(adev)) 5433 return -EBUSY; 5434 } 5435 5436 return 0; 5437 } 5438 5439 static int ata_port_runtime_suspend(struct device *dev) 5440 { 5441 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND); 5442 return 0; 5443 } 5444 5445 static int ata_port_runtime_resume(struct device *dev) 5446 { 5447 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME); 5448 return 0; 5449 } 5450 5451 static const struct dev_pm_ops ata_port_pm_ops = { 5452 .suspend = ata_port_pm_suspend, 5453 .resume = ata_port_pm_resume, 5454 .freeze = ata_port_pm_freeze, 5455 .thaw = ata_port_pm_resume, 5456 .poweroff = ata_port_pm_poweroff, 5457 .restore = ata_port_pm_resume, 5458 5459 .runtime_suspend = ata_port_runtime_suspend, 5460 .runtime_resume = ata_port_runtime_resume, 5461 .runtime_idle = ata_port_runtime_idle, 5462 }; 5463 5464 /* sas ports don't participate in pm runtime management of ata_ports, 5465 * and need to resume ata devices at the domain level, not the per-port 5466 * level. sas suspend/resume is async to allow parallel port recovery 5467 * since sas has multiple ata_port instances per Scsi_Host. 5468 */ 5469 void ata_sas_port_suspend(struct ata_port *ap) 5470 { 5471 ata_port_suspend_async(ap, PMSG_SUSPEND); 5472 } 5473 EXPORT_SYMBOL_GPL(ata_sas_port_suspend); 5474 5475 void ata_sas_port_resume(struct ata_port *ap) 5476 { 5477 ata_port_resume_async(ap, PMSG_RESUME); 5478 } 5479 EXPORT_SYMBOL_GPL(ata_sas_port_resume); 5480 5481 /** 5482 * ata_host_suspend - suspend host 5483 * @host: host to suspend 5484 * @mesg: PM message 5485 * 5486 * Suspend @host. Actual operation is performed by port suspend. 5487 */ 5488 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5489 { 5490 host->dev->power.power_state = mesg; 5491 return 0; 5492 } 5493 5494 /** 5495 * ata_host_resume - resume host 5496 * @host: host to resume 5497 * 5498 * Resume @host. Actual operation is performed by port resume. 5499 */ 5500 void ata_host_resume(struct ata_host *host) 5501 { 5502 host->dev->power.power_state = PMSG_ON; 5503 } 5504 #endif 5505 5506 struct device_type ata_port_type = { 5507 .name = "ata_port", 5508 #ifdef CONFIG_PM 5509 .pm = &ata_port_pm_ops, 5510 #endif 5511 }; 5512 5513 /** 5514 * ata_dev_init - Initialize an ata_device structure 5515 * @dev: Device structure to initialize 5516 * 5517 * Initialize @dev in preparation for probing. 5518 * 5519 * LOCKING: 5520 * Inherited from caller. 5521 */ 5522 void ata_dev_init(struct ata_device *dev) 5523 { 5524 struct ata_link *link = ata_dev_phys_link(dev); 5525 struct ata_port *ap = link->ap; 5526 unsigned long flags; 5527 5528 /* SATA spd limit is bound to the attached device, reset together */ 5529 link->sata_spd_limit = link->hw_sata_spd_limit; 5530 link->sata_spd = 0; 5531 5532 /* High bits of dev->flags are used to record warm plug 5533 * requests which occur asynchronously. Synchronize using 5534 * host lock. 
5535 */ 5536 spin_lock_irqsave(ap->lock, flags); 5537 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5538 dev->horkage = 0; 5539 spin_unlock_irqrestore(ap->lock, flags); 5540 5541 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5542 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5543 dev->pio_mask = UINT_MAX; 5544 dev->mwdma_mask = UINT_MAX; 5545 dev->udma_mask = UINT_MAX; 5546 } 5547 5548 /** 5549 * ata_link_init - Initialize an ata_link structure 5550 * @ap: ATA port link is attached to 5551 * @link: Link structure to initialize 5552 * @pmp: Port multiplier port number 5553 * 5554 * Initialize @link. 5555 * 5556 * LOCKING: 5557 * Kernel thread context (may sleep) 5558 */ 5559 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5560 { 5561 int i; 5562 5563 /* clear everything except for devices */ 5564 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, 5565 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); 5566 5567 link->ap = ap; 5568 link->pmp = pmp; 5569 link->active_tag = ATA_TAG_POISON; 5570 link->hw_sata_spd_limit = UINT_MAX; 5571 5572 /* can't use iterator, ap isn't initialized yet */ 5573 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5574 struct ata_device *dev = &link->device[i]; 5575 5576 dev->link = link; 5577 dev->devno = dev - link->device; 5578 #ifdef CONFIG_ATA_ACPI 5579 dev->gtf_filter = ata_acpi_gtf_filter; 5580 #endif 5581 ata_dev_init(dev); 5582 } 5583 } 5584 5585 /** 5586 * sata_link_init_spd - Initialize link->sata_spd_limit 5587 * @link: Link to configure sata_spd_limit for 5588 * 5589 * Initialize @link->[hw_]sata_spd_limit to the currently 5590 * configured value. 5591 * 5592 * LOCKING: 5593 * Kernel thread context (may sleep). 5594 * 5595 * RETURNS: 5596 * 0 on success, -errno on failure. 5597 */ 5598 int sata_link_init_spd(struct ata_link *link) 5599 { 5600 u8 spd; 5601 int rc; 5602 5603 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5604 if (rc) 5605 return rc; 5606 5607 spd = (link->saved_scontrol >> 4) & 0xf; 5608 if (spd) 5609 link->hw_sata_spd_limit &= (1 << spd) - 1; 5610 5611 ata_force_link_limits(link); 5612 5613 link->sata_spd_limit = link->hw_sata_spd_limit; 5614 5615 return 0; 5616 } 5617 5618 /** 5619 * ata_port_alloc - allocate and initialize basic ATA port resources 5620 * @host: ATA host this allocated port belongs to 5621 * 5622 * Allocate and initialize basic ATA port resources. 5623 * 5624 * RETURNS: 5625 * Allocate ATA port on success, NULL on failure. 5626 * 5627 * LOCKING: 5628 * Inherited from calling layer (may sleep). 
5629 */ 5630 struct ata_port *ata_port_alloc(struct ata_host *host) 5631 { 5632 struct ata_port *ap; 5633 5634 DPRINTK("ENTER\n"); 5635 5636 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5637 if (!ap) 5638 return NULL; 5639 5640 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; 5641 ap->lock = &host->lock; 5642 ap->print_id = -1; 5643 ap->local_port_no = -1; 5644 ap->host = host; 5645 ap->dev = host->dev; 5646 5647 #if defined(ATA_VERBOSE_DEBUG) 5648 /* turn on all debugging levels */ 5649 ap->msg_enable = 0x00FF; 5650 #elif defined(ATA_DEBUG) 5651 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5652 #else 5653 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5654 #endif 5655 5656 mutex_init(&ap->scsi_scan_mutex); 5657 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5658 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5659 INIT_LIST_HEAD(&ap->eh_done_q); 5660 init_waitqueue_head(&ap->eh_wait_q); 5661 init_completion(&ap->park_req_pending); 5662 init_timer_deferrable(&ap->fastdrain_timer); 5663 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 5664 ap->fastdrain_timer.data = (unsigned long)ap; 5665 5666 ap->cbl = ATA_CBL_NONE; 5667 5668 ata_link_init(ap, &ap->link, 0); 5669 5670 #ifdef ATA_IRQ_TRAP 5671 ap->stats.unhandled_irq = 1; 5672 ap->stats.idle_irq = 1; 5673 #endif 5674 ata_sff_port_init(ap); 5675 5676 return ap; 5677 } 5678 5679 static void ata_host_release(struct device *gendev, void *res) 5680 { 5681 struct ata_host *host = dev_get_drvdata(gendev); 5682 int i; 5683 5684 for (i = 0; i < host->n_ports; i++) { 5685 struct ata_port *ap = host->ports[i]; 5686 5687 if (!ap) 5688 continue; 5689 5690 if (ap->scsi_host) 5691 scsi_host_put(ap->scsi_host); 5692 5693 kfree(ap->pmp_link); 5694 kfree(ap->slave_link); 5695 kfree(ap); 5696 host->ports[i] = NULL; 5697 } 5698 5699 dev_set_drvdata(gendev, NULL); 5700 } 5701 5702 /** 5703 * ata_host_alloc - allocate and init basic ATA host resources 5704 * @dev: generic device this host is associated with 5705 * @max_ports: maximum number of ATA ports associated with this host 5706 * 5707 * Allocate and initialize basic ATA host resources. LLD calls 5708 * this function to allocate a host, initializes it fully and 5709 * attaches it using ata_host_register(). 5710 * 5711 * @max_ports ports are allocated and host->n_ports is 5712 * initialized to @max_ports. The caller is allowed to decrease 5713 * host->n_ports before calling ata_host_register(). The unused 5714 * ports will be automatically freed on registration. 5715 * 5716 * RETURNS: 5717 * Allocate ATA host on success, NULL on failure. 5718 * 5719 * LOCKING: 5720 * Inherited from calling layer (may sleep). 
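 *
 * Illustrative sketch of the usual LLD sequence (pdev, n_ports and
 * my_sht are placeholders; many drivers use ata_host_activate()
 * instead of the explicit start/register pair):
 *
 *	host = ata_host_alloc(&pdev->dev, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	... set up host->ports[i]->ops, transfer masks, iomaps ...
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	rc = ata_host_register(host, &my_sht);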
5721 */ 5722 struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 5723 { 5724 struct ata_host *host; 5725 size_t sz; 5726 int i; 5727 5728 DPRINTK("ENTER\n"); 5729 5730 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 5731 return NULL; 5732 5733 /* alloc a container for our list of ATA ports (buses) */ 5734 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 5735 /* alloc a container for our list of ATA ports (buses) */ 5736 host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 5737 if (!host) 5738 goto err_out; 5739 5740 devres_add(dev, host); 5741 dev_set_drvdata(dev, host); 5742 5743 spin_lock_init(&host->lock); 5744 mutex_init(&host->eh_mutex); 5745 host->dev = dev; 5746 host->n_ports = max_ports; 5747 5748 /* allocate ports bound to this host */ 5749 for (i = 0; i < max_ports; i++) { 5750 struct ata_port *ap; 5751 5752 ap = ata_port_alloc(host); 5753 if (!ap) 5754 goto err_out; 5755 5756 ap->port_no = i; 5757 host->ports[i] = ap; 5758 } 5759 5760 devres_remove_group(dev, NULL); 5761 return host; 5762 5763 err_out: 5764 devres_release_group(dev, NULL); 5765 return NULL; 5766 } 5767 5768 /** 5769 * ata_host_alloc_pinfo - alloc host and init with port_info array 5770 * @dev: generic device this host is associated with 5771 * @ppi: array of ATA port_info to initialize host with 5772 * @n_ports: number of ATA ports attached to this host 5773 * 5774 * Allocate ATA host and initialize with info from @ppi. If NULL 5775 * terminated, @ppi may contain fewer entries than @n_ports. The 5776 * last entry will be used for the remaining ports. 5777 * 5778 * RETURNS: 5779 * Allocate ATA host on success, NULL on failure. 5780 * 5781 * LOCKING: 5782 * Inherited from calling layer (may sleep). 5783 */ 5784 struct ata_host *ata_host_alloc_pinfo(struct device *dev, 5785 const struct ata_port_info * const * ppi, 5786 int n_ports) 5787 { 5788 const struct ata_port_info *pi; 5789 struct ata_host *host; 5790 int i, j; 5791 5792 host = ata_host_alloc(dev, n_ports); 5793 if (!host) 5794 return NULL; 5795 5796 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 5797 struct ata_port *ap = host->ports[i]; 5798 5799 if (ppi[j]) 5800 pi = ppi[j++]; 5801 5802 ap->pio_mask = pi->pio_mask; 5803 ap->mwdma_mask = pi->mwdma_mask; 5804 ap->udma_mask = pi->udma_mask; 5805 ap->flags |= pi->flags; 5806 ap->link.flags |= pi->link_flags; 5807 ap->ops = pi->port_ops; 5808 5809 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 5810 host->ops = pi->port_ops; 5811 } 5812 5813 return host; 5814 } 5815 5816 /** 5817 * ata_slave_link_init - initialize slave link 5818 * @ap: port to initialize slave link for 5819 * 5820 * Create and initialize slave link for @ap. This enables slave 5821 * link handling on the port. 5822 * 5823 * In libata, a port contains links and a link contains devices. 5824 * There is single host link but if a PMP is attached to it, 5825 * there can be multiple fan-out links. On SATA, there's usually 5826 * a single device connected to a link but PATA and SATA 5827 * controllers emulating TF based interface can have two - master 5828 * and slave. 5829 * 5830 * However, there are a few controllers which don't fit into this 5831 * abstraction too well - SATA controllers which emulate TF 5832 * interface with both master and slave devices but also have 5833 * separate SCR register sets for each device. These controllers 5834 * need separate links for physical link handling 5835 * (e.g. 
onlineness, link speed) but should be treated like a 5836 * traditional M/S controller for everything else (e.g. command 5837 * issue, softreset). 5838 * 5839 * slave_link is libata's way of handling this class of 5840 * controllers without impacting core layer too much. For 5841 * anything other than physical link handling, the default host 5842 * link is used for both master and slave. For physical link 5843 * handling, separate @ap->slave_link is used. All dirty details 5844 * are implemented inside libata core layer. From LLD's POV, the 5845 * only difference is that prereset, hardreset and postreset are 5846 * called once more for the slave link, so the reset sequence 5847 * looks like the following. 5848 * 5849 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> 5850 * softreset(M) -> postreset(M) -> postreset(S) 5851 * 5852 * Note that softreset is called only for the master. Softreset 5853 * resets both M/S by definition, so SRST on master should handle 5854 * both (the standard method will work just fine). 5855 * 5856 * LOCKING: 5857 * Should be called before host is registered. 5858 * 5859 * RETURNS: 5860 * 0 on success, -errno on failure. 5861 */ 5862 int ata_slave_link_init(struct ata_port *ap) 5863 { 5864 struct ata_link *link; 5865 5866 WARN_ON(ap->slave_link); 5867 WARN_ON(ap->flags & ATA_FLAG_PMP); 5868 5869 link = kzalloc(sizeof(*link), GFP_KERNEL); 5870 if (!link) 5871 return -ENOMEM; 5872 5873 ata_link_init(ap, link, 1); 5874 ap->slave_link = link; 5875 return 0; 5876 } 5877 5878 static void ata_host_stop(struct device *gendev, void *res) 5879 { 5880 struct ata_host *host = dev_get_drvdata(gendev); 5881 int i; 5882 5883 WARN_ON(!(host->flags & ATA_HOST_STARTED)); 5884 5885 for (i = 0; i < host->n_ports; i++) { 5886 struct ata_port *ap = host->ports[i]; 5887 5888 if (ap->ops->port_stop) 5889 ap->ops->port_stop(ap); 5890 } 5891 5892 if (host->ops->host_stop) 5893 host->ops->host_stop(host); 5894 } 5895 5896 /** 5897 * ata_finalize_port_ops - finalize ata_port_operations 5898 * @ops: ata_port_operations to finalize 5899 * 5900 * An ata_port_operations can inherit from another ops and that 5901 * ops can again inherit from another. This can go on as many 5902 * times as necessary as long as there is no loop in the 5903 * inheritance chain. 5904 * 5905 * Ops tables are finalized when the host is started. NULL or 5906 * unspecified entries are inherited from the closest ancestor 5907 * which has the method and the entry is populated with it. 5908 * After finalization, the ops table directly points to all the 5909 * methods and ->inherits is no longer necessary and cleared. 5910 * 5911 * Using ATA_OP_NULL, inheriting ops can force a method to NULL. 5912 * 5913 * LOCKING: 5914 * None.
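 *
 * Purely illustrative (a hypothetical driver's ops, not from this
 * file), showing how an LLD declares inheritance and how ATA_OP_NULL
 * suppresses an inherited method; my_hardreset is an assumption:
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.softreset	= ATA_OP_NULL,
 *		.hardreset	= my_hardreset,
 *	};
 *
 * After finalization, every slot my_port_ops leaves unset points at
 * the closest ancestor's method, .softreset ends up NULL, and
 * .inherits is cleared.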
5915 */ 5916 static void ata_finalize_port_ops(struct ata_port_operations *ops) 5917 { 5918 static DEFINE_SPINLOCK(lock); 5919 const struct ata_port_operations *cur; 5920 void **begin = (void **)ops; 5921 void **end = (void **)&ops->inherits; 5922 void **pp; 5923 5924 if (!ops || !ops->inherits) 5925 return; 5926 5927 spin_lock(&lock); 5928 5929 for (cur = ops->inherits; cur; cur = cur->inherits) { 5930 void **inherit = (void **)cur; 5931 5932 for (pp = begin; pp < end; pp++, inherit++) 5933 if (!*pp) 5934 *pp = *inherit; 5935 } 5936 5937 for (pp = begin; pp < end; pp++) 5938 if (IS_ERR(*pp)) 5939 *pp = NULL; 5940 5941 ops->inherits = NULL; 5942 5943 spin_unlock(&lock); 5944 } 5945 5946 /** 5947 * ata_host_start - start and freeze ports of an ATA host 5948 * @host: ATA host to start ports for 5949 * 5950 * Start and then freeze ports of @host. Started status is 5951 * recorded in host->flags, so this function can be called 5952 * multiple times. Ports are guaranteed to get started only 5953 * once. If host->ops isn't initialized yet, it's set to the 5954 * first non-dummy port ops. 5955 * 5956 * LOCKING: 5957 * Inherited from calling layer (may sleep). 5958 * 5959 * RETURNS: 5960 * 0 if all ports are started successfully, -errno otherwise. 5961 */ 5962 int ata_host_start(struct ata_host *host) 5963 { 5964 int have_stop = 0; 5965 void *start_dr = NULL; 5966 int i, rc; 5967 5968 if (host->flags & ATA_HOST_STARTED) 5969 return 0; 5970 5971 ata_finalize_port_ops(host->ops); 5972 5973 for (i = 0; i < host->n_ports; i++) { 5974 struct ata_port *ap = host->ports[i]; 5975 5976 ata_finalize_port_ops(ap->ops); 5977 5978 if (!host->ops && !ata_port_is_dummy(ap)) 5979 host->ops = ap->ops; 5980 5981 if (ap->ops->port_stop) 5982 have_stop = 1; 5983 } 5984 5985 if (host->ops->host_stop) 5986 have_stop = 1; 5987 5988 if (have_stop) { 5989 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 5990 if (!start_dr) 5991 return -ENOMEM; 5992 } 5993 5994 for (i = 0; i < host->n_ports; i++) { 5995 struct ata_port *ap = host->ports[i]; 5996 5997 if (ap->ops->port_start) { 5998 rc = ap->ops->port_start(ap); 5999 if (rc) { 6000 if (rc != -ENODEV) 6001 dev_err(host->dev, 6002 "failed to start port %d (errno=%d)\n", 6003 i, rc); 6004 goto err_out; 6005 } 6006 } 6007 ata_eh_freeze_port(ap); 6008 } 6009 6010 if (start_dr) 6011 devres_add(host->dev, start_dr); 6012 host->flags |= ATA_HOST_STARTED; 6013 return 0; 6014 6015 err_out: 6016 while (--i >= 0) { 6017 struct ata_port *ap = host->ports[i]; 6018 6019 if (ap->ops->port_stop) 6020 ap->ops->port_stop(ap); 6021 } 6022 devres_free(start_dr); 6023 return rc; 6024 } 6025 6026 /** 6027 * ata_host_init - Initialize a host struct for sas (ipr, libsas) 6028 * @host: host to initialize 6029 * @dev: device host is attached to 6030 * @ops: port_ops 6031 * 6032 */ 6033 void ata_host_init(struct ata_host *host, struct device *dev, 6034 struct ata_port_operations *ops) 6035 { 6036 spin_lock_init(&host->lock); 6037 mutex_init(&host->eh_mutex); 6038 host->n_tags = ATA_MAX_QUEUE - 1; 6039 host->dev = dev; 6040 host->ops = ops; 6041 } 6042 6043 void __ata_port_probe(struct ata_port *ap) 6044 { 6045 struct ata_eh_info *ehi = &ap->link.eh_info; 6046 unsigned long flags; 6047 6048 /* kick EH for boot probing */ 6049 spin_lock_irqsave(ap->lock, flags); 6050 6051 ehi->probe_mask |= ATA_ALL_DEVICES; 6052 ehi->action |= ATA_EH_RESET; 6053 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 6054 6055 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 6056 ap->pflags |= ATA_PFLAG_LOADING; 6057
ata_port_schedule_eh(ap); 6058 6059 spin_unlock_irqrestore(ap->lock, flags); 6060 } 6061 6062 int ata_port_probe(struct ata_port *ap) 6063 { 6064 int rc = 0; 6065 6066 if (ap->ops->error_handler) { 6067 __ata_port_probe(ap); 6068 ata_port_wait_eh(ap); 6069 } else { 6070 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 6071 rc = ata_bus_probe(ap); 6072 DPRINTK("ata%u: bus probe end\n", ap->print_id); 6073 } 6074 return rc; 6075 } 6076 6077 6078 static void async_port_probe(void *data, async_cookie_t cookie) 6079 { 6080 struct ata_port *ap = data; 6081 6082 /* 6083 * If we're not allowed to scan this host in parallel, 6084 * we need to wait until all previous scans have completed 6085 * before going further. 6086 * Jeff Garzik says this is only within a controller, so we 6087 * don't need to wait for port 0, only for later ports. 6088 */ 6089 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) 6090 async_synchronize_cookie(cookie); 6091 6092 (void)ata_port_probe(ap); 6093 6094 /* in order to keep device order, we need to synchronize at this point */ 6095 async_synchronize_cookie(cookie); 6096 6097 ata_scsi_scan_host(ap, 1); 6098 } 6099 6100 /** 6101 * ata_host_register - register initialized ATA host 6102 * @host: ATA host to register 6103 * @sht: template for SCSI host 6104 * 6105 * Register initialized ATA host. @host is allocated using 6106 * ata_host_alloc() and fully initialized by LLD. This function 6107 * starts ports, registers @host with ATA and SCSI layers and 6108 * probe registered devices. 6109 * 6110 * LOCKING: 6111 * Inherited from calling layer (may sleep). 6112 * 6113 * RETURNS: 6114 * 0 on success, -errno otherwise. 6115 */ 6116 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 6117 { 6118 int i, rc; 6119 6120 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1); 6121 6122 /* host must have been started */ 6123 if (!(host->flags & ATA_HOST_STARTED)) { 6124 dev_err(host->dev, "BUG: trying to register unstarted host\n"); 6125 WARN_ON(1); 6126 return -EINVAL; 6127 } 6128 6129 /* Blow away unused ports. This happens when LLD can't 6130 * determine the exact number of ports to allocate at 6131 * allocation time. 6132 */ 6133 for (i = host->n_ports; host->ports[i]; i++) 6134 kfree(host->ports[i]); 6135 6136 /* give ports names and add SCSI hosts */ 6137 for (i = 0; i < host->n_ports; i++) { 6138 host->ports[i]->print_id = atomic_inc_return(&ata_print_id); 6139 host->ports[i]->local_port_no = i + 1; 6140 } 6141 6142 /* Create associated sysfs transport objects */ 6143 for (i = 0; i < host->n_ports; i++) { 6144 rc = ata_tport_add(host->dev,host->ports[i]); 6145 if (rc) { 6146 goto err_tadd; 6147 } 6148 } 6149 6150 rc = ata_scsi_add_hosts(host, sht); 6151 if (rc) 6152 goto err_tadd; 6153 6154 /* set cable, sata_spd_limit and report */ 6155 for (i = 0; i < host->n_ports; i++) { 6156 struct ata_port *ap = host->ports[i]; 6157 unsigned long xfer_mask; 6158 6159 /* set SATA cable type if still unset */ 6160 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 6161 ap->cbl = ATA_CBL_SATA; 6162 6163 /* init sata_spd_limit to the current value */ 6164 sata_link_init_spd(&ap->link); 6165 if (ap->slave_link) 6166 sata_link_init_spd(ap->slave_link); 6167 6168 /* print per-port info to dmesg */ 6169 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6170 ap->udma_mask); 6171 6172 if (!ata_port_is_dummy(ap)) { 6173 ata_port_info(ap, "%cATA max %s %s\n", 6174 (ap->flags & ATA_FLAG_SATA) ? 
'S' : 'P', 6175 ata_mode_string(xfer_mask), 6176 ap->link.eh_info.desc); 6177 ata_ehi_clear_desc(&ap->link.eh_info); 6178 } else 6179 ata_port_info(ap, "DUMMY\n"); 6180 } 6181 6182 /* perform each probe asynchronously */ 6183 for (i = 0; i < host->n_ports; i++) { 6184 struct ata_port *ap = host->ports[i]; 6185 async_schedule(async_port_probe, ap); 6186 } 6187 6188 return 0; 6189 6190 err_tadd: 6191 while (--i >= 0) { 6192 ata_tport_delete(host->ports[i]); 6193 } 6194 return rc; 6195 6196 } 6197 6198 /** 6199 * ata_host_activate - start host, request IRQ and register it 6200 * @host: target ATA host 6201 * @irq: IRQ to request 6202 * @irq_handler: irq_handler used when requesting IRQ 6203 * @irq_flags: irq_flags used when requesting IRQ 6204 * @sht: scsi_host_template to use when registering the host 6205 * 6206 * After allocating an ATA host and initializing it, most libata 6207 * LLDs perform three steps to activate the host - start host, 6208 * request IRQ and register it. This helper takes necessary 6209 * arguments and performs the three steps in one go. 6210 * 6211 * An invalid IRQ skips the IRQ registration and expects the host to 6212 * have set polling mode on the port. In this case, @irq_handler 6213 * should be NULL. 6214 * 6215 * LOCKING: 6216 * Inherited from calling layer (may sleep). 6217 * 6218 * RETURNS: 6219 * 0 on success, -errno otherwise. 6220 */ 6221 int ata_host_activate(struct ata_host *host, int irq, 6222 irq_handler_t irq_handler, unsigned long irq_flags, 6223 struct scsi_host_template *sht) 6224 { 6225 int i, rc; 6226 6227 rc = ata_host_start(host); 6228 if (rc) 6229 return rc; 6230 6231 /* Special case for polling mode */ 6232 if (!irq) { 6233 WARN_ON(irq_handler); 6234 return ata_host_register(host, sht); 6235 } 6236 6237 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 6238 dev_name(host->dev), host); 6239 if (rc) 6240 return rc; 6241 6242 for (i = 0; i < host->n_ports; i++) 6243 ata_port_desc(host->ports[i], "irq %d", irq); 6244 6245 rc = ata_host_register(host, sht); 6246 /* if failed, just free the IRQ and leave ports alone */ 6247 if (rc) 6248 devm_free_irq(host->dev, irq, host); 6249 6250 return rc; 6251 } 6252 6253 /** 6254 * ata_port_detach - Detach ATA port in preparation of device removal 6255 * @ap: ATA port to be detached 6256 * 6257 * Detach all ATA devices and the associated SCSI devices of @ap; 6258 * then, remove the associated SCSI host. @ap is guaranteed to 6259 * be quiescent on return from this function. 6260 * 6261 * LOCKING: 6262 * Kernel thread context (may sleep).
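 *
 * Roughly, the sequence below marks the port ATA_PFLAG_UNLOADING,
 * schedules EH and waits for it to finish detaching the devices,
 * cancels the hotplug task, cleans up ZPODD state and any PMP
 * transport links, and finally removes the SCSI host and the port's
 * transport object.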
6263 */ 6264 static void ata_port_detach(struct ata_port *ap) 6265 { 6266 unsigned long flags; 6267 struct ata_link *link; 6268 struct ata_device *dev; 6269 6270 if (!ap->ops->error_handler) 6271 goto skip_eh; 6272 6273 /* tell EH we're leaving & flush EH */ 6274 spin_lock_irqsave(ap->lock, flags); 6275 ap->pflags |= ATA_PFLAG_UNLOADING; 6276 ata_port_schedule_eh(ap); 6277 spin_unlock_irqrestore(ap->lock, flags); 6278 6279 /* wait till EH commits suicide */ 6280 ata_port_wait_eh(ap); 6281 6282 /* it better be dead now */ 6283 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); 6284 6285 cancel_delayed_work_sync(&ap->hotplug_task); 6286 6287 skip_eh: 6288 /* clean up zpodd on port removal */ 6289 ata_for_each_link(link, ap, HOST_FIRST) { 6290 ata_for_each_dev(dev, link, ALL) { 6291 if (zpodd_dev_enabled(dev)) 6292 zpodd_exit(dev); 6293 } 6294 } 6295 if (ap->pmp_link) { 6296 int i; 6297 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 6298 ata_tlink_delete(&ap->pmp_link[i]); 6299 } 6300 /* remove the associated SCSI host */ 6301 scsi_remove_host(ap->scsi_host); 6302 ata_tport_delete(ap); 6303 } 6304 6305 /** 6306 * ata_host_detach - Detach all ports of an ATA host 6307 * @host: Host to detach 6308 * 6309 * Detach all ports of @host. 6310 * 6311 * LOCKING: 6312 * Kernel thread context (may sleep). 6313 */ 6314 void ata_host_detach(struct ata_host *host) 6315 { 6316 int i; 6317 6318 for (i = 0; i < host->n_ports; i++) 6319 ata_port_detach(host->ports[i]); 6320 6321 /* the host is dead now, dissociate ACPI */ 6322 ata_acpi_dissociate(host); 6323 } 6324 6325 #ifdef CONFIG_PCI 6326 6327 /** 6328 * ata_pci_remove_one - PCI layer callback for device removal 6329 * @pdev: PCI device that was removed 6330 * 6331 * PCI layer indicates to libata via this hook that hot-unplug or 6332 * module unload event has occurred. Detach all ports. Resource 6333 * release is handled via devres. 6334 * 6335 * LOCKING: 6336 * Inherited from PCI layer (may sleep). 6337 */ 6338 void ata_pci_remove_one(struct pci_dev *pdev) 6339 { 6340 struct ata_host *host = pci_get_drvdata(pdev); 6341 6342 ata_host_detach(host); 6343 } 6344 6345 /* move to PCI subsystem */ 6346 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 6347 { 6348 unsigned long tmp = 0; 6349 6350 switch (bits->width) { 6351 case 1: { 6352 u8 tmp8 = 0; 6353 pci_read_config_byte(pdev, bits->reg, &tmp8); 6354 tmp = tmp8; 6355 break; 6356 } 6357 case 2: { 6358 u16 tmp16 = 0; 6359 pci_read_config_word(pdev, bits->reg, &tmp16); 6360 tmp = tmp16; 6361 break; 6362 } 6363 case 4: { 6364 u32 tmp32 = 0; 6365 pci_read_config_dword(pdev, bits->reg, &tmp32); 6366 tmp = tmp32; 6367 break; 6368 } 6369 6370 default: 6371 return -EINVAL; 6372 } 6373 6374 tmp &= bits->mask; 6375 6376 return (tmp == bits->val) ? 
1 : 0; 6377 } 6378 6379 #ifdef CONFIG_PM 6380 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 6381 { 6382 pci_save_state(pdev); 6383 pci_disable_device(pdev); 6384 6385 if (mesg.event & PM_EVENT_SLEEP) 6386 pci_set_power_state(pdev, PCI_D3hot); 6387 } 6388 6389 int ata_pci_device_do_resume(struct pci_dev *pdev) 6390 { 6391 int rc; 6392 6393 pci_set_power_state(pdev, PCI_D0); 6394 pci_restore_state(pdev); 6395 6396 rc = pcim_enable_device(pdev); 6397 if (rc) { 6398 dev_err(&pdev->dev, 6399 "failed to enable device after resume (%d)\n", rc); 6400 return rc; 6401 } 6402 6403 pci_set_master(pdev); 6404 return 0; 6405 } 6406 6407 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 6408 { 6409 struct ata_host *host = pci_get_drvdata(pdev); 6410 int rc = 0; 6411 6412 rc = ata_host_suspend(host, mesg); 6413 if (rc) 6414 return rc; 6415 6416 ata_pci_device_do_suspend(pdev, mesg); 6417 6418 return 0; 6419 } 6420 6421 int ata_pci_device_resume(struct pci_dev *pdev) 6422 { 6423 struct ata_host *host = pci_get_drvdata(pdev); 6424 int rc; 6425 6426 rc = ata_pci_device_do_resume(pdev); 6427 if (rc == 0) 6428 ata_host_resume(host); 6429 return rc; 6430 } 6431 #endif /* CONFIG_PM */ 6432 6433 #endif /* CONFIG_PCI */ 6434 6435 /** 6436 * ata_platform_remove_one - Platform layer callback for device removal 6437 * @pdev: Platform device that was removed 6438 * 6439 * Platform layer indicates to libata via this hook that hot-unplug or 6440 * module unload event has occurred. Detach all ports. Resource 6441 * release is handled via devres. 6442 * 6443 * LOCKING: 6444 * Inherited from platform layer (may sleep). 6445 */ 6446 int ata_platform_remove_one(struct platform_device *pdev) 6447 { 6448 struct ata_host *host = platform_get_drvdata(pdev); 6449 6450 ata_host_detach(host); 6451 6452 return 0; 6453 } 6454 6455 static int __init ata_parse_force_one(char **cur, 6456 struct ata_force_ent *force_ent, 6457 const char **reason) 6458 { 6459 /* FIXME: Currently, there's no way to tag init const data and 6460 * using __initdata causes build failure on some versions of 6461 * gcc. Once __initdataconst is implemented, add const to the 6462 * following structure. 
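 *
 * Each comma-separated libata.force entry parsed below has the form
 * [PORT[.DEVICE]:]VALUE, e.g. "3:noncq" or "1.5Gbps".  VALUE may be
 * any unambiguous prefix of a name in the table, which is why both
 * "1.5" and "1.5Gbps" are accepted.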
6463 */ 6464 static struct ata_force_param force_tbl[] __initdata = { 6465 { "40c", .cbl = ATA_CBL_PATA40 }, 6466 { "80c", .cbl = ATA_CBL_PATA80 }, 6467 { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, 6468 { "unk", .cbl = ATA_CBL_PATA_UNK }, 6469 { "ign", .cbl = ATA_CBL_PATA_IGN }, 6470 { "sata", .cbl = ATA_CBL_SATA }, 6471 { "1.5Gbps", .spd_limit = 1 }, 6472 { "3.0Gbps", .spd_limit = 2 }, 6473 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 6474 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 6475 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, 6476 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 6477 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 6478 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 6479 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, 6480 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, 6481 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, 6482 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, 6483 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, 6484 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, 6485 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, 6486 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, 6487 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, 6488 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6489 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6490 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6491 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6492 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6493 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6494 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6495 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6496 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6497 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6498 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6499 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6500 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6501 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6502 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6503 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6504 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6505 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6506 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6507 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6508 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6509 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, 6510 { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, 6511 { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, 6512 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6513 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, 6514 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, 6515 { "disable", .horkage_on = ATA_HORKAGE_DISABLE }, 6516 }; 6517 char *start = *cur, *p = *cur; 6518 char *id, *val, *endp; 6519 const struct ata_force_param *match_fp = NULL; 6520 int nr_matches = 0, i; 6521 6522 /* find where this param ends and update *cur */ 6523 while (*p != '\0' && *p != ',') 6524 p++; 6525 6526 if (*p == '\0') 6527 *cur = p; 6528 else 6529 *cur = p + 1; 6530 6531 *p = '\0'; 6532 6533 /* parse */ 6534 p = strchr(start, ':'); 6535 if (!p) { 6536 val = strstrip(start); 6537 goto parse_val; 6538 } 6539 *p = '\0'; 6540 6541 id = strstrip(start); 6542 val = strstrip(p + 1); 6543 6544 /* parse id */ 6545 p = strchr(id, '.'); 6546 if (p) { 6547 *p++ = '\0'; 6548 force_ent->device = simple_strtoul(p, &endp, 
10); 6549 if (p == endp || *endp != '\0') { 6550 *reason = "invalid device"; 6551 return -EINVAL; 6552 } 6553 } 6554 6555 force_ent->port = simple_strtoul(id, &endp, 10); 6556 if (id == endp || *endp != '\0') { 6557 *reason = "invalid port/link"; 6558 return -EINVAL; 6559 } 6560 6561 parse_val: 6562 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */ 6563 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { 6564 const struct ata_force_param *fp = &force_tbl[i]; 6565 6566 if (strncasecmp(val, fp->name, strlen(val))) 6567 continue; 6568 6569 nr_matches++; 6570 match_fp = fp; 6571 6572 if (strcasecmp(val, fp->name) == 0) { 6573 nr_matches = 1; 6574 break; 6575 } 6576 } 6577 6578 if (!nr_matches) { 6579 *reason = "unknown value"; 6580 return -EINVAL; 6581 } 6582 if (nr_matches > 1) { 6583 *reason = "ambiguous value"; 6584 return -EINVAL; 6585 } 6586 6587 force_ent->param = *match_fp; 6588 6589 return 0; 6590 } 6591 6592 static void __init ata_parse_force_param(void) 6593 { 6594 int idx = 0, size = 1; 6595 int last_port = -1, last_device = -1; 6596 char *p, *cur, *next; 6597 6598 /* calculate maximum number of params and allocate force_tbl */ 6599 for (p = ata_force_param_buf; *p; p++) 6600 if (*p == ',') 6601 size++; 6602 6603 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL); 6604 if (!ata_force_tbl) { 6605 printk(KERN_WARNING "ata: failed to extend force table, " 6606 "libata.force ignored\n"); 6607 return; 6608 } 6609 6610 /* parse and populate the table */ 6611 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { 6612 const char *reason = ""; 6613 struct ata_force_ent te = { .port = -1, .device = -1 }; 6614 6615 next = cur; 6616 if (ata_parse_force_one(&next, &te, &reason)) { 6617 printk(KERN_WARNING "ata: failed to parse force " 6618 "parameter \"%s\" (%s)\n", 6619 cur, reason); 6620 continue; 6621 } 6622 6623 if (te.port == -1) { 6624 te.port = last_port; 6625 te.device = last_device; 6626 } 6627 6628 ata_force_tbl[idx++] = te; 6629 6630 last_port = te.port; 6631 last_device = te.device; 6632 } 6633 6634 ata_force_tbl_size = idx; 6635 } 6636 6637 static int __init ata_init(void) 6638 { 6639 int rc; 6640 6641 ata_parse_force_param(); 6642 6643 rc = ata_sff_init(); 6644 if (rc) { 6645 kfree(ata_force_tbl); 6646 return rc; 6647 } 6648 6649 libata_transport_init(); 6650 ata_scsi_transport_template = ata_attach_transport(); 6651 if (!ata_scsi_transport_template) { 6652 ata_sff_exit(); 6653 rc = -ENOMEM; 6654 goto err_out; 6655 } 6656 6657 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6658 return 0; 6659 6660 err_out: 6661 return rc; 6662 } 6663 6664 static void __exit ata_exit(void) 6665 { 6666 ata_release_transport(ata_scsi_transport_template); 6667 libata_transport_exit(); 6668 ata_sff_exit(); 6669 kfree(ata_force_tbl); 6670 } 6671 6672 subsys_initcall(ata_init); 6673 module_exit(ata_exit); 6674 6675 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1); 6676 6677 int ata_ratelimit(void) 6678 { 6679 return __ratelimit(&ratelimit); 6680 } 6681 6682 /** 6683 * ata_msleep - ATA EH owner aware msleep 6684 * @ap: ATA port to attribute the sleep to 6685 * @msecs: duration to sleep in milliseconds 6686 * 6687 * Sleeps @msecs. If the current task is owner of @ap's EH, the 6688 * ownership is released before going to sleep and reacquired 6689 * after the sleep is complete. IOW, other ports sharing the 6690 * @ap->host will be allowed to own the EH while this task is 6691 * sleeping. 6692 * 6693 * LOCKING: 6694 * Might sleep.
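 *
 * Illustrative only: in an LLD hook that runs in EH context and has to
 * poll hardware, prefer
 *
 *	ata_msleep(ap, 20);
 *
 * over msleep(20) so sibling ports on the same host can run their EH
 * while this port is waiting.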
6695 */ 6696 void ata_msleep(struct ata_port *ap, unsigned int msecs) 6697 { 6698 bool owns_eh = ap && ap->host->eh_owner == current; 6699 6700 if (owns_eh) 6701 ata_eh_release(ap); 6702 6703 msleep(msecs); 6704 6705 if (owns_eh) 6706 ata_eh_acquire(ap); 6707 } 6708 6709 /** 6710 * ata_wait_register - wait until register value changes 6711 * @ap: ATA port to wait register for, can be NULL 6712 * @reg: IO-mapped register 6713 * @mask: Mask to apply to read register value 6714 * @val: Wait condition 6715 * @interval: polling interval in milliseconds 6716 * @timeout: timeout in milliseconds 6717 * 6718 * Waiting for some bits of register to change is a common 6719 * operation for ATA controllers. This function reads 32bit LE 6720 * IO-mapped register @reg and tests for the following condition. 6721 * 6722 * (*@reg & mask) != val 6723 * 6724 * If the condition is met, it returns; otherwise, the process is 6725 * repeated after @interval until timeout. 6726 * 6727 * LOCKING: 6728 * Kernel thread context (may sleep) 6729 * 6730 * RETURNS: 6731 * The final register value. 6732 */ 6733 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, 6734 unsigned long interval, unsigned long timeout) 6735 { 6736 unsigned long deadline; 6737 u32 tmp; 6738 6739 tmp = ioread32(reg); 6740 6741 /* Calculate timeout _after_ the first read to make sure 6742 * preceding writes reach the controller before starting to 6743 * eat away the timeout. 6744 */ 6745 deadline = ata_deadline(jiffies, timeout); 6746 6747 while ((tmp & mask) == val && time_before(jiffies, deadline)) { 6748 ata_msleep(ap, interval); 6749 tmp = ioread32(reg); 6750 } 6751 6752 return tmp; 6753 } 6754 6755 /** 6756 * sata_lpm_ignore_phy_events - test if PHY event should be ignored 6757 * @link: Link receiving the event 6758 * 6759 * Test whether the received PHY event has to be ignored or not. 6760 * 6761 * LOCKING: 6762 * None. 6763 * 6764 * RETURNS: 6765 * True if the event has to be ignored. 6766 */ 6767 bool sata_lpm_ignore_phy_events(struct ata_link *link) 6768 { 6769 unsigned long lpm_timeout = link->last_lpm_change + 6770 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY); 6771 6772 /* if LPM is enabled, PHYRDY doesn't mean anything */ 6773 if (link->lpm_policy > ATA_LPM_MAX_POWER) 6774 return true; 6775 6776 /* ignore the first PHY event after the LPM policy changed 6777 * as it might be spurious 6778 */ 6779 if ((link->flags & ATA_LFLAG_CHANGED) && 6780 time_before(jiffies, lpm_timeout)) 6781 return true; 6782 6783 return false; 6784 } 6785 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events); 6786 6787 /* 6788 * Dummy port_ops 6789 */ 6790 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 6791 { 6792 return AC_ERR_SYSTEM; 6793 } 6794 6795 static void ata_dummy_error_handler(struct ata_port *ap) 6796 { 6797 /* truly dummy */ 6798 } 6799 6800 struct ata_port_operations ata_dummy_port_ops = { 6801 .qc_prep = ata_noop_qc_prep, 6802 .qc_issue = ata_dummy_qc_issue, 6803 .error_handler = ata_dummy_error_handler, 6804 .sched_eh = ata_std_sched_eh, 6805 .end_eh = ata_std_end_eh, 6806 }; 6807 6808 const struct ata_port_info ata_dummy_port_info = { 6809 .port_ops = &ata_dummy_port_ops, 6810 }; 6811 6812 /* 6813 * Utility print functions 6814 */ 6815 void ata_port_printk(const struct ata_port *ap, const char *level, 6816 const char *fmt, ...)
6817 { 6818 struct va_format vaf; 6819 va_list args; 6820 6821 va_start(args, fmt); 6822 6823 vaf.fmt = fmt; 6824 vaf.va = &args; 6825 6826 printk("%sata%u: %pV", level, ap->print_id, &vaf); 6827 6828 va_end(args); 6829 } 6830 EXPORT_SYMBOL(ata_port_printk); 6831 6832 void ata_link_printk(const struct ata_link *link, const char *level, 6833 const char *fmt, ...) 6834 { 6835 struct va_format vaf; 6836 va_list args; 6837 6838 va_start(args, fmt); 6839 6840 vaf.fmt = fmt; 6841 vaf.va = &args; 6842 6843 if (sata_pmp_attached(link->ap) || link->ap->slave_link) 6844 printk("%sata%u.%02u: %pV", 6845 level, link->ap->print_id, link->pmp, &vaf); 6846 else 6847 printk("%sata%u: %pV", 6848 level, link->ap->print_id, &vaf); 6849 6850 va_end(args); 6851 } 6852 EXPORT_SYMBOL(ata_link_printk); 6853 6854 void ata_dev_printk(const struct ata_device *dev, const char *level, 6855 const char *fmt, ...) 6856 { 6857 struct va_format vaf; 6858 va_list args; 6859 6860 va_start(args, fmt); 6861 6862 vaf.fmt = fmt; 6863 vaf.va = &args; 6864 6865 printk("%sata%u.%02u: %pV", 6866 level, dev->link->ap->print_id, dev->link->pmp + dev->devno, 6867 &vaf); 6868 6869 va_end(args); 6870 } 6871 EXPORT_SYMBOL(ata_dev_printk); 6872 6873 void ata_print_version(const struct device *dev, const char *version) 6874 { 6875 dev_printk(KERN_DEBUG, dev, "version %s\n", version); 6876 } 6877 EXPORT_SYMBOL(ata_print_version); 6878 6879 /* 6880 * libata is essentially a library of internal helper functions for 6881 * low-level ATA host controller drivers. As such, the API/ABI is 6882 * likely to change as new drivers are added and updated. 6883 * Do not depend on ABI/API stability. 6884 */ 6885 EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 6886 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 6887 EXPORT_SYMBOL_GPL(sata_deb_timing_long); 6888 EXPORT_SYMBOL_GPL(ata_base_port_ops); 6889 EXPORT_SYMBOL_GPL(sata_port_ops); 6890 EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 6891 EXPORT_SYMBOL_GPL(ata_dummy_port_info); 6892 EXPORT_SYMBOL_GPL(ata_link_next); 6893 EXPORT_SYMBOL_GPL(ata_dev_next); 6894 EXPORT_SYMBOL_GPL(ata_std_bios_param); 6895 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); 6896 EXPORT_SYMBOL_GPL(ata_host_init); 6897 EXPORT_SYMBOL_GPL(ata_host_alloc); 6898 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 6899 EXPORT_SYMBOL_GPL(ata_slave_link_init); 6900 EXPORT_SYMBOL_GPL(ata_host_start); 6901 EXPORT_SYMBOL_GPL(ata_host_register); 6902 EXPORT_SYMBOL_GPL(ata_host_activate); 6903 EXPORT_SYMBOL_GPL(ata_host_detach); 6904 EXPORT_SYMBOL_GPL(ata_sg_init); 6905 EXPORT_SYMBOL_GPL(ata_qc_complete); 6906 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 6907 EXPORT_SYMBOL_GPL(atapi_cmd_type); 6908 EXPORT_SYMBOL_GPL(ata_tf_to_fis); 6909 EXPORT_SYMBOL_GPL(ata_tf_from_fis); 6910 EXPORT_SYMBOL_GPL(ata_pack_xfermask); 6911 EXPORT_SYMBOL_GPL(ata_unpack_xfermask); 6912 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); 6913 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); 6914 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 6915 EXPORT_SYMBOL_GPL(ata_mode_string); 6916 EXPORT_SYMBOL_GPL(ata_id_xfermask); 6917 EXPORT_SYMBOL_GPL(ata_do_set_mode); 6918 EXPORT_SYMBOL_GPL(ata_std_qc_defer); 6919 EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 6920 EXPORT_SYMBOL_GPL(ata_dev_disable); 6921 EXPORT_SYMBOL_GPL(sata_set_spd); 6922 EXPORT_SYMBOL_GPL(ata_wait_after_reset); 6923 EXPORT_SYMBOL_GPL(sata_link_debounce); 6924 EXPORT_SYMBOL_GPL(sata_link_resume); 6925 EXPORT_SYMBOL_GPL(sata_link_scr_lpm); 6926 EXPORT_SYMBOL_GPL(ata_std_prereset); 6927 EXPORT_SYMBOL_GPL(sata_link_hardreset); 6928 
EXPORT_SYMBOL_GPL(sata_std_hardreset); 6929 EXPORT_SYMBOL_GPL(ata_std_postreset); 6930 EXPORT_SYMBOL_GPL(ata_dev_classify); 6931 EXPORT_SYMBOL_GPL(ata_dev_pair); 6932 EXPORT_SYMBOL_GPL(ata_ratelimit); 6933 EXPORT_SYMBOL_GPL(ata_msleep); 6934 EXPORT_SYMBOL_GPL(ata_wait_register); 6935 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 6936 EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 6937 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 6938 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 6939 EXPORT_SYMBOL_GPL(__ata_change_queue_depth); 6940 EXPORT_SYMBOL_GPL(sata_scr_valid); 6941 EXPORT_SYMBOL_GPL(sata_scr_read); 6942 EXPORT_SYMBOL_GPL(sata_scr_write); 6943 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 6944 EXPORT_SYMBOL_GPL(ata_link_online); 6945 EXPORT_SYMBOL_GPL(ata_link_offline); 6946 #ifdef CONFIG_PM 6947 EXPORT_SYMBOL_GPL(ata_host_suspend); 6948 EXPORT_SYMBOL_GPL(ata_host_resume); 6949 #endif /* CONFIG_PM */ 6950 EXPORT_SYMBOL_GPL(ata_id_string); 6951 EXPORT_SYMBOL_GPL(ata_id_c_string); 6952 EXPORT_SYMBOL_GPL(ata_do_dev_read_id); 6953 EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6954 6955 EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 6956 EXPORT_SYMBOL_GPL(ata_timing_find_mode); 6957 EXPORT_SYMBOL_GPL(ata_timing_compute); 6958 EXPORT_SYMBOL_GPL(ata_timing_merge); 6959 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); 6960 6961 #ifdef CONFIG_PCI 6962 EXPORT_SYMBOL_GPL(pci_test_config_bits); 6963 EXPORT_SYMBOL_GPL(ata_pci_remove_one); 6964 #ifdef CONFIG_PM 6965 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 6966 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 6967 EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 6968 EXPORT_SYMBOL_GPL(ata_pci_device_resume); 6969 #endif /* CONFIG_PM */ 6970 #endif /* CONFIG_PCI */ 6971 6972 EXPORT_SYMBOL_GPL(ata_platform_remove_one); 6973 6974 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 6975 EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 6976 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 6977 EXPORT_SYMBOL_GPL(ata_port_desc); 6978 #ifdef CONFIG_PCI 6979 EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 6980 #endif /* CONFIG_PCI */ 6981 EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 6982 EXPORT_SYMBOL_GPL(ata_link_abort); 6983 EXPORT_SYMBOL_GPL(ata_port_abort); 6984 EXPORT_SYMBOL_GPL(ata_port_freeze); 6985 EXPORT_SYMBOL_GPL(sata_async_notification); 6986 EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 6987 EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 6988 EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 6989 EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 6990 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); 6991 EXPORT_SYMBOL_GPL(ata_do_eh); 6992 EXPORT_SYMBOL_GPL(ata_std_error_handler); 6993 6994 EXPORT_SYMBOL_GPL(ata_cable_40wire); 6995 EXPORT_SYMBOL_GPL(ata_cable_80wire); 6996 EXPORT_SYMBOL_GPL(ata_cable_unknown); 6997 EXPORT_SYMBOL_GPL(ata_cable_ignore); 6998 EXPORT_SYMBOL_GPL(ata_cable_sata); 6999