/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/leds.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>

#include "libata.h"
#include "libata-transport.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
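/*
 * Roughly, sata_link_debounce() treats each triplet as: sample SStatus
 * every "interval" msecs, require the reading to stay stable for
 * "duration" msecs, and give up after "timeout" msecs.  Callers usually
 * pass one of the tables above to the link resume/hardreset helpers,
 * for example:
 *
 *	rc = sata_link_resume(link, sata_deb_timing_hotplug, deadline);
 */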
const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

atomic_t ata_print_id = ATOMIC_INIT(0);

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

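/*
 * SStatus bits 3:0 are the DET field; the value 0x3 means "device
 * presence detected and PHY communication established", i.e. the link
 * is online.
 */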
static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}

/**
 * ata_link_next - link iteration helper
 * @link: the previous link, NULL to start
 * @ap: ATA port containing links to iterate
 * @mode: iteration mode, one of ATA_LITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}

/**
 * ata_dev_next - device iteration helper
 * @dev: the previous device, NULL to start
 * @link: ATA link containing devices to iterate
 * @mode: iteration mode, one of ATA_DITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
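/*
 * These two iterators are normally not called directly; most code uses
 * the ata_for_each_link() and ata_for_each_dev() wrappers from
 * <linux/libata.h>, for example:
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			... operate on each enabled device ...
 */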
/**
 * ata_dev_phys_link - find physical link for a device
 * @dev: ATA device to look up physical link for
 *
 * Look up physical link which @dev is attached to.  Note that
 * this is different from @dev->link only when @dev is on slave
 * link.  For all other cases, it's the same as @dev->link.
 *
 * LOCKING:
 * Don't care.
 *
 * RETURNS:
 * Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}

/**
 * ata_force_cbl - force cable type according to libata.force
 * @ap: ATA port of interest
 *
 * Force cable type according to libata.force and whine about it.
 * The last entry which has matching port number is used, so it
 * can be specified as part of device force parameters.  For
 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 * same effect.
 *
 * LOCKING:
 * EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 * ata_force_link_limits - force link limits according to libata.force
 * @link: ATA link of interest
 *
 * Force link flags and SATA spd limit according to libata.force
 * and whine about it.  When only the port part is specified
 * (e.g. 1:), the limit applies to all links connected to both
 * the host link and all fan-out ports connected via PMP.  If the
 * device part is specified as 0 (e.g. 1.00:), it specifies the
 * first fan-out link not the host link.  Device number 15 always
 * points to the host link whether PMP is attached or not.  If the
 * controller has slave link, device number 16 points to it.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
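/*
 * The force table entries consumed above come from the libata.force
 * kernel parameter.  As a rough illustration (see kernel-parameters.rst
 * for the full syntax), "libata.force=1.00:udma4" limits device 0 behind
 * port 1 to UDMA/66, while "libata.force=2:1.5Gbps,noncq" applies both
 * values to every link and device on port 2.
 */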
/**
 * ata_force_xfermask - force xfermask according to libata.force
 * @dev: ATA device of interest
 *
 * Force xfer_mask according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}

/**
 * ata_force_horkage - force horkage according to libata.force
 * @dev: ATA device of interest
 *
 * Force horkage according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}

/**
 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 * @opcode: SCSI opcode
 *
 * Determine ATAPI command type from @opcode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall thru */
	default:
		return ATAPI_MISC;
	}
}
/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for command
 * @fis: Buffer into which data will be output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = tf->auxiliary & 0xff;
	fis[17] = (tf->auxiliary >> 8) & 0xff;
	fis[18] = (tf->auxiliary >> 16) & 0xff;
	fis[19] = (tf->auxiliary >> 24) & 0xff;
}

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
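/*
 * The ata_rw_cmds[] lookup above is indexed as base + fua + lba48 + write,
 * where the base is 0 for multi-sector PIO, 8 for single-sector PIO and
 * 16 for DMA.  For example, a DMA LBA48 FUA write resolves to entry
 * 16 + 4 + 2 + 1 = 23 (ATA_CMD_WRITE_FUA_EXT), while a plain PIO LBA28
 * read resolves to entry 8 (ATA_CMD_PIO_READ); the zero entries mark
 * combinations with no command, e.g. FUA without LBA48.
 */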
/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			return U64_MAX;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
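/*
 * The CHS branch above is the usual logical-geometry conversion
 *
 *	block = (cyl * heads + head) * sectors + sect - 1
 *
 * so, for a device reporting 16 heads and 63 sectors per track, C/H/S
 * 0/0/1 is block 0 and C/H/S 1/0/1 is block (1 * 16 + 0) * 63 = 1008.
 * ata_build_rw_tf() below performs the inverse conversion when it builds
 * a CHS taskfile.
 */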
/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 * @class: IO priority class
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag, int class)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;

		if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
			if (class == IOPRIO_CLASS_RT)
				tf->hob_nsect |= ATA_PRIO_HIGH <<
						 ATA_SHIFT_PRIO;
		}
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
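/*
 * For illustration: packing the common driver masks ATA_PIO4, ATA_MWDMA2
 * and ATA_UDMA6 with ata_pack_xfermask() yields a single xfer_mask whose
 * highest set bit corresponds to UDMA/133, so ata_xfer_mask2mode() on it
 * returns XFER_UDMA_6, and ata_xfer_mode2mask(XFER_UDMA_6) below gives
 * back every UDMA bit up to and including UDMA/133.
 */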
/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @modemask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @mode_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
		DPRINTK("found ZAC device by sig\n");
		return ATA_DEV_ZAC;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
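/*
 * The usual consumer is the NUL-terminating variant below; for example,
 * the model string is pulled out of the IDENTIFY data with something like
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */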
/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 * ata_read_native_max_address - Read native max address
 * @dev: target device
 * @max_sectors: out parameter for the result native max address
 *
 * Perform an LBA48 or LBA28 native size query upon the device in
 * question.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted by the drive.
 * -EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
/**
 * ata_set_max_sectors - Set max sectors
 * @dev: target device
 * @new_sectors: new max sectors value to set for the device
 *
 * Set max sectors of @dev to @new_sectors.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted or denied (due to
 * previous non-volatile SET_MAX) by the drive.  -EIO on other
 * errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device.  This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported...  Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sgl: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 * @timeout: Timeout in msecs (0 for default)
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
		qc->result_tf.command |= ATA_SENSE;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 * @timeout: Timeout in msecs (0 for default)
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}
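/*
 * A typical caller follows the pattern used by
 * ata_read_native_max_address() above: initialize a taskfile, fill in
 * the command, then hand it off.  Sketch only; the command chosen here
 * is just an illustration:
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_FLUSH_EXT;
 *	tf.flags |= ATA_TFLAG_DEVICE;
 *	tf.protocol = ATA_PROT_NODATA;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 */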
/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset.  IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied.  See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY.  Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the highest mode possible if we are not using iordy. Return
 * -1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 * ata_do_dev_read_id - default ID read method
 * @dev: device
 * @tf: proposed taskfile
 * @id: data buffer
 *
 * Issue the identify taskfile and hand back the buffer containing
 * identify data. For some RAID controllers and for pre ATA devices
 * this function is wrapped or replaced by the driver
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 * now we abort if we hit that case.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
				     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
		    ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				    "host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
2022 */ 2023 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 2024 err_mask = ata_dev_init_params(dev, id[3], id[6]); 2025 if (err_mask) { 2026 rc = -EIO; 2027 reason = "INIT_DEV_PARAMS failed"; 2028 goto err_out; 2029 } 2030 2031 /* current CHS translation info (id[53-58]) might be 2032 * changed. reread the identify device info. 2033 */ 2034 flags &= ~ATA_READID_POSTRESET; 2035 goto retry; 2036 } 2037 } 2038 2039 *p_class = class; 2040 2041 return 0; 2042 2043 err_out: 2044 if (ata_msg_warn(ap)) 2045 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n", 2046 reason, err_mask); 2047 return rc; 2048 } 2049 2050 /** 2051 * ata_read_log_page - read a specific log page 2052 * @dev: target device 2053 * @log: log to read 2054 * @page: page to read 2055 * @buf: buffer to store read page 2056 * @sectors: number of sectors to read 2057 * 2058 * Read log page using READ_LOG_EXT command. 2059 * 2060 * LOCKING: 2061 * Kernel thread context (may sleep). 2062 * 2063 * RETURNS: 2064 * 0 on success, AC_ERR_* mask otherwise. 2065 */ 2066 unsigned int ata_read_log_page(struct ata_device *dev, u8 log, 2067 u8 page, void *buf, unsigned int sectors) 2068 { 2069 unsigned long ap_flags = dev->link->ap->flags; 2070 struct ata_taskfile tf; 2071 unsigned int err_mask; 2072 bool dma = false; 2073 2074 DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page); 2075 2076 /* 2077 * Return error without actually issuing the command on controllers 2078 * which e.g. lockup on a read log page. 2079 */ 2080 if (ap_flags & ATA_FLAG_NO_LOG_PAGE) 2081 return AC_ERR_DEV; 2082 2083 retry: 2084 ata_tf_init(dev, &tf); 2085 if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) && 2086 !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) { 2087 tf.command = ATA_CMD_READ_LOG_DMA_EXT; 2088 tf.protocol = ATA_PROT_DMA; 2089 dma = true; 2090 } else { 2091 tf.command = ATA_CMD_READ_LOG_EXT; 2092 tf.protocol = ATA_PROT_PIO; 2093 dma = false; 2094 } 2095 tf.lbal = log; 2096 tf.lbam = page; 2097 tf.nsect = sectors; 2098 tf.hob_nsect = sectors >> 8; 2099 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 2100 2101 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 2102 buf, sectors * ATA_SECT_SIZE, 0); 2103 2104 if (err_mask && dma) { 2105 dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG; 2106 ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n"); 2107 goto retry; 2108 } 2109 2110 DPRINTK("EXIT, err_mask=%x\n", err_mask); 2111 return err_mask; 2112 } 2113 2114 static bool ata_log_supported(struct ata_device *dev, u8 log) 2115 { 2116 struct ata_port *ap = dev->link->ap; 2117 2118 if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1)) 2119 return false; 2120 return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false; 2121 } 2122 2123 static bool ata_identify_page_supported(struct ata_device *dev, u8 page) 2124 { 2125 struct ata_port *ap = dev->link->ap; 2126 unsigned int err, i; 2127 2128 if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) { 2129 ata_dev_warn(dev, "ATA Identify Device Log not supported\n"); 2130 return false; 2131 } 2132 2133 /* 2134 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is 2135 * supported. 
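	 *
	 * Page 0 is the list of supported pages: byte 8 holds the number
	 * of entries and the bytes from offset 9 onwards hold the page
	 * numbers themselves, which is exactly what the loop below scans.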
*/ 2137 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf, 2138 1); 2139 if (err) { 2140 ata_dev_info(dev, 2141 "failed to get Device Identify Log Emask 0x%x\n", 2142 err); 2143 return false; 2144 } 2145 2146 for (i = 0; i < ap->sector_buf[8]; i++) { 2147 if (ap->sector_buf[9 + i] == page) 2148 return true; 2149 } 2150 2151 return false; 2152 } 2153 2154 static int ata_do_link_spd_horkage(struct ata_device *dev) 2155 { 2156 struct ata_link *plink = ata_dev_phys_link(dev); 2157 u32 target, target_limit; 2158 2159 if (!sata_scr_valid(plink)) 2160 return 0; 2161 2162 if (dev->horkage & ATA_HORKAGE_1_5_GBPS) 2163 target = 1; 2164 else 2165 return 0; 2166 2167 target_limit = (1 << target) - 1; 2168 2169 /* if already on stricter limit, no need to push further */ 2170 if (plink->sata_spd_limit <= target_limit) 2171 return 0; 2172 2173 plink->sata_spd_limit = target_limit; 2174 2175 /* Request another EH round by returning -EAGAIN if link is 2176 * going faster than the target speed. Forward progress is 2177 * guaranteed by setting sata_spd_limit to target_limit above. 2178 */ 2179 if (plink->sata_spd > target) { 2180 ata_dev_info(dev, "applying link speed limit horkage to %s\n", 2181 sata_spd_string(target)); 2182 return -EAGAIN; 2183 } 2184 return 0; 2185 } 2186 2187 static inline u8 ata_dev_knobble(struct ata_device *dev) 2188 { 2189 struct ata_port *ap = dev->link->ap; 2190 2191 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) 2192 return 0; 2193 2194 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2195 } 2196 2197 static void ata_dev_config_ncq_send_recv(struct ata_device *dev) 2198 { 2199 struct ata_port *ap = dev->link->ap; 2200 unsigned int err_mask; 2201 2202 if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) { 2203 ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n"); 2204 return; 2205 } 2206 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV, 2207 0, ap->sector_buf, 1); 2208 if (err_mask) { 2209 ata_dev_dbg(dev, 2210 "failed to get NCQ Send/Recv Log Emask 0x%x\n", 2211 err_mask); 2212 } else { 2213 u8 *cmds = dev->ncq_send_recv_cmds; 2214 2215 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; 2216 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); 2217 2218 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { 2219 ata_dev_dbg(dev, "disabling queued TRIM support\n"); 2220 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= 2221 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; 2222 } 2223 } 2224 } 2225 2226 static void ata_dev_config_ncq_non_data(struct ata_device *dev) 2227 { 2228 struct ata_port *ap = dev->link->ap; 2229 unsigned int err_mask; 2230 2231 if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) { 2232 ata_dev_warn(dev, 2233 "NCQ Non-Data Log not supported\n"); 2234 return; 2235 } 2236 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA, 2237 0, ap->sector_buf, 1); 2238 if (err_mask) { 2239 ata_dev_dbg(dev, 2240 "failed to get NCQ Non-Data Log Emask 0x%x\n", 2241 err_mask); 2242 } else { 2243 u8 *cmds = dev->ncq_non_data_cmds; 2244 2245 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE); 2246 } 2247 } 2248 2249 static void ata_dev_config_ncq_prio(struct ata_device *dev) 2250 { 2251 struct ata_port *ap = dev->link->ap; 2252 unsigned int err_mask; 2253 2254 if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) { 2255 dev->flags &= ~ATA_DFLAG_NCQ_PRIO; 2256 return; 2257 } 2258 2259 err_mask = ata_read_log_page(dev, 2260 ATA_LOG_IDENTIFY_DEVICE, 2261 ATA_LOG_SATA_SETTINGS, 2262 ap->sector_buf, 2263 1); 2264 if (err_mask) { 2265 ata_dev_dbg(dev, 2266 "failed
to get Identify Device data, Emask 0x%x\n", 2267 err_mask); 2268 return; 2269 } 2270 2271 if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) { 2272 dev->flags |= ATA_DFLAG_NCQ_PRIO; 2273 } else { 2274 dev->flags &= ~ATA_DFLAG_NCQ_PRIO; 2275 ata_dev_dbg(dev, "SATA page does not support priority\n"); 2276 } 2277 2278 } 2279 2280 static int ata_dev_config_ncq(struct ata_device *dev, 2281 char *desc, size_t desc_sz) 2282 { 2283 struct ata_port *ap = dev->link->ap; 2284 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 2285 unsigned int err_mask; 2286 char *aa_desc = ""; 2287 2288 if (!ata_id_has_ncq(dev->id)) { 2289 desc[0] = '\0'; 2290 return 0; 2291 } 2292 if (dev->horkage & ATA_HORKAGE_NONCQ) { 2293 snprintf(desc, desc_sz, "NCQ (not used)"); 2294 return 0; 2295 } 2296 if (ap->flags & ATA_FLAG_NCQ) { 2297 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 2298 dev->flags |= ATA_DFLAG_NCQ; 2299 } 2300 2301 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && 2302 (ap->flags & ATA_FLAG_FPDMA_AA) && 2303 ata_id_has_fpdma_aa(dev->id)) { 2304 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, 2305 SATA_FPDMA_AA); 2306 if (err_mask) { 2307 ata_dev_err(dev, 2308 "failed to enable AA (error_mask=0x%x)\n", 2309 err_mask); 2310 if (err_mask != AC_ERR_DEV) { 2311 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; 2312 return -EIO; 2313 } 2314 } else 2315 aa_desc = ", AA"; 2316 } 2317 2318 if (hdepth >= ddepth) 2319 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); 2320 else 2321 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, 2322 ddepth, aa_desc); 2323 2324 if ((ap->flags & ATA_FLAG_FPDMA_AUX)) { 2325 if (ata_id_has_ncq_send_and_recv(dev->id)) 2326 ata_dev_config_ncq_send_recv(dev); 2327 if (ata_id_has_ncq_non_data(dev->id)) 2328 ata_dev_config_ncq_non_data(dev); 2329 if (ata_id_has_ncq_prio(dev->id)) 2330 ata_dev_config_ncq_prio(dev); 2331 } 2332 2333 return 0; 2334 } 2335 2336 static void ata_dev_config_sense_reporting(struct ata_device *dev) 2337 { 2338 unsigned int err_mask; 2339 2340 if (!ata_id_has_sense_reporting(dev->id)) 2341 return; 2342 2343 if (ata_id_sense_reporting_enabled(dev->id)) 2344 return; 2345 2346 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1); 2347 if (err_mask) { 2348 ata_dev_dbg(dev, 2349 "failed to enable Sense Data Reporting, Emask 0x%x\n", 2350 err_mask); 2351 } 2352 } 2353 2354 static void ata_dev_config_zac(struct ata_device *dev) 2355 { 2356 struct ata_port *ap = dev->link->ap; 2357 unsigned int err_mask; 2358 u8 *identify_buf = ap->sector_buf; 2359 2360 dev->zac_zones_optimal_open = U32_MAX; 2361 dev->zac_zones_optimal_nonseq = U32_MAX; 2362 dev->zac_zones_max_open = U32_MAX; 2363 2364 /* 2365 * Always set the 'ZAC' flag for Host-managed devices. 2366 */ 2367 if (dev->class == ATA_DEV_ZAC) 2368 dev->flags |= ATA_DFLAG_ZAC; 2369 else if (ata_id_zoned_cap(dev->id) == 0x01) 2370 /* 2371 * Check for host-aware devices. 
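		 * (ata_id_zoned_cap() reports the ZONED capability bits of
		 *  the IDENTIFY data; a value of 0x01 identifies a host-aware
		 *  device, which is flagged just like a host-managed one.)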
2372 */ 2373 dev->flags |= ATA_DFLAG_ZAC; 2374 2375 if (!(dev->flags & ATA_DFLAG_ZAC)) 2376 return; 2377 2378 if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) { 2379 ata_dev_warn(dev, 2380 "ATA Zoned Information Log not supported\n"); 2381 return; 2382 } 2383 2384 /* 2385 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information) 2386 */ 2387 err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 2388 ATA_LOG_ZONED_INFORMATION, 2389 identify_buf, 1); 2390 if (!err_mask) { 2391 u64 zoned_cap, opt_open, opt_nonseq, max_open; 2392 2393 zoned_cap = get_unaligned_le64(&identify_buf[8]); 2394 if ((zoned_cap >> 63)) 2395 dev->zac_zoned_cap = (zoned_cap & 1); 2396 opt_open = get_unaligned_le64(&identify_buf[24]); 2397 if ((opt_open >> 63)) 2398 dev->zac_zones_optimal_open = (u32)opt_open; 2399 opt_nonseq = get_unaligned_le64(&identify_buf[32]); 2400 if ((opt_nonseq >> 63)) 2401 dev->zac_zones_optimal_nonseq = (u32)opt_nonseq; 2402 max_open = get_unaligned_le64(&identify_buf[40]); 2403 if ((max_open >> 63)) 2404 dev->zac_zones_max_open = (u32)max_open; 2405 } 2406 } 2407 2408 static void ata_dev_config_trusted(struct ata_device *dev) 2409 { 2410 struct ata_port *ap = dev->link->ap; 2411 u64 trusted_cap; 2412 unsigned int err; 2413 2414 if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) { 2415 ata_dev_warn(dev, 2416 "Security Log not supported\n"); 2417 return; 2418 } 2419 2420 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY, 2421 ap->sector_buf, 1); 2422 if (err) { 2423 ata_dev_dbg(dev, 2424 "failed to read Security Log, Emask 0x%x\n", err); 2425 return; 2426 } 2427 2428 trusted_cap = get_unaligned_le64(&ap->sector_buf[40]); 2429 if (!(trusted_cap & (1ULL << 63))) { 2430 ata_dev_dbg(dev, 2431 "Trusted Computing capability qword not valid!\n"); 2432 return; 2433 } 2434 2435 if (trusted_cap & (1 << 0)) 2436 dev->flags |= ATA_DFLAG_TRUSTED; 2437 } 2438 2439 /** 2440 * ata_dev_configure - Configure the specified ATA/ATAPI device 2441 * @dev: Target device to configure 2442 * 2443 * Configure @dev according to @dev->id. Generic and low-level 2444 * driver specific fixups are also applied. 2445 * 2446 * LOCKING: 2447 * Kernel thread context (may sleep) 2448 * 2449 * RETURNS: 2450 * 0 on success, -errno otherwise 2451 */ 2452 int ata_dev_configure(struct ata_device *dev) 2453 { 2454 struct ata_port *ap = dev->link->ap; 2455 struct ata_eh_context *ehc = &dev->link->eh_context; 2456 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2457 const u16 *id = dev->id; 2458 unsigned long xfer_mask; 2459 unsigned int err_mask; 2460 char revbuf[7]; /* XYZ-99\0 */ 2461 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2462 char modelbuf[ATA_ID_PROD_LEN+1]; 2463 int rc; 2464 2465 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2466 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__); 2467 return 0; 2468 } 2469 2470 if (ata_msg_probe(ap)) 2471 ata_dev_dbg(dev, "%s: ENTER\n", __func__); 2472 2473 /* set horkage */ 2474 dev->horkage |= ata_dev_blacklisted(dev); 2475 ata_force_horkage(dev); 2476 2477 if (dev->horkage & ATA_HORKAGE_DISABLE) { 2478 ata_dev_info(dev, "unsupported device, disabling\n"); 2479 ata_dev_disable(dev); 2480 return 0; 2481 } 2482 2483 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && 2484 dev->class == ATA_DEV_ATAPI) { 2485 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n", 2486 atapi_enabled ? 
"not supported with this driver" 2487 : "disabled"); 2488 ata_dev_disable(dev); 2489 return 0; 2490 } 2491 2492 rc = ata_do_link_spd_horkage(dev); 2493 if (rc) 2494 return rc; 2495 2496 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */ 2497 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) && 2498 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) 2499 dev->horkage |= ATA_HORKAGE_NOLPM; 2500 2501 if (dev->horkage & ATA_HORKAGE_NOLPM) { 2502 ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); 2503 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; 2504 } 2505 2506 /* let ACPI work its magic */ 2507 rc = ata_acpi_on_devcfg(dev); 2508 if (rc) 2509 return rc; 2510 2511 /* massage HPA, do it early as it might change IDENTIFY data */ 2512 rc = ata_hpa_resize(dev); 2513 if (rc) 2514 return rc; 2515 2516 /* print device capabilities */ 2517 if (ata_msg_probe(ap)) 2518 ata_dev_dbg(dev, 2519 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2520 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2521 __func__, 2522 id[49], id[82], id[83], id[84], 2523 id[85], id[86], id[87], id[88]); 2524 2525 /* initialize to-be-configured parameters */ 2526 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2527 dev->max_sectors = 0; 2528 dev->cdb_len = 0; 2529 dev->n_sectors = 0; 2530 dev->cylinders = 0; 2531 dev->heads = 0; 2532 dev->sectors = 0; 2533 dev->multi_count = 0; 2534 2535 /* 2536 * common ATA, ATAPI feature tests 2537 */ 2538 2539 /* find max transfer mode; for printk only */ 2540 xfer_mask = ata_id_xfermask(id); 2541 2542 if (ata_msg_probe(ap)) 2543 ata_dump_id(id); 2544 2545 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2546 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2547 sizeof(fwrevbuf)); 2548 2549 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2550 sizeof(modelbuf)); 2551 2552 /* ATA-specific feature tests */ 2553 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { 2554 if (ata_id_is_cfa(id)) { 2555 /* CPRM may make this media unusable */ 2556 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2557 ata_dev_warn(dev, 2558 "supports DRM functions and may not be fully accessible\n"); 2559 snprintf(revbuf, 7, "CFA"); 2560 } else { 2561 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2562 /* Warn the user if the device has TPM extensions */ 2563 if (ata_id_has_tpm(id)) 2564 ata_dev_warn(dev, 2565 "supports DRM functions and may not be fully accessible\n"); 2566 } 2567 2568 dev->n_sectors = ata_id_n_sectors(id); 2569 2570 /* get current R/W Multiple count setting */ 2571 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { 2572 unsigned int max = dev->id[47] & 0xff; 2573 unsigned int cnt = dev->id[59] & 0xff; 2574 /* only recognize/allow powers of two here */ 2575 if (is_power_of_2(max) && is_power_of_2(cnt)) 2576 if (cnt <= max) 2577 dev->multi_count = cnt; 2578 } 2579 2580 if (ata_id_has_lba(id)) { 2581 const char *lba_desc; 2582 char ncq_desc[24]; 2583 2584 lba_desc = "LBA"; 2585 dev->flags |= ATA_DFLAG_LBA; 2586 if (ata_id_has_lba48(id)) { 2587 dev->flags |= ATA_DFLAG_LBA48; 2588 lba_desc = "LBA48"; 2589 2590 if (dev->n_sectors >= (1UL << 28) && 2591 ata_id_has_flush_ext(id)) 2592 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2593 } 2594 2595 /* config NCQ */ 2596 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2597 if (rc) 2598 return rc; 2599 2600 /* print device info to dmesg */ 2601 if (ata_msg_drv(ap) && print_info) { 2602 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2603 revbuf, modelbuf, fwrevbuf, 2604 ata_mode_string(xfer_mask)); 2605 ata_dev_info(dev, 2606 "%llu sectors, 
multi %u: %s %s\n", 2607 (unsigned long long)dev->n_sectors, 2608 dev->multi_count, lba_desc, ncq_desc); 2609 } 2610 } else { 2611 /* CHS */ 2612 2613 /* Default translation */ 2614 dev->cylinders = id[1]; 2615 dev->heads = id[3]; 2616 dev->sectors = id[6]; 2617 2618 if (ata_id_current_chs_valid(id)) { 2619 /* Current CHS translation is valid. */ 2620 dev->cylinders = id[54]; 2621 dev->heads = id[55]; 2622 dev->sectors = id[56]; 2623 } 2624 2625 /* print device info to dmesg */ 2626 if (ata_msg_drv(ap) && print_info) { 2627 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2628 revbuf, modelbuf, fwrevbuf, 2629 ata_mode_string(xfer_mask)); 2630 ata_dev_info(dev, 2631 "%llu sectors, multi %u, CHS %u/%u/%u\n", 2632 (unsigned long long)dev->n_sectors, 2633 dev->multi_count, dev->cylinders, 2634 dev->heads, dev->sectors); 2635 } 2636 } 2637 2638 /* Check and mark DevSlp capability. Get DevSlp timing variables 2639 * from SATA Settings page of Identify Device Data Log. 2640 */ 2641 if (ata_id_has_devslp(dev->id)) { 2642 u8 *sata_setting = ap->sector_buf; 2643 int i, j; 2644 2645 dev->flags |= ATA_DFLAG_DEVSLP; 2646 err_mask = ata_read_log_page(dev, 2647 ATA_LOG_IDENTIFY_DEVICE, 2648 ATA_LOG_SATA_SETTINGS, 2649 sata_setting, 2650 1); 2651 if (err_mask) 2652 ata_dev_dbg(dev, 2653 "failed to get Identify Device Data, Emask 0x%x\n", 2654 err_mask); 2655 else 2656 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { 2657 j = ATA_LOG_DEVSLP_OFFSET + i; 2658 dev->devslp_timing[i] = sata_setting[j]; 2659 } 2660 } 2661 ata_dev_config_sense_reporting(dev); 2662 ata_dev_config_zac(dev); 2663 ata_dev_config_trusted(dev); 2664 dev->cdb_len = 32; 2665 } 2666 2667 /* ATAPI-specific feature tests */ 2668 else if (dev->class == ATA_DEV_ATAPI) { 2669 const char *cdb_intr_string = ""; 2670 const char *atapi_an_string = ""; 2671 const char *dma_dir_string = ""; 2672 u32 sntf; 2673 2674 rc = atapi_cdb_len(id); 2675 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2676 if (ata_msg_warn(ap)) 2677 ata_dev_warn(dev, "unsupported CDB len\n"); 2678 rc = -EINVAL; 2679 goto err_out_nosup; 2680 } 2681 dev->cdb_len = (unsigned int) rc; 2682 2683 /* Enable ATAPI AN if both the host and device have 2684 * the support. If PMP is attached, SNTF is required 2685 * to enable ATAPI AN to discern between PHY status 2686 * changed notifications and ATAPI ANs. 
2687 */ 2688 if (atapi_an && 2689 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2690 (!sata_pmp_attached(ap) || 2691 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2692 /* issue SET feature command to turn this on */ 2693 err_mask = ata_dev_set_feature(dev, 2694 SETFEATURES_SATA_ENABLE, SATA_AN); 2695 if (err_mask) 2696 ata_dev_err(dev, 2697 "failed to enable ATAPI AN (err_mask=0x%x)\n", 2698 err_mask); 2699 else { 2700 dev->flags |= ATA_DFLAG_AN; 2701 atapi_an_string = ", ATAPI AN"; 2702 } 2703 } 2704 2705 if (ata_id_cdb_intr(dev->id)) { 2706 dev->flags |= ATA_DFLAG_CDB_INTR; 2707 cdb_intr_string = ", CDB intr"; 2708 } 2709 2710 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) { 2711 dev->flags |= ATA_DFLAG_DMADIR; 2712 dma_dir_string = ", DMADIR"; 2713 } 2714 2715 if (ata_id_has_da(dev->id)) { 2716 dev->flags |= ATA_DFLAG_DA; 2717 zpodd_init(dev); 2718 } 2719 2720 /* print device info to dmesg */ 2721 if (ata_msg_drv(ap) && print_info) 2722 ata_dev_info(dev, 2723 "ATAPI: %s, %s, max %s%s%s%s\n", 2724 modelbuf, fwrevbuf, 2725 ata_mode_string(xfer_mask), 2726 cdb_intr_string, atapi_an_string, 2727 dma_dir_string); 2728 } 2729 2730 /* determine max_sectors */ 2731 dev->max_sectors = ATA_MAX_SECTORS; 2732 if (dev->flags & ATA_DFLAG_LBA48) 2733 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2734 2735 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2736 200 sectors */ 2737 if (ata_dev_knobble(dev)) { 2738 if (ata_msg_drv(ap) && print_info) 2739 ata_dev_info(dev, "applying bridge limits\n"); 2740 dev->udma_mask &= ATA_UDMA5; 2741 dev->max_sectors = ATA_MAX_SECTORS; 2742 } 2743 2744 if ((dev->class == ATA_DEV_ATAPI) && 2745 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2746 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2747 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2748 } 2749 2750 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2751 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2752 dev->max_sectors); 2753 2754 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024) 2755 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024, 2756 dev->max_sectors); 2757 2758 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) 2759 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2760 2761 if (ap->ops->dev_config) 2762 ap->ops->dev_config(dev); 2763 2764 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2765 /* Let the user know. We don't want to disallow opens for 2766 rescue purposes, or in case the vendor is just a blithering 2767 idiot. Do this after the dev_config call as some controllers 2768 with buggy firmware may want to avoid reporting false device 2769 bugs */ 2770 2771 if (print_info) { 2772 ata_dev_warn(dev, 2773 "Drive reports diagnostics failure. This may indicate a drive\n"); 2774 ata_dev_warn(dev, 2775 "fault or invalid emulation. Contact drive vendor for information.\n"); 2776 } 2777 } 2778 2779 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { 2780 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n"); 2781 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n"); 2782 } 2783 2784 return 0; 2785 2786 err_out_nosup: 2787 if (ata_msg_probe(ap)) 2788 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__); 2789 return rc; 2790 } 2791 2792 /** 2793 * ata_cable_40wire - return 40 wire cable type 2794 * @ap: port 2795 * 2796 * Helper method for drivers which want to hardwire 40 wire cable 2797 * detection. 
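 *
 * A minimal usage sketch (the driver and ops names here are
 * hypothetical): a driver that knows its port only ever sees a 40 wire
 * cable simply plugs this helper into its port operations instead of
 * implementing detection of its own:
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};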
2798 */ 2799 2800 int ata_cable_40wire(struct ata_port *ap) 2801 { 2802 return ATA_CBL_PATA40; 2803 } 2804 2805 /** 2806 * ata_cable_80wire - return 80 wire cable type 2807 * @ap: port 2808 * 2809 * Helper method for drivers which want to hardwire 80 wire cable 2810 * detection. 2811 */ 2812 2813 int ata_cable_80wire(struct ata_port *ap) 2814 { 2815 return ATA_CBL_PATA80; 2816 } 2817 2818 /** 2819 * ata_cable_unknown - return unknown PATA cable. 2820 * @ap: port 2821 * 2822 * Helper method for drivers which have no PATA cable detection. 2823 */ 2824 2825 int ata_cable_unknown(struct ata_port *ap) 2826 { 2827 return ATA_CBL_PATA_UNK; 2828 } 2829 2830 /** 2831 * ata_cable_ignore - return ignored PATA cable. 2832 * @ap: port 2833 * 2834 * Helper method for drivers which don't use cable type to limit 2835 * transfer mode. 2836 */ 2837 int ata_cable_ignore(struct ata_port *ap) 2838 { 2839 return ATA_CBL_PATA_IGN; 2840 } 2841 2842 /** 2843 * ata_cable_sata - return SATA cable type 2844 * @ap: port 2845 * 2846 * Helper method for drivers which have SATA cables 2847 */ 2848 2849 int ata_cable_sata(struct ata_port *ap) 2850 { 2851 return ATA_CBL_SATA; 2852 } 2853 2854 /** 2855 * ata_bus_probe - Reset and probe ATA bus 2856 * @ap: Bus to probe 2857 * 2858 * Master ATA bus probing function. Initiates a hardware-dependent 2859 * bus reset, then attempts to identify any devices found on 2860 * the bus. 2861 * 2862 * LOCKING: 2863 * PCI/etc. bus probe sem. 2864 * 2865 * RETURNS: 2866 * Zero on success, negative errno otherwise. 2867 */ 2868 2869 int ata_bus_probe(struct ata_port *ap) 2870 { 2871 unsigned int classes[ATA_MAX_DEVICES]; 2872 int tries[ATA_MAX_DEVICES]; 2873 int rc; 2874 struct ata_device *dev; 2875 2876 ata_for_each_dev(dev, &ap->link, ALL) 2877 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2878 2879 retry: 2880 ata_for_each_dev(dev, &ap->link, ALL) { 2881 /* If we issue an SRST then an ATA drive (not ATAPI) 2882 * may change configuration and be in PIO0 timing. If 2883 * we do a hard reset (or are coming from power on) 2884 * this is true for ATA or ATAPI. Until we've set a 2885 * suitable controller mode we should not touch the 2886 * bus as we may be talking too fast. 2887 */ 2888 dev->pio_mode = XFER_PIO_0; 2889 dev->dma_mode = 0xff; 2890 2891 /* If the controller has a pio mode setup function 2892 * then use it to set the chipset to rights. Don't 2893 * touch the DMA setup as that will be dealt with when 2894 * configuring devices. 2895 */ 2896 if (ap->ops->set_piomode) 2897 ap->ops->set_piomode(ap, dev); 2898 } 2899 2900 /* reset and determine device classes */ 2901 ap->ops->phy_reset(ap); 2902 2903 ata_for_each_dev(dev, &ap->link, ALL) { 2904 if (dev->class != ATA_DEV_UNKNOWN) 2905 classes[dev->devno] = dev->class; 2906 else 2907 classes[dev->devno] = ATA_DEV_NONE; 2908 2909 dev->class = ATA_DEV_UNKNOWN; 2910 } 2911 2912 /* read IDENTIFY page and configure devices. 
We have to do the identify 2913 specific sequence bass-ackwards so that PDIAG- is released by 2914 the slave device */ 2915 2916 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { 2917 if (tries[dev->devno]) 2918 dev->class = classes[dev->devno]; 2919 2920 if (!ata_dev_enabled(dev)) 2921 continue; 2922 2923 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2924 dev->id); 2925 if (rc) 2926 goto fail; 2927 } 2928 2929 /* Now ask for the cable type as PDIAG- should have been released */ 2930 if (ap->ops->cable_detect) 2931 ap->cbl = ap->ops->cable_detect(ap); 2932 2933 /* We may have SATA bridge glue hiding here irrespective of 2934 * the reported cable types and sensed types. When SATA 2935 * drives indicate we have a bridge, we don't know which end 2936 * of the link the bridge is which is a problem. 2937 */ 2938 ata_for_each_dev(dev, &ap->link, ENABLED) 2939 if (ata_id_is_sata(dev->id)) 2940 ap->cbl = ATA_CBL_SATA; 2941 2942 /* After the identify sequence we can now set up the devices. We do 2943 this in the normal order so that the user doesn't get confused */ 2944 2945 ata_for_each_dev(dev, &ap->link, ENABLED) { 2946 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2947 rc = ata_dev_configure(dev); 2948 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2949 if (rc) 2950 goto fail; 2951 } 2952 2953 /* configure transfer mode */ 2954 rc = ata_set_mode(&ap->link, &dev); 2955 if (rc) 2956 goto fail; 2957 2958 ata_for_each_dev(dev, &ap->link, ENABLED) 2959 return 0; 2960 2961 return -ENODEV; 2962 2963 fail: 2964 tries[dev->devno]--; 2965 2966 switch (rc) { 2967 case -EINVAL: 2968 /* eeek, something went very wrong, give up */ 2969 tries[dev->devno] = 0; 2970 break; 2971 2972 case -ENODEV: 2973 /* give it just one more chance */ 2974 tries[dev->devno] = min(tries[dev->devno], 1); 2975 case -EIO: 2976 if (tries[dev->devno] == 1) { 2977 /* This is the last chance, better to slow 2978 * down than lose it. 2979 */ 2980 sata_down_spd_limit(&ap->link, 0); 2981 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2982 } 2983 } 2984 2985 if (!tries[dev->devno]) 2986 ata_dev_disable(dev); 2987 2988 goto retry; 2989 } 2990 2991 /** 2992 * sata_print_link_status - Print SATA link status 2993 * @link: SATA link to printk link status about 2994 * 2995 * This function prints link speed and status of a SATA link. 2996 * 2997 * LOCKING: 2998 * None. 
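 *
 * The message emitted looks like, for example (values illustrative
 * only):
 *
 *	ata1: SATA link up 3.0 Gbps (SStatus 123 SControl 300)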
2999 */ 3000 static void sata_print_link_status(struct ata_link *link) 3001 { 3002 u32 sstatus, scontrol, tmp; 3003 3004 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 3005 return; 3006 sata_scr_read(link, SCR_CONTROL, &scontrol); 3007 3008 if (ata_phys_link_online(link)) { 3009 tmp = (sstatus >> 4) & 0xf; 3010 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n", 3011 sata_spd_string(tmp), sstatus, scontrol); 3012 } else { 3013 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n", 3014 sstatus, scontrol); 3015 } 3016 } 3017 3018 /** 3019 * ata_dev_pair - return other device on cable 3020 * @adev: device 3021 * 3022 * Obtain the other device on the same cable, or if none is 3023 * present NULL is returned 3024 */ 3025 3026 struct ata_device *ata_dev_pair(struct ata_device *adev) 3027 { 3028 struct ata_link *link = adev->link; 3029 struct ata_device *pair = &link->device[1 - adev->devno]; 3030 if (!ata_dev_enabled(pair)) 3031 return NULL; 3032 return pair; 3033 } 3034 3035 /** 3036 * sata_down_spd_limit - adjust SATA spd limit downward 3037 * @link: Link to adjust SATA spd limit for 3038 * @spd_limit: Additional limit 3039 * 3040 * Adjust SATA spd limit of @link downward. Note that this 3041 * function only adjusts the limit. The change must be applied 3042 * using sata_set_spd(). 3043 * 3044 * If @spd_limit is non-zero, the speed is limited to equal to or 3045 * lower than @spd_limit if such speed is supported. If 3046 * @spd_limit is slower than any supported speed, only the lowest 3047 * supported speed is allowed. 3048 * 3049 * LOCKING: 3050 * Inherited from caller. 3051 * 3052 * RETURNS: 3053 * 0 on success, negative errno on failure 3054 */ 3055 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) 3056 { 3057 u32 sstatus, spd, mask; 3058 int rc, bit; 3059 3060 if (!sata_scr_valid(link)) 3061 return -EOPNOTSUPP; 3062 3063 /* If SCR can be read, use it to determine the current SPD. 3064 * If not, use cached value in link->sata_spd. 3065 */ 3066 rc = sata_scr_read(link, SCR_STATUS, &sstatus); 3067 if (rc == 0 && ata_sstatus_online(sstatus)) 3068 spd = (sstatus >> 4) & 0xf; 3069 else 3070 spd = link->sata_spd; 3071 3072 mask = link->sata_spd_limit; 3073 if (mask <= 1) 3074 return -EINVAL; 3075 3076 /* unconditionally mask off the highest bit */ 3077 bit = fls(mask) - 1; 3078 mask &= ~(1 << bit); 3079 3080 /* Mask off all speeds higher than or equal to the current 3081 * one. Force 1.5Gbps if current SPD is not available. 3082 */ 3083 if (spd > 1) 3084 mask &= (1 << (spd - 1)) - 1; 3085 else 3086 mask &= 1; 3087 3088 /* were we already at the bottom? */ 3089 if (!mask) 3090 return -EINVAL; 3091 3092 if (spd_limit) { 3093 if (mask & ((1 << spd_limit) - 1)) 3094 mask &= (1 << spd_limit) - 1; 3095 else { 3096 bit = ffs(mask) - 1; 3097 mask = 1 << bit; 3098 } 3099 } 3100 3101 link->sata_spd_limit = mask; 3102 3103 ata_link_warn(link, "limiting SATA link speed to %s\n", 3104 sata_spd_string(fls(mask))); 3105 3106 return 0; 3107 } 3108 3109 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 3110 { 3111 struct ata_link *host_link = &link->ap->link; 3112 u32 limit, target, spd; 3113 3114 limit = link->sata_spd_limit; 3115 3116 /* Don't configure downstream link faster than upstream link. 3117 * It doesn't speed up anything and some PMPs choke on such 3118 * configuration. 
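	 *
	 * For example, with host_link->sata_spd == 2 (3.0 Gbps) the limit
	 * below is clamped to 0x3, fls() then yields a target of at most 2,
	 * and the downstream link is never programmed to run above 3.0 Gbps.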
3119 */ 3120 if (!ata_is_host_link(link) && host_link->sata_spd) 3121 limit &= (1 << host_link->sata_spd) - 1; 3122 3123 if (limit == UINT_MAX) 3124 target = 0; 3125 else 3126 target = fls(limit); 3127 3128 spd = (*scontrol >> 4) & 0xf; 3129 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 3130 3131 return spd != target; 3132 } 3133 3134 /** 3135 * sata_set_spd_needed - is SATA spd configuration needed 3136 * @link: Link in question 3137 * 3138 * Test whether the spd limit in SControl matches 3139 * @link->sata_spd_limit. This function is used to determine 3140 * whether hardreset is necessary to apply SATA spd 3141 * configuration. 3142 * 3143 * LOCKING: 3144 * Inherited from caller. 3145 * 3146 * RETURNS: 3147 * 1 if SATA spd configuration is needed, 0 otherwise. 3148 */ 3149 static int sata_set_spd_needed(struct ata_link *link) 3150 { 3151 u32 scontrol; 3152 3153 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 3154 return 1; 3155 3156 return __sata_set_spd_needed(link, &scontrol); 3157 } 3158 3159 /** 3160 * sata_set_spd - set SATA spd according to spd limit 3161 * @link: Link to set SATA spd for 3162 * 3163 * Set SATA spd of @link according to sata_spd_limit. 3164 * 3165 * LOCKING: 3166 * Inherited from caller. 3167 * 3168 * RETURNS: 3169 * 0 if spd doesn't need to be changed, 1 if spd has been 3170 * changed. Negative errno if SCR registers are inaccessible. 3171 */ 3172 int sata_set_spd(struct ata_link *link) 3173 { 3174 u32 scontrol; 3175 int rc; 3176 3177 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3178 return rc; 3179 3180 if (!__sata_set_spd_needed(link, &scontrol)) 3181 return 0; 3182 3183 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3184 return rc; 3185 3186 return 1; 3187 } 3188 3189 /* 3190 * This mode timing computation functionality is ported over from 3191 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 3192 */ 3193 /* 3194 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 3195 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 3196 * for UDMA6, which is currently supported only by Maxtor drives. 3197 * 3198 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
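 *
 * Each entry below is { mode, setup, act8b, rec8b, cyc8b, active,
 * recover, dmack_hold, cycle, udma }, following the field order of
 * struct ata_timing, with all values given in nanoseconds.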
3199 */ 3200 3201 static const struct ata_timing ata_timing[] = { 3202 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ 3203 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, 3204 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, 3205 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, 3206 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, 3207 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, 3208 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, 3209 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, 3210 3211 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, 3212 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, 3213 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, 3214 3215 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, 3216 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, 3217 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, 3218 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, 3219 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, 3220 3221 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 3222 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, 3223 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, 3224 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, 3225 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, 3226 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, 3227 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, 3228 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, 3229 3230 { 0xFF } 3231 }; 3232 3233 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 3234 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 3235 3236 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 3237 { 3238 q->setup = EZ(t->setup * 1000, T); 3239 q->act8b = EZ(t->act8b * 1000, T); 3240 q->rec8b = EZ(t->rec8b * 1000, T); 3241 q->cyc8b = EZ(t->cyc8b * 1000, T); 3242 q->active = EZ(t->active * 1000, T); 3243 q->recover = EZ(t->recover * 1000, T); 3244 q->dmack_hold = EZ(t->dmack_hold * 1000, T); 3245 q->cycle = EZ(t->cycle * 1000, T); 3246 q->udma = EZ(t->udma * 1000, UT); 3247 } 3248 3249 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 3250 struct ata_timing *m, unsigned int what) 3251 { 3252 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 3253 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 3254 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 3255 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 3256 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 3257 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 3258 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); 3259 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 3260 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 3261 } 3262 3263 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 3264 { 3265 const struct ata_timing *t = ata_timing; 3266 3267 while (xfer_mode > t->mode) 3268 t++; 3269 3270 if (xfer_mode == t->mode) 3271 return t; 3272 3273 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", 3274 __func__, xfer_mode); 3275 3276 return NULL; 3277 } 3278 3279 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 3280 struct ata_timing *t, int T, int UT) 3281 { 3282 const u16 *id = adev->id; 3283 const struct ata_timing *s; 3284 struct ata_timing p; 3285 3286 /* 3287 * Find the mode. 
3288 */ 3289 3290 if (!(s = ata_timing_find_mode(speed))) 3291 return -EINVAL; 3292 3293 memcpy(t, s, sizeof(*s)); 3294 3295 /* 3296 * If the drive is an EIDE drive, it can tell us it needs extended 3297 * PIO/MW_DMA cycle timing. 3298 */ 3299 3300 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 3301 memset(&p, 0, sizeof(p)); 3302 3303 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) { 3304 if (speed <= XFER_PIO_2) 3305 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; 3306 else if ((speed <= XFER_PIO_4) || 3307 (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) 3308 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; 3309 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) 3310 p.cycle = id[ATA_ID_EIDE_DMA_MIN]; 3311 3312 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 3313 } 3314 3315 /* 3316 * Convert the timing to bus clock counts. 3317 */ 3318 3319 ata_timing_quantize(t, t, T, UT); 3320 3321 /* 3322 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 3323 * S.M.A.R.T. and some other commands. We have to ensure that the 3324 * DMA cycle timing is slower than or equal to the fastest PIO timing. 3325 */ 3326 3327 if (speed > XFER_PIO_6) { 3328 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 3329 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 3330 } 3331 3332 /* 3333 * Lengthen active & recovery time so that cycle time is correct. 3334 */ 3335 3336 if (t->act8b + t->rec8b < t->cyc8b) { 3337 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 3338 t->rec8b = t->cyc8b - t->act8b; 3339 } 3340 3341 if (t->active + t->recover < t->cycle) { 3342 t->active += (t->cycle - (t->active + t->recover)) / 2; 3343 t->recover = t->cycle - t->active; 3344 } 3345 3346 /* In a few cases quantisation may produce enough errors to 3347 leave t->cycle too low for the sum of active and recovery; 3348 if so we must correct this */ 3349 if (t->active + t->recover > t->cycle) 3350 t->cycle = t->active + t->recover; 3351 3352 return 0; 3353 } 3354 3355 /** 3356 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration 3357 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. 3358 * @cycle: cycle duration in ns 3359 * 3360 * Return matching xfer mode for @cycle. The returned mode is of 3361 * the transfer type specified by @xfer_shift. If @cycle is too 3362 * slow for @xfer_shift, 0xff is returned. If @cycle is faster 3363 * than the fastest known mode, the fastest mode is returned. 3364 * 3365 * LOCKING: 3366 * None. 3367 * 3368 * RETURNS: 3369 * Matching xfer_mode, 0xff if no match found.
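 *
 * For example, ata_timing_cycle2mode(ATA_SHIFT_UDMA, 30) walks the
 * UDMA rows of the timing table above and returns XFER_UDMA_4: the
 * scan stops before XFER_UDMA_5, whose 20ns cycle would be faster
 * than the requested 30ns.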
3370 */ 3371 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3372 { 3373 u8 base_mode = 0xff, last_mode = 0xff; 3374 const struct ata_xfer_ent *ent; 3375 const struct ata_timing *t; 3376 3377 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3378 if (ent->shift == xfer_shift) 3379 base_mode = ent->base; 3380 3381 for (t = ata_timing_find_mode(base_mode); 3382 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3383 unsigned short this_cycle; 3384 3385 switch (xfer_shift) { 3386 case ATA_SHIFT_PIO: 3387 case ATA_SHIFT_MWDMA: 3388 this_cycle = t->cycle; 3389 break; 3390 case ATA_SHIFT_UDMA: 3391 this_cycle = t->udma; 3392 break; 3393 default: 3394 return 0xff; 3395 } 3396 3397 if (cycle > this_cycle) 3398 break; 3399 3400 last_mode = t->mode; 3401 } 3402 3403 return last_mode; 3404 } 3405 3406 /** 3407 * ata_down_xfermask_limit - adjust dev xfer masks downward 3408 * @dev: Device to adjust xfer masks 3409 * @sel: ATA_DNXFER_* selector 3410 * 3411 * Adjust xfer masks of @dev downward. Note that this function 3412 * does not apply the change. Invoking ata_set_mode() afterwards 3413 * will apply the limit. 3414 * 3415 * LOCKING: 3416 * Inherited from caller. 3417 * 3418 * RETURNS: 3419 * 0 on success, negative errno on failure 3420 */ 3421 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3422 { 3423 char buf[32]; 3424 unsigned long orig_mask, xfer_mask; 3425 unsigned long pio_mask, mwdma_mask, udma_mask; 3426 int quiet, highbit; 3427 3428 quiet = !!(sel & ATA_DNXFER_QUIET); 3429 sel &= ~ATA_DNXFER_QUIET; 3430 3431 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3432 dev->mwdma_mask, 3433 dev->udma_mask); 3434 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3435 3436 switch (sel) { 3437 case ATA_DNXFER_PIO: 3438 highbit = fls(pio_mask) - 1; 3439 pio_mask &= ~(1 << highbit); 3440 break; 3441 3442 case ATA_DNXFER_DMA: 3443 if (udma_mask) { 3444 highbit = fls(udma_mask) - 1; 3445 udma_mask &= ~(1 << highbit); 3446 if (!udma_mask) 3447 return -ENOENT; 3448 } else if (mwdma_mask) { 3449 highbit = fls(mwdma_mask) - 1; 3450 mwdma_mask &= ~(1 << highbit); 3451 if (!mwdma_mask) 3452 return -ENOENT; 3453 } 3454 break; 3455 3456 case ATA_DNXFER_40C: 3457 udma_mask &= ATA_UDMA_MASK_40C; 3458 break; 3459 3460 case ATA_DNXFER_FORCE_PIO0: 3461 pio_mask &= 1; 3462 case ATA_DNXFER_FORCE_PIO: 3463 mwdma_mask = 0; 3464 udma_mask = 0; 3465 break; 3466 3467 default: 3468 BUG(); 3469 } 3470 3471 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3472 3473 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3474 return -ENOENT; 3475 3476 if (!quiet) { 3477 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3478 snprintf(buf, sizeof(buf), "%s:%s", 3479 ata_mode_string(xfer_mask), 3480 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3481 else 3482 snprintf(buf, sizeof(buf), "%s", 3483 ata_mode_string(xfer_mask)); 3484 3485 ata_dev_warn(dev, "limiting speed to %s\n", buf); 3486 } 3487 3488 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3489 &dev->udma_mask); 3490 3491 return 0; 3492 } 3493 3494 static int ata_dev_set_mode(struct ata_device *dev) 3495 { 3496 struct ata_port *ap = dev->link->ap; 3497 struct ata_eh_context *ehc = &dev->link->eh_context; 3498 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; 3499 const char *dev_err_whine = ""; 3500 int ign_dev_err = 0; 3501 unsigned int err_mask = 0; 3502 int rc; 3503 3504 dev->flags &= ~ATA_DFLAG_PIO; 3505 if (dev->xfer_shift == ATA_SHIFT_PIO) 3506 dev->flags |= 
ATA_DFLAG_PIO; 3507 3508 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) 3509 dev_err_whine = " (SET_XFERMODE skipped)"; 3510 else { 3511 if (nosetxfer) 3512 ata_dev_warn(dev, 3513 "NOSETXFER but PATA detected - can't " 3514 "skip SETXFER, might malfunction\n"); 3515 err_mask = ata_dev_set_xfermode(dev); 3516 } 3517 3518 if (err_mask & ~AC_ERR_DEV) 3519 goto fail; 3520 3521 /* revalidate */ 3522 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3523 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3524 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3525 if (rc) 3526 return rc; 3527 3528 if (dev->xfer_shift == ATA_SHIFT_PIO) { 3529 /* Old CFA may refuse this command, which is just fine */ 3530 if (ata_id_is_cfa(dev->id)) 3531 ign_dev_err = 1; 3532 /* Catch several broken garbage emulations plus some pre 3533 ATA devices */ 3534 if (ata_id_major_version(dev->id) == 0 && 3535 dev->pio_mode <= XFER_PIO_2) 3536 ign_dev_err = 1; 3537 /* Some very old devices and some bad newer ones fail 3538 any kind of SET_XFERMODE request but support PIO0-2 3539 timings and no IORDY */ 3540 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) 3541 ign_dev_err = 1; 3542 } 3543 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3544 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3545 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3546 dev->dma_mode == XFER_MW_DMA_0 && 3547 (dev->id[63] >> 8) & 1) 3548 ign_dev_err = 1; 3549 3550 /* if the device is actually configured correctly, ignore dev err */ 3551 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3552 ign_dev_err = 1; 3553 3554 if (err_mask & AC_ERR_DEV) { 3555 if (!ign_dev_err) 3556 goto fail; 3557 else 3558 dev_err_whine = " (device error ignored)"; 3559 } 3560 3561 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3562 dev->xfer_shift, (int)dev->xfer_mode); 3563 3564 ata_dev_info(dev, "configured for %s%s\n", 3565 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3566 dev_err_whine); 3567 3568 return 0; 3569 3570 fail: 3571 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask); 3572 return -EIO; 3573 } 3574 3575 /** 3576 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3577 * @link: link on which timings will be programmed 3578 * @r_failed_dev: out parameter for failed device 3579 * 3580 * Standard implementation of the function used to tune and set 3581 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3582 * ata_dev_set_mode() fails, pointer to the failing device is 3583 * returned in @r_failed_dev. 3584 * 3585 * LOCKING: 3586 * PCI/etc. bus probe sem. 
3587 * 3588 * RETURNS: 3589 * 0 on success, negative errno otherwise 3590 */ 3591 3592 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3593 { 3594 struct ata_port *ap = link->ap; 3595 struct ata_device *dev; 3596 int rc = 0, used_dma = 0, found = 0; 3597 3598 /* step 1: calculate xfer_mask */ 3599 ata_for_each_dev(dev, link, ENABLED) { 3600 unsigned long pio_mask, dma_mask; 3601 unsigned int mode_mask; 3602 3603 mode_mask = ATA_DMA_MASK_ATA; 3604 if (dev->class == ATA_DEV_ATAPI) 3605 mode_mask = ATA_DMA_MASK_ATAPI; 3606 else if (ata_id_is_cfa(dev->id)) 3607 mode_mask = ATA_DMA_MASK_CFA; 3608 3609 ata_dev_xfermask(dev); 3610 ata_force_xfermask(dev); 3611 3612 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 3613 3614 if (libata_dma_mask & mode_mask) 3615 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, 3616 dev->udma_mask); 3617 else 3618 dma_mask = 0; 3619 3620 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 3621 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3622 3623 found = 1; 3624 if (ata_dma_enabled(dev)) 3625 used_dma = 1; 3626 } 3627 if (!found) 3628 goto out; 3629 3630 /* step 2: always set host PIO timings */ 3631 ata_for_each_dev(dev, link, ENABLED) { 3632 if (dev->pio_mode == 0xff) { 3633 ata_dev_warn(dev, "no PIO support\n"); 3634 rc = -EINVAL; 3635 goto out; 3636 } 3637 3638 dev->xfer_mode = dev->pio_mode; 3639 dev->xfer_shift = ATA_SHIFT_PIO; 3640 if (ap->ops->set_piomode) 3641 ap->ops->set_piomode(ap, dev); 3642 } 3643 3644 /* step 3: set host DMA timings */ 3645 ata_for_each_dev(dev, link, ENABLED) { 3646 if (!ata_dma_enabled(dev)) 3647 continue; 3648 3649 dev->xfer_mode = dev->dma_mode; 3650 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); 3651 if (ap->ops->set_dmamode) 3652 ap->ops->set_dmamode(ap, dev); 3653 } 3654 3655 /* step 4: update devices' xfer mode */ 3656 ata_for_each_dev(dev, link, ENABLED) { 3657 rc = ata_dev_set_mode(dev); 3658 if (rc) 3659 goto out; 3660 } 3661 3662 /* Record simplex status. If we selected DMA then the other 3663 * host channels are not permitted to do so. 3664 */ 3665 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3666 ap->host->simplex_claimed = ap; 3667 3668 out: 3669 if (rc) 3670 *r_failed_dev = dev; 3671 return rc; 3672 } 3673 3674 /** 3675 * ata_wait_ready - wait for link to become ready 3676 * @link: link to be waited on 3677 * @deadline: deadline jiffies for the operation 3678 * @check_ready: callback to check link readiness 3679 * 3680 * Wait for @link to become ready. @check_ready should return 3681 * positive number if @link is ready, 0 if it isn't, -ENODEV if 3682 * link doesn't seem to be occupied, other errno for other error 3683 * conditions. 3684 * 3685 * Transient -ENODEV conditions are allowed for 3686 * ATA_TMOUT_FF_WAIT. 3687 * 3688 * LOCKING: 3689 * EH context. 3690 * 3691 * RETURNS: 3692 * 0 if @link is ready before @deadline; otherwise, -errno. 3693 */ 3694 int ata_wait_ready(struct ata_link *link, unsigned long deadline, 3695 int (*check_ready)(struct ata_link *link)) 3696 { 3697 unsigned long start = jiffies; 3698 unsigned long nodev_deadline; 3699 int warned = 0; 3700 3701 /* choose which 0xff timeout to use, read comment in libata.h */ 3702 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) 3703 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); 3704 else 3705 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); 3706 3707 /* Slave readiness can't be tested separately from master. 
On 3708 * M/S emulation configuration, this function should be called 3709 * only on the master and it will handle both master and slave. 3710 */ 3711 WARN_ON(link == link->ap->slave_link); 3712 3713 if (time_after(nodev_deadline, deadline)) 3714 nodev_deadline = deadline; 3715 3716 while (1) { 3717 unsigned long now = jiffies; 3718 int ready, tmp; 3719 3720 ready = tmp = check_ready(link); 3721 if (ready > 0) 3722 return 0; 3723 3724 /* 3725 * -ENODEV could be transient. Ignore -ENODEV if link 3726 * is online. Also, some SATA devices take a long 3727 * time to clear 0xff after reset. Wait for 3728 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't 3729 * offline. 3730 * 3731 * Note that some PATA controllers (pata_ali) explode 3732 * if the status register is read more than once when 3733 * there's no device attached. 3734 */ 3735 if (ready == -ENODEV) { 3736 if (ata_link_online(link)) 3737 ready = 0; 3738 else if ((link->ap->flags & ATA_FLAG_SATA) && 3739 !ata_link_offline(link) && 3740 time_before(now, nodev_deadline)) 3741 ready = 0; 3742 } 3743 3744 if (ready) 3745 return ready; 3746 if (time_after(now, deadline)) 3747 return -EBUSY; 3748 3749 if (!warned && time_after(now, start + 5 * HZ) && 3750 (deadline - now > 3 * HZ)) { 3751 ata_link_warn(link, 3752 "link is slow to respond, please be patient " 3753 "(ready=%d)\n", tmp); 3754 warned = 1; 3755 } 3756 3757 ata_msleep(link->ap, 50); 3758 } 3759 } 3760 3761 /** 3762 * ata_wait_after_reset - wait for link to become ready after reset 3763 * @link: link to be waited on 3764 * @deadline: deadline jiffies for the operation 3765 * @check_ready: callback to check link readiness 3766 * 3767 * Wait for @link to become ready after reset. 3768 * 3769 * LOCKING: 3770 * EH context. 3771 * 3772 * RETURNS: 3773 * 0 if @link is ready before @deadline; otherwise, -errno. 3774 */ 3775 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, 3776 int (*check_ready)(struct ata_link *link)) 3777 { 3778 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); 3779 3780 return ata_wait_ready(link, deadline, check_ready); 3781 } 3782 3783 /** 3784 * sata_link_debounce - debounce SATA phy status 3785 * @link: ATA link to debounce SATA phy status for 3786 * @params: timing parameters { interval, duration, timeout } in msec 3787 * @deadline: deadline jiffies for the operation 3788 * 3789 * Make sure SStatus of @link reaches stable state, determined by 3790 * holding the same value where DET is not 1 for @duration polled 3791 * every @interval, before @timeout. Timeout constrains the 3792 * beginning of the stable state. Because DET gets stuck at 1 on 3793 * some controllers after hot unplugging, this function waits 3794 * until the timeout, then returns 0 if DET is stable at 1. 3795 * 3796 * @timeout is further limited by @deadline. The sooner of the 3797 * two is used. 3798 * 3799 * LOCKING: 3800 * Kernel thread context (may sleep) 3801 * 3802 * RETURNS: 3803 * 0 on success, -errno on failure.
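 *
 * For example, with @params of { 10, 200, 1000 } (illustrative values
 * only) SStatus is sampled every 10ms and must hold the same DET value
 * for 200ms (DET stuck at 1 is special-cased as described above), with
 * the whole wait capped at 1000ms and further clamped by @deadline.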
3804 */ 3805 int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3806 unsigned long deadline) 3807 { 3808 unsigned long interval = params[0]; 3809 unsigned long duration = params[1]; 3810 unsigned long last_jiffies, t; 3811 u32 last, cur; 3812 int rc; 3813 3814 t = ata_deadline(jiffies, params[2]); 3815 if (time_before(t, deadline)) 3816 deadline = t; 3817 3818 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3819 return rc; 3820 cur &= 0xf; 3821 3822 last = cur; 3823 last_jiffies = jiffies; 3824 3825 while (1) { 3826 ata_msleep(link->ap, interval); 3827 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3828 return rc; 3829 cur &= 0xf; 3830 3831 /* DET stable? */ 3832 if (cur == last) { 3833 if (cur == 1 && time_before(jiffies, deadline)) 3834 continue; 3835 if (time_after(jiffies, 3836 ata_deadline(last_jiffies, duration))) 3837 return 0; 3838 continue; 3839 } 3840 3841 /* unstable, start over */ 3842 last = cur; 3843 last_jiffies = jiffies; 3844 3845 /* Check deadline. If debouncing failed, return 3846 * -EPIPE to tell upper layer to lower link speed. 3847 */ 3848 if (time_after(jiffies, deadline)) 3849 return -EPIPE; 3850 } 3851 } 3852 3853 /** 3854 * sata_link_resume - resume SATA link 3855 * @link: ATA link to resume SATA 3856 * @params: timing parameters { interval, duration, timeout } in msec 3857 * @deadline: deadline jiffies for the operation 3858 * 3859 * Resume SATA phy @link and debounce it. 3860 * 3861 * LOCKING: 3862 * Kernel thread context (may sleep) 3863 * 3864 * RETURNS: 3865 * 0 on success, -errno on failure. 3866 */ 3867 int sata_link_resume(struct ata_link *link, const unsigned long *params, 3868 unsigned long deadline) 3869 { 3870 int tries = ATA_LINK_RESUME_TRIES; 3871 u32 scontrol, serror; 3872 int rc; 3873 3874 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3875 return rc; 3876 3877 /* 3878 * Writes to SControl sometimes get ignored under certain 3879 * controllers (ata_piix SIDPR). Make sure DET actually is 3880 * cleared. 3881 */ 3882 do { 3883 scontrol = (scontrol & 0x0f0) | 0x300; 3884 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3885 return rc; 3886 /* 3887 * Some PHYs react badly if SStatus is pounded 3888 * immediately after resuming. Delay 200ms before 3889 * debouncing. 3890 */ 3891 if (!(link->flags & ATA_LFLAG_NO_DB_DELAY)) 3892 ata_msleep(link->ap, 200); 3893 3894 /* is SControl restored correctly? */ 3895 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3896 return rc; 3897 } while ((scontrol & 0xf0f) != 0x300 && --tries); 3898 3899 if ((scontrol & 0xf0f) != 0x300) { 3900 ata_link_warn(link, "failed to resume link (SControl %X)\n", 3901 scontrol); 3902 return 0; 3903 } 3904 3905 if (tries < ATA_LINK_RESUME_TRIES) 3906 ata_link_warn(link, "link resume succeeded after %d retries\n", 3907 ATA_LINK_RESUME_TRIES - tries); 3908 3909 if ((rc = sata_link_debounce(link, params, deadline))) 3910 return rc; 3911 3912 /* clear SError, some PHYs require this even for SRST to work */ 3913 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) 3914 rc = sata_scr_write(link, SCR_ERROR, serror); 3915 3916 return rc != -EINVAL ? rc : 0; 3917 } 3918 3919 /** 3920 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields 3921 * @link: ATA link to manipulate SControl for 3922 * @policy: LPM policy to configure 3923 * @spm_wakeup: initiate LPM transition to active state 3924 * 3925 * Manipulate the IPM field of the SControl register of @link 3926 * according to @policy. 
If @policy is ATA_LPM_MAX_POWER and 3927 * @spm_wakeup is %true, the SPM field is manipulated to wake up 3928 * the link. This function also clears PHYRDY_CHG before 3929 * returning. 3930 * 3931 * LOCKING: 3932 * EH context. 3933 * 3934 * RETURNS: 3935 * 0 on success, -errno otherwise. 3936 */ 3937 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, 3938 bool spm_wakeup) 3939 { 3940 struct ata_eh_context *ehc = &link->eh_context; 3941 bool woken_up = false; 3942 u32 scontrol; 3943 int rc; 3944 3945 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 3946 if (rc) 3947 return rc; 3948 3949 switch (policy) { 3950 case ATA_LPM_MAX_POWER: 3951 /* disable all LPM transitions */ 3952 scontrol |= (0x7 << 8); 3953 /* initiate transition to active state */ 3954 if (spm_wakeup) { 3955 scontrol |= (0x4 << 12); 3956 woken_up = true; 3957 } 3958 break; 3959 case ATA_LPM_MED_POWER: 3960 /* allow LPM to PARTIAL */ 3961 scontrol &= ~(0x1 << 8); 3962 scontrol |= (0x6 << 8); 3963 break; 3964 case ATA_LPM_MIN_POWER: 3965 if (ata_link_nr_enabled(link) > 0) 3966 /* no restrictions on LPM transitions */ 3967 scontrol &= ~(0x7 << 8); 3968 else { 3969 /* empty port, power off */ 3970 scontrol &= ~0xf; 3971 scontrol |= (0x1 << 2); 3972 } 3973 break; 3974 default: 3975 WARN_ON(1); 3976 } 3977 3978 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 3979 if (rc) 3980 return rc; 3981 3982 /* give the link time to transit out of LPM state */ 3983 if (woken_up) 3984 msleep(10); 3985 3986 /* clear PHYRDY_CHG from SError */ 3987 ehc->i.serror &= ~SERR_PHYRDY_CHG; 3988 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); 3989 } 3990 3991 /** 3992 * ata_std_prereset - prepare for reset 3993 * @link: ATA link to be reset 3994 * @deadline: deadline jiffies for the operation 3995 * 3996 * @link is about to be reset. Initialize it. Failure from 3997 * prereset makes libata abort whole reset sequence and give up 3998 * that port, so prereset should be best-effort. It does its 3999 * best to prepare for reset sequence but if things go wrong, it 4000 * should just whine, not fail. 4001 * 4002 * LOCKING: 4003 * Kernel thread context (may sleep) 4004 * 4005 * RETURNS: 4006 * 0 on success, -errno otherwise. 4007 */ 4008 int ata_std_prereset(struct ata_link *link, unsigned long deadline) 4009 { 4010 struct ata_port *ap = link->ap; 4011 struct ata_eh_context *ehc = &link->eh_context; 4012 const unsigned long *timing = sata_ehc_deb_timing(ehc); 4013 int rc; 4014 4015 /* if we're about to do hardreset, nothing more to do */ 4016 if (ehc->i.action & ATA_EH_HARDRESET) 4017 return 0; 4018 4019 /* if SATA, resume link */ 4020 if (ap->flags & ATA_FLAG_SATA) { 4021 rc = sata_link_resume(link, timing, deadline); 4022 /* whine about phy resume failure but proceed */ 4023 if (rc && rc != -EOPNOTSUPP) 4024 ata_link_warn(link, 4025 "failed to resume link for reset (errno=%d)\n", 4026 rc); 4027 } 4028 4029 /* no point in trying softreset on offline link */ 4030 if (ata_phys_link_offline(link)) 4031 ehc->i.action &= ~ATA_EH_SOFTRESET; 4032 4033 return 0; 4034 } 4035 4036 /** 4037 * sata_link_hardreset - reset link via SATA phy reset 4038 * @link: link to reset 4039 * @timing: timing parameters { interval, duration, timeout } in msec 4040 * @deadline: deadline jiffies for the operation 4041 * @online: optional out parameter indicating link onlineness 4042 * @check_ready: optional callback to check link readiness 4043 * 4044 * SATA phy-reset @link using DET bits of SControl register. 
4045 * After hardreset, link readiness is waited upon using 4046 * ata_wait_ready() if @check_ready is specified. LLDs are 4047 * allowed to not specify @check_ready and wait itself after this 4048 * function returns. Device classification is LLD's 4049 * responsibility. 4050 * 4051 * *@online is set to one iff reset succeeded and @link is online 4052 * after reset. 4053 * 4054 * LOCKING: 4055 * Kernel thread context (may sleep) 4056 * 4057 * RETURNS: 4058 * 0 on success, -errno otherwise. 4059 */ 4060 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 4061 unsigned long deadline, 4062 bool *online, int (*check_ready)(struct ata_link *)) 4063 { 4064 u32 scontrol; 4065 int rc; 4066 4067 DPRINTK("ENTER\n"); 4068 4069 if (online) 4070 *online = false; 4071 4072 if (sata_set_spd_needed(link)) { 4073 /* SATA spec says nothing about how to reconfigure 4074 * spd. To be on the safe side, turn off phy during 4075 * reconfiguration. This works for at least ICH7 AHCI 4076 * and Sil3124. 4077 */ 4078 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 4079 goto out; 4080 4081 scontrol = (scontrol & 0x0f0) | 0x304; 4082 4083 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 4084 goto out; 4085 4086 sata_set_spd(link); 4087 } 4088 4089 /* issue phy wake/reset */ 4090 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 4091 goto out; 4092 4093 scontrol = (scontrol & 0x0f0) | 0x301; 4094 4095 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 4096 goto out; 4097 4098 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 4099 * 10.4.2 says at least 1 ms. 4100 */ 4101 ata_msleep(link->ap, 1); 4102 4103 /* bring link back */ 4104 rc = sata_link_resume(link, timing, deadline); 4105 if (rc) 4106 goto out; 4107 /* if link is offline nothing more to do */ 4108 if (ata_phys_link_offline(link)) 4109 goto out; 4110 4111 /* Link is online. From this point, -ENODEV too is an error. */ 4112 if (online) 4113 *online = true; 4114 4115 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { 4116 /* If PMP is supported, we have to do follow-up SRST. 4117 * Some PMPs don't send D2H Reg FIS after hardreset if 4118 * the first port is empty. Wait only for 4119 * ATA_TMOUT_PMP_SRST_WAIT. 4120 */ 4121 if (check_ready) { 4122 unsigned long pmp_deadline; 4123 4124 pmp_deadline = ata_deadline(jiffies, 4125 ATA_TMOUT_PMP_SRST_WAIT); 4126 if (time_after(pmp_deadline, deadline)) 4127 pmp_deadline = deadline; 4128 ata_wait_ready(link, pmp_deadline, check_ready); 4129 } 4130 rc = -EAGAIN; 4131 goto out; 4132 } 4133 4134 rc = 0; 4135 if (check_ready) 4136 rc = ata_wait_ready(link, deadline, check_ready); 4137 out: 4138 if (rc && rc != -EAGAIN) { 4139 /* online is set iff link is online && reset succeeded */ 4140 if (online) 4141 *online = false; 4142 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); 4143 } 4144 DPRINTK("EXIT, rc=%d\n", rc); 4145 return rc; 4146 } 4147 4148 /** 4149 * sata_std_hardreset - COMRESET w/o waiting or classification 4150 * @link: link to reset 4151 * @class: resulting class of attached device 4152 * @deadline: deadline jiffies for the operation 4153 * 4154 * Standard SATA COMRESET w/o waiting or classification. 4155 * 4156 * LOCKING: 4157 * Kernel thread context (may sleep) 4158 * 4159 * RETURNS: 4160 * 0 if link offline, -EAGAIN if link online, -errno on errors. 
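 *
 * For comparison, an LLD that can poll device readiness itself would
 * normally pass its own @check_ready callback to sata_link_hardreset()
 * and then classify the attached device, since classification is the
 * LLD's responsibility.  A rough sketch only (my_check_ready and
 * my_dev_classify are hypothetical driver helpers):
 *
 *	rc = sata_link_hardreset(link, timing, deadline, &online,
 *				 my_check_ready);
 *	if (online)
 *		*class = my_dev_classify(link->ap);
 *	return rc;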
4161 */ 4162 int sata_std_hardreset(struct ata_link *link, unsigned int *class, 4163 unsigned long deadline) 4164 { 4165 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 4166 bool online; 4167 int rc; 4168 4169 /* do hardreset */ 4170 rc = sata_link_hardreset(link, timing, deadline, &online, NULL); 4171 return online ? -EAGAIN : rc; 4172 } 4173 4174 /** 4175 * ata_std_postreset - standard postreset callback 4176 * @link: the target ata_link 4177 * @classes: classes of attached devices 4178 * 4179 * This function is invoked after a successful reset. Note that 4180 * the device might have been reset more than once using 4181 * different reset methods before postreset is invoked. 4182 * 4183 * LOCKING: 4184 * Kernel thread context (may sleep) 4185 */ 4186 void ata_std_postreset(struct ata_link *link, unsigned int *classes) 4187 { 4188 u32 serror; 4189 4190 DPRINTK("ENTER\n"); 4191 4192 /* reset complete, clear SError */ 4193 if (!sata_scr_read(link, SCR_ERROR, &serror)) 4194 sata_scr_write(link, SCR_ERROR, serror); 4195 4196 /* print link status */ 4197 sata_print_link_status(link); 4198 4199 DPRINTK("EXIT\n"); 4200 } 4201 4202 /** 4203 * ata_dev_same_device - Determine whether new ID matches configured device 4204 * @dev: device to compare against 4205 * @new_class: class of the new device 4206 * @new_id: IDENTIFY page of the new device 4207 * 4208 * Compare @new_class and @new_id against @dev and determine 4209 * whether @dev is the device indicated by @new_class and 4210 * @new_id. 4211 * 4212 * LOCKING: 4213 * None. 4214 * 4215 * RETURNS: 4216 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 4217 */ 4218 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 4219 const u16 *new_id) 4220 { 4221 const u16 *old_id = dev->id; 4222 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 4223 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 4224 4225 if (dev->class != new_class) { 4226 ata_dev_info(dev, "class mismatch %d != %d\n", 4227 dev->class, new_class); 4228 return 0; 4229 } 4230 4231 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 4232 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 4233 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 4234 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 4235 4236 if (strcmp(model[0], model[1])) { 4237 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n", 4238 model[0], model[1]); 4239 return 0; 4240 } 4241 4242 if (strcmp(serial[0], serial[1])) { 4243 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n", 4244 serial[0], serial[1]); 4245 return 0; 4246 } 4247 4248 return 1; 4249 } 4250 4251 /** 4252 * ata_dev_reread_id - Re-read IDENTIFY data 4253 * @dev: target ATA device 4254 * @readid_flags: read ID flags 4255 * 4256 * Re-read IDENTIFY page and make sure @dev is still attached to 4257 * the port. 4258 * 4259 * LOCKING: 4260 * Kernel thread context (may sleep) 4261 * 4262 * RETURNS: 4263 * 0 on success, negative errno otherwise 4264 */ 4265 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 4266 { 4267 unsigned int class = dev->class; 4268 u16 *id = (void *)dev->link->ap->sector_buf; 4269 int rc; 4270 4271 /* read ID data */ 4272 rc = ata_dev_read_id(dev, &class, readid_flags, id); 4273 if (rc) 4274 return rc; 4275 4276 /* is the device still there? 
*/ 4277 if (!ata_dev_same_device(dev, class, id)) 4278 return -ENODEV; 4279 4280 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 4281 return 0; 4282 } 4283 4284 /** 4285 * ata_dev_revalidate - Revalidate ATA device 4286 * @dev: device to revalidate 4287 * @new_class: new class code 4288 * @readid_flags: read ID flags 4289 * 4290 * Re-read IDENTIFY page, make sure @dev is still attached to the 4291 * port and reconfigure it according to the new IDENTIFY page. 4292 * 4293 * LOCKING: 4294 * Kernel thread context (may sleep) 4295 * 4296 * RETURNS: 4297 * 0 on success, negative errno otherwise 4298 */ 4299 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 4300 unsigned int readid_flags) 4301 { 4302 u64 n_sectors = dev->n_sectors; 4303 u64 n_native_sectors = dev->n_native_sectors; 4304 int rc; 4305 4306 if (!ata_dev_enabled(dev)) 4307 return -ENODEV; 4308 4309 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 4310 if (ata_class_enabled(new_class) && 4311 new_class != ATA_DEV_ATA && 4312 new_class != ATA_DEV_ATAPI && 4313 new_class != ATA_DEV_ZAC && 4314 new_class != ATA_DEV_SEMB) { 4315 ata_dev_info(dev, "class mismatch %u != %u\n", 4316 dev->class, new_class); 4317 rc = -ENODEV; 4318 goto fail; 4319 } 4320 4321 /* re-read ID */ 4322 rc = ata_dev_reread_id(dev, readid_flags); 4323 if (rc) 4324 goto fail; 4325 4326 /* configure device according to the new ID */ 4327 rc = ata_dev_configure(dev); 4328 if (rc) 4329 goto fail; 4330 4331 /* verify n_sectors hasn't changed */ 4332 if (dev->class != ATA_DEV_ATA || !n_sectors || 4333 dev->n_sectors == n_sectors) 4334 return 0; 4335 4336 /* n_sectors has changed */ 4337 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n", 4338 (unsigned long long)n_sectors, 4339 (unsigned long long)dev->n_sectors); 4340 4341 /* 4342 * Something could have caused HPA to be unlocked 4343 * involuntarily. If n_native_sectors hasn't changed and the 4344 * new size matches it, keep the device. 4345 */ 4346 if (dev->n_native_sectors == n_native_sectors && 4347 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { 4348 ata_dev_warn(dev, 4349 "new n_sectors matches native, probably " 4350 "late HPA unlock, n_sectors updated\n"); 4351 /* use the larger n_sectors */ 4352 return 0; 4353 } 4354 4355 /* 4356 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try 4357 * unlocking HPA in those cases. 
4358 * 4359 * https://bugzilla.kernel.org/show_bug.cgi?id=15396 4360 */ 4361 if (dev->n_native_sectors == n_native_sectors && 4362 dev->n_sectors < n_sectors && n_sectors == n_native_sectors && 4363 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { 4364 ata_dev_warn(dev, 4365 "old n_sectors matches native, probably " 4366 "late HPA lock, will try to unlock HPA\n"); 4367 /* try unlocking HPA */ 4368 dev->flags |= ATA_DFLAG_UNLOCK_HPA; 4369 rc = -EIO; 4370 } else 4371 rc = -ENODEV; 4372 4373 /* restore original n_[native_]sectors and fail */ 4374 dev->n_native_sectors = n_native_sectors; 4375 dev->n_sectors = n_sectors; 4376 fail: 4377 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc); 4378 return rc; 4379 } 4380 4381 struct ata_blacklist_entry { 4382 const char *model_num; 4383 const char *model_rev; 4384 unsigned long horkage; 4385 }; 4386 4387 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4388 /* Devices with DMA related problems under Linux */ 4389 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4390 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4391 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4392 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4393 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4394 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4395 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4396 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4397 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4398 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA }, 4399 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4400 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4401 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4402 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4403 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4404 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA }, 4405 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4406 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4407 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4408 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4409 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4410 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4411 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4412 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4413 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4414 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4415 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4416 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4417 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4418 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA }, 4419 /* Odd clown on sil3726/4726 PMPs */ 4420 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4421 4422 /* Weird ATAPI devices */ 4423 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4424 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4425 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4426 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4427 4428 /* 4429 * Causes silent data corruption with higher max sects. 4430 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com 4431 */ 4432 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, 4433 4434 /* 4435 * These devices time out with higher max sects. 
4436 * https://bugzilla.kernel.org/show_bug.cgi?id=121671 4437 */ 4438 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, 4439 4440 /* Devices we expect to fail diagnostics */ 4441 4442 /* Devices where NCQ should be avoided */ 4443 /* NCQ is slow */ 4444 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4445 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4446 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4447 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4448 /* NCQ is broken */ 4449 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4450 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4451 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4452 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4453 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4454 4455 /* Seagate NCQ + FLUSH CACHE firmware bug */ 4456 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4457 ATA_HORKAGE_FIRMWARE_WARN }, 4458 4459 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4460 ATA_HORKAGE_FIRMWARE_WARN }, 4461 4462 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4463 ATA_HORKAGE_FIRMWARE_WARN }, 4464 4465 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4466 ATA_HORKAGE_FIRMWARE_WARN }, 4467 4468 /* drives which fail FPDMA_AA activation (some may freeze afterwards) */ 4469 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4470 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4471 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4472 4473 /* Blacklist entries taken from Silicon Image 3124/3132 4474 Windows driver .inf file - also several Linux problem reports */ 4475 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4476 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4477 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4478 4479 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ 4480 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, 4481 4482 /* devices which puke on READ_NATIVE_MAX */ 4483 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4484 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4485 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4486 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4487 4488 /* this one allows HPA unlocking but fails IOs on the area */ 4489 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, 4490 4491 /* Devices which report 1 sector over size HPA */ 4492 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4493 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4494 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4495 4496 /* Devices which get the IVB wrong */ 4497 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4498 /* Maybe we should just blacklist TSSTcorp... */ 4499 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, }, 4500 4501 /* Devices that do not need bridging limits applied */ 4502 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4503 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4504 4505 /* Devices which aren't very happy with higher link speeds */ 4506 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4507 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, }, 4508 4509 /* 4510 * Devices which choke on SETXFER. Applies only if both the 4511 * device and controller are SATA. 
4512 */ 4513 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, 4514 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER }, 4515 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER }, 4516 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4517 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4518 4519 /* devices that don't properly handle queued TRIM commands */ 4520 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4521 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4522 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4523 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4524 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4525 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4526 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4527 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4528 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4529 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4530 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4531 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4532 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4533 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4534 4535 /* devices that don't properly handle TRIM commands */ 4536 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, 4537 4538 /* 4539 * As defined, the DRAT (Deterministic Read After Trim) and RZAT 4540 * (Return Zero After Trim) flags in the ATA Command Set are 4541 * unreliable in the sense that they only define what happens if 4542 * the device successfully executed the DSM TRIM command. TRIM 4543 * is only advisory, however, and the device is free to silently 4544 * ignore all or parts of the request. 4545 * 4546 * Whitelist drives that are known to reliably return zeroes 4547 * after TRIM. 4548 */ 4549 4550 /* 4551 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude 4552 * that model before whitelisting all other intel SSDs. 4553 */ 4554 { "INTEL*SSDSC2MH*", NULL, 0, }, 4555 4556 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4557 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4558 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4559 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4560 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4561 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4562 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4563 4564 /* 4565 * Some WD SATA-I drives spin up and down erratically when the link 4566 * is put into the slumber mode. We don't have full list of the 4567 * affected devices. Disable LPM if the device matches one of the 4568 * known prefixes and is SATA-1. As a side effect LPM partial is 4569 * lost too. 
4570 * 4571 * https://bugzilla.kernel.org/show_bug.cgi?id=57211 4572 */ 4573 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4574 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4575 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4576 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4577 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4578 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4579 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4580 4581 /* End Marker */ 4582 { } 4583 }; 4584 4585 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4586 { 4587 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4588 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4589 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4590 4591 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4592 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4593 4594 while (ad->model_num) { 4595 if (glob_match(ad->model_num, model_num)) { 4596 if (ad->model_rev == NULL) 4597 return ad->horkage; 4598 if (glob_match(ad->model_rev, model_rev)) 4599 return ad->horkage; 4600 } 4601 ad++; 4602 } 4603 return 0; 4604 } 4605 4606 static int ata_dma_blacklisted(const struct ata_device *dev) 4607 { 4608 /* We don't support polling DMA. 4609 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4610 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4611 */ 4612 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4613 (dev->flags & ATA_DFLAG_CDB_INTR)) 4614 return 1; 4615 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4616 } 4617 4618 /** 4619 * ata_is_40wire - check drive side detection 4620 * @dev: device 4621 * 4622 * Perform drive side detection decoding, allowing for device vendors 4623 * who can't follow the documentation. 4624 */ 4625 4626 static int ata_is_40wire(struct ata_device *dev) 4627 { 4628 if (dev->horkage & ATA_HORKAGE_IVB) 4629 return ata_drive_40wire_relaxed(dev->id); 4630 return ata_drive_40wire(dev->id); 4631 } 4632 4633 /** 4634 * cable_is_40wire - 40/80/SATA decider 4635 * @ap: port to consider 4636 * 4637 * This function encapsulates the policy for speed management 4638 * in one place. At the moment we don't cache the result but 4639 * there is a good case for setting ap->cbl to the result when 4640 * we are called with unknown cables (and figuring out if it 4641 * impacts hotplug at all). 4642 * 4643 * Return 1 if the cable appears to be 40 wire. 4644 */ 4645 4646 static int cable_is_40wire(struct ata_port *ap) 4647 { 4648 struct ata_link *link; 4649 struct ata_device *dev; 4650 4651 /* If the controller thinks we are 40 wire, we are. */ 4652 if (ap->cbl == ATA_CBL_PATA40) 4653 return 1; 4654 4655 /* If the controller thinks we are 80 wire, we are. */ 4656 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4657 return 0; 4658 4659 /* If the system is known to be 40 wire short cable (eg 4660 * laptop), then we allow 80 wire modes even if the drive 4661 * isn't sure. 4662 */ 4663 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4664 return 0; 4665 4666 /* If the controller doesn't know, we scan. 4667 * 4668 * Note: We look for all 40 wire detects at this point. 
Any 4669 * 80 wire detect is taken to be 80 wire cable because 4670 * - in many setups only the one drive (slave if present) will 4671 * give a valid detect 4672 * - if you have a non detect capable drive you don't want it 4673 * to colour the choice 4674 */ 4675 ata_for_each_link(link, ap, EDGE) { 4676 ata_for_each_dev(dev, link, ENABLED) { 4677 if (!ata_is_40wire(dev)) 4678 return 0; 4679 } 4680 } 4681 return 1; 4682 } 4683 4684 /** 4685 * ata_dev_xfermask - Compute supported xfermask of the given device 4686 * @dev: Device to compute xfermask for 4687 * 4688 * Compute supported xfermask of @dev and store it in 4689 * dev->*_mask. This function is responsible for applying all 4690 * known limits including host controller limits, device 4691 * blacklist, etc... 4692 * 4693 * LOCKING: 4694 * None. 4695 */ 4696 static void ata_dev_xfermask(struct ata_device *dev) 4697 { 4698 struct ata_link *link = dev->link; 4699 struct ata_port *ap = link->ap; 4700 struct ata_host *host = ap->host; 4701 unsigned long xfer_mask; 4702 4703 /* controller modes available */ 4704 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4705 ap->mwdma_mask, ap->udma_mask); 4706 4707 /* drive modes available */ 4708 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4709 dev->mwdma_mask, dev->udma_mask); 4710 xfer_mask &= ata_id_xfermask(dev->id); 4711 4712 /* 4713 * CFA Advanced TrueIDE timings are not allowed on a shared 4714 * cable 4715 */ 4716 if (ata_dev_pair(dev)) { 4717 /* No PIO5 or PIO6 */ 4718 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4719 /* No MWDMA3 or MWDMA 4 */ 4720 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4721 } 4722 4723 if (ata_dma_blacklisted(dev)) { 4724 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4725 ata_dev_warn(dev, 4726 "device is on DMA blacklist, disabling DMA\n"); 4727 } 4728 4729 if ((host->flags & ATA_HOST_SIMPLEX) && 4730 host->simplex_claimed && host->simplex_claimed != ap) { 4731 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4732 ata_dev_warn(dev, 4733 "simplex DMA is claimed by other device, disabling DMA\n"); 4734 } 4735 4736 if (ap->flags & ATA_FLAG_NO_IORDY) 4737 xfer_mask &= ata_pio_mask_no_iordy(dev); 4738 4739 if (ap->ops->mode_filter) 4740 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4741 4742 /* Apply cable rule here. Don't apply it early because when 4743 * we handle hot plug the cable type can itself change. 4744 * Check this last so that we know if the transfer rate was 4745 * solely limited by the cable. 4746 * Unknown or 80 wire cables reported host side are checked 4747 * drive side as well. Cases where we know a 40wire cable 4748 * is used safely for 80 are not checked here. 4749 */ 4750 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4751 /* UDMA/44 or higher would be available */ 4752 if (cable_is_40wire(ap)) { 4753 ata_dev_warn(dev, 4754 "limited to UDMA/33 due to 40-wire cable\n"); 4755 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4756 } 4757 4758 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4759 &dev->mwdma_mask, &dev->udma_mask); 4760 } 4761 4762 /** 4763 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4764 * @dev: Device to which command will be sent 4765 * 4766 * Issue SET FEATURES - XFER MODE command to device @dev 4767 * on port @ap. 4768 * 4769 * LOCKING: 4770 * PCI/etc. bus probe sem. 4771 * 4772 * RETURNS: 4773 * 0 on success, AC_ERR_* mask otherwise. 
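 *
 * For reference, the value written to the sector count register uses
 * the standard SET FEATURES transfer mode encoding from <linux/ata.h>;
 * a device being configured for UDMA/5, for example, ends up with
 * tf.nsect = dev->xfer_mode = XFER_UDMA_5 (0x45), while an ancient
 * PIO-only device without IORDY skips the command entirely.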
4774 */ 4775 4776 static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4777 { 4778 struct ata_taskfile tf; 4779 unsigned int err_mask; 4780 4781 /* set up set-features taskfile */ 4782 DPRINTK("set features - xfer mode\n"); 4783 4784 /* Some controllers and ATAPI devices show flaky interrupt 4785 * behavior after setting xfer mode. Use polling instead. 4786 */ 4787 ata_tf_init(dev, &tf); 4788 tf.command = ATA_CMD_SET_FEATURES; 4789 tf.feature = SETFEATURES_XFER; 4790 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4791 tf.protocol = ATA_PROT_NODATA; 4792 /* If we are using IORDY we must send the mode setting command */ 4793 if (ata_pio_need_iordy(dev)) 4794 tf.nsect = dev->xfer_mode; 4795 /* If the device has IORDY and the controller does not - turn it off */ 4796 else if (ata_id_has_iordy(dev->id)) 4797 tf.nsect = 0x01; 4798 else /* In the ancient relic department - skip all of this */ 4799 return 0; 4800 4801 /* On some disks, this command causes spin-up, so we need longer timeout */ 4802 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000); 4803 4804 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4805 return err_mask; 4806 } 4807 4808 /** 4809 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 4810 * @dev: Device to which command will be sent 4811 * @enable: Whether to enable or disable the feature 4812 * @feature: The sector count represents the feature to set 4813 * 4814 * Issue SET FEATURES - SATA FEATURES command to device @dev 4815 * on port @ap with sector count 4816 * 4817 * LOCKING: 4818 * PCI/etc. bus probe sem. 4819 * 4820 * RETURNS: 4821 * 0 on success, AC_ERR_* mask otherwise. 4822 */ 4823 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) 4824 { 4825 struct ata_taskfile tf; 4826 unsigned int err_mask; 4827 unsigned long timeout = 0; 4828 4829 /* set up set-features taskfile */ 4830 DPRINTK("set features - SATA features\n"); 4831 4832 ata_tf_init(dev, &tf); 4833 tf.command = ATA_CMD_SET_FEATURES; 4834 tf.feature = enable; 4835 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4836 tf.protocol = ATA_PROT_NODATA; 4837 tf.nsect = feature; 4838 4839 if (enable == SETFEATURES_SPINUP) 4840 timeout = ata_probe_timeout ? 4841 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT; 4842 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout); 4843 4844 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4845 return err_mask; 4846 } 4847 EXPORT_SYMBOL_GPL(ata_dev_set_feature); 4848 4849 /** 4850 * ata_dev_init_params - Issue INIT DEV PARAMS command 4851 * @dev: Device to which command will be sent 4852 * @heads: Number of heads (taskfile parameter) 4853 * @sectors: Number of sectors (taskfile parameter) 4854 * 4855 * LOCKING: 4856 * Kernel thread context (may sleep) 4857 * 4858 * RETURNS: 4859 * 0 on success, AC_ERR_* mask otherwise. 4860 */ 4861 static unsigned int ata_dev_init_params(struct ata_device *dev, 4862 u16 heads, u16 sectors) 4863 { 4864 struct ata_taskfile tf; 4865 unsigned int err_mask; 4866 4867 /* Number of sectors per track 1-255. Number of heads 1-16 */ 4868 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4869 return AC_ERR_INVALID; 4870 4871 /* set up init dev params taskfile */ 4872 DPRINTK("init dev params \n"); 4873 4874 ata_tf_init(dev, &tf); 4875 tf.command = ATA_CMD_INIT_DEV_PARAMS; 4876 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4877 tf.protocol = ATA_PROT_NODATA; 4878 tf.nsect = sectors; 4879 tf.device |= (heads - 1) & 0x0f; /* max head = num. 
of heads - 1 */ 4880 4881 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4882 /* A clean abort indicates an original or just out of spec drive 4883 and we should continue as we issue the setup based on the 4884 drive reported working geometry */ 4885 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4886 err_mask = 0; 4887 4888 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4889 return err_mask; 4890 } 4891 4892 /** 4893 * atapi_check_dma - Check whether ATAPI DMA can be supported 4894 * @qc: Metadata associated with taskfile to check 4895 * 4896 * Allow low-level driver to filter ATA PACKET commands, returning 4897 * a status indicating whether or not it is OK to use DMA for the 4898 * supplied PACKET command. 4899 * 4900 * LOCKING: 4901 * spin_lock_irqsave(host lock) 4902 * 4903 * RETURNS: 0 when ATAPI DMA can be used 4904 * nonzero otherwise 4905 */ 4906 int atapi_check_dma(struct ata_queued_cmd *qc) 4907 { 4908 struct ata_port *ap = qc->ap; 4909 4910 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4911 * few ATAPI devices choke on such DMA requests. 4912 */ 4913 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4914 unlikely(qc->nbytes & 15)) 4915 return 1; 4916 4917 if (ap->ops->check_atapi_dma) 4918 return ap->ops->check_atapi_dma(qc); 4919 4920 return 0; 4921 } 4922 4923 /** 4924 * ata_std_qc_defer - Check whether a qc needs to be deferred 4925 * @qc: ATA command in question 4926 * 4927 * Non-NCQ commands cannot run with any other command, NCQ or 4928 * not. As upper layer only knows the queue depth, we are 4929 * responsible for maintaining exclusion. This function checks 4930 * whether a new command @qc can be issued. 4931 * 4932 * LOCKING: 4933 * spin_lock_irqsave(host lock) 4934 * 4935 * RETURNS: 4936 * ATA_DEFER_* if deferring is needed, 0 otherwise. 4937 */ 4938 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4939 { 4940 struct ata_link *link = qc->dev->link; 4941 4942 if (ata_is_ncq(qc->tf.protocol)) { 4943 if (!ata_tag_valid(link->active_tag)) 4944 return 0; 4945 } else { 4946 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4947 return 0; 4948 } 4949 4950 return ATA_DEFER_LINK; 4951 } 4952 4953 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4954 4955 /** 4956 * ata_sg_init - Associate command with scatter-gather table. 4957 * @qc: Command to be associated 4958 * @sg: Scatter-gather table. 4959 * @n_elem: Number of elements in s/g table. 4960 * 4961 * Initialize the data-related elements of queued_cmd @qc 4962 * to point to a scatter-gather table @sg, containing @n_elem 4963 * elements. 4964 * 4965 * LOCKING: 4966 * spin_lock_irqsave(host lock) 4967 */ 4968 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4969 unsigned int n_elem) 4970 { 4971 qc->sg = sg; 4972 qc->n_elem = n_elem; 4973 qc->cursg = qc->sg; 4974 } 4975 4976 #ifdef CONFIG_HAS_DMA 4977 4978 /** 4979 * ata_sg_clean - Unmap DMA memory associated with command 4980 * @qc: Command containing DMA memory to be released 4981 * 4982 * Unmap all mapped DMA memory associated with this command. 
4983 * 4984 * LOCKING: 4985 * spin_lock_irqsave(host lock) 4986 */ 4987 static void ata_sg_clean(struct ata_queued_cmd *qc) 4988 { 4989 struct ata_port *ap = qc->ap; 4990 struct scatterlist *sg = qc->sg; 4991 int dir = qc->dma_dir; 4992 4993 WARN_ON_ONCE(sg == NULL); 4994 4995 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4996 4997 if (qc->n_elem) 4998 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 4999 5000 qc->flags &= ~ATA_QCFLAG_DMAMAP; 5001 qc->sg = NULL; 5002 } 5003 5004 /** 5005 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 5006 * @qc: Command with scatter-gather table to be mapped. 5007 * 5008 * DMA-map the scatter-gather table associated with queued_cmd @qc. 5009 * 5010 * LOCKING: 5011 * spin_lock_irqsave(host lock) 5012 * 5013 * RETURNS: 5014 * Zero on success, negative on error. 5015 * 5016 */ 5017 static int ata_sg_setup(struct ata_queued_cmd *qc) 5018 { 5019 struct ata_port *ap = qc->ap; 5020 unsigned int n_elem; 5021 5022 VPRINTK("ENTER, ata%u\n", ap->print_id); 5023 5024 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 5025 if (n_elem < 1) 5026 return -1; 5027 5028 DPRINTK("%d sg elements mapped\n", n_elem); 5029 qc->orig_n_elem = qc->n_elem; 5030 qc->n_elem = n_elem; 5031 qc->flags |= ATA_QCFLAG_DMAMAP; 5032 5033 return 0; 5034 } 5035 5036 #else /* !CONFIG_HAS_DMA */ 5037 5038 static inline void ata_sg_clean(struct ata_queued_cmd *qc) {} 5039 static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; } 5040 5041 #endif /* !CONFIG_HAS_DMA */ 5042 5043 /** 5044 * swap_buf_le16 - swap halves of 16-bit words in place 5045 * @buf: Buffer to swap 5046 * @buf_words: Number of 16-bit words in buffer. 5047 * 5048 * Swap halves of 16-bit words if needed to convert from 5049 * little-endian byte order to native cpu byte order, or 5050 * vice-versa. 5051 * 5052 * LOCKING: 5053 * Inherited from caller. 5054 */ 5055 void swap_buf_le16(u16 *buf, unsigned int buf_words) 5056 { 5057 #ifdef __BIG_ENDIAN 5058 unsigned int i; 5059 5060 for (i = 0; i < buf_words; i++) 5061 buf[i] = le16_to_cpu(buf[i]); 5062 #endif /* __BIG_ENDIAN */ 5063 } 5064 5065 /** 5066 * ata_qc_new_init - Request an available ATA command, and initialize it 5067 * @dev: Device from whom we request an available command structure 5068 * @tag: tag 5069 * 5070 * LOCKING: 5071 * None. 5072 */ 5073 5074 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag) 5075 { 5076 struct ata_port *ap = dev->link->ap; 5077 struct ata_queued_cmd *qc; 5078 5079 /* no command while frozen */ 5080 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 5081 return NULL; 5082 5083 /* libsas case */ 5084 if (ap->flags & ATA_FLAG_SAS_HOST) { 5085 tag = ata_sas_allocate_tag(ap); 5086 if (tag < 0) 5087 return NULL; 5088 } 5089 5090 qc = __ata_qc_from_tag(ap, tag); 5091 qc->tag = tag; 5092 qc->scsicmd = NULL; 5093 qc->ap = ap; 5094 qc->dev = dev; 5095 5096 ata_qc_reinit(qc); 5097 5098 return qc; 5099 } 5100 5101 /** 5102 * ata_qc_free - free unused ata_queued_cmd 5103 * @qc: Command to complete 5104 * 5105 * Designed to free unused ata_queued_cmd object 5106 * in case something prevents using it. 
5107 * 5108 * LOCKING: 5109 * spin_lock_irqsave(host lock) 5110 */ 5111 void ata_qc_free(struct ata_queued_cmd *qc) 5112 { 5113 struct ata_port *ap; 5114 unsigned int tag; 5115 5116 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 5117 ap = qc->ap; 5118 5119 qc->flags = 0; 5120 tag = qc->tag; 5121 if (likely(ata_tag_valid(tag))) { 5122 qc->tag = ATA_TAG_POISON; 5123 if (ap->flags & ATA_FLAG_SAS_HOST) 5124 ata_sas_free_tag(tag, ap); 5125 } 5126 } 5127 5128 void __ata_qc_complete(struct ata_queued_cmd *qc) 5129 { 5130 struct ata_port *ap; 5131 struct ata_link *link; 5132 5133 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 5134 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 5135 ap = qc->ap; 5136 link = qc->dev->link; 5137 5138 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 5139 ata_sg_clean(qc); 5140 5141 /* command should be marked inactive atomically with qc completion */ 5142 if (ata_is_ncq(qc->tf.protocol)) { 5143 link->sactive &= ~(1 << qc->tag); 5144 if (!link->sactive) 5145 ap->nr_active_links--; 5146 } else { 5147 link->active_tag = ATA_TAG_POISON; 5148 ap->nr_active_links--; 5149 } 5150 5151 /* clear exclusive status */ 5152 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 5153 ap->excl_link == link)) 5154 ap->excl_link = NULL; 5155 5156 /* atapi: mark qc as inactive to prevent the interrupt handler 5157 * from completing the command twice later, before the error handler 5158 * is called. (when rc != 0 and atapi request sense is needed) 5159 */ 5160 qc->flags &= ~ATA_QCFLAG_ACTIVE; 5161 ap->qc_active &= ~(1 << qc->tag); 5162 5163 /* call completion callback */ 5164 qc->complete_fn(qc); 5165 } 5166 5167 static void fill_result_tf(struct ata_queued_cmd *qc) 5168 { 5169 struct ata_port *ap = qc->ap; 5170 5171 qc->result_tf.flags = qc->tf.flags; 5172 ap->ops->qc_fill_rtf(qc); 5173 } 5174 5175 static void ata_verify_xfer(struct ata_queued_cmd *qc) 5176 { 5177 struct ata_device *dev = qc->dev; 5178 5179 if (!ata_is_data(qc->tf.protocol)) 5180 return; 5181 5182 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 5183 return; 5184 5185 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 5186 } 5187 5188 /** 5189 * ata_qc_complete - Complete an active ATA command 5190 * @qc: Command to complete 5191 * 5192 * Indicate to the mid and upper layers that an ATA command has 5193 * completed, with either an ok or not-ok status. 5194 * 5195 * Refrain from calling this function multiple times when 5196 * successfully completing multiple NCQ commands. 5197 * ata_qc_complete_multiple() should be used instead, which will 5198 * properly update IRQ expect state. 5199 * 5200 * LOCKING: 5201 * spin_lock_irqsave(host lock) 5202 */ 5203 void ata_qc_complete(struct ata_queued_cmd *qc) 5204 { 5205 struct ata_port *ap = qc->ap; 5206 5207 /* Trigger the LED (if available) */ 5208 ledtrig_disk_activity(); 5209 5210 /* XXX: New EH and old EH use different mechanisms to 5211 * synchronize EH with regular execution path. 5212 * 5213 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 5214 * Normal execution path is responsible for not accessing a 5215 * failed qc. libata core enforces the rule by returning NULL 5216 * from ata_qc_from_tag() for failed qcs. 5217 * 5218 * Old EH depends on ata_qc_complete() nullifying completion 5219 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 5220 * not synchronize with interrupt handler. Only PIO task is 5221 * taken care of. 
5222 */ 5223 if (ap->ops->error_handler) { 5224 struct ata_device *dev = qc->dev; 5225 struct ata_eh_info *ehi = &dev->link->eh_info; 5226 5227 if (unlikely(qc->err_mask)) 5228 qc->flags |= ATA_QCFLAG_FAILED; 5229 5230 /* 5231 * Finish internal commands without any further processing 5232 * and always with the result TF filled. 5233 */ 5234 if (unlikely(ata_tag_internal(qc->tag))) { 5235 fill_result_tf(qc); 5236 trace_ata_qc_complete_internal(qc); 5237 __ata_qc_complete(qc); 5238 return; 5239 } 5240 5241 /* 5242 * Non-internal qc has failed. Fill the result TF and 5243 * summon EH. 5244 */ 5245 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 5246 fill_result_tf(qc); 5247 trace_ata_qc_complete_failed(qc); 5248 ata_qc_schedule_eh(qc); 5249 return; 5250 } 5251 5252 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 5253 5254 /* read result TF if requested */ 5255 if (qc->flags & ATA_QCFLAG_RESULT_TF) 5256 fill_result_tf(qc); 5257 5258 trace_ata_qc_complete_done(qc); 5259 /* Some commands need post-processing after successful 5260 * completion. 5261 */ 5262 switch (qc->tf.command) { 5263 case ATA_CMD_SET_FEATURES: 5264 if (qc->tf.feature != SETFEATURES_WC_ON && 5265 qc->tf.feature != SETFEATURES_WC_OFF && 5266 qc->tf.feature != SETFEATURES_RA_ON && 5267 qc->tf.feature != SETFEATURES_RA_OFF) 5268 break; 5269 /* fall through */ 5270 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 5271 case ATA_CMD_SET_MULTI: /* multi_count changed */ 5272 /* revalidate device */ 5273 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 5274 ata_port_schedule_eh(ap); 5275 break; 5276 5277 case ATA_CMD_SLEEP: 5278 dev->flags |= ATA_DFLAG_SLEEPING; 5279 break; 5280 } 5281 5282 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) 5283 ata_verify_xfer(qc); 5284 5285 __ata_qc_complete(qc); 5286 } else { 5287 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 5288 return; 5289 5290 /* read result TF if failed or requested */ 5291 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 5292 fill_result_tf(qc); 5293 5294 __ata_qc_complete(qc); 5295 } 5296 } 5297 5298 /** 5299 * ata_qc_complete_multiple - Complete multiple qcs successfully 5300 * @ap: port in question 5301 * @qc_active: new qc_active mask 5302 * 5303 * Complete in-flight commands. This functions is meant to be 5304 * called from low-level driver's interrupt routine to complete 5305 * requests normally. ap->qc_active and @qc_active is compared 5306 * and commands are completed accordingly. 5307 * 5308 * Always use this function when completing multiple NCQ commands 5309 * from IRQ handlers instead of calling ata_qc_complete() 5310 * multiple times to keep IRQ expect status properly in sync. 5311 * 5312 * LOCKING: 5313 * spin_lock_irqsave(host lock) 5314 * 5315 * RETURNS: 5316 * Number of completed commands on success, -errno otherwise. 
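 *
 * A rough sketch of the intended call pattern from an LLD interrupt
 * handler (my_read_active_mask() is a hypothetical helper; AHCI, for
 * example, derives the mask from PxSACT for NCQ commands and PxCI
 * otherwise):
 *
 *	u32 qc_active = my_read_active_mask(ap);
 *
 *	ata_qc_complete_multiple(ap, qc_active);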
5317 */ 5318 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) 5319 { 5320 int nr_done = 0; 5321 u32 done_mask; 5322 5323 done_mask = ap->qc_active ^ qc_active; 5324 5325 if (unlikely(done_mask & qc_active)) { 5326 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n", 5327 ap->qc_active, qc_active); 5328 return -EINVAL; 5329 } 5330 5331 while (done_mask) { 5332 struct ata_queued_cmd *qc; 5333 unsigned int tag = __ffs(done_mask); 5334 5335 qc = ata_qc_from_tag(ap, tag); 5336 if (qc) { 5337 ata_qc_complete(qc); 5338 nr_done++; 5339 } 5340 done_mask &= ~(1 << tag); 5341 } 5342 5343 return nr_done; 5344 } 5345 5346 /** 5347 * ata_qc_issue - issue taskfile to device 5348 * @qc: command to issue to device 5349 * 5350 * Prepare an ATA command to submission to device. 5351 * This includes mapping the data into a DMA-able 5352 * area, filling in the S/G table, and finally 5353 * writing the taskfile to hardware, starting the command. 5354 * 5355 * LOCKING: 5356 * spin_lock_irqsave(host lock) 5357 */ 5358 void ata_qc_issue(struct ata_queued_cmd *qc) 5359 { 5360 struct ata_port *ap = qc->ap; 5361 struct ata_link *link = qc->dev->link; 5362 u8 prot = qc->tf.protocol; 5363 5364 /* Make sure only one non-NCQ command is outstanding. The 5365 * check is skipped for old EH because it reuses active qc to 5366 * request ATAPI sense. 5367 */ 5368 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5369 5370 if (ata_is_ncq(prot)) { 5371 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); 5372 5373 if (!link->sactive) 5374 ap->nr_active_links++; 5375 link->sactive |= 1 << qc->tag; 5376 } else { 5377 WARN_ON_ONCE(link->sactive); 5378 5379 ap->nr_active_links++; 5380 link->active_tag = qc->tag; 5381 } 5382 5383 qc->flags |= ATA_QCFLAG_ACTIVE; 5384 ap->qc_active |= 1 << qc->tag; 5385 5386 /* 5387 * We guarantee to LLDs that they will have at least one 5388 * non-zero sg if the command is a data command. 5389 */ 5390 if (WARN_ON_ONCE(ata_is_data(prot) && 5391 (!qc->sg || !qc->n_elem || !qc->nbytes))) 5392 goto sys_err; 5393 5394 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5395 (ap->flags & ATA_FLAG_PIO_DMA))) 5396 if (ata_sg_setup(qc)) 5397 goto sys_err; 5398 5399 /* if device is sleeping, schedule reset and abort the link */ 5400 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5401 link->eh_info.action |= ATA_EH_RESET; 5402 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5403 ata_link_abort(link); 5404 return; 5405 } 5406 5407 ap->ops->qc_prep(qc); 5408 trace_ata_qc_issue(qc); 5409 qc->err_mask |= ap->ops->qc_issue(qc); 5410 if (unlikely(qc->err_mask)) 5411 goto err; 5412 return; 5413 5414 sys_err: 5415 qc->err_mask |= AC_ERR_SYSTEM; 5416 err: 5417 ata_qc_complete(qc); 5418 } 5419 5420 /** 5421 * sata_scr_valid - test whether SCRs are accessible 5422 * @link: ATA link to test SCR accessibility for 5423 * 5424 * Test whether SCRs are accessible for @link. 5425 * 5426 * LOCKING: 5427 * None. 5428 * 5429 * RETURNS: 5430 * 1 if SCRs are accessible, 0 otherwise. 5431 */ 5432 int sata_scr_valid(struct ata_link *link) 5433 { 5434 struct ata_port *ap = link->ap; 5435 5436 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5437 } 5438 5439 /** 5440 * sata_scr_read - read SCR register of the specified port 5441 * @link: ATA link to read SCR for 5442 * @reg: SCR to read 5443 * @val: Place to store read value 5444 * 5445 * Read SCR register @reg of @link into *@val. 
This function is 5446 * guaranteed to succeed if @link is ap->link, the cable type of 5447 * the port is SATA and the port implements ->scr_read. 5448 * 5449 * LOCKING: 5450 * None if @link is ap->link. Kernel thread context otherwise. 5451 * 5452 * RETURNS: 5453 * 0 on success, negative errno on failure. 5454 */ 5455 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5456 { 5457 if (ata_is_host_link(link)) { 5458 if (sata_scr_valid(link)) 5459 return link->ap->ops->scr_read(link, reg, val); 5460 return -EOPNOTSUPP; 5461 } 5462 5463 return sata_pmp_scr_read(link, reg, val); 5464 } 5465 5466 /** 5467 * sata_scr_write - write SCR register of the specified port 5468 * @link: ATA link to write SCR for 5469 * @reg: SCR to write 5470 * @val: value to write 5471 * 5472 * Write @val to SCR register @reg of @link. This function is 5473 * guaranteed to succeed if @link is ap->link, the cable type of 5474 * the port is SATA and the port implements ->scr_read. 5475 * 5476 * LOCKING: 5477 * None if @link is ap->link. Kernel thread context otherwise. 5478 * 5479 * RETURNS: 5480 * 0 on success, negative errno on failure. 5481 */ 5482 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5483 { 5484 if (ata_is_host_link(link)) { 5485 if (sata_scr_valid(link)) 5486 return link->ap->ops->scr_write(link, reg, val); 5487 return -EOPNOTSUPP; 5488 } 5489 5490 return sata_pmp_scr_write(link, reg, val); 5491 } 5492 5493 /** 5494 * sata_scr_write_flush - write SCR register of the specified port and flush 5495 * @link: ATA link to write SCR for 5496 * @reg: SCR to write 5497 * @val: value to write 5498 * 5499 * This function is identical to sata_scr_write() except that this 5500 * function performs flush after writing to the register. 5501 * 5502 * LOCKING: 5503 * None if @link is ap->link. Kernel thread context otherwise. 5504 * 5505 * RETURNS: 5506 * 0 on success, negative errno on failure. 5507 */ 5508 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5509 { 5510 if (ata_is_host_link(link)) { 5511 int rc; 5512 5513 if (sata_scr_valid(link)) { 5514 rc = link->ap->ops->scr_write(link, reg, val); 5515 if (rc == 0) 5516 rc = link->ap->ops->scr_read(link, reg, &val); 5517 return rc; 5518 } 5519 return -EOPNOTSUPP; 5520 } 5521 5522 return sata_pmp_scr_write(link, reg, val); 5523 } 5524 5525 /** 5526 * ata_phys_link_online - test whether the given link is online 5527 * @link: ATA link to test 5528 * 5529 * Test whether @link is online. Note that this function returns 5530 * 0 if online status of @link cannot be obtained, so 5531 * ata_link_online(link) != !ata_link_offline(link). 5532 * 5533 * LOCKING: 5534 * None. 5535 * 5536 * RETURNS: 5537 * True if the port online status is available and online. 5538 */ 5539 bool ata_phys_link_online(struct ata_link *link) 5540 { 5541 u32 sstatus; 5542 5543 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5544 ata_sstatus_online(sstatus)) 5545 return true; 5546 return false; 5547 } 5548 5549 /** 5550 * ata_phys_link_offline - test whether the given link is offline 5551 * @link: ATA link to test 5552 * 5553 * Test whether @link is offline. Note that this function 5554 * returns 0 if offline status of @link cannot be obtained, so 5555 * ata_link_online(link) != !ata_link_offline(link). 5556 * 5557 * LOCKING: 5558 * None. 5559 * 5560 * RETURNS: 5561 * True if the port offline status is available and offline. 
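 *
 * For reference, onlineness is decided from the DET field (the low four
 * bits) of SStatus: 0x0 means no device detected, 0x1 means a device
 * was sensed but PHY communication is not established, 0x3 means a
 * device is present with PHY communication established, and 0x4 means
 * the PHY is in the offline/disabled state.  ata_sstatus_online()
 * reports online only for the 0x3 case.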
5562 */ 5563 bool ata_phys_link_offline(struct ata_link *link) 5564 { 5565 u32 sstatus; 5566 5567 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5568 !ata_sstatus_online(sstatus)) 5569 return true; 5570 return false; 5571 } 5572 5573 /** 5574 * ata_link_online - test whether the given link is online 5575 * @link: ATA link to test 5576 * 5577 * Test whether @link is online. This is identical to 5578 * ata_phys_link_online() when there's no slave link. When 5579 * there's a slave link, this function should only be called on 5580 * the master link and will return true if any of M/S links is 5581 * online. 5582 * 5583 * LOCKING: 5584 * None. 5585 * 5586 * RETURNS: 5587 * True if the port online status is available and online. 5588 */ 5589 bool ata_link_online(struct ata_link *link) 5590 { 5591 struct ata_link *slave = link->ap->slave_link; 5592 5593 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5594 5595 return ata_phys_link_online(link) || 5596 (slave && ata_phys_link_online(slave)); 5597 } 5598 5599 /** 5600 * ata_link_offline - test whether the given link is offline 5601 * @link: ATA link to test 5602 * 5603 * Test whether @link is offline. This is identical to 5604 * ata_phys_link_offline() when there's no slave link. When 5605 * there's a slave link, this function should only be called on 5606 * the master link and will return true if both M/S links are 5607 * offline. 5608 * 5609 * LOCKING: 5610 * None. 5611 * 5612 * RETURNS: 5613 * True if the port offline status is available and offline. 5614 */ 5615 bool ata_link_offline(struct ata_link *link) 5616 { 5617 struct ata_link *slave = link->ap->slave_link; 5618 5619 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5620 5621 return ata_phys_link_offline(link) && 5622 (!slave || ata_phys_link_offline(slave)); 5623 } 5624 5625 #ifdef CONFIG_PM 5626 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, 5627 unsigned int action, unsigned int ehi_flags, 5628 bool async) 5629 { 5630 struct ata_link *link; 5631 unsigned long flags; 5632 5633 /* Previous resume operation might still be in 5634 * progress. Wait for PM_PENDING to clear. 5635 */ 5636 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5637 ata_port_wait_eh(ap); 5638 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5639 } 5640 5641 /* request PM ops to EH */ 5642 spin_lock_irqsave(ap->lock, flags); 5643 5644 ap->pm_mesg = mesg; 5645 ap->pflags |= ATA_PFLAG_PM_PENDING; 5646 ata_for_each_link(link, ap, HOST_FIRST) { 5647 link->eh_info.action |= action; 5648 link->eh_info.flags |= ehi_flags; 5649 } 5650 5651 ata_port_schedule_eh(ap); 5652 5653 spin_unlock_irqrestore(ap->lock, flags); 5654 5655 if (!async) { 5656 ata_port_wait_eh(ap); 5657 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5658 } 5659 } 5660 5661 /* 5662 * On some hardware, device fails to respond after spun down for suspend. As 5663 * the device won't be used before being resumed, we don't need to touch the 5664 * device. Ask EH to skip the usual stuff and proceed directly to suspend. 
5665 * 5666 * http://thread.gmane.org/gmane.linux.ide/46764 5667 */ 5668 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET 5669 | ATA_EHI_NO_AUTOPSY 5670 | ATA_EHI_NO_RECOVERY; 5671 5672 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg) 5673 { 5674 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false); 5675 } 5676 5677 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg) 5678 { 5679 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true); 5680 } 5681 5682 static int ata_port_pm_suspend(struct device *dev) 5683 { 5684 struct ata_port *ap = to_ata_port(dev); 5685 5686 if (pm_runtime_suspended(dev)) 5687 return 0; 5688 5689 ata_port_suspend(ap, PMSG_SUSPEND); 5690 return 0; 5691 } 5692 5693 static int ata_port_pm_freeze(struct device *dev) 5694 { 5695 struct ata_port *ap = to_ata_port(dev); 5696 5697 if (pm_runtime_suspended(dev)) 5698 return 0; 5699 5700 ata_port_suspend(ap, PMSG_FREEZE); 5701 return 0; 5702 } 5703 5704 static int ata_port_pm_poweroff(struct device *dev) 5705 { 5706 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE); 5707 return 0; 5708 } 5709 5710 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY 5711 | ATA_EHI_QUIET; 5712 5713 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg) 5714 { 5715 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false); 5716 } 5717 5718 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg) 5719 { 5720 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true); 5721 } 5722 5723 static int ata_port_pm_resume(struct device *dev) 5724 { 5725 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME); 5726 pm_runtime_disable(dev); 5727 pm_runtime_set_active(dev); 5728 pm_runtime_enable(dev); 5729 return 0; 5730 } 5731 5732 /* 5733 * For ODDs, the upper layer will poll for media change every few seconds, 5734 * which will make it enter and leave suspend state every few seconds. And 5735 * as each suspend will cause a hard/soft reset, the gain of runtime suspend 5736 * is very little and the ODD may malfunction after constantly being reset. 5737 * So the idle callback here will not proceed to suspend if a non-ZPODD capable 5738 * ODD is attached to the port. 
5739 */ 5740 static int ata_port_runtime_idle(struct device *dev) 5741 { 5742 struct ata_port *ap = to_ata_port(dev); 5743 struct ata_link *link; 5744 struct ata_device *adev; 5745 5746 ata_for_each_link(link, ap, HOST_FIRST) { 5747 ata_for_each_dev(adev, link, ENABLED) 5748 if (adev->class == ATA_DEV_ATAPI && 5749 !zpodd_dev_enabled(adev)) 5750 return -EBUSY; 5751 } 5752 5753 return 0; 5754 } 5755 5756 static int ata_port_runtime_suspend(struct device *dev) 5757 { 5758 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND); 5759 return 0; 5760 } 5761 5762 static int ata_port_runtime_resume(struct device *dev) 5763 { 5764 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME); 5765 return 0; 5766 } 5767 5768 static const struct dev_pm_ops ata_port_pm_ops = { 5769 .suspend = ata_port_pm_suspend, 5770 .resume = ata_port_pm_resume, 5771 .freeze = ata_port_pm_freeze, 5772 .thaw = ata_port_pm_resume, 5773 .poweroff = ata_port_pm_poweroff, 5774 .restore = ata_port_pm_resume, 5775 5776 .runtime_suspend = ata_port_runtime_suspend, 5777 .runtime_resume = ata_port_runtime_resume, 5778 .runtime_idle = ata_port_runtime_idle, 5779 }; 5780 5781 /* sas ports don't participate in pm runtime management of ata_ports, 5782 * and need to resume ata devices at the domain level, not the per-port 5783 * level. sas suspend/resume is async to allow parallel port recovery 5784 * since sas has multiple ata_port instances per Scsi_Host. 5785 */ 5786 void ata_sas_port_suspend(struct ata_port *ap) 5787 { 5788 ata_port_suspend_async(ap, PMSG_SUSPEND); 5789 } 5790 EXPORT_SYMBOL_GPL(ata_sas_port_suspend); 5791 5792 void ata_sas_port_resume(struct ata_port *ap) 5793 { 5794 ata_port_resume_async(ap, PMSG_RESUME); 5795 } 5796 EXPORT_SYMBOL_GPL(ata_sas_port_resume); 5797 5798 /** 5799 * ata_host_suspend - suspend host 5800 * @host: host to suspend 5801 * @mesg: PM message 5802 * 5803 * Suspend @host. Actual operation is performed by port suspend. 5804 */ 5805 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5806 { 5807 host->dev->power.power_state = mesg; 5808 return 0; 5809 } 5810 5811 /** 5812 * ata_host_resume - resume host 5813 * @host: host to resume 5814 * 5815 * Resume @host. Actual operation is performed by port resume. 5816 */ 5817 void ata_host_resume(struct ata_host *host) 5818 { 5819 host->dev->power.power_state = PMSG_ON; 5820 } 5821 #endif 5822 5823 struct device_type ata_port_type = { 5824 .name = "ata_port", 5825 #ifdef CONFIG_PM 5826 .pm = &ata_port_pm_ops, 5827 #endif 5828 }; 5829 5830 /** 5831 * ata_dev_init - Initialize an ata_device structure 5832 * @dev: Device structure to initialize 5833 * 5834 * Initialize @dev in preparation for probing. 5835 * 5836 * LOCKING: 5837 * Inherited from caller. 5838 */ 5839 void ata_dev_init(struct ata_device *dev) 5840 { 5841 struct ata_link *link = ata_dev_phys_link(dev); 5842 struct ata_port *ap = link->ap; 5843 unsigned long flags; 5844 5845 /* SATA spd limit is bound to the attached device, reset together */ 5846 link->sata_spd_limit = link->hw_sata_spd_limit; 5847 link->sata_spd = 0; 5848 5849 /* High bits of dev->flags are used to record warm plug 5850 * requests which occur asynchronously. Synchronize using 5851 * host lock. 
5852 */ 5853 spin_lock_irqsave(ap->lock, flags); 5854 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5855 dev->horkage = 0; 5856 spin_unlock_irqrestore(ap->lock, flags); 5857 5858 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5859 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5860 dev->pio_mask = UINT_MAX; 5861 dev->mwdma_mask = UINT_MAX; 5862 dev->udma_mask = UINT_MAX; 5863 } 5864 5865 /** 5866 * ata_link_init - Initialize an ata_link structure 5867 * @ap: ATA port link is attached to 5868 * @link: Link structure to initialize 5869 * @pmp: Port multiplier port number 5870 * 5871 * Initialize @link. 5872 * 5873 * LOCKING: 5874 * Kernel thread context (may sleep) 5875 */ 5876 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5877 { 5878 int i; 5879 5880 /* clear everything except for devices */ 5881 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, 5882 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); 5883 5884 link->ap = ap; 5885 link->pmp = pmp; 5886 link->active_tag = ATA_TAG_POISON; 5887 link->hw_sata_spd_limit = UINT_MAX; 5888 5889 /* can't use iterator, ap isn't initialized yet */ 5890 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5891 struct ata_device *dev = &link->device[i]; 5892 5893 dev->link = link; 5894 dev->devno = dev - link->device; 5895 #ifdef CONFIG_ATA_ACPI 5896 dev->gtf_filter = ata_acpi_gtf_filter; 5897 #endif 5898 ata_dev_init(dev); 5899 } 5900 } 5901 5902 /** 5903 * sata_link_init_spd - Initialize link->sata_spd_limit 5904 * @link: Link to configure sata_spd_limit for 5905 * 5906 * Initialize @link->[hw_]sata_spd_limit to the currently 5907 * configured value. 5908 * 5909 * LOCKING: 5910 * Kernel thread context (may sleep). 5911 * 5912 * RETURNS: 5913 * 0 on success, -errno on failure. 5914 */ 5915 int sata_link_init_spd(struct ata_link *link) 5916 { 5917 u8 spd; 5918 int rc; 5919 5920 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5921 if (rc) 5922 return rc; 5923 5924 spd = (link->saved_scontrol >> 4) & 0xf; 5925 if (spd) 5926 link->hw_sata_spd_limit &= (1 << spd) - 1; 5927 5928 ata_force_link_limits(link); 5929 5930 link->sata_spd_limit = link->hw_sata_spd_limit; 5931 5932 return 0; 5933 } 5934 5935 /** 5936 * ata_port_alloc - allocate and initialize basic ATA port resources 5937 * @host: ATA host this allocated port belongs to 5938 * 5939 * Allocate and initialize basic ATA port resources. 5940 * 5941 * RETURNS: 5942 * Allocate ATA port on success, NULL on failure. 5943 * 5944 * LOCKING: 5945 * Inherited from calling layer (may sleep). 
5946 */ 5947 struct ata_port *ata_port_alloc(struct ata_host *host) 5948 { 5949 struct ata_port *ap; 5950 5951 DPRINTK("ENTER\n"); 5952 5953 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5954 if (!ap) 5955 return NULL; 5956 5957 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; 5958 ap->lock = &host->lock; 5959 ap->print_id = -1; 5960 ap->local_port_no = -1; 5961 ap->host = host; 5962 ap->dev = host->dev; 5963 5964 #if defined(ATA_VERBOSE_DEBUG) 5965 /* turn on all debugging levels */ 5966 ap->msg_enable = 0x00FF; 5967 #elif defined(ATA_DEBUG) 5968 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5969 #else 5970 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5971 #endif 5972 5973 mutex_init(&ap->scsi_scan_mutex); 5974 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5975 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5976 INIT_LIST_HEAD(&ap->eh_done_q); 5977 init_waitqueue_head(&ap->eh_wait_q); 5978 init_completion(&ap->park_req_pending); 5979 setup_deferrable_timer(&ap->fastdrain_timer, 5980 ata_eh_fastdrain_timerfn, 5981 (unsigned long)ap); 5982 5983 ap->cbl = ATA_CBL_NONE; 5984 5985 ata_link_init(ap, &ap->link, 0); 5986 5987 #ifdef ATA_IRQ_TRAP 5988 ap->stats.unhandled_irq = 1; 5989 ap->stats.idle_irq = 1; 5990 #endif 5991 ata_sff_port_init(ap); 5992 5993 return ap; 5994 } 5995 5996 static void ata_host_release(struct device *gendev, void *res) 5997 { 5998 struct ata_host *host = dev_get_drvdata(gendev); 5999 int i; 6000 6001 for (i = 0; i < host->n_ports; i++) { 6002 struct ata_port *ap = host->ports[i]; 6003 6004 if (!ap) 6005 continue; 6006 6007 if (ap->scsi_host) 6008 scsi_host_put(ap->scsi_host); 6009 6010 kfree(ap->pmp_link); 6011 kfree(ap->slave_link); 6012 kfree(ap); 6013 host->ports[i] = NULL; 6014 } 6015 6016 dev_set_drvdata(gendev, NULL); 6017 } 6018 6019 /** 6020 * ata_host_alloc - allocate and init basic ATA host resources 6021 * @dev: generic device this host is associated with 6022 * @max_ports: maximum number of ATA ports associated with this host 6023 * 6024 * Allocate and initialize basic ATA host resources. LLD calls 6025 * this function to allocate a host, initializes it fully and 6026 * attaches it using ata_host_register(). 6027 * 6028 * @max_ports ports are allocated and host->n_ports is 6029 * initialized to @max_ports. The caller is allowed to decrease 6030 * host->n_ports before calling ata_host_register(). The unused 6031 * ports will be automatically freed on registration. 6032 * 6033 * RETURNS: 6034 * Allocate ATA host on success, NULL on failure. 6035 * 6036 * LOCKING: 6037 * Inherited from calling layer (may sleep). 
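 *
 * As a rough usage sketch (illustrative only; "n_ports" and
 * "drv_port_ops" are placeholders supplied by the LLD, not symbols
 * defined in this file), a probe routine typically does:
 *
 *	host = ata_host_alloc(dev, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	host->ports[0]->ops = &drv_port_ops;
 *	host->ports[0]->pio_mask = ATA_PIO4;
 *
 * and then calls ata_host_start() and ata_host_register(), or
 * ata_host_activate() to combine both with IRQ setup.  Drivers with a
 * port_info table can use ata_host_alloc_pinfo() instead, which fills
 * in the per-port masks, flags and ops from that table.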
6038 */ 6039 struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 6040 { 6041 struct ata_host *host; 6042 size_t sz; 6043 int i; 6044 6045 DPRINTK("ENTER\n"); 6046 6047 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 6048 return NULL; 6049 6050 /* alloc a container for our list of ATA ports (buses) */ 6051 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 6052 6053 host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 6054 if (!host) 6055 goto err_out; 6056 6057 devres_add(dev, host); 6058 dev_set_drvdata(dev, host); 6059 6060 spin_lock_init(&host->lock); 6061 mutex_init(&host->eh_mutex); 6062 host->dev = dev; 6063 host->n_ports = max_ports; 6064 6065 /* allocate ports bound to this host */ 6066 for (i = 0; i < max_ports; i++) { 6067 struct ata_port *ap; 6068 6069 ap = ata_port_alloc(host); 6070 if (!ap) 6071 goto err_out; 6072 6073 ap->port_no = i; 6074 host->ports[i] = ap; 6075 } 6076 6077 devres_remove_group(dev, NULL); 6078 return host; 6079 6080 err_out: 6081 devres_release_group(dev, NULL); 6082 return NULL; 6083 } 6084 6085 /** 6086 * ata_host_alloc_pinfo - alloc host and init with port_info array 6087 * @dev: generic device this host is associated with 6088 * @ppi: array of ATA port_info to initialize host with 6089 * @n_ports: number of ATA ports attached to this host 6090 * 6091 * Allocate ATA host and initialize with info from @ppi. If NULL 6092 * terminated, @ppi may contain fewer entries than @n_ports. The 6093 * last entry will be used for the remaining ports. 6094 * 6095 * RETURNS: 6096 * Allocate ATA host on success, NULL on failure. 6097 * 6098 * LOCKING: 6099 * Inherited from calling layer (may sleep). 6100 */ 6101 struct ata_host *ata_host_alloc_pinfo(struct device *dev, 6102 const struct ata_port_info * const * ppi, 6103 int n_ports) 6104 { 6105 const struct ata_port_info *pi; 6106 struct ata_host *host; 6107 int i, j; 6108 6109 host = ata_host_alloc(dev, n_ports); 6110 if (!host) 6111 return NULL; 6112 6113 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 6114 struct ata_port *ap = host->ports[i]; 6115 6116 if (ppi[j]) 6117 pi = ppi[j++]; 6118 6119 ap->pio_mask = pi->pio_mask; 6120 ap->mwdma_mask = pi->mwdma_mask; 6121 ap->udma_mask = pi->udma_mask; 6122 ap->flags |= pi->flags; 6123 ap->link.flags |= pi->link_flags; 6124 ap->ops = pi->port_ops; 6125 6126 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 6127 host->ops = pi->port_ops; 6128 } 6129 6130 return host; 6131 } 6132 6133 /** 6134 * ata_slave_link_init - initialize slave link 6135 * @ap: port to initialize slave link for 6136 * 6137 * Create and initialize slave link for @ap. This enables slave 6138 * link handling on the port. 6139 * 6140 * In libata, a port contains links and a link contains devices. 6141 * There is a single host link but if a PMP is attached to it, 6142 * there can be multiple fan-out links. On SATA, there's usually 6143 * a single device connected to a link but PATA and SATA 6144 * controllers emulating TF based interface can have two - master 6145 * and slave. 6146 * 6147 * However, there are a few controllers which don't fit into this 6148 * abstraction too well - SATA controllers which emulate TF 6149 * interface with both master and slave devices but also have 6150 * separate SCR register sets for each device. These controllers 6151 * need separate links for physical link handling 6152 * (e.g.
onlineness, link speed) but should be treated like a 6153 * traditional M/S controller for everything else (e.g. command 6154 * issue, softreset). 6155 * 6156 * slave_link is libata's way of handling this class of 6157 * controllers without impacting core layer too much. For 6158 * anything other than physical link handling, the default host 6159 * link is used for both master and slave. For physical link 6160 * handling, separate @ap->slave_link is used. All dirty details 6161 * are implemented inside libata core layer. From LLD's POV, the 6162 * only difference is that prereset, hardreset and postreset are 6163 * called once more for the slave link, so the reset sequence 6164 * looks like the following. 6165 * 6166 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> 6167 * softreset(M) -> postreset(M) -> postreset(S) 6168 * 6169 * Note that softreset is called only for the master. Softreset 6170 * resets both M/S by definition, so SRST on master should handle 6171 * both (the standard method will work just fine). 6172 * 6173 * LOCKING: 6174 * Should be called before host is registered. 6175 * 6176 * RETURNS: 6177 * 0 on success, -errno on failure. 6178 */ 6179 int ata_slave_link_init(struct ata_port *ap) 6180 { 6181 struct ata_link *link; 6182 6183 WARN_ON(ap->slave_link); 6184 WARN_ON(ap->flags & ATA_FLAG_PMP); 6185 6186 link = kzalloc(sizeof(*link), GFP_KERNEL); 6187 if (!link) 6188 return -ENOMEM; 6189 6190 ata_link_init(ap, link, 1); 6191 ap->slave_link = link; 6192 return 0; 6193 } 6194 6195 static void ata_host_stop(struct device *gendev, void *res) 6196 { 6197 struct ata_host *host = dev_get_drvdata(gendev); 6198 int i; 6199 6200 WARN_ON(!(host->flags & ATA_HOST_STARTED)); 6201 6202 for (i = 0; i < host->n_ports; i++) { 6203 struct ata_port *ap = host->ports[i]; 6204 6205 if (ap->ops->port_stop) 6206 ap->ops->port_stop(ap); 6207 } 6208 6209 if (host->ops->host_stop) 6210 host->ops->host_stop(host); 6211 } 6212 6213 /** 6214 * ata_finalize_port_ops - finalize ata_port_operations 6215 * @ops: ata_port_operations to finalize 6216 * 6217 * An ata_port_operations can inherit from another ops and that 6218 * ops can again inherit from another. This can go on as many 6219 * times as necessary as long as there is no loop in the 6220 * inheritance chain. 6221 * 6222 * Ops tables are finalized when the host is started. NULL or 6223 * unspecified entries are inherited from the closest ancestor 6224 * which has the method and the entry is populated with it. 6225 * After finalization, the ops table directly points to all the 6226 * methods and ->inherits is no longer necessary and is cleared. 6227 * 6228 * Using ATA_OP_NULL, inheriting ops can force a method to NULL. 6229 * 6230 * LOCKING: 6231 * None.
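 *
 * As a hedged illustration (the drv_* names are placeholders, not
 * symbols defined in this file), an LLD-side ops table that relies on
 * this inheritance might be declared as:
 *
 *	static struct ata_port_operations drv_port_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.qc_issue	= drv_qc_issue,
 *		.softreset	= ATA_OP_NULL,
 *	};
 *
 * Here ATA_OP_NULL forces ->softreset to NULL, while unset entries such
 * as ->error_handler are filled in from ata_base_port_ops when
 * ata_host_start() finalizes the table.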
6232 */ 6233 static void ata_finalize_port_ops(struct ata_port_operations *ops) 6234 { 6235 static DEFINE_SPINLOCK(lock); 6236 const struct ata_port_operations *cur; 6237 void **begin = (void **)ops; 6238 void **end = (void **)&ops->inherits; 6239 void **pp; 6240 6241 if (!ops || !ops->inherits) 6242 return; 6243 6244 spin_lock(&lock); 6245 6246 for (cur = ops->inherits; cur; cur = cur->inherits) { 6247 void **inherit = (void **)cur; 6248 6249 for (pp = begin; pp < end; pp++, inherit++) 6250 if (!*pp) 6251 *pp = *inherit; 6252 } 6253 6254 for (pp = begin; pp < end; pp++) 6255 if (IS_ERR(*pp)) 6256 *pp = NULL; 6257 6258 ops->inherits = NULL; 6259 6260 spin_unlock(&lock); 6261 } 6262 6263 /** 6264 * ata_host_start - start and freeze ports of an ATA host 6265 * @host: ATA host to start ports for 6266 * 6267 * Start and then freeze ports of @host. Started status is 6268 * recorded in host->flags, so this function can be called 6269 * multiple times. Ports are guaranteed to get started only 6270 * once. If host->ops isn't initialized yet, it's set to the 6271 * first non-dummy port ops. 6272 * 6273 * LOCKING: 6274 * Inherited from calling layer (may sleep). 6275 * 6276 * RETURNS: 6277 * 0 if all ports are started successfully, -errno otherwise. 6278 */ 6279 int ata_host_start(struct ata_host *host) 6280 { 6281 int have_stop = 0; 6282 void *start_dr = NULL; 6283 int i, rc; 6284 6285 if (host->flags & ATA_HOST_STARTED) 6286 return 0; 6287 6288 ata_finalize_port_ops(host->ops); 6289 6290 for (i = 0; i < host->n_ports; i++) { 6291 struct ata_port *ap = host->ports[i]; 6292 6293 ata_finalize_port_ops(ap->ops); 6294 6295 if (!host->ops && !ata_port_is_dummy(ap)) 6296 host->ops = ap->ops; 6297 6298 if (ap->ops->port_stop) 6299 have_stop = 1; 6300 } 6301 6302 if (host->ops->host_stop) 6303 have_stop = 1; 6304 6305 if (have_stop) { 6306 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 6307 if (!start_dr) 6308 return -ENOMEM; 6309 } 6310 6311 for (i = 0; i < host->n_ports; i++) { 6312 struct ata_port *ap = host->ports[i]; 6313 6314 if (ap->ops->port_start) { 6315 rc = ap->ops->port_start(ap); 6316 if (rc) { 6317 if (rc != -ENODEV) 6318 dev_err(host->dev, 6319 "failed to start port %d (errno=%d)\n", 6320 i, rc); 6321 goto err_out; 6322 } 6323 } 6324 ata_eh_freeze_port(ap); 6325 } 6326 6327 if (start_dr) 6328 devres_add(host->dev, start_dr); 6329 host->flags |= ATA_HOST_STARTED; 6330 return 0; 6331 6332 err_out: 6333 while (--i >= 0) { 6334 struct ata_port *ap = host->ports[i]; 6335 6336 if (ap->ops->port_stop) 6337 ap->ops->port_stop(ap); 6338 } 6339 devres_free(start_dr); 6340 return rc; 6341 } 6342 6343 /** 6344 * ata_host_init - Initialize a host struct for sas (ipr, libsas) 6345 * @host: host to initialize 6346 * @dev: device host is attached to 6347 * @ops: port_ops 6348 * 6349 */ 6350 void ata_host_init(struct ata_host *host, struct device *dev, 6351 struct ata_port_operations *ops) 6352 { 6353 spin_lock_init(&host->lock); 6354 mutex_init(&host->eh_mutex); 6355 host->n_tags = ATA_MAX_QUEUE - 1; 6356 host->dev = dev; 6357 host->ops = ops; 6358 } 6359 6360 void __ata_port_probe(struct ata_port *ap) 6361 { 6362 struct ata_eh_info *ehi = &ap->link.eh_info; 6363 unsigned long flags; 6364 6365 /* kick EH for boot probing */ 6366 spin_lock_irqsave(ap->lock, flags); 6367 6368 ehi->probe_mask |= ATA_ALL_DEVICES; 6369 ehi->action |= ATA_EH_RESET; 6370 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 6371 6372 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 6373 ap->pflags |= ATA_PFLAG_LOADING; 6374
ata_port_schedule_eh(ap); 6375 6376 spin_unlock_irqrestore(ap->lock, flags); 6377 } 6378 6379 int ata_port_probe(struct ata_port *ap) 6380 { 6381 int rc = 0; 6382 6383 if (ap->ops->error_handler) { 6384 __ata_port_probe(ap); 6385 ata_port_wait_eh(ap); 6386 } else { 6387 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 6388 rc = ata_bus_probe(ap); 6389 DPRINTK("ata%u: bus probe end\n", ap->print_id); 6390 } 6391 return rc; 6392 } 6393 6394 6395 static void async_port_probe(void *data, async_cookie_t cookie) 6396 { 6397 struct ata_port *ap = data; 6398 6399 /* 6400 * If we're not allowed to scan this host in parallel, 6401 * we need to wait until all previous scans have completed 6402 * before going further. 6403 * Jeff Garzik says this is only within a controller, so we 6404 * don't need to wait for port 0, only for later ports. 6405 */ 6406 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) 6407 async_synchronize_cookie(cookie); 6408 6409 (void)ata_port_probe(ap); 6410 6411 /* in order to keep device order, we need to synchronize at this point */ 6412 async_synchronize_cookie(cookie); 6413 6414 ata_scsi_scan_host(ap, 1); 6415 } 6416 6417 /** 6418 * ata_host_register - register initialized ATA host 6419 * @host: ATA host to register 6420 * @sht: template for SCSI host 6421 * 6422 * Register initialized ATA host. @host is allocated using 6423 * ata_host_alloc() and fully initialized by LLD. This function 6424 * starts ports, registers @host with ATA and SCSI layers and 6425 * probes registered devices. 6426 * 6427 * LOCKING: 6428 * Inherited from calling layer (may sleep). 6429 * 6430 * RETURNS: 6431 * 0 on success, -errno otherwise. 6432 */ 6433 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 6434 { 6435 int i, rc; 6436 6437 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1); 6438 6439 /* host must have been started */ 6440 if (!(host->flags & ATA_HOST_STARTED)) { 6441 dev_err(host->dev, "BUG: trying to register unstarted host\n"); 6442 WARN_ON(1); 6443 return -EINVAL; 6444 } 6445 6446 /* Blow away unused ports. This happens when LLD can't 6447 * determine the exact number of ports to allocate at 6448 * allocation time. 6449 */ 6450 for (i = host->n_ports; host->ports[i]; i++) 6451 kfree(host->ports[i]); 6452 6453 /* give ports names and add SCSI hosts */ 6454 for (i = 0; i < host->n_ports; i++) { 6455 host->ports[i]->print_id = atomic_inc_return(&ata_print_id); 6456 host->ports[i]->local_port_no = i + 1; 6457 } 6458 6459 /* Create associated sysfs transport objects */ 6460 for (i = 0; i < host->n_ports; i++) { 6461 rc = ata_tport_add(host->dev, host->ports[i]); 6462 if (rc) { 6463 goto err_tadd; 6464 } 6465 } 6466 6467 rc = ata_scsi_add_hosts(host, sht); 6468 if (rc) 6469 goto err_tadd; 6470 6471 /* set cable, sata_spd_limit and report */ 6472 for (i = 0; i < host->n_ports; i++) { 6473 struct ata_port *ap = host->ports[i]; 6474 unsigned long xfer_mask; 6475 6476 /* set SATA cable type if still unset */ 6477 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 6478 ap->cbl = ATA_CBL_SATA; 6479 6480 /* init sata_spd_limit to the current value */ 6481 sata_link_init_spd(&ap->link); 6482 if (ap->slave_link) 6483 sata_link_init_spd(ap->slave_link); 6484 6485 /* print per-port info to dmesg */ 6486 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6487 ap->udma_mask); 6488 6489 if (!ata_port_is_dummy(ap)) { 6490 ata_port_info(ap, "%cATA max %s %s\n", 6491 (ap->flags & ATA_FLAG_SATA) ?
'S' : 'P', 6492 ata_mode_string(xfer_mask), 6493 ap->link.eh_info.desc); 6494 ata_ehi_clear_desc(&ap->link.eh_info); 6495 } else 6496 ata_port_info(ap, "DUMMY\n"); 6497 } 6498 6499 /* perform each probe asynchronously */ 6500 for (i = 0; i < host->n_ports; i++) { 6501 struct ata_port *ap = host->ports[i]; 6502 async_schedule(async_port_probe, ap); 6503 } 6504 6505 return 0; 6506 6507 err_tadd: 6508 while (--i >= 0) { 6509 ata_tport_delete(host->ports[i]); 6510 } 6511 return rc; 6512 6513 } 6514 6515 /** 6516 * ata_host_activate - start host, request IRQ and register it 6517 * @host: target ATA host 6518 * @irq: IRQ to request 6519 * @irq_handler: irq_handler used when requesting IRQ 6520 * @irq_flags: irq_flags used when requesting IRQ 6521 * @sht: scsi_host_template to use when registering the host 6522 * 6523 * After allocating an ATA host and initializing it, most libata 6524 * LLDs perform three steps to activate the host - start host, 6525 * request IRQ and register it. This helper takes necessary 6526 * arguments and performs the three steps in one go. 6527 * 6528 * An invalid IRQ skips the IRQ registration and expects the host to 6529 * have set polling mode on the port. In this case, @irq_handler 6530 * should be NULL. 6531 * 6532 * LOCKING: 6533 * Inherited from calling layer (may sleep). 6534 * 6535 * RETURNS: 6536 * 0 on success, -errno otherwise. 6537 */ 6538 int ata_host_activate(struct ata_host *host, int irq, 6539 irq_handler_t irq_handler, unsigned long irq_flags, 6540 struct scsi_host_template *sht) 6541 { 6542 int i, rc; 6543 char *irq_desc; 6544 6545 rc = ata_host_start(host); 6546 if (rc) 6547 return rc; 6548 6549 /* Special case for polling mode */ 6550 if (!irq) { 6551 WARN_ON(irq_handler); 6552 return ata_host_register(host, sht); 6553 } 6554 6555 irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]", 6556 dev_driver_string(host->dev), 6557 dev_name(host->dev)); 6558 if (!irq_desc) 6559 return -ENOMEM; 6560 6561 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 6562 irq_desc, host); 6563 if (rc) 6564 return rc; 6565 6566 for (i = 0; i < host->n_ports; i++) 6567 ata_port_desc(host->ports[i], "irq %d", irq); 6568 6569 rc = ata_host_register(host, sht); 6570 /* if failed, just free the IRQ and leave ports alone */ 6571 if (rc) 6572 devm_free_irq(host->dev, irq, host); 6573 6574 return rc; 6575 } 6576 6577 /** 6578 * ata_port_detach - Detach ATA port in preparation of device removal 6579 * @ap: ATA port to be detached 6580 * 6581 * Detach all ATA devices and the associated SCSI devices of @ap; 6582 * then, remove the associated SCSI host. @ap is guaranteed to 6583 * be quiescent on return from this function. 6584 * 6585 * LOCKING: 6586 * Kernel thread context (may sleep). 
6587 */ 6588 static void ata_port_detach(struct ata_port *ap) 6589 { 6590 unsigned long flags; 6591 struct ata_link *link; 6592 struct ata_device *dev; 6593 6594 if (!ap->ops->error_handler) 6595 goto skip_eh; 6596 6597 /* tell EH we're leaving & flush EH */ 6598 spin_lock_irqsave(ap->lock, flags); 6599 ap->pflags |= ATA_PFLAG_UNLOADING; 6600 ata_port_schedule_eh(ap); 6601 spin_unlock_irqrestore(ap->lock, flags); 6602 6603 /* wait till EH commits suicide */ 6604 ata_port_wait_eh(ap); 6605 6606 /* it better be dead now */ 6607 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); 6608 6609 cancel_delayed_work_sync(&ap->hotplug_task); 6610 6611 skip_eh: 6612 /* clean up zpodd on port removal */ 6613 ata_for_each_link(link, ap, HOST_FIRST) { 6614 ata_for_each_dev(dev, link, ALL) { 6615 if (zpodd_dev_enabled(dev)) 6616 zpodd_exit(dev); 6617 } 6618 } 6619 if (ap->pmp_link) { 6620 int i; 6621 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 6622 ata_tlink_delete(&ap->pmp_link[i]); 6623 } 6624 /* remove the associated SCSI host */ 6625 scsi_remove_host(ap->scsi_host); 6626 ata_tport_delete(ap); 6627 } 6628 6629 /** 6630 * ata_host_detach - Detach all ports of an ATA host 6631 * @host: Host to detach 6632 * 6633 * Detach all ports of @host. 6634 * 6635 * LOCKING: 6636 * Kernel thread context (may sleep). 6637 */ 6638 void ata_host_detach(struct ata_host *host) 6639 { 6640 int i; 6641 6642 for (i = 0; i < host->n_ports; i++) 6643 ata_port_detach(host->ports[i]); 6644 6645 /* the host is dead now, dissociate ACPI */ 6646 ata_acpi_dissociate(host); 6647 } 6648 6649 #ifdef CONFIG_PCI 6650 6651 /** 6652 * ata_pci_remove_one - PCI layer callback for device removal 6653 * @pdev: PCI device that was removed 6654 * 6655 * PCI layer indicates to libata via this hook that hot-unplug or 6656 * module unload event has occurred. Detach all ports. Resource 6657 * release is handled via devres. 6658 * 6659 * LOCKING: 6660 * Inherited from PCI layer (may sleep). 6661 */ 6662 void ata_pci_remove_one(struct pci_dev *pdev) 6663 { 6664 struct ata_host *host = pci_get_drvdata(pdev); 6665 6666 ata_host_detach(host); 6667 } 6668 6669 /* move to PCI subsystem */ 6670 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 6671 { 6672 unsigned long tmp = 0; 6673 6674 switch (bits->width) { 6675 case 1: { 6676 u8 tmp8 = 0; 6677 pci_read_config_byte(pdev, bits->reg, &tmp8); 6678 tmp = tmp8; 6679 break; 6680 } 6681 case 2: { 6682 u16 tmp16 = 0; 6683 pci_read_config_word(pdev, bits->reg, &tmp16); 6684 tmp = tmp16; 6685 break; 6686 } 6687 case 4: { 6688 u32 tmp32 = 0; 6689 pci_read_config_dword(pdev, bits->reg, &tmp32); 6690 tmp = tmp32; 6691 break; 6692 } 6693 6694 default: 6695 return -EINVAL; 6696 } 6697 6698 tmp &= bits->mask; 6699 6700 return (tmp == bits->val) ? 
1 : 0; 6701 } 6702 6703 #ifdef CONFIG_PM 6704 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 6705 { 6706 pci_save_state(pdev); 6707 pci_disable_device(pdev); 6708 6709 if (mesg.event & PM_EVENT_SLEEP) 6710 pci_set_power_state(pdev, PCI_D3hot); 6711 } 6712 6713 int ata_pci_device_do_resume(struct pci_dev *pdev) 6714 { 6715 int rc; 6716 6717 pci_set_power_state(pdev, PCI_D0); 6718 pci_restore_state(pdev); 6719 6720 rc = pcim_enable_device(pdev); 6721 if (rc) { 6722 dev_err(&pdev->dev, 6723 "failed to enable device after resume (%d)\n", rc); 6724 return rc; 6725 } 6726 6727 pci_set_master(pdev); 6728 return 0; 6729 } 6730 6731 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 6732 { 6733 struct ata_host *host = pci_get_drvdata(pdev); 6734 int rc = 0; 6735 6736 rc = ata_host_suspend(host, mesg); 6737 if (rc) 6738 return rc; 6739 6740 ata_pci_device_do_suspend(pdev, mesg); 6741 6742 return 0; 6743 } 6744 6745 int ata_pci_device_resume(struct pci_dev *pdev) 6746 { 6747 struct ata_host *host = pci_get_drvdata(pdev); 6748 int rc; 6749 6750 rc = ata_pci_device_do_resume(pdev); 6751 if (rc == 0) 6752 ata_host_resume(host); 6753 return rc; 6754 } 6755 #endif /* CONFIG_PM */ 6756 6757 #endif /* CONFIG_PCI */ 6758 6759 /** 6760 * ata_platform_remove_one - Platform layer callback for device removal 6761 * @pdev: Platform device that was removed 6762 * 6763 * Platform layer indicates to libata via this hook that hot-unplug or 6764 * module unload event has occurred. Detach all ports. Resource 6765 * release is handled via devres. 6766 * 6767 * LOCKING: 6768 * Inherited from platform layer (may sleep). 6769 */ 6770 int ata_platform_remove_one(struct platform_device *pdev) 6771 { 6772 struct ata_host *host = platform_get_drvdata(pdev); 6773 6774 ata_host_detach(host); 6775 6776 return 0; 6777 } 6778 6779 static int __init ata_parse_force_one(char **cur, 6780 struct ata_force_ent *force_ent, 6781 const char **reason) 6782 { 6783 static const struct ata_force_param force_tbl[] __initconst = { 6784 { "40c", .cbl = ATA_CBL_PATA40 }, 6785 { "80c", .cbl = ATA_CBL_PATA80 }, 6786 { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, 6787 { "unk", .cbl = ATA_CBL_PATA_UNK }, 6788 { "ign", .cbl = ATA_CBL_PATA_IGN }, 6789 { "sata", .cbl = ATA_CBL_SATA }, 6790 { "1.5Gbps", .spd_limit = 1 }, 6791 { "3.0Gbps", .spd_limit = 2 }, 6792 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 6793 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 6794 { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM }, 6795 { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM }, 6796 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, 6797 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 6798 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 6799 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 6800 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, 6801 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, 6802 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, 6803 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, 6804 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, 6805 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, 6806 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, 6807 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, 6808 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, 6809 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6810 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6811 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6812 { "udma1", .xfer_mask = 1 << 
(ATA_SHIFT_UDMA + 1) }, 6813 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6814 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6815 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6816 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6817 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6818 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6819 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6820 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6821 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6822 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6823 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6824 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6825 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6826 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6827 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6828 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6829 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6830 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, 6831 { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, 6832 { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, 6833 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6834 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, 6835 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, 6836 { "disable", .horkage_on = ATA_HORKAGE_DISABLE }, 6837 }; 6838 char *start = *cur, *p = *cur; 6839 char *id, *val, *endp; 6840 const struct ata_force_param *match_fp = NULL; 6841 int nr_matches = 0, i; 6842 6843 /* find where this param ends and update *cur */ 6844 while (*p != '\0' && *p != ',') 6845 p++; 6846 6847 if (*p == '\0') 6848 *cur = p; 6849 else 6850 *cur = p + 1; 6851 6852 *p = '\0'; 6853 6854 /* parse */ 6855 p = strchr(start, ':'); 6856 if (!p) { 6857 val = strstrip(start); 6858 goto parse_val; 6859 } 6860 *p = '\0'; 6861 6862 id = strstrip(start); 6863 val = strstrip(p + 1); 6864 6865 /* parse id */ 6866 p = strchr(id, '.'); 6867 if (p) { 6868 *p++ = '\0'; 6869 force_ent->device = simple_strtoul(p, &endp, 10); 6870 if (p == endp || *endp != '\0') { 6871 *reason = "invalid device"; 6872 return -EINVAL; 6873 } 6874 } 6875 6876 force_ent->port = simple_strtoul(id, &endp, 10); 6877 if (id == endp || *endp != '\0') { 6878 *reason = "invalid port/link"; 6879 return -EINVAL; 6880 } 6881 6882 parse_val: 6883 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */ 6884 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { 6885 const struct ata_force_param *fp = &force_tbl[i]; 6886 6887 if (strncasecmp(val, fp->name, strlen(val))) 6888 continue; 6889 6890 nr_matches++; 6891 match_fp = fp; 6892 6893 if (strcasecmp(val, fp->name) == 0) { 6894 nr_matches = 1; 6895 break; 6896 } 6897 } 6898 6899 if (!nr_matches) { 6900 *reason = "unknown value"; 6901 return -EINVAL; 6902 } 6903 if (nr_matches > 1) { 6904 *reason = "ambiguous value"; 6905 return -EINVAL; 6906 } 6907 6908 force_ent->param = *match_fp; 6909 6910 return 0; 6911 } 6912 6913 static void __init ata_parse_force_param(void) 6914 { 6915 int idx = 0, size = 1; 6916 int last_port = -1, last_device = -1; 6917 char *p, *cur, *next; 6918 6919 /* calculate maximum number of params and allocate force_tbl */ 6920 for (p = ata_force_param_buf; *p; p++) 6921 if (*p == ',') 6922 size++; 6923 6924 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL); 6925 if (!ata_force_tbl) { 6926 printk(KERN_WARNING "ata: failed to extend force table, " 6927 "libata.force ignored\n"); 6928
return; 6929 } 6930 6931 /* parse and populate the table */ 6932 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { 6933 const char *reason = ""; 6934 struct ata_force_ent te = { .port = -1, .device = -1 }; 6935 6936 next = cur; 6937 if (ata_parse_force_one(&next, &te, &reason)) { 6938 printk(KERN_WARNING "ata: failed to parse force " 6939 "parameter \"%s\" (%s)\n", 6940 cur, reason); 6941 continue; 6942 } 6943 6944 if (te.port == -1) { 6945 te.port = last_port; 6946 te.device = last_device; 6947 } 6948 6949 ata_force_tbl[idx++] = te; 6950 6951 last_port = te.port; 6952 last_device = te.device; 6953 } 6954 6955 ata_force_tbl_size = idx; 6956 } 6957 6958 static int __init ata_init(void) 6959 { 6960 int rc; 6961 6962 ata_parse_force_param(); 6963 6964 rc = ata_sff_init(); 6965 if (rc) { 6966 kfree(ata_force_tbl); 6967 return rc; 6968 } 6969 6970 libata_transport_init(); 6971 ata_scsi_transport_template = ata_attach_transport(); 6972 if (!ata_scsi_transport_template) { 6973 ata_sff_exit(); 6974 rc = -ENOMEM; 6975 goto err_out; 6976 } 6977 6978 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6979 return 0; 6980 6981 err_out: 6982 return rc; 6983 } 6984 6985 static void __exit ata_exit(void) 6986 { 6987 ata_release_transport(ata_scsi_transport_template); 6988 libata_transport_exit(); 6989 ata_sff_exit(); 6990 kfree(ata_force_tbl); 6991 } 6992 6993 subsys_initcall(ata_init); 6994 module_exit(ata_exit); 6995 6996 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1); 6997 6998 int ata_ratelimit(void) 6999 { 7000 return __ratelimit(&ratelimit); 7001 } 7002 7003 /** 7004 * ata_msleep - ATA EH owner aware msleep 7005 * @ap: ATA port to attribute the sleep to 7006 * @msecs: duration to sleep in milliseconds 7007 * 7008 * Sleeps @msecs. If the current task is owner of @ap's EH, the 7009 * ownership is released before going to sleep and reacquired 7010 * after the sleep is complete. IOW, other ports sharing the 7011 * @ap->host will be allowed to own the EH while this task is 7012 * sleeping. 7013 * 7014 * LOCKING: 7015 * Might sleep. 7016 */ 7017 void ata_msleep(struct ata_port *ap, unsigned int msecs) 7018 { 7019 bool owns_eh = ap && ap->host->eh_owner == current; 7020 7021 if (owns_eh) 7022 ata_eh_release(ap); 7023 7024 if (msecs < 20) { 7025 unsigned long usecs = msecs * USEC_PER_MSEC; 7026 usleep_range(usecs, usecs + 50); 7027 } else { 7028 msleep(msecs); 7029 } 7030 7031 if (owns_eh) 7032 ata_eh_acquire(ap); 7033 } 7034 7035 /** 7036 * ata_wait_register - wait until register value changes 7037 * @ap: ATA port to wait register for, can be NULL 7038 * @reg: IO-mapped register 7039 * @mask: Mask to apply to read register value 7040 * @val: Wait condition 7041 * @interval: polling interval in milliseconds 7042 * @timeout: timeout in milliseconds 7043 * 7044 * Waiting for some bits of register to change is a common 7045 * operation for ATA controllers. This function reads 32bit LE 7046 * IO-mapped register @reg and tests for the following condition. 7047 * 7048 * (*@reg & @mask) != @val 7049 * 7050 * If the condition is met, it returns; otherwise, the process is 7051 * repeated after @interval until timeout. 7052 * 7053 * LOCKING: 7054 * Kernel thread context (may sleep) 7055 * 7056 * RETURNS: 7057 * The final register value.
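 *
 * As a hedged usage sketch (the register offset and bit mask below are
 * made up purely for illustration), a caller waiting for bit 0 of a
 * status register to clear might do:
 *
 *	status = ata_wait_register(ap, mmio_base + 0x10, 0x1, 0x1, 25, 500);
 *	if (status & 0x1)
 *		(timed out: bit 0 never cleared within the 500ms timeout)
 *
 * i.e. the register is polled every 25ms until the masked value no
 * longer equals @val or 500ms elapse, and the caller re-checks the
 * returned value to tell the two cases apart.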
7058 */ 7059 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, 7060 unsigned long interval, unsigned long timeout) 7061 { 7062 unsigned long deadline; 7063 u32 tmp; 7064 7065 tmp = ioread32(reg); 7066 7067 /* Calculate timeout _after_ the first read to make sure 7068 * preceding writes reach the controller before starting to 7069 * eat away the timeout. 7070 */ 7071 deadline = ata_deadline(jiffies, timeout); 7072 7073 while ((tmp & mask) == val && time_before(jiffies, deadline)) { 7074 ata_msleep(ap, interval); 7075 tmp = ioread32(reg); 7076 } 7077 7078 return tmp; 7079 } 7080 7081 /** 7082 * sata_lpm_ignore_phy_events - test if PHY event should be ignored 7083 * @link: Link receiving the event 7084 * 7085 * Test whether the received PHY event has to be ignored or not. 7086 * 7087 * LOCKING: 7088 * None. 7089 * 7090 * RETURNS: 7091 * True if the event has to be ignored. 7092 */ 7093 bool sata_lpm_ignore_phy_events(struct ata_link *link) 7094 { 7095 unsigned long lpm_timeout = link->last_lpm_change + 7096 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY); 7097 7098 /* if LPM is enabled, PHYRDY doesn't mean anything */ 7099 if (link->lpm_policy > ATA_LPM_MAX_POWER) 7100 return true; 7101 7102 /* ignore the first PHY event after the LPM policy changed 7103 * as it might be spurious 7104 */ 7105 if ((link->flags & ATA_LFLAG_CHANGED) && 7106 time_before(jiffies, lpm_timeout)) 7107 return true; 7108 7109 return false; 7110 } 7111 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events); 7112 7113 /* 7114 * Dummy port_ops 7115 */ 7116 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 7117 { 7118 return AC_ERR_SYSTEM; 7119 } 7120 7121 static void ata_dummy_error_handler(struct ata_port *ap) 7122 { 7123 /* truly dummy */ 7124 } 7125 7126 struct ata_port_operations ata_dummy_port_ops = { 7127 .qc_prep = ata_noop_qc_prep, 7128 .qc_issue = ata_dummy_qc_issue, 7129 .error_handler = ata_dummy_error_handler, 7130 .sched_eh = ata_std_sched_eh, 7131 .end_eh = ata_std_end_eh, 7132 }; 7133 7134 const struct ata_port_info ata_dummy_port_info = { 7135 .port_ops = &ata_dummy_port_ops, 7136 }; 7137 7138 /* 7139 * Utility print functions 7140 */ 7141 void ata_port_printk(const struct ata_port *ap, const char *level, 7142 const char *fmt, ...) 7143 { 7144 struct va_format vaf; 7145 va_list args; 7146 7147 va_start(args, fmt); 7148 7149 vaf.fmt = fmt; 7150 vaf.va = &args; 7151 7152 printk("%sata%u: %pV", level, ap->print_id, &vaf); 7153 7154 va_end(args); 7155 } 7156 EXPORT_SYMBOL(ata_port_printk); 7157 7158 void ata_link_printk(const struct ata_link *link, const char *level, 7159 const char *fmt, ...) 7160 { 7161 struct va_format vaf; 7162 va_list args; 7163 7164 va_start(args, fmt); 7165 7166 vaf.fmt = fmt; 7167 vaf.va = &args; 7168 7169 if (sata_pmp_attached(link->ap) || link->ap->slave_link) 7170 printk("%sata%u.%02u: %pV", 7171 level, link->ap->print_id, link->pmp, &vaf); 7172 else 7173 printk("%sata%u: %pV", 7174 level, link->ap->print_id, &vaf); 7175 7176 va_end(args); 7177 } 7178 EXPORT_SYMBOL(ata_link_printk); 7179 7180 void ata_dev_printk(const struct ata_device *dev, const char *level, 7181 const char *fmt, ...)
7182 { 7183 struct va_format vaf; 7184 va_list args; 7185 7186 va_start(args, fmt); 7187 7188 vaf.fmt = fmt; 7189 vaf.va = &args; 7190 7191 printk("%sata%u.%02u: %pV", 7192 level, dev->link->ap->print_id, dev->link->pmp + dev->devno, 7193 &vaf); 7194 7195 va_end(args); 7196 } 7197 EXPORT_SYMBOL(ata_dev_printk); 7198 7199 void ata_print_version(const struct device *dev, const char *version) 7200 { 7201 dev_printk(KERN_DEBUG, dev, "version %s\n", version); 7202 } 7203 EXPORT_SYMBOL(ata_print_version); 7204 7205 /* 7206 * libata is essentially a library of internal helper functions for 7207 * low-level ATA host controller drivers. As such, the API/ABI is 7208 * likely to change as new drivers are added and updated. 7209 * Do not depend on ABI/API stability. 7210 */ 7211 EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 7212 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 7213 EXPORT_SYMBOL_GPL(sata_deb_timing_long); 7214 EXPORT_SYMBOL_GPL(ata_base_port_ops); 7215 EXPORT_SYMBOL_GPL(sata_port_ops); 7216 EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 7217 EXPORT_SYMBOL_GPL(ata_dummy_port_info); 7218 EXPORT_SYMBOL_GPL(ata_link_next); 7219 EXPORT_SYMBOL_GPL(ata_dev_next); 7220 EXPORT_SYMBOL_GPL(ata_std_bios_param); 7221 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); 7222 EXPORT_SYMBOL_GPL(ata_host_init); 7223 EXPORT_SYMBOL_GPL(ata_host_alloc); 7224 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 7225 EXPORT_SYMBOL_GPL(ata_slave_link_init); 7226 EXPORT_SYMBOL_GPL(ata_host_start); 7227 EXPORT_SYMBOL_GPL(ata_host_register); 7228 EXPORT_SYMBOL_GPL(ata_host_activate); 7229 EXPORT_SYMBOL_GPL(ata_host_detach); 7230 EXPORT_SYMBOL_GPL(ata_sg_init); 7231 EXPORT_SYMBOL_GPL(ata_qc_complete); 7232 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 7233 EXPORT_SYMBOL_GPL(atapi_cmd_type); 7234 EXPORT_SYMBOL_GPL(ata_tf_to_fis); 7235 EXPORT_SYMBOL_GPL(ata_tf_from_fis); 7236 EXPORT_SYMBOL_GPL(ata_pack_xfermask); 7237 EXPORT_SYMBOL_GPL(ata_unpack_xfermask); 7238 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); 7239 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); 7240 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 7241 EXPORT_SYMBOL_GPL(ata_mode_string); 7242 EXPORT_SYMBOL_GPL(ata_id_xfermask); 7243 EXPORT_SYMBOL_GPL(ata_do_set_mode); 7244 EXPORT_SYMBOL_GPL(ata_std_qc_defer); 7245 EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 7246 EXPORT_SYMBOL_GPL(ata_dev_disable); 7247 EXPORT_SYMBOL_GPL(sata_set_spd); 7248 EXPORT_SYMBOL_GPL(ata_wait_after_reset); 7249 EXPORT_SYMBOL_GPL(sata_link_debounce); 7250 EXPORT_SYMBOL_GPL(sata_link_resume); 7251 EXPORT_SYMBOL_GPL(sata_link_scr_lpm); 7252 EXPORT_SYMBOL_GPL(ata_std_prereset); 7253 EXPORT_SYMBOL_GPL(sata_link_hardreset); 7254 EXPORT_SYMBOL_GPL(sata_std_hardreset); 7255 EXPORT_SYMBOL_GPL(ata_std_postreset); 7256 EXPORT_SYMBOL_GPL(ata_dev_classify); 7257 EXPORT_SYMBOL_GPL(ata_dev_pair); 7258 EXPORT_SYMBOL_GPL(ata_ratelimit); 7259 EXPORT_SYMBOL_GPL(ata_msleep); 7260 EXPORT_SYMBOL_GPL(ata_wait_register); 7261 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 7262 EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 7263 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 7264 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 7265 EXPORT_SYMBOL_GPL(__ata_change_queue_depth); 7266 EXPORT_SYMBOL_GPL(sata_scr_valid); 7267 EXPORT_SYMBOL_GPL(sata_scr_read); 7268 EXPORT_SYMBOL_GPL(sata_scr_write); 7269 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 7270 EXPORT_SYMBOL_GPL(ata_link_online); 7271 EXPORT_SYMBOL_GPL(ata_link_offline); 7272 #ifdef CONFIG_PM 7273 EXPORT_SYMBOL_GPL(ata_host_suspend); 7274 EXPORT_SYMBOL_GPL(ata_host_resume); 7275 #endif /* CONFIG_PM */ 7276 
EXPORT_SYMBOL_GPL(ata_id_string); 7277 EXPORT_SYMBOL_GPL(ata_id_c_string); 7278 EXPORT_SYMBOL_GPL(ata_do_dev_read_id); 7279 EXPORT_SYMBOL_GPL(ata_scsi_simulate); 7280 7281 EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 7282 EXPORT_SYMBOL_GPL(ata_timing_find_mode); 7283 EXPORT_SYMBOL_GPL(ata_timing_compute); 7284 EXPORT_SYMBOL_GPL(ata_timing_merge); 7285 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); 7286 7287 #ifdef CONFIG_PCI 7288 EXPORT_SYMBOL_GPL(pci_test_config_bits); 7289 EXPORT_SYMBOL_GPL(ata_pci_remove_one); 7290 #ifdef CONFIG_PM 7291 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 7292 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 7293 EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 7294 EXPORT_SYMBOL_GPL(ata_pci_device_resume); 7295 #endif /* CONFIG_PM */ 7296 #endif /* CONFIG_PCI */ 7297 7298 EXPORT_SYMBOL_GPL(ata_platform_remove_one); 7299 7300 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 7301 EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 7302 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 7303 EXPORT_SYMBOL_GPL(ata_port_desc); 7304 #ifdef CONFIG_PCI 7305 EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 7306 #endif /* CONFIG_PCI */ 7307 EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 7308 EXPORT_SYMBOL_GPL(ata_link_abort); 7309 EXPORT_SYMBOL_GPL(ata_port_abort); 7310 EXPORT_SYMBOL_GPL(ata_port_freeze); 7311 EXPORT_SYMBOL_GPL(sata_async_notification); 7312 EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 7313 EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 7314 EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 7315 EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 7316 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); 7317 EXPORT_SYMBOL_GPL(ata_do_eh); 7318 EXPORT_SYMBOL_GPL(ata_std_error_handler); 7319 7320 EXPORT_SYMBOL_GPL(ata_cable_40wire); 7321 EXPORT_SYMBOL_GPL(ata_cable_80wire); 7322 EXPORT_SYMBOL_GPL(ata_cable_unknown); 7323 EXPORT_SYMBOL_GPL(ata_cable_ignore); 7324 EXPORT_SYMBOL_GPL(ata_cable_sata); 7325