// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/leds.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>

#include "libata.h"
#include "libata-transport.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

atomic_t ata_print_id = ATOMIC_INIT(0);

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
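
/*
 * Illustrative example only (syntax per
 * Documentation/admin-guide/kernel-parameters.rst and the ata_force_cbl()
 * comment below): booting with "libata.force=1.00:40c,udma4" records a
 * force entry that assumes a 40-conductor cable on port 1 and limits the
 * first fan-out device on that port to UDMA/66.
 */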
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf,
		    sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}

/**
 * ata_link_next - link iteration helper
 * @link: the previous link, NULL to start
 * @ap: ATA port containing links to iterate
 * @mode: iteration mode, one of ATA_LITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}

/**
 * ata_dev_next - device iteration helper
 * @dev: the previous device, NULL to start
 * @link: ATA link containing devices to iterate
 * @mode: iteration mode, one of ATA_DITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}

/**
 * ata_dev_phys_link - find physical link for a device
 * @dev: ATA device to look up physical link for
 *
 * Look up physical link which @dev is attached to. Note that
 * this is different from @dev->link only when @dev is on slave
 * link. For all other cases, it's the same as @dev->link.
 *
 * LOCKING:
 * Don't care.
 *
 * RETURNS:
 * Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}

/**
 * ata_force_cbl - force cable type according to libata.force
 * @ap: ATA port of interest
 *
 * Force cable type according to libata.force and whine about it.
 * The last entry which has matching port number is used, so it
 * can be specified as part of device force parameters. For
 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 * same effect.
 *
 * LOCKING:
 * EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 * ata_force_link_limits - force link limits according to libata.force
 * @link: ATA link of interest
 *
 * Force link flags and SATA spd limit according to libata.force
 * and whine about it. When only the port part is specified
 * (e.g. 1:), the limit applies to all links connected to both
 * the host link and all fan-out ports connected via PMP. If the
 * device part is specified as 0 (e.g. 1.00:), it specifies the
 * first fan-out link, not the host link. Device number 15 always
 * points to the host link whether PMP is attached or not. If the
 * controller has a slave link, device number 16 points to it.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}

/**
 * ata_force_xfermask - force xfermask according to libata.force
 * @dev: ATA device of interest
 *
 * Force xfer_mask according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}

/**
 * ata_force_horkage - force horkage according to libata.force
 * @dev: ATA device of interest
 *
 * Force horkage according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}

/**
 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 * @opcode: SCSI opcode
 *
 * Determine ATAPI command type from @opcode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall thru */
	default:
		return ATAPI_MISC;
	}
}

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for command
 * @fis: Buffer into which data will be output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = tf->auxiliary & 0xff;
	fis[17] = (tf->auxiliary >> 8) & 0xff;
	fis[18] = (tf->auxiliary >> 16) & 0xff;
	fis[19] = (tf->auxiliary >> 24) & 0xff;
}

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
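
/*
 * Example of the ata_rw_cmds[] indexing used above: a DMA, LBA48, FUA
 * write selects index 16 + 4 + 2 + 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT,
 * while a PIO read on a device with multi_count set selects index 0,
 * i.e. ATA_CMD_READ_MULTI. A zero table entry means the combination is
 * invalid and ata_rwcmd_protocol() returns -1.
 */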
/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf. This function can handle all
 * three address formats - LBA, LBA48 and CHS. tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			return U64_MAX;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}

/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 * @class: IO priority class
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag, int class)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;

		if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
			if (class == IOPRIO_CLASS_RT)
				tf->hob_nsect |= ATA_PRIO_HIGH <<
						 ATA_SHIFT_PRIO;
		}
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
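
/*
 * Worked CHS example for ata_build_rw_tf() above (assumed geometry, for
 * illustration only): with dev->heads == 16 and dev->sectors == 63,
 * block 1000 gives track = 1000 / 63 = 15, cyl = 15 / 16 = 0,
 * head = 15 % 16 = 15 and sect = 1000 % 63 + 1 = 56, all within the
 * cylinder/head/sector ranges checked above.
 */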
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask. Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
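
/*
 * Illustrative example of the mask/mode mapping (constants from
 * <linux/ata.h>): ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, 0) sets the
 * PIO0-4 and MWDMA0-2 bits of the packed mask; since the MWDMA bits sit
 * above the PIO bits, ata_xfer_mask2mode() on that mask returns
 * XFER_MW_DMA_2, and ata_unpack_xfermask() recovers the original three
 * per-type masks.
 */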
/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those. It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively. However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices. Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature. This is worked around in
	 * ata_dev_read_id().
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
		DPRINTK("found ZAC device by sig\n");
		return ATA_DEV_ZAC;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks. Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
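
/*
 * The IDENTIFY string fields store two ASCII characters per 16-bit word
 * with the first character in the high byte; e.g. a word containing
 * 0x4142 is emitted as 'A' followed by 'B' by the loop above.
 */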
/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null. @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 * ata_read_native_max_address - Read native max address
 * @dev: target device
 * @max_sectors: out parameter for the result native max address
 *
 * Perform an LBA48 or LBA28 native size query upon the device in
 * question.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted by the drive.
 * -EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 * ata_set_max_sectors - Set max sectors
 * @dev: target device
 * @new_sectors: new max sectors value to set for the device
 *
 * Set max sectors of @dev to @new_sectors.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted or denied (due to
 * previous non-volatile SET_MAX) by the drive. -EIO on other
 * errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
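
/*
 * Illustrative HPA scenario for ata_hpa_resize() above: a drive whose
 * BIOS-set max address is below the native max reports the clipped
 * capacity in IDENTIFY; with the ignore_hpa module parameter (or
 * ATA_DFLAG_UNLOCK_HPA) set, ata_set_max_sectors() is issued with the
 * native value and IDENTIFY data is re-read so the full capacity takes
 * effect.
 */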
/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x\n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x\n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds, not the modes, that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sgl: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 * @timeout: Timeout in msecs (0 for default)
 *
 * Executes libata internal command with timeout. @tf contains
 * command on entry and result on return. Timeout and error
 * conditions are reported via return value. No recovery action
 * is taken after a command times out. It's caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None. Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int preempted_tag;
	u32 preempted_sactive;
	u64 preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */
	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);

	qc->tag = ATA_TAG_INTERNAL;
	qc->hw_tag = 0;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here. If we lose, the
		 * following test prevents us from completing the qc
		 * twice. If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
		qc->result_tf.command |= ATA_SENSE;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 * @timeout: Timeout in msecs (0 for default)
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None. Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset. IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied. See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY. Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the highest mode possible if we are not using iordy. Return
 * -1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}
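
/*
 * The values returned above are plain xfer masks: 3 << ATA_SHIFT_PIO
 * covers PIO0-1 and 7 << ATA_SHIFT_PIO covers PIO0-2, i.e. the PIO modes
 * a drive can be expected to run without IORDY flow control.
 */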
/**
 * ata_do_dev_read_id - default ID read method
 * @dev: device
 * @tf: proposed taskfile
 * @id: data buffer
 *
 * Issue the identify taskfile and hand back the buffer containing
 * identify data. For some RAID controllers and for pre ATA devices
 * this function is wrapped or replaced by the driver
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device. ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 * now we abort if we hit that case.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		/* fall through */
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage. Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers. Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
				     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class. Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
		    ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				    "host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
			     reason, err_mask);
	return rc;
}
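
/*
 * ata_read_log_page() below is the common helper for the log probes in
 * this file; ata_log_supported(), for example, reads one sector of the
 * General Purpose Log Directory (log 0x00, page 0) into ap->sector_buf
 * and checks the per-log page count recorded there.
 */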
2055 */ 2056 if (ap_flags & ATA_FLAG_NO_LOG_PAGE) 2057 return AC_ERR_DEV; 2058 2059 retry: 2060 ata_tf_init(dev, &tf); 2061 if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) && 2062 !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) { 2063 tf.command = ATA_CMD_READ_LOG_DMA_EXT; 2064 tf.protocol = ATA_PROT_DMA; 2065 dma = true; 2066 } else { 2067 tf.command = ATA_CMD_READ_LOG_EXT; 2068 tf.protocol = ATA_PROT_PIO; 2069 dma = false; 2070 } 2071 tf.lbal = log; 2072 tf.lbam = page; 2073 tf.nsect = sectors; 2074 tf.hob_nsect = sectors >> 8; 2075 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 2076 2077 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 2078 buf, sectors * ATA_SECT_SIZE, 0); 2079 2080 if (err_mask && dma) { 2081 dev->horkage |= ATA_HORKAGE_NO_DMA_LOG; 2082 ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n"); 2083 goto retry; 2084 } 2085 2086 DPRINTK("EXIT, err_mask=%x\n", err_mask); 2087 return err_mask; 2088 } 2089 2090 static bool ata_log_supported(struct ata_device *dev, u8 log) 2091 { 2092 struct ata_port *ap = dev->link->ap; 2093 2094 if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1)) 2095 return false; 2096 return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false; 2097 } 2098 2099 static bool ata_identify_page_supported(struct ata_device *dev, u8 page) 2100 { 2101 struct ata_port *ap = dev->link->ap; 2102 unsigned int err, i; 2103 2104 if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) { 2105 ata_dev_warn(dev, "ATA Identify Device Log not supported\n"); 2106 return false; 2107 } 2108 2109 /* 2110 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is 2111 * supported. 2112 */ 2113 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf, 2114 1); 2115 if (err) { 2116 ata_dev_info(dev, 2117 "failed to get Device Identify Log Emask 0x%x\n", 2118 err); 2119 return false; 2120 } 2121 2122 for (i = 0; i < ap->sector_buf[8]; i++) { 2123 if (ap->sector_buf[9 + i] == page) 2124 return true; 2125 } 2126 2127 return false; 2128 } 2129 2130 static int ata_do_link_spd_horkage(struct ata_device *dev) 2131 { 2132 struct ata_link *plink = ata_dev_phys_link(dev); 2133 u32 target, target_limit; 2134 2135 if (!sata_scr_valid(plink)) 2136 return 0; 2137 2138 if (dev->horkage & ATA_HORKAGE_1_5_GBPS) 2139 target = 1; 2140 else 2141 return 0; 2142 2143 target_limit = (1 << target) - 1; 2144 2145 /* if already on stricter limit, no need to push further */ 2146 if (plink->sata_spd_limit <= target_limit) 2147 return 0; 2148 2149 plink->sata_spd_limit = target_limit; 2150 2151 /* Request another EH round by returning -EAGAIN if link is 2152 * going faster than the target speed. Forward progress is 2153 * guaranteed by setting sata_spd_limit to target_limit above. 
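 * For example, with ATA_HORKAGE_1_5_GBPS set, target is 1, so
 * target_limit = (1 << 1) - 1 = 0x1 and sata_spd_limit ends up
 * allowing only the 1.5 Gbps bit.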
2154 */ 2155 if (plink->sata_spd > target) { 2156 ata_dev_info(dev, "applying link speed limit horkage to %s\n", 2157 sata_spd_string(target)); 2158 return -EAGAIN; 2159 } 2160 return 0; 2161 } 2162 2163 static inline u8 ata_dev_knobble(struct ata_device *dev) 2164 { 2165 struct ata_port *ap = dev->link->ap; 2166 2167 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) 2168 return 0; 2169 2170 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2171 } 2172 2173 static void ata_dev_config_ncq_send_recv(struct ata_device *dev) 2174 { 2175 struct ata_port *ap = dev->link->ap; 2176 unsigned int err_mask; 2177 2178 if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) { 2179 ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n"); 2180 return; 2181 } 2182 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV, 2183 0, ap->sector_buf, 1); 2184 if (err_mask) { 2185 ata_dev_dbg(dev, 2186 "failed to get NCQ Send/Recv Log Emask 0x%x\n", 2187 err_mask); 2188 } else { 2189 u8 *cmds = dev->ncq_send_recv_cmds; 2190 2191 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; 2192 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); 2193 2194 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { 2195 ata_dev_dbg(dev, "disabling queued TRIM support\n"); 2196 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= 2197 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; 2198 } 2199 } 2200 } 2201 2202 static void ata_dev_config_ncq_non_data(struct ata_device *dev) { 2203 2204 struct ata_port *ap = dev->link->ap; 2205 unsigned int err_mask; 2206 2207 if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) { 2208 ata_dev_warn(dev, 2209 "NCQ Non-Data Log not supported\n"); 2210 return; 2211 } 2212 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA, 2213 0, ap->sector_buf, 1); 2214 if (err_mask) { 2215 ata_dev_dbg(dev, 2216 "failed to get NCQ Non-Data Log Emask 0x%x\n", 2217 err_mask); 2218 } else { 2219 u8 *cmds = dev->ncq_non_data_cmds; 2220 2221 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE); 2222 } 2223 } 2224 2225 static void ata_dev_config_ncq_prio(struct ata_device *dev) 2226 { 2227 struct ata_port *ap = dev->link->ap; 2228 unsigned int err_mask; 2229 2230 if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) { 2231 dev->flags &= ~ATA_DFLAG_NCQ_PRIO; 2232 return; 2233 } 2234 2235 err_mask = ata_read_log_page(dev, 2236 ATA_LOG_IDENTIFY_DEVICE, 2237 ATA_LOG_SATA_SETTINGS, 2238 ap->sector_buf, 2239 1); 2240 if (err_mask) { 2241 ata_dev_dbg(dev, 2242 "failed to get Identify Device data, Emask 0x%x\n", 2243 err_mask); 2244 return; 2245 } 2246 2247 if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) { 2248 dev->flags |= ATA_DFLAG_NCQ_PRIO; 2249 } else { 2250 dev->flags &= ~ATA_DFLAG_NCQ_PRIO; 2251 ata_dev_dbg(dev, "SATA page does not support priority\n"); 2252 } 2253 2254 } 2255 2256 static int ata_dev_config_ncq(struct ata_device *dev, 2257 char *desc, size_t desc_sz) 2258 { 2259 struct ata_port *ap = dev->link->ap; 2260 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 2261 unsigned int err_mask; 2262 char *aa_desc = ""; 2263 2264 if (!ata_id_has_ncq(dev->id)) { 2265 desc[0] = '\0'; 2266 return 0; 2267 } 2268 if (dev->horkage & ATA_HORKAGE_NONCQ) { 2269 snprintf(desc, desc_sz, "NCQ (not used)"); 2270 return 0; 2271 } 2272 if (ap->flags & ATA_FLAG_NCQ) { 2273 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE); 2274 dev->flags |= ATA_DFLAG_NCQ; 2275 } 2276 2277 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && 2278 (ap->flags & ATA_FLAG_FPDMA_AA) && 2279 ata_id_has_fpdma_aa(dev->id)) { 2280 err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, 2281 SATA_FPDMA_AA); 2282 if (err_mask) { 2283 ata_dev_err(dev, 2284 "failed to enable AA (error_mask=0x%x)\n", 2285 err_mask); 2286 if (err_mask != AC_ERR_DEV) { 2287 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; 2288 return -EIO; 2289 } 2290 } else 2291 aa_desc = ", AA"; 2292 } 2293 2294 if (hdepth >= ddepth) 2295 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); 2296 else 2297 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, 2298 ddepth, aa_desc); 2299 2300 if ((ap->flags & ATA_FLAG_FPDMA_AUX)) { 2301 if (ata_id_has_ncq_send_and_recv(dev->id)) 2302 ata_dev_config_ncq_send_recv(dev); 2303 if (ata_id_has_ncq_non_data(dev->id)) 2304 ata_dev_config_ncq_non_data(dev); 2305 if (ata_id_has_ncq_prio(dev->id)) 2306 ata_dev_config_ncq_prio(dev); 2307 } 2308 2309 return 0; 2310 } 2311 2312 static void ata_dev_config_sense_reporting(struct ata_device *dev) 2313 { 2314 unsigned int err_mask; 2315 2316 if (!ata_id_has_sense_reporting(dev->id)) 2317 return; 2318 2319 if (ata_id_sense_reporting_enabled(dev->id)) 2320 return; 2321 2322 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1); 2323 if (err_mask) { 2324 ata_dev_dbg(dev, 2325 "failed to enable Sense Data Reporting, Emask 0x%x\n", 2326 err_mask); 2327 } 2328 } 2329 2330 static void ata_dev_config_zac(struct ata_device *dev) 2331 { 2332 struct ata_port *ap = dev->link->ap; 2333 unsigned int err_mask; 2334 u8 *identify_buf = ap->sector_buf; 2335 2336 dev->zac_zones_optimal_open = U32_MAX; 2337 dev->zac_zones_optimal_nonseq = U32_MAX; 2338 dev->zac_zones_max_open = U32_MAX; 2339 2340 /* 2341 * Always set the 'ZAC' flag for Host-managed devices. 2342 */ 2343 if (dev->class == ATA_DEV_ZAC) 2344 dev->flags |= ATA_DFLAG_ZAC; 2345 else if (ata_id_zoned_cap(dev->id) == 0x01) 2346 /* 2347 * Check for host-aware devices. 
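 * (ata_id_zoned_cap() extracts the zoned-capabilities field from the
 * IDENTIFY data; 0x01 is the value a host-aware drive is expected to
 * report there.)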
2348 */ 2349 dev->flags |= ATA_DFLAG_ZAC; 2350 2351 if (!(dev->flags & ATA_DFLAG_ZAC)) 2352 return; 2353 2354 if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) { 2355 ata_dev_warn(dev, 2356 "ATA Zoned Information Log not supported\n"); 2357 return; 2358 } 2359 2360 /* 2361 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information) 2362 */ 2363 err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 2364 ATA_LOG_ZONED_INFORMATION, 2365 identify_buf, 1); 2366 if (!err_mask) { 2367 u64 zoned_cap, opt_open, opt_nonseq, max_open; 2368 2369 zoned_cap = get_unaligned_le64(&identify_buf[8]); 2370 if ((zoned_cap >> 63)) 2371 dev->zac_zoned_cap = (zoned_cap & 1); 2372 opt_open = get_unaligned_le64(&identify_buf[24]); 2373 if ((opt_open >> 63)) 2374 dev->zac_zones_optimal_open = (u32)opt_open; 2375 opt_nonseq = get_unaligned_le64(&identify_buf[32]); 2376 if ((opt_nonseq >> 63)) 2377 dev->zac_zones_optimal_nonseq = (u32)opt_nonseq; 2378 max_open = get_unaligned_le64(&identify_buf[40]); 2379 if ((max_open >> 63)) 2380 dev->zac_zones_max_open = (u32)max_open; 2381 } 2382 } 2383 2384 static void ata_dev_config_trusted(struct ata_device *dev) 2385 { 2386 struct ata_port *ap = dev->link->ap; 2387 u64 trusted_cap; 2388 unsigned int err; 2389 2390 if (!ata_id_has_trusted(dev->id)) 2391 return; 2392 2393 if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) { 2394 ata_dev_warn(dev, 2395 "Security Log not supported\n"); 2396 return; 2397 } 2398 2399 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY, 2400 ap->sector_buf, 1); 2401 if (err) { 2402 ata_dev_dbg(dev, 2403 "failed to read Security Log, Emask 0x%x\n", err); 2404 return; 2405 } 2406 2407 trusted_cap = get_unaligned_le64(&ap->sector_buf[40]); 2408 if (!(trusted_cap & (1ULL << 63))) { 2409 ata_dev_dbg(dev, 2410 "Trusted Computing capability qword not valid!\n"); 2411 return; 2412 } 2413 2414 if (trusted_cap & (1 << 0)) 2415 dev->flags |= ATA_DFLAG_TRUSTED; 2416 } 2417 2418 /** 2419 * ata_dev_configure - Configure the specified ATA/ATAPI device 2420 * @dev: Target device to configure 2421 * 2422 * Configure @dev according to @dev->id. Generic and low-level 2423 * driver specific fixups are also applied. 2424 * 2425 * LOCKING: 2426 * Kernel thread context (may sleep) 2427 * 2428 * RETURNS: 2429 * 0 on success, -errno otherwise 2430 */ 2431 int ata_dev_configure(struct ata_device *dev) 2432 { 2433 struct ata_port *ap = dev->link->ap; 2434 struct ata_eh_context *ehc = &dev->link->eh_context; 2435 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2436 const u16 *id = dev->id; 2437 unsigned long xfer_mask; 2438 unsigned int err_mask; 2439 char revbuf[7]; /* XYZ-99\0 */ 2440 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2441 char modelbuf[ATA_ID_PROD_LEN+1]; 2442 int rc; 2443 2444 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2445 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__); 2446 return 0; 2447 } 2448 2449 if (ata_msg_probe(ap)) 2450 ata_dev_dbg(dev, "%s: ENTER\n", __func__); 2451 2452 /* set horkage */ 2453 dev->horkage |= ata_dev_blacklisted(dev); 2454 ata_force_horkage(dev); 2455 2456 if (dev->horkage & ATA_HORKAGE_DISABLE) { 2457 ata_dev_info(dev, "unsupported device, disabling\n"); 2458 ata_dev_disable(dev); 2459 return 0; 2460 } 2461 2462 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && 2463 dev->class == ATA_DEV_ATAPI) { 2464 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n", 2465 atapi_enabled ? 
"not supported with this driver" 2466 : "disabled"); 2467 ata_dev_disable(dev); 2468 return 0; 2469 } 2470 2471 rc = ata_do_link_spd_horkage(dev); 2472 if (rc) 2473 return rc; 2474 2475 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */ 2476 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) && 2477 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) 2478 dev->horkage |= ATA_HORKAGE_NOLPM; 2479 2480 if (ap->flags & ATA_FLAG_NO_LPM) 2481 dev->horkage |= ATA_HORKAGE_NOLPM; 2482 2483 if (dev->horkage & ATA_HORKAGE_NOLPM) { 2484 ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); 2485 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; 2486 } 2487 2488 /* let ACPI work its magic */ 2489 rc = ata_acpi_on_devcfg(dev); 2490 if (rc) 2491 return rc; 2492 2493 /* massage HPA, do it early as it might change IDENTIFY data */ 2494 rc = ata_hpa_resize(dev); 2495 if (rc) 2496 return rc; 2497 2498 /* print device capabilities */ 2499 if (ata_msg_probe(ap)) 2500 ata_dev_dbg(dev, 2501 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2502 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2503 __func__, 2504 id[49], id[82], id[83], id[84], 2505 id[85], id[86], id[87], id[88]); 2506 2507 /* initialize to-be-configured parameters */ 2508 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2509 dev->max_sectors = 0; 2510 dev->cdb_len = 0; 2511 dev->n_sectors = 0; 2512 dev->cylinders = 0; 2513 dev->heads = 0; 2514 dev->sectors = 0; 2515 dev->multi_count = 0; 2516 2517 /* 2518 * common ATA, ATAPI feature tests 2519 */ 2520 2521 /* find max transfer mode; for printk only */ 2522 xfer_mask = ata_id_xfermask(id); 2523 2524 if (ata_msg_probe(ap)) 2525 ata_dump_id(id); 2526 2527 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2528 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2529 sizeof(fwrevbuf)); 2530 2531 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2532 sizeof(modelbuf)); 2533 2534 /* ATA-specific feature tests */ 2535 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { 2536 if (ata_id_is_cfa(id)) { 2537 /* CPRM may make this media unusable */ 2538 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2539 ata_dev_warn(dev, 2540 "supports DRM functions and may not be fully accessible\n"); 2541 snprintf(revbuf, 7, "CFA"); 2542 } else { 2543 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2544 /* Warn the user if the device has TPM extensions */ 2545 if (ata_id_has_tpm(id)) 2546 ata_dev_warn(dev, 2547 "supports DRM functions and may not be fully accessible\n"); 2548 } 2549 2550 dev->n_sectors = ata_id_n_sectors(id); 2551 2552 /* get current R/W Multiple count setting */ 2553 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { 2554 unsigned int max = dev->id[47] & 0xff; 2555 unsigned int cnt = dev->id[59] & 0xff; 2556 /* only recognize/allow powers of two here */ 2557 if (is_power_of_2(max) && is_power_of_2(cnt)) 2558 if (cnt <= max) 2559 dev->multi_count = cnt; 2560 } 2561 2562 if (ata_id_has_lba(id)) { 2563 const char *lba_desc; 2564 char ncq_desc[24]; 2565 2566 lba_desc = "LBA"; 2567 dev->flags |= ATA_DFLAG_LBA; 2568 if (ata_id_has_lba48(id)) { 2569 dev->flags |= ATA_DFLAG_LBA48; 2570 lba_desc = "LBA48"; 2571 2572 if (dev->n_sectors >= (1UL << 28) && 2573 ata_id_has_flush_ext(id)) 2574 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2575 } 2576 2577 /* config NCQ */ 2578 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2579 if (rc) 2580 return rc; 2581 2582 /* print device info to dmesg */ 2583 if (ata_msg_drv(ap) && print_info) { 2584 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2585 revbuf, modelbuf, 
fwrevbuf, 2586 ata_mode_string(xfer_mask)); 2587 ata_dev_info(dev, 2588 "%llu sectors, multi %u: %s %s\n", 2589 (unsigned long long)dev->n_sectors, 2590 dev->multi_count, lba_desc, ncq_desc); 2591 } 2592 } else { 2593 /* CHS */ 2594 2595 /* Default translation */ 2596 dev->cylinders = id[1]; 2597 dev->heads = id[3]; 2598 dev->sectors = id[6]; 2599 2600 if (ata_id_current_chs_valid(id)) { 2601 /* Current CHS translation is valid. */ 2602 dev->cylinders = id[54]; 2603 dev->heads = id[55]; 2604 dev->sectors = id[56]; 2605 } 2606 2607 /* print device info to dmesg */ 2608 if (ata_msg_drv(ap) && print_info) { 2609 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2610 revbuf, modelbuf, fwrevbuf, 2611 ata_mode_string(xfer_mask)); 2612 ata_dev_info(dev, 2613 "%llu sectors, multi %u, CHS %u/%u/%u\n", 2614 (unsigned long long)dev->n_sectors, 2615 dev->multi_count, dev->cylinders, 2616 dev->heads, dev->sectors); 2617 } 2618 } 2619 2620 /* Check and mark DevSlp capability. Get DevSlp timing variables 2621 * from SATA Settings page of Identify Device Data Log. 2622 */ 2623 if (ata_id_has_devslp(dev->id)) { 2624 u8 *sata_setting = ap->sector_buf; 2625 int i, j; 2626 2627 dev->flags |= ATA_DFLAG_DEVSLP; 2628 err_mask = ata_read_log_page(dev, 2629 ATA_LOG_IDENTIFY_DEVICE, 2630 ATA_LOG_SATA_SETTINGS, 2631 sata_setting, 2632 1); 2633 if (err_mask) 2634 ata_dev_dbg(dev, 2635 "failed to get Identify Device Data, Emask 0x%x\n", 2636 err_mask); 2637 else 2638 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { 2639 j = ATA_LOG_DEVSLP_OFFSET + i; 2640 dev->devslp_timing[i] = sata_setting[j]; 2641 } 2642 } 2643 ata_dev_config_sense_reporting(dev); 2644 ata_dev_config_zac(dev); 2645 ata_dev_config_trusted(dev); 2646 dev->cdb_len = 32; 2647 } 2648 2649 /* ATAPI-specific feature tests */ 2650 else if (dev->class == ATA_DEV_ATAPI) { 2651 const char *cdb_intr_string = ""; 2652 const char *atapi_an_string = ""; 2653 const char *dma_dir_string = ""; 2654 u32 sntf; 2655 2656 rc = atapi_cdb_len(id); 2657 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2658 if (ata_msg_warn(ap)) 2659 ata_dev_warn(dev, "unsupported CDB len\n"); 2660 rc = -EINVAL; 2661 goto err_out_nosup; 2662 } 2663 dev->cdb_len = (unsigned int) rc; 2664 2665 /* Enable ATAPI AN if both the host and device have 2666 * the support. If PMP is attached, SNTF is required 2667 * to enable ATAPI AN to discern between PHY status 2668 * changed notifications and ATAPI ANs. 
2669 */ 2670 if (atapi_an && 2671 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2672 (!sata_pmp_attached(ap) || 2673 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2674 /* issue SET feature command to turn this on */ 2675 err_mask = ata_dev_set_feature(dev, 2676 SETFEATURES_SATA_ENABLE, SATA_AN); 2677 if (err_mask) 2678 ata_dev_err(dev, 2679 "failed to enable ATAPI AN (err_mask=0x%x)\n", 2680 err_mask); 2681 else { 2682 dev->flags |= ATA_DFLAG_AN; 2683 atapi_an_string = ", ATAPI AN"; 2684 } 2685 } 2686 2687 if (ata_id_cdb_intr(dev->id)) { 2688 dev->flags |= ATA_DFLAG_CDB_INTR; 2689 cdb_intr_string = ", CDB intr"; 2690 } 2691 2692 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) { 2693 dev->flags |= ATA_DFLAG_DMADIR; 2694 dma_dir_string = ", DMADIR"; 2695 } 2696 2697 if (ata_id_has_da(dev->id)) { 2698 dev->flags |= ATA_DFLAG_DA; 2699 zpodd_init(dev); 2700 } 2701 2702 /* print device info to dmesg */ 2703 if (ata_msg_drv(ap) && print_info) 2704 ata_dev_info(dev, 2705 "ATAPI: %s, %s, max %s%s%s%s\n", 2706 modelbuf, fwrevbuf, 2707 ata_mode_string(xfer_mask), 2708 cdb_intr_string, atapi_an_string, 2709 dma_dir_string); 2710 } 2711 2712 /* determine max_sectors */ 2713 dev->max_sectors = ATA_MAX_SECTORS; 2714 if (dev->flags & ATA_DFLAG_LBA48) 2715 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2716 2717 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2718 200 sectors */ 2719 if (ata_dev_knobble(dev)) { 2720 if (ata_msg_drv(ap) && print_info) 2721 ata_dev_info(dev, "applying bridge limits\n"); 2722 dev->udma_mask &= ATA_UDMA5; 2723 dev->max_sectors = ATA_MAX_SECTORS; 2724 } 2725 2726 if ((dev->class == ATA_DEV_ATAPI) && 2727 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2728 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2729 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2730 } 2731 2732 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2733 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2734 dev->max_sectors); 2735 2736 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024) 2737 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024, 2738 dev->max_sectors); 2739 2740 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) 2741 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2742 2743 if (ap->ops->dev_config) 2744 ap->ops->dev_config(dev); 2745 2746 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2747 /* Let the user know. We don't want to disallow opens for 2748 rescue purposes, or in case the vendor is just a blithering 2749 idiot. Do this after the dev_config call as some controllers 2750 with buggy firmware may want to avoid reporting false device 2751 bugs */ 2752 2753 if (print_info) { 2754 ata_dev_warn(dev, 2755 "Drive reports diagnostics failure. This may indicate a drive\n"); 2756 ata_dev_warn(dev, 2757 "fault or invalid emulation. Contact drive vendor for information.\n"); 2758 } 2759 } 2760 2761 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { 2762 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n"); 2763 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n"); 2764 } 2765 2766 return 0; 2767 2768 err_out_nosup: 2769 if (ata_msg_probe(ap)) 2770 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__); 2771 return rc; 2772 } 2773 2774 /** 2775 * ata_cable_40wire - return 40 wire cable type 2776 * @ap: port 2777 * 2778 * Helper method for drivers which want to hardwire 40 wire cable 2779 * detection. 
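 *
 * A driver whose port is known to be wired with a 40-wire cable can,
 * for instance, simply point its port operations at this helper:
 *
 *	.cable_detect	= ata_cable_40wire,
 *
 * and let the reported cable type limit the transfer modes from there.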
2780 */ 2781 2782 int ata_cable_40wire(struct ata_port *ap) 2783 { 2784 return ATA_CBL_PATA40; 2785 } 2786 2787 /** 2788 * ata_cable_80wire - return 80 wire cable type 2789 * @ap: port 2790 * 2791 * Helper method for drivers which want to hardwire 80 wire cable 2792 * detection. 2793 */ 2794 2795 int ata_cable_80wire(struct ata_port *ap) 2796 { 2797 return ATA_CBL_PATA80; 2798 } 2799 2800 /** 2801 * ata_cable_unknown - return unknown PATA cable. 2802 * @ap: port 2803 * 2804 * Helper method for drivers which have no PATA cable detection. 2805 */ 2806 2807 int ata_cable_unknown(struct ata_port *ap) 2808 { 2809 return ATA_CBL_PATA_UNK; 2810 } 2811 2812 /** 2813 * ata_cable_ignore - return ignored PATA cable. 2814 * @ap: port 2815 * 2816 * Helper method for drivers which don't use cable type to limit 2817 * transfer mode. 2818 */ 2819 int ata_cable_ignore(struct ata_port *ap) 2820 { 2821 return ATA_CBL_PATA_IGN; 2822 } 2823 2824 /** 2825 * ata_cable_sata - return SATA cable type 2826 * @ap: port 2827 * 2828 * Helper method for drivers which have SATA cables 2829 */ 2830 2831 int ata_cable_sata(struct ata_port *ap) 2832 { 2833 return ATA_CBL_SATA; 2834 } 2835 2836 /** 2837 * ata_bus_probe - Reset and probe ATA bus 2838 * @ap: Bus to probe 2839 * 2840 * Master ATA bus probing function. Initiates a hardware-dependent 2841 * bus reset, then attempts to identify any devices found on 2842 * the bus. 2843 * 2844 * LOCKING: 2845 * PCI/etc. bus probe sem. 2846 * 2847 * RETURNS: 2848 * Zero on success, negative errno otherwise. 2849 */ 2850 2851 int ata_bus_probe(struct ata_port *ap) 2852 { 2853 unsigned int classes[ATA_MAX_DEVICES]; 2854 int tries[ATA_MAX_DEVICES]; 2855 int rc; 2856 struct ata_device *dev; 2857 2858 ata_for_each_dev(dev, &ap->link, ALL) 2859 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2860 2861 retry: 2862 ata_for_each_dev(dev, &ap->link, ALL) { 2863 /* If we issue an SRST then an ATA drive (not ATAPI) 2864 * may change configuration and be in PIO0 timing. If 2865 * we do a hard reset (or are coming from power on) 2866 * this is true for ATA or ATAPI. Until we've set a 2867 * suitable controller mode we should not touch the 2868 * bus as we may be talking too fast. 2869 */ 2870 dev->pio_mode = XFER_PIO_0; 2871 dev->dma_mode = 0xff; 2872 2873 /* If the controller has a pio mode setup function 2874 * then use it to set the chipset to rights. Don't 2875 * touch the DMA setup as that will be dealt with when 2876 * configuring devices. 2877 */ 2878 if (ap->ops->set_piomode) 2879 ap->ops->set_piomode(ap, dev); 2880 } 2881 2882 /* reset and determine device classes */ 2883 ap->ops->phy_reset(ap); 2884 2885 ata_for_each_dev(dev, &ap->link, ALL) { 2886 if (dev->class != ATA_DEV_UNKNOWN) 2887 classes[dev->devno] = dev->class; 2888 else 2889 classes[dev->devno] = ATA_DEV_NONE; 2890 2891 dev->class = ATA_DEV_UNKNOWN; 2892 } 2893 2894 /* read IDENTIFY page and configure devices. 
We have to do the identify 2895 specific sequence bass-ackwards so that PDIAG- is released by 2896 the slave device */ 2897 2898 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { 2899 if (tries[dev->devno]) 2900 dev->class = classes[dev->devno]; 2901 2902 if (!ata_dev_enabled(dev)) 2903 continue; 2904 2905 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2906 dev->id); 2907 if (rc) 2908 goto fail; 2909 } 2910 2911 /* Now ask for the cable type as PDIAG- should have been released */ 2912 if (ap->ops->cable_detect) 2913 ap->cbl = ap->ops->cable_detect(ap); 2914 2915 /* We may have SATA bridge glue hiding here irrespective of 2916 * the reported cable types and sensed types. When SATA 2917 * drives indicate we have a bridge, we don't know which end 2918 * of the link the bridge is which is a problem. 2919 */ 2920 ata_for_each_dev(dev, &ap->link, ENABLED) 2921 if (ata_id_is_sata(dev->id)) 2922 ap->cbl = ATA_CBL_SATA; 2923 2924 /* After the identify sequence we can now set up the devices. We do 2925 this in the normal order so that the user doesn't get confused */ 2926 2927 ata_for_each_dev(dev, &ap->link, ENABLED) { 2928 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2929 rc = ata_dev_configure(dev); 2930 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2931 if (rc) 2932 goto fail; 2933 } 2934 2935 /* configure transfer mode */ 2936 rc = ata_set_mode(&ap->link, &dev); 2937 if (rc) 2938 goto fail; 2939 2940 ata_for_each_dev(dev, &ap->link, ENABLED) 2941 return 0; 2942 2943 return -ENODEV; 2944 2945 fail: 2946 tries[dev->devno]--; 2947 2948 switch (rc) { 2949 case -EINVAL: 2950 /* eeek, something went very wrong, give up */ 2951 tries[dev->devno] = 0; 2952 break; 2953 2954 case -ENODEV: 2955 /* give it just one more chance */ 2956 tries[dev->devno] = min(tries[dev->devno], 1); 2957 /* fall through */ 2958 case -EIO: 2959 if (tries[dev->devno] == 1) { 2960 /* This is the last chance, better to slow 2961 * down than lose it. 2962 */ 2963 sata_down_spd_limit(&ap->link, 0); 2964 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2965 } 2966 } 2967 2968 if (!tries[dev->devno]) 2969 ata_dev_disable(dev); 2970 2971 goto retry; 2972 } 2973 2974 /** 2975 * sata_print_link_status - Print SATA link status 2976 * @link: SATA link to printk link status about 2977 * 2978 * This function prints link speed and status of a SATA link. 2979 * 2980 * LOCKING: 2981 * None. 
2982 */ 2983 static void sata_print_link_status(struct ata_link *link) 2984 { 2985 u32 sstatus, scontrol, tmp; 2986 2987 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2988 return; 2989 sata_scr_read(link, SCR_CONTROL, &scontrol); 2990 2991 if (ata_phys_link_online(link)) { 2992 tmp = (sstatus >> 4) & 0xf; 2993 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n", 2994 sata_spd_string(tmp), sstatus, scontrol); 2995 } else { 2996 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n", 2997 sstatus, scontrol); 2998 } 2999 } 3000 3001 /** 3002 * ata_dev_pair - return other device on cable 3003 * @adev: device 3004 * 3005 * Obtain the other device on the same cable, or if none is 3006 * present NULL is returned 3007 */ 3008 3009 struct ata_device *ata_dev_pair(struct ata_device *adev) 3010 { 3011 struct ata_link *link = adev->link; 3012 struct ata_device *pair = &link->device[1 - adev->devno]; 3013 if (!ata_dev_enabled(pair)) 3014 return NULL; 3015 return pair; 3016 } 3017 3018 /** 3019 * sata_down_spd_limit - adjust SATA spd limit downward 3020 * @link: Link to adjust SATA spd limit for 3021 * @spd_limit: Additional limit 3022 * 3023 * Adjust SATA spd limit of @link downward. Note that this 3024 * function only adjusts the limit. The change must be applied 3025 * using sata_set_spd(). 3026 * 3027 * If @spd_limit is non-zero, the speed is limited to equal to or 3028 * lower than @spd_limit if such speed is supported. If 3029 * @spd_limit is slower than any supported speed, only the lowest 3030 * supported speed is allowed. 3031 * 3032 * LOCKING: 3033 * Inherited from caller. 3034 * 3035 * RETURNS: 3036 * 0 on success, negative errno on failure 3037 */ 3038 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) 3039 { 3040 u32 sstatus, spd, mask; 3041 int rc, bit; 3042 3043 if (!sata_scr_valid(link)) 3044 return -EOPNOTSUPP; 3045 3046 /* If SCR can be read, use it to determine the current SPD. 3047 * If not, use cached value in link->sata_spd. 3048 */ 3049 rc = sata_scr_read(link, SCR_STATUS, &sstatus); 3050 if (rc == 0 && ata_sstatus_online(sstatus)) 3051 spd = (sstatus >> 4) & 0xf; 3052 else 3053 spd = link->sata_spd; 3054 3055 mask = link->sata_spd_limit; 3056 if (mask <= 1) 3057 return -EINVAL; 3058 3059 /* unconditionally mask off the highest bit */ 3060 bit = fls(mask) - 1; 3061 mask &= ~(1 << bit); 3062 3063 /* 3064 * Mask off all speeds higher than or equal to the current one. At 3065 * this point, if current SPD is not available and we previously 3066 * recorded the link speed from SStatus, the driver has already 3067 * masked off the highest bit so mask should already be 1 or 0. 3068 * Otherwise, we should not force 1.5Gbps on a link where we have 3069 * not previously recorded speed from SStatus. Just return in this 3070 * case. 3071 */ 3072 if (spd > 1) 3073 mask &= (1 << (spd - 1)) - 1; 3074 else 3075 return -EINVAL; 3076 3077 /* were we already at the bottom? 
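 * (For instance, if sata_spd_limit was 0x2, i.e. 3.0 Gbps only, and
 * the link is already running at 3.0 Gbps, dropping the highest bit
 * empties the mask and there is nothing slower left to fall back to.)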
*/ 3078 if (!mask) 3079 return -EINVAL; 3080 3081 if (spd_limit) { 3082 if (mask & ((1 << spd_limit) - 1)) 3083 mask &= (1 << spd_limit) - 1; 3084 else { 3085 bit = ffs(mask) - 1; 3086 mask = 1 << bit; 3087 } 3088 } 3089 3090 link->sata_spd_limit = mask; 3091 3092 ata_link_warn(link, "limiting SATA link speed to %s\n", 3093 sata_spd_string(fls(mask))); 3094 3095 return 0; 3096 } 3097 3098 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 3099 { 3100 struct ata_link *host_link = &link->ap->link; 3101 u32 limit, target, spd; 3102 3103 limit = link->sata_spd_limit; 3104 3105 /* Don't configure downstream link faster than upstream link. 3106 * It doesn't speed up anything and some PMPs choke on such 3107 * configuration. 3108 */ 3109 if (!ata_is_host_link(link) && host_link->sata_spd) 3110 limit &= (1 << host_link->sata_spd) - 1; 3111 3112 if (limit == UINT_MAX) 3113 target = 0; 3114 else 3115 target = fls(limit); 3116 3117 spd = (*scontrol >> 4) & 0xf; 3118 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 3119 3120 return spd != target; 3121 } 3122 3123 /** 3124 * sata_set_spd_needed - is SATA spd configuration needed 3125 * @link: Link in question 3126 * 3127 * Test whether the spd limit in SControl matches 3128 * @link->sata_spd_limit. This function is used to determine 3129 * whether hardreset is necessary to apply SATA spd 3130 * configuration. 3131 * 3132 * LOCKING: 3133 * Inherited from caller. 3134 * 3135 * RETURNS: 3136 * 1 if SATA spd configuration is needed, 0 otherwise. 3137 */ 3138 static int sata_set_spd_needed(struct ata_link *link) 3139 { 3140 u32 scontrol; 3141 3142 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 3143 return 1; 3144 3145 return __sata_set_spd_needed(link, &scontrol); 3146 } 3147 3148 /** 3149 * sata_set_spd - set SATA spd according to spd limit 3150 * @link: Link to set SATA spd for 3151 * 3152 * Set SATA spd of @link according to sata_spd_limit. 3153 * 3154 * LOCKING: 3155 * Inherited from caller. 3156 * 3157 * RETURNS: 3158 * 0 if spd doesn't need to be changed, 1 if spd has been 3159 * changed. Negative errno if SCR registers are inaccessible. 3160 */ 3161 int sata_set_spd(struct ata_link *link) 3162 { 3163 u32 scontrol; 3164 int rc; 3165 3166 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3167 return rc; 3168 3169 if (!__sata_set_spd_needed(link, &scontrol)) 3170 return 0; 3171 3172 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3173 return rc; 3174 3175 return 1; 3176 } 3177 3178 /* 3179 * This mode timing computation functionality is ported over from 3180 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 3181 */ 3182 /* 3183 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 3184 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 3185 * for UDMA6, which is currently supported only by Maxtor drives. 3186 * 3187 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
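 *
 * Each entry lists the mode followed by the setup, act8b, rec8b,
 * cyc8b, active, recover, dmack_hold, cycle and udma times (the
 * struct ata_timing field order), all in nanoseconds. They are
 * converted to bus clock counts by ata_timing_quantize() below using
 * EZ()/ENOUGH(), a round-up division; e.g. the 165 ns PIO0 active
 * time with a clock period of 30000 (the * 1000 in EZ() suggests the
 * period is given in picoseconds) quantizes to
 * ENOUGH(165 * 1000, 30000) = 6 clocks.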
3188 */ 3189 3190 static const struct ata_timing ata_timing[] = { 3191 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ 3192 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, 3193 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, 3194 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, 3195 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, 3196 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, 3197 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, 3198 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, 3199 3200 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, 3201 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, 3202 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, 3203 3204 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, 3205 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, 3206 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, 3207 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, 3208 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, 3209 3210 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 3211 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, 3212 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, 3213 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, 3214 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, 3215 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, 3216 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, 3217 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, 3218 3219 { 0xFF } 3220 }; 3221 3222 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 3223 #define EZ(v, unit) ((v)?ENOUGH(((v) * 1000), unit):0) 3224 3225 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 3226 { 3227 q->setup = EZ(t->setup, T); 3228 q->act8b = EZ(t->act8b, T); 3229 q->rec8b = EZ(t->rec8b, T); 3230 q->cyc8b = EZ(t->cyc8b, T); 3231 q->active = EZ(t->active, T); 3232 q->recover = EZ(t->recover, T); 3233 q->dmack_hold = EZ(t->dmack_hold, T); 3234 q->cycle = EZ(t->cycle, T); 3235 q->udma = EZ(t->udma, UT); 3236 } 3237 3238 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 3239 struct ata_timing *m, unsigned int what) 3240 { 3241 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 3242 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 3243 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 3244 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 3245 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 3246 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 3247 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); 3248 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 3249 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 3250 } 3251 3252 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 3253 { 3254 const struct ata_timing *t = ata_timing; 3255 3256 while (xfer_mode > t->mode) 3257 t++; 3258 3259 if (xfer_mode == t->mode) 3260 return t; 3261 3262 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", 3263 __func__, xfer_mode); 3264 3265 return NULL; 3266 } 3267 3268 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 3269 struct ata_timing *t, int T, int UT) 3270 { 3271 const u16 *id = adev->id; 3272 const struct ata_timing *s; 3273 struct ata_timing p; 3274 3275 /* 3276 * Find the mode. 
3277 */ 3278 3279 if (!(s = ata_timing_find_mode(speed))) 3280 return -EINVAL; 3281 3282 memcpy(t, s, sizeof(*s)); 3283 3284 /* 3285 * If the drive is an EIDE drive, it can tell us it needs extended 3286 * PIO/MW_DMA cycle timing. 3287 */ 3288 3289 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 3290 memset(&p, 0, sizeof(p)); 3291 3292 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) { 3293 if (speed <= XFER_PIO_2) 3294 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; 3295 else if ((speed <= XFER_PIO_4) || 3296 (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) 3297 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; 3298 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) 3299 p.cycle = id[ATA_ID_EIDE_DMA_MIN]; 3300 3301 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 3302 } 3303 3304 /* 3305 * Convert the timing to bus clock counts. 3306 */ 3307 3308 ata_timing_quantize(t, t, T, UT); 3309 3310 /* 3311 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 3312 * S.M.A.R.T and some other commands. We have to ensure that the 3313 * DMA cycle timing is slower than or equal to the fastest PIO timing. 3314 */ 3315 3316 if (speed > XFER_PIO_6) { 3317 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 3318 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 3319 } 3320 3321 /* 3322 * Lengthen active & recovery time so that cycle time is correct. 3323 */ 3324 3325 if (t->act8b + t->rec8b < t->cyc8b) { 3326 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 3327 t->rec8b = t->cyc8b - t->act8b; 3328 } 3329 3330 if (t->active + t->recover < t->cycle) { 3331 t->active += (t->cycle - (t->active + t->recover)) / 2; 3332 t->recover = t->cycle - t->active; 3333 } 3334 3335 /* In a few cases quantisation may produce enough errors to 3336 leave t->cycle too low for the sum of active and recovery; 3337 if so, we must correct this */ 3338 if (t->active + t->recover > t->cycle) 3339 t->cycle = t->active + t->recover; 3340 3341 return 0; 3342 } 3343 3344 /** 3345 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration 3346 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. 3347 * @cycle: cycle duration in ns 3348 * 3349 * Return matching xfer mode for @cycle. The returned mode is of 3350 * the transfer type specified by @xfer_shift. If @cycle is too 3351 * slow for @xfer_shift, 0xff is returned. If @cycle is faster 3352 * than the fastest known mode, the fastest mode is returned. 3353 * 3354 * LOCKING: 3355 * None. 3356 * 3357 * RETURNS: 3358 * Matching xfer_mode, 0xff if no match found.
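 *
 * For instance, asking for ATA_SHIFT_UDMA with a 25 ns cycle returns
 * XFER_UDMA_4: its 30 ns entry is the fastest UDMA timing in
 * ata_timing[] that is not shorter than the requested cycle.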
3359 */ 3360 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3361 { 3362 u8 base_mode = 0xff, last_mode = 0xff; 3363 const struct ata_xfer_ent *ent; 3364 const struct ata_timing *t; 3365 3366 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3367 if (ent->shift == xfer_shift) 3368 base_mode = ent->base; 3369 3370 for (t = ata_timing_find_mode(base_mode); 3371 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3372 unsigned short this_cycle; 3373 3374 switch (xfer_shift) { 3375 case ATA_SHIFT_PIO: 3376 case ATA_SHIFT_MWDMA: 3377 this_cycle = t->cycle; 3378 break; 3379 case ATA_SHIFT_UDMA: 3380 this_cycle = t->udma; 3381 break; 3382 default: 3383 return 0xff; 3384 } 3385 3386 if (cycle > this_cycle) 3387 break; 3388 3389 last_mode = t->mode; 3390 } 3391 3392 return last_mode; 3393 } 3394 3395 /** 3396 * ata_down_xfermask_limit - adjust dev xfer masks downward 3397 * @dev: Device to adjust xfer masks 3398 * @sel: ATA_DNXFER_* selector 3399 * 3400 * Adjust xfer masks of @dev downward. Note that this function 3401 * does not apply the change. Invoking ata_set_mode() afterwards 3402 * will apply the limit. 3403 * 3404 * LOCKING: 3405 * Inherited from caller. 3406 * 3407 * RETURNS: 3408 * 0 on success, negative errno on failure 3409 */ 3410 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3411 { 3412 char buf[32]; 3413 unsigned long orig_mask, xfer_mask; 3414 unsigned long pio_mask, mwdma_mask, udma_mask; 3415 int quiet, highbit; 3416 3417 quiet = !!(sel & ATA_DNXFER_QUIET); 3418 sel &= ~ATA_DNXFER_QUIET; 3419 3420 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3421 dev->mwdma_mask, 3422 dev->udma_mask); 3423 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3424 3425 switch (sel) { 3426 case ATA_DNXFER_PIO: 3427 highbit = fls(pio_mask) - 1; 3428 pio_mask &= ~(1 << highbit); 3429 break; 3430 3431 case ATA_DNXFER_DMA: 3432 if (udma_mask) { 3433 highbit = fls(udma_mask) - 1; 3434 udma_mask &= ~(1 << highbit); 3435 if (!udma_mask) 3436 return -ENOENT; 3437 } else if (mwdma_mask) { 3438 highbit = fls(mwdma_mask) - 1; 3439 mwdma_mask &= ~(1 << highbit); 3440 if (!mwdma_mask) 3441 return -ENOENT; 3442 } 3443 break; 3444 3445 case ATA_DNXFER_40C: 3446 udma_mask &= ATA_UDMA_MASK_40C; 3447 break; 3448 3449 case ATA_DNXFER_FORCE_PIO0: 3450 pio_mask &= 1; 3451 /* fall through */ 3452 case ATA_DNXFER_FORCE_PIO: 3453 mwdma_mask = 0; 3454 udma_mask = 0; 3455 break; 3456 3457 default: 3458 BUG(); 3459 } 3460 3461 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3462 3463 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3464 return -ENOENT; 3465 3466 if (!quiet) { 3467 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3468 snprintf(buf, sizeof(buf), "%s:%s", 3469 ata_mode_string(xfer_mask), 3470 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3471 else 3472 snprintf(buf, sizeof(buf), "%s", 3473 ata_mode_string(xfer_mask)); 3474 3475 ata_dev_warn(dev, "limiting speed to %s\n", buf); 3476 } 3477 3478 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3479 &dev->udma_mask); 3480 3481 return 0; 3482 } 3483 3484 static int ata_dev_set_mode(struct ata_device *dev) 3485 { 3486 struct ata_port *ap = dev->link->ap; 3487 struct ata_eh_context *ehc = &dev->link->eh_context; 3488 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; 3489 const char *dev_err_whine = ""; 3490 int ign_dev_err = 0; 3491 unsigned int err_mask = 0; 3492 int rc; 3493 3494 dev->flags &= ~ATA_DFLAG_PIO; 3495 if (dev->xfer_shift == 
ATA_SHIFT_PIO) 3496 dev->flags |= ATA_DFLAG_PIO; 3497 3498 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) 3499 dev_err_whine = " (SET_XFERMODE skipped)"; 3500 else { 3501 if (nosetxfer) 3502 ata_dev_warn(dev, 3503 "NOSETXFER but PATA detected - can't " 3504 "skip SETXFER, might malfunction\n"); 3505 err_mask = ata_dev_set_xfermode(dev); 3506 } 3507 3508 if (err_mask & ~AC_ERR_DEV) 3509 goto fail; 3510 3511 /* revalidate */ 3512 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3513 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3514 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3515 if (rc) 3516 return rc; 3517 3518 if (dev->xfer_shift == ATA_SHIFT_PIO) { 3519 /* Old CFA may refuse this command, which is just fine */ 3520 if (ata_id_is_cfa(dev->id)) 3521 ign_dev_err = 1; 3522 /* Catch several broken garbage emulations plus some pre 3523 ATA devices */ 3524 if (ata_id_major_version(dev->id) == 0 && 3525 dev->pio_mode <= XFER_PIO_2) 3526 ign_dev_err = 1; 3527 /* Some very old devices and some bad newer ones fail 3528 any kind of SET_XFERMODE request but support PIO0-2 3529 timings and no IORDY */ 3530 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) 3531 ign_dev_err = 1; 3532 } 3533 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3534 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3535 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3536 dev->dma_mode == XFER_MW_DMA_0 && 3537 (dev->id[63] >> 8) & 1) 3538 ign_dev_err = 1; 3539 3540 /* if the device is actually configured correctly, ignore dev err */ 3541 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3542 ign_dev_err = 1; 3543 3544 if (err_mask & AC_ERR_DEV) { 3545 if (!ign_dev_err) 3546 goto fail; 3547 else 3548 dev_err_whine = " (device error ignored)"; 3549 } 3550 3551 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3552 dev->xfer_shift, (int)dev->xfer_mode); 3553 3554 if (!(ehc->i.flags & ATA_EHI_QUIET) || 3555 ehc->i.flags & ATA_EHI_DID_HARDRESET) 3556 ata_dev_info(dev, "configured for %s%s\n", 3557 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3558 dev_err_whine); 3559 3560 return 0; 3561 3562 fail: 3563 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask); 3564 return -EIO; 3565 } 3566 3567 /** 3568 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3569 * @link: link on which timings will be programmed 3570 * @r_failed_dev: out parameter for failed device 3571 * 3572 * Standard implementation of the function used to tune and set 3573 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3574 * ata_dev_set_mode() fails, pointer to the failing device is 3575 * returned in @r_failed_dev. 3576 * 3577 * LOCKING: 3578 * PCI/etc. bus probe sem. 
3579 * 3580 * RETURNS: 3581 * 0 on success, negative errno otherwise 3582 */ 3583 3584 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3585 { 3586 struct ata_port *ap = link->ap; 3587 struct ata_device *dev; 3588 int rc = 0, used_dma = 0, found = 0; 3589 3590 /* step 1: calculate xfer_mask */ 3591 ata_for_each_dev(dev, link, ENABLED) { 3592 unsigned long pio_mask, dma_mask; 3593 unsigned int mode_mask; 3594 3595 mode_mask = ATA_DMA_MASK_ATA; 3596 if (dev->class == ATA_DEV_ATAPI) 3597 mode_mask = ATA_DMA_MASK_ATAPI; 3598 else if (ata_id_is_cfa(dev->id)) 3599 mode_mask = ATA_DMA_MASK_CFA; 3600 3601 ata_dev_xfermask(dev); 3602 ata_force_xfermask(dev); 3603 3604 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 3605 3606 if (libata_dma_mask & mode_mask) 3607 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, 3608 dev->udma_mask); 3609 else 3610 dma_mask = 0; 3611 3612 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 3613 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3614 3615 found = 1; 3616 if (ata_dma_enabled(dev)) 3617 used_dma = 1; 3618 } 3619 if (!found) 3620 goto out; 3621 3622 /* step 2: always set host PIO timings */ 3623 ata_for_each_dev(dev, link, ENABLED) { 3624 if (dev->pio_mode == 0xff) { 3625 ata_dev_warn(dev, "no PIO support\n"); 3626 rc = -EINVAL; 3627 goto out; 3628 } 3629 3630 dev->xfer_mode = dev->pio_mode; 3631 dev->xfer_shift = ATA_SHIFT_PIO; 3632 if (ap->ops->set_piomode) 3633 ap->ops->set_piomode(ap, dev); 3634 } 3635 3636 /* step 3: set host DMA timings */ 3637 ata_for_each_dev(dev, link, ENABLED) { 3638 if (!ata_dma_enabled(dev)) 3639 continue; 3640 3641 dev->xfer_mode = dev->dma_mode; 3642 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); 3643 if (ap->ops->set_dmamode) 3644 ap->ops->set_dmamode(ap, dev); 3645 } 3646 3647 /* step 4: update devices' xfer mode */ 3648 ata_for_each_dev(dev, link, ENABLED) { 3649 rc = ata_dev_set_mode(dev); 3650 if (rc) 3651 goto out; 3652 } 3653 3654 /* Record simplex status. If we selected DMA then the other 3655 * host channels are not permitted to do so. 3656 */ 3657 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3658 ap->host->simplex_claimed = ap; 3659 3660 out: 3661 if (rc) 3662 *r_failed_dev = dev; 3663 return rc; 3664 } 3665 3666 /** 3667 * ata_wait_ready - wait for link to become ready 3668 * @link: link to be waited on 3669 * @deadline: deadline jiffies for the operation 3670 * @check_ready: callback to check link readiness 3671 * 3672 * Wait for @link to become ready. @check_ready should return 3673 * positive number if @link is ready, 0 if it isn't, -ENODEV if 3674 * link doesn't seem to be occupied, other errno for other error 3675 * conditions. 3676 * 3677 * Transient -ENODEV conditions are allowed for 3678 * ATA_TMOUT_FF_WAIT. 3679 * 3680 * LOCKING: 3681 * EH context. 3682 * 3683 * RETURNS: 3684 * 0 if @link is ready before @deadline; otherwise, -errno. 3685 */ 3686 int ata_wait_ready(struct ata_link *link, unsigned long deadline, 3687 int (*check_ready)(struct ata_link *link)) 3688 { 3689 unsigned long start = jiffies; 3690 unsigned long nodev_deadline; 3691 int warned = 0; 3692 3693 /* choose which 0xff timeout to use, read comment in libata.h */ 3694 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) 3695 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); 3696 else 3697 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); 3698 3699 /* Slave readiness can't be tested separately from master. 
On 3700 * M/S emulation configuration, this function should be called 3701 * only on the master and it will handle both master and slave. 3702 */ 3703 WARN_ON(link == link->ap->slave_link); 3704 3705 if (time_after(nodev_deadline, deadline)) 3706 nodev_deadline = deadline; 3707 3708 while (1) { 3709 unsigned long now = jiffies; 3710 int ready, tmp; 3711 3712 ready = tmp = check_ready(link); 3713 if (ready > 0) 3714 return 0; 3715 3716 /* 3717 * -ENODEV could be transient. Ignore -ENODEV if link 3718 * is online. Also, some SATA devices take a long 3719 * time to clear 0xff after reset. Wait for 3720 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't 3721 * offline. 3722 * 3723 * Note that some PATA controllers (pata_ali) explode 3724 * if status register is read more than once when 3725 * there's no device attached. 3726 */ 3727 if (ready == -ENODEV) { 3728 if (ata_link_online(link)) 3729 ready = 0; 3730 else if ((link->ap->flags & ATA_FLAG_SATA) && 3731 !ata_link_offline(link) && 3732 time_before(now, nodev_deadline)) 3733 ready = 0; 3734 } 3735 3736 if (ready) 3737 return ready; 3738 if (time_after(now, deadline)) 3739 return -EBUSY; 3740 3741 if (!warned && time_after(now, start + 5 * HZ) && 3742 (deadline - now > 3 * HZ)) { 3743 ata_link_warn(link, 3744 "link is slow to respond, please be patient " 3745 "(ready=%d)\n", tmp); 3746 warned = 1; 3747 } 3748 3749 ata_msleep(link->ap, 50); 3750 } 3751 } 3752 3753 /** 3754 * ata_wait_after_reset - wait for link to become ready after reset 3755 * @link: link to be waited on 3756 * @deadline: deadline jiffies for the operation 3757 * @check_ready: callback to check link readiness 3758 * 3759 * Wait for @link to become ready after reset. 3760 * 3761 * LOCKING: 3762 * EH context. 3763 * 3764 * RETURNS: 3765 * 0 if @link is ready before @deadline; otherwise, -errno. 3766 */ 3767 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, 3768 int (*check_ready)(struct ata_link *link)) 3769 { 3770 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); 3771 3772 return ata_wait_ready(link, deadline, check_ready); 3773 } 3774 3775 /** 3776 * sata_link_debounce - debounce SATA phy status 3777 * @link: ATA link to debounce SATA phy status for 3778 * @params: timing parameters { interval, duration, timeout } in msec 3779 * @deadline: deadline jiffies for the operation 3780 * 3781 * Make sure SStatus of @link reaches stable state, determined by 3782 * holding the same value where DET is not 1 for @duration polled 3783 * every @interval, before @timeout. Timeout constrains the 3784 * beginning of the stable state. Because DET gets stuck at 1 on 3785 * some controllers after hot unplugging, this function waits 3786 * until timeout then returns 0 if DET is stable at 1. 3787 * 3788 * @timeout is further limited by @deadline. The sooner of the 3789 * two is used. 3790 * 3791 * LOCKING: 3792 * Kernel thread context (may sleep) 3793 * 3794 * RETURNS: 3795 * 0 on success, -errno on failure.
3796 */ 3797 int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3798 unsigned long deadline) 3799 { 3800 unsigned long interval = params[0]; 3801 unsigned long duration = params[1]; 3802 unsigned long last_jiffies, t; 3803 u32 last, cur; 3804 int rc; 3805 3806 t = ata_deadline(jiffies, params[2]); 3807 if (time_before(t, deadline)) 3808 deadline = t; 3809 3810 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3811 return rc; 3812 cur &= 0xf; 3813 3814 last = cur; 3815 last_jiffies = jiffies; 3816 3817 while (1) { 3818 ata_msleep(link->ap, interval); 3819 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3820 return rc; 3821 cur &= 0xf; 3822 3823 /* DET stable? */ 3824 if (cur == last) { 3825 if (cur == 1 && time_before(jiffies, deadline)) 3826 continue; 3827 if (time_after(jiffies, 3828 ata_deadline(last_jiffies, duration))) 3829 return 0; 3830 continue; 3831 } 3832 3833 /* unstable, start over */ 3834 last = cur; 3835 last_jiffies = jiffies; 3836 3837 /* Check deadline. If debouncing failed, return 3838 * -EPIPE to tell upper layer to lower link speed. 3839 */ 3840 if (time_after(jiffies, deadline)) 3841 return -EPIPE; 3842 } 3843 } 3844 3845 /** 3846 * sata_link_resume - resume SATA link 3847 * @link: ATA link to resume SATA 3848 * @params: timing parameters { interval, duration, timeout } in msec 3849 * @deadline: deadline jiffies for the operation 3850 * 3851 * Resume SATA phy @link and debounce it. 3852 * 3853 * LOCKING: 3854 * Kernel thread context (may sleep) 3855 * 3856 * RETURNS: 3857 * 0 on success, -errno on failure. 3858 */ 3859 int sata_link_resume(struct ata_link *link, const unsigned long *params, 3860 unsigned long deadline) 3861 { 3862 int tries = ATA_LINK_RESUME_TRIES; 3863 u32 scontrol, serror; 3864 int rc; 3865 3866 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3867 return rc; 3868 3869 /* 3870 * Writes to SControl sometimes get ignored under certain 3871 * controllers (ata_piix SIDPR). Make sure DET actually is 3872 * cleared. 3873 */ 3874 do { 3875 scontrol = (scontrol & 0x0f0) | 0x300; 3876 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3877 return rc; 3878 /* 3879 * Some PHYs react badly if SStatus is pounded 3880 * immediately after resuming. Delay 200ms before 3881 * debouncing. 3882 */ 3883 if (!(link->flags & ATA_LFLAG_NO_DB_DELAY)) 3884 ata_msleep(link->ap, 200); 3885 3886 /* is SControl restored correctly? */ 3887 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3888 return rc; 3889 } while ((scontrol & 0xf0f) != 0x300 && --tries); 3890 3891 if ((scontrol & 0xf0f) != 0x300) { 3892 ata_link_warn(link, "failed to resume link (SControl %X)\n", 3893 scontrol); 3894 return 0; 3895 } 3896 3897 if (tries < ATA_LINK_RESUME_TRIES) 3898 ata_link_warn(link, "link resume succeeded after %d retries\n", 3899 ATA_LINK_RESUME_TRIES - tries); 3900 3901 if ((rc = sata_link_debounce(link, params, deadline))) 3902 return rc; 3903 3904 /* clear SError, some PHYs require this even for SRST to work */ 3905 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) 3906 rc = sata_scr_write(link, SCR_ERROR, serror); 3907 3908 return rc != -EINVAL ? rc : 0; 3909 } 3910 3911 /** 3912 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields 3913 * @link: ATA link to manipulate SControl for 3914 * @policy: LPM policy to configure 3915 * @spm_wakeup: initiate LPM transition to active state 3916 * 3917 * Manipulate the IPM field of the SControl register of @link 3918 * according to @policy. 
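 * (In SControl, the IPM field occupies bits 11:8 and the SPM field
 * bits 15:12; the switch statement below simply sets or clears bits
 * within those two fields.)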
If @policy is ATA_LPM_MAX_POWER and 3919 * @spm_wakeup is %true, the SPM field is manipulated to wake up 3920 * the link. This function also clears PHYRDY_CHG before 3921 * returning. 3922 * 3923 * LOCKING: 3924 * EH context. 3925 * 3926 * RETURNS: 3927 * 0 on success, -errno otherwise. 3928 */ 3929 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, 3930 bool spm_wakeup) 3931 { 3932 struct ata_eh_context *ehc = &link->eh_context; 3933 bool woken_up = false; 3934 u32 scontrol; 3935 int rc; 3936 3937 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 3938 if (rc) 3939 return rc; 3940 3941 switch (policy) { 3942 case ATA_LPM_MAX_POWER: 3943 /* disable all LPM transitions */ 3944 scontrol |= (0x7 << 8); 3945 /* initiate transition to active state */ 3946 if (spm_wakeup) { 3947 scontrol |= (0x4 << 12); 3948 woken_up = true; 3949 } 3950 break; 3951 case ATA_LPM_MED_POWER: 3952 /* allow LPM to PARTIAL */ 3953 scontrol &= ~(0x1 << 8); 3954 scontrol |= (0x6 << 8); 3955 break; 3956 case ATA_LPM_MED_POWER_WITH_DIPM: 3957 case ATA_LPM_MIN_POWER_WITH_PARTIAL: 3958 case ATA_LPM_MIN_POWER: 3959 if (ata_link_nr_enabled(link) > 0) 3960 /* no restrictions on LPM transitions */ 3961 scontrol &= ~(0x7 << 8); 3962 else { 3963 /* empty port, power off */ 3964 scontrol &= ~0xf; 3965 scontrol |= (0x1 << 2); 3966 } 3967 break; 3968 default: 3969 WARN_ON(1); 3970 } 3971 3972 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 3973 if (rc) 3974 return rc; 3975 3976 /* give the link time to transit out of LPM state */ 3977 if (woken_up) 3978 msleep(10); 3979 3980 /* clear PHYRDY_CHG from SError */ 3981 ehc->i.serror &= ~SERR_PHYRDY_CHG; 3982 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); 3983 } 3984 3985 /** 3986 * ata_std_prereset - prepare for reset 3987 * @link: ATA link to be reset 3988 * @deadline: deadline jiffies for the operation 3989 * 3990 * @link is about to be reset. Initialize it. Failure from 3991 * prereset makes libata abort whole reset sequence and give up 3992 * that port, so prereset should be best-effort. It does its 3993 * best to prepare for reset sequence but if things go wrong, it 3994 * should just whine, not fail. 3995 * 3996 * LOCKING: 3997 * Kernel thread context (may sleep) 3998 * 3999 * RETURNS: 4000 * 0 on success, -errno otherwise. 
4001 */ 4002 int ata_std_prereset(struct ata_link *link, unsigned long deadline) 4003 { 4004 struct ata_port *ap = link->ap; 4005 struct ata_eh_context *ehc = &link->eh_context; 4006 const unsigned long *timing = sata_ehc_deb_timing(ehc); 4007 int rc; 4008 4009 /* if we're about to do hardreset, nothing more to do */ 4010 if (ehc->i.action & ATA_EH_HARDRESET) 4011 return 0; 4012 4013 /* if SATA, resume link */ 4014 if (ap->flags & ATA_FLAG_SATA) { 4015 rc = sata_link_resume(link, timing, deadline); 4016 /* whine about phy resume failure but proceed */ 4017 if (rc && rc != -EOPNOTSUPP) 4018 ata_link_warn(link, 4019 "failed to resume link for reset (errno=%d)\n", 4020 rc); 4021 } 4022 4023 /* no point in trying softreset on offline link */ 4024 if (ata_phys_link_offline(link)) 4025 ehc->i.action &= ~ATA_EH_SOFTRESET; 4026 4027 return 0; 4028 } 4029 4030 /** 4031 * sata_link_hardreset - reset link via SATA phy reset 4032 * @link: link to reset 4033 * @timing: timing parameters { interval, duration, timeout } in msec 4034 * @deadline: deadline jiffies for the operation 4035 * @online: optional out parameter indicating link onlineness 4036 * @check_ready: optional callback to check link readiness 4037 * 4038 * SATA phy-reset @link using DET bits of SControl register. 4039 * After hardreset, link readiness is waited upon using 4040 * ata_wait_ready() if @check_ready is specified. LLDs are 4041 * allowed to not specify @check_ready and wait itself after this 4042 * function returns. Device classification is LLD's 4043 * responsibility. 4044 * 4045 * *@online is set to one iff reset succeeded and @link is online 4046 * after reset. 4047 * 4048 * LOCKING: 4049 * Kernel thread context (may sleep) 4050 * 4051 * RETURNS: 4052 * 0 on success, -errno otherwise. 4053 */ 4054 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 4055 unsigned long deadline, 4056 bool *online, int (*check_ready)(struct ata_link *)) 4057 { 4058 u32 scontrol; 4059 int rc; 4060 4061 DPRINTK("ENTER\n"); 4062 4063 if (online) 4064 *online = false; 4065 4066 if (sata_set_spd_needed(link)) { 4067 /* SATA spec says nothing about how to reconfigure 4068 * spd. To be on the safe side, turn off phy during 4069 * reconfiguration. This works for at least ICH7 AHCI 4070 * and Sil3124. 4071 */ 4072 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 4073 goto out; 4074 4075 scontrol = (scontrol & 0x0f0) | 0x304; 4076 4077 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 4078 goto out; 4079 4080 sata_set_spd(link); 4081 } 4082 4083 /* issue phy wake/reset */ 4084 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 4085 goto out; 4086 4087 scontrol = (scontrol & 0x0f0) | 0x301; 4088 4089 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 4090 goto out; 4091 4092 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 4093 * 10.4.2 says at least 1 ms. 4094 */ 4095 ata_msleep(link->ap, 1); 4096 4097 /* bring link back */ 4098 rc = sata_link_resume(link, timing, deadline); 4099 if (rc) 4100 goto out; 4101 /* if link is offline nothing more to do */ 4102 if (ata_phys_link_offline(link)) 4103 goto out; 4104 4105 /* Link is online. From this point, -ENODEV too is an error. */ 4106 if (online) 4107 *online = true; 4108 4109 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { 4110 /* If PMP is supported, we have to do follow-up SRST. 4111 * Some PMPs don't send D2H Reg FIS after hardreset if 4112 * the first port is empty. Wait only for 4113 * ATA_TMOUT_PMP_SRST_WAIT. 
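 * (The -EAGAIN below tells the caller that a follow-up softreset is
 * still needed rather than indicating a real failure.)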
4114 */ 4115 if (check_ready) { 4116 unsigned long pmp_deadline; 4117 4118 pmp_deadline = ata_deadline(jiffies, 4119 ATA_TMOUT_PMP_SRST_WAIT); 4120 if (time_after(pmp_deadline, deadline)) 4121 pmp_deadline = deadline; 4122 ata_wait_ready(link, pmp_deadline, check_ready); 4123 } 4124 rc = -EAGAIN; 4125 goto out; 4126 } 4127 4128 rc = 0; 4129 if (check_ready) 4130 rc = ata_wait_ready(link, deadline, check_ready); 4131 out: 4132 if (rc && rc != -EAGAIN) { 4133 /* online is set iff link is online && reset succeeded */ 4134 if (online) 4135 *online = false; 4136 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); 4137 } 4138 DPRINTK("EXIT, rc=%d\n", rc); 4139 return rc; 4140 } 4141 4142 /** 4143 * sata_std_hardreset - COMRESET w/o waiting or classification 4144 * @link: link to reset 4145 * @class: resulting class of attached device 4146 * @deadline: deadline jiffies for the operation 4147 * 4148 * Standard SATA COMRESET w/o waiting or classification. 4149 * 4150 * LOCKING: 4151 * Kernel thread context (may sleep) 4152 * 4153 * RETURNS: 4154 * 0 if link offline, -EAGAIN if link online, -errno on errors. 4155 */ 4156 int sata_std_hardreset(struct ata_link *link, unsigned int *class, 4157 unsigned long deadline) 4158 { 4159 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 4160 bool online; 4161 int rc; 4162 4163 /* do hardreset */ 4164 rc = sata_link_hardreset(link, timing, deadline, &online, NULL); 4165 return online ? -EAGAIN : rc; 4166 } 4167 4168 /** 4169 * ata_std_postreset - standard postreset callback 4170 * @link: the target ata_link 4171 * @classes: classes of attached devices 4172 * 4173 * This function is invoked after a successful reset. Note that 4174 * the device might have been reset more than once using 4175 * different reset methods before postreset is invoked. 4176 * 4177 * LOCKING: 4178 * Kernel thread context (may sleep) 4179 */ 4180 void ata_std_postreset(struct ata_link *link, unsigned int *classes) 4181 { 4182 u32 serror; 4183 4184 DPRINTK("ENTER\n"); 4185 4186 /* reset complete, clear SError */ 4187 if (!sata_scr_read(link, SCR_ERROR, &serror)) 4188 sata_scr_write(link, SCR_ERROR, serror); 4189 4190 /* print link status */ 4191 sata_print_link_status(link); 4192 4193 DPRINTK("EXIT\n"); 4194 } 4195 4196 /** 4197 * ata_dev_same_device - Determine whether new ID matches configured device 4198 * @dev: device to compare against 4199 * @new_class: class of the new device 4200 * @new_id: IDENTIFY page of the new device 4201 * 4202 * Compare @new_class and @new_id against @dev and determine 4203 * whether @dev is the device indicated by @new_class and 4204 * @new_id. 4205 * 4206 * LOCKING: 4207 * None. 4208 * 4209 * RETURNS: 4210 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 
4211 */ 4212 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 4213 const u16 *new_id) 4214 { 4215 const u16 *old_id = dev->id; 4216 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 4217 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 4218 4219 if (dev->class != new_class) { 4220 ata_dev_info(dev, "class mismatch %d != %d\n", 4221 dev->class, new_class); 4222 return 0; 4223 } 4224 4225 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 4226 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 4227 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 4228 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 4229 4230 if (strcmp(model[0], model[1])) { 4231 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n", 4232 model[0], model[1]); 4233 return 0; 4234 } 4235 4236 if (strcmp(serial[0], serial[1])) { 4237 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n", 4238 serial[0], serial[1]); 4239 return 0; 4240 } 4241 4242 return 1; 4243 } 4244 4245 /** 4246 * ata_dev_reread_id - Re-read IDENTIFY data 4247 * @dev: target ATA device 4248 * @readid_flags: read ID flags 4249 * 4250 * Re-read IDENTIFY page and make sure @dev is still attached to 4251 * the port. 4252 * 4253 * LOCKING: 4254 * Kernel thread context (may sleep) 4255 * 4256 * RETURNS: 4257 * 0 on success, negative errno otherwise 4258 */ 4259 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 4260 { 4261 unsigned int class = dev->class; 4262 u16 *id = (void *)dev->link->ap->sector_buf; 4263 int rc; 4264 4265 /* read ID data */ 4266 rc = ata_dev_read_id(dev, &class, readid_flags, id); 4267 if (rc) 4268 return rc; 4269 4270 /* is the device still there? */ 4271 if (!ata_dev_same_device(dev, class, id)) 4272 return -ENODEV; 4273 4274 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 4275 return 0; 4276 } 4277 4278 /** 4279 * ata_dev_revalidate - Revalidate ATA device 4280 * @dev: device to revalidate 4281 * @new_class: new class code 4282 * @readid_flags: read ID flags 4283 * 4284 * Re-read IDENTIFY page, make sure @dev is still attached to the 4285 * port and reconfigure it according to the new IDENTIFY page. 
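 *
 * Revalidation is normally requested by setting ATA_EH_REVALIDATE in
 * the EH action mask, e.g. after SET FEATURES, SET MULTI or INIT DEV
 * PARAMS complete (see ata_qc_complete()).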
4286 * 4287 * LOCKING: 4288 * Kernel thread context (may sleep) 4289 * 4290 * RETURNS: 4291 * 0 on success, negative errno otherwise 4292 */ 4293 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 4294 unsigned int readid_flags) 4295 { 4296 u64 n_sectors = dev->n_sectors; 4297 u64 n_native_sectors = dev->n_native_sectors; 4298 int rc; 4299 4300 if (!ata_dev_enabled(dev)) 4301 return -ENODEV; 4302 4303 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 4304 if (ata_class_enabled(new_class) && 4305 new_class != ATA_DEV_ATA && 4306 new_class != ATA_DEV_ATAPI && 4307 new_class != ATA_DEV_ZAC && 4308 new_class != ATA_DEV_SEMB) { 4309 ata_dev_info(dev, "class mismatch %u != %u\n", 4310 dev->class, new_class); 4311 rc = -ENODEV; 4312 goto fail; 4313 } 4314 4315 /* re-read ID */ 4316 rc = ata_dev_reread_id(dev, readid_flags); 4317 if (rc) 4318 goto fail; 4319 4320 /* configure device according to the new ID */ 4321 rc = ata_dev_configure(dev); 4322 if (rc) 4323 goto fail; 4324 4325 /* verify n_sectors hasn't changed */ 4326 if (dev->class != ATA_DEV_ATA || !n_sectors || 4327 dev->n_sectors == n_sectors) 4328 return 0; 4329 4330 /* n_sectors has changed */ 4331 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n", 4332 (unsigned long long)n_sectors, 4333 (unsigned long long)dev->n_sectors); 4334 4335 /* 4336 * Something could have caused HPA to be unlocked 4337 * involuntarily. If n_native_sectors hasn't changed and the 4338 * new size matches it, keep the device. 4339 */ 4340 if (dev->n_native_sectors == n_native_sectors && 4341 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { 4342 ata_dev_warn(dev, 4343 "new n_sectors matches native, probably " 4344 "late HPA unlock, n_sectors updated\n"); 4345 /* use the larger n_sectors */ 4346 return 0; 4347 } 4348 4349 /* 4350 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try 4351 * unlocking HPA in those cases. 
4352 * 4353 * https://bugzilla.kernel.org/show_bug.cgi?id=15396 4354 */ 4355 if (dev->n_native_sectors == n_native_sectors && 4356 dev->n_sectors < n_sectors && n_sectors == n_native_sectors && 4357 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { 4358 ata_dev_warn(dev, 4359 "old n_sectors matches native, probably " 4360 "late HPA lock, will try to unlock HPA\n"); 4361 /* try unlocking HPA */ 4362 dev->flags |= ATA_DFLAG_UNLOCK_HPA; 4363 rc = -EIO; 4364 } else 4365 rc = -ENODEV; 4366 4367 /* restore original n_[native_]sectors and fail */ 4368 dev->n_native_sectors = n_native_sectors; 4369 dev->n_sectors = n_sectors; 4370 fail: 4371 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc); 4372 return rc; 4373 } 4374 4375 struct ata_blacklist_entry { 4376 const char *model_num; 4377 const char *model_rev; 4378 unsigned long horkage; 4379 }; 4380 4381 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4382 /* Devices with DMA related problems under Linux */ 4383 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4384 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4385 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4386 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4387 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4388 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4389 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4390 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4391 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4392 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA }, 4393 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4394 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4395 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4396 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4397 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4398 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA }, 4399 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4400 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4401 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4402 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4403 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4404 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4405 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4406 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4407 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4408 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4409 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4410 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4411 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4412 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA }, 4413 /* Odd clown on sil3726/4726 PMPs */ 4414 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4415 4416 /* Weird ATAPI devices */ 4417 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4418 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4419 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4420 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4421 4422 /* 4423 * Causes silent data corruption with higher max sects. 4424 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com 4425 */ 4426 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, 4427 4428 /* 4429 * These devices time out with higher max sects. 
4430 * https://bugzilla.kernel.org/show_bug.cgi?id=121671 4431 */ 4432 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, 4433 { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 }, 4434 4435 /* Devices we expect to fail diagnostics */ 4436 4437 /* Devices where NCQ should be avoided */ 4438 /* NCQ is slow */ 4439 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4440 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4441 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4442 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4443 /* NCQ is broken */ 4444 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4445 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4446 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4447 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4448 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4449 4450 /* Seagate NCQ + FLUSH CACHE firmware bug */ 4451 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4452 ATA_HORKAGE_FIRMWARE_WARN }, 4453 4454 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4455 ATA_HORKAGE_FIRMWARE_WARN }, 4456 4457 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4458 ATA_HORKAGE_FIRMWARE_WARN }, 4459 4460 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4461 ATA_HORKAGE_FIRMWARE_WARN }, 4462 4463 /* drives which fail FPDMA_AA activation (some may freeze afterwards) 4464 the ST disks also have LPM issues */ 4465 { "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA | 4466 ATA_HORKAGE_NOLPM, }, 4467 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4468 4469 /* Blacklist entries taken from Silicon Image 3124/3132 4470 Windows driver .inf file - also several Linux problem reports */ 4471 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4472 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4473 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4474 4475 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ 4476 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, 4477 4478 /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on 4479 SD7SN6S256G and SD8SN8U256G */ 4480 { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, 4481 4482 /* devices which puke on READ_NATIVE_MAX */ 4483 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4484 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4485 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4486 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4487 4488 /* this one allows HPA unlocking but fails IOs on the area */ 4489 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, 4490 4491 /* Devices which report 1 sector over size HPA */ 4492 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4493 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4494 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4495 4496 /* Devices which get the IVB wrong */ 4497 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4498 /* Maybe we should just blacklist TSSTcorp... */ 4499 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, }, 4500 4501 /* Devices that do not need bridging limits applied */ 4502 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4503 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4504 4505 /* Devices which aren't very happy with higher link speeds */ 4506 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4507 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, }, 4508 4509 /* 4510 * Devices which choke on SETXFER. Applies only if both the 4511 * device and controller are SATA. 
4512 */ 4513 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, 4514 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER }, 4515 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER }, 4516 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4517 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4518 4519 /* Crucial BX100 SSD 500GB has broken LPM support */ 4520 { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM }, 4521 4522 /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */ 4523 { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4524 ATA_HORKAGE_ZERO_AFTER_TRIM | 4525 ATA_HORKAGE_NOLPM, }, 4526 /* 512GB MX100 with newer firmware has only LPM issues */ 4527 { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM | 4528 ATA_HORKAGE_NOLPM, }, 4529 4530 /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */ 4531 { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4532 ATA_HORKAGE_ZERO_AFTER_TRIM | 4533 ATA_HORKAGE_NOLPM, }, 4534 { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4535 ATA_HORKAGE_ZERO_AFTER_TRIM | 4536 ATA_HORKAGE_NOLPM, }, 4537 4538 /* These specific Samsung models/firmware-revs do not handle LPM well */ 4539 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, 4540 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, 4541 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, }, 4542 { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, }, 4543 4544 /* devices that don't properly handle queued TRIM commands */ 4545 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4546 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4547 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4548 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4549 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4550 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4551 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4552 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4553 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4554 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4555 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4556 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4557 { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4558 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4559 { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4560 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4561 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4562 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4563 4564 /* devices that don't properly handle TRIM commands */ 4565 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, 4566 4567 /* 4568 * As defined, the DRAT (Deterministic Read After Trim) and RZAT 4569 * (Return Zero After Trim) flags in the ATA Command Set are 4570 * unreliable in the sense that they only define what happens if 4571 * the device successfully executed the DSM TRIM command. TRIM 4572 * is only advisory, however, and the device is free to silently 4573 * ignore all or parts of the request. 4574 * 4575 * Whitelist drives that are known to reliably return zeroes 4576 * after TRIM. 4577 */ 4578 4579 /* 4580 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude 4581 * that model before whitelisting all other intel SSDs. 
4582 */ 4583 { "INTEL*SSDSC2MH*", NULL, 0, }, 4584 4585 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4586 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4587 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4588 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4589 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4590 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4591 { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4592 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4593 4594 /* 4595 * Some WD SATA-I drives spin up and down erratically when the link 4596 * is put into the slumber mode. We don't have full list of the 4597 * affected devices. Disable LPM if the device matches one of the 4598 * known prefixes and is SATA-1. As a side effect LPM partial is 4599 * lost too. 4600 * 4601 * https://bugzilla.kernel.org/show_bug.cgi?id=57211 4602 */ 4603 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4604 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4605 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4606 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4607 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4608 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4609 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4610 4611 /* End Marker */ 4612 { } 4613 }; 4614 4615 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4616 { 4617 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4618 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4619 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4620 4621 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4622 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4623 4624 while (ad->model_num) { 4625 if (glob_match(ad->model_num, model_num)) { 4626 if (ad->model_rev == NULL) 4627 return ad->horkage; 4628 if (glob_match(ad->model_rev, model_rev)) 4629 return ad->horkage; 4630 } 4631 ad++; 4632 } 4633 return 0; 4634 } 4635 4636 static int ata_dma_blacklisted(const struct ata_device *dev) 4637 { 4638 /* We don't support polling DMA. 4639 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4640 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4641 */ 4642 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4643 (dev->flags & ATA_DFLAG_CDB_INTR)) 4644 return 1; 4645 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4646 } 4647 4648 /** 4649 * ata_is_40wire - check drive side detection 4650 * @dev: device 4651 * 4652 * Perform drive side detection decoding, allowing for device vendors 4653 * who can't follow the documentation. 4654 */ 4655 4656 static int ata_is_40wire(struct ata_device *dev) 4657 { 4658 if (dev->horkage & ATA_HORKAGE_IVB) 4659 return ata_drive_40wire_relaxed(dev->id); 4660 return ata_drive_40wire(dev->id); 4661 } 4662 4663 /** 4664 * cable_is_40wire - 40/80/SATA decider 4665 * @ap: port to consider 4666 * 4667 * This function encapsulates the policy for speed management 4668 * in one place. At the moment we don't cache the result but 4669 * there is a good case for setting ap->cbl to the result when 4670 * we are called with unknown cables (and figuring out if it 4671 * impacts hotplug at all). 4672 * 4673 * Return 1 if the cable appears to be 40 wire. 4674 */ 4675 4676 static int cable_is_40wire(struct ata_port *ap) 4677 { 4678 struct ata_link *link; 4679 struct ata_device *dev; 4680 4681 /* If the controller thinks we are 40 wire, we are. 
*/ 4682 if (ap->cbl == ATA_CBL_PATA40) 4683 return 1; 4684 4685 /* If the controller thinks we are 80 wire, we are. */ 4686 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4687 return 0; 4688 4689 /* If the system is known to be 40 wire short cable (eg 4690 * laptop), then we allow 80 wire modes even if the drive 4691 * isn't sure. 4692 */ 4693 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4694 return 0; 4695 4696 /* If the controller doesn't know, we scan. 4697 * 4698 * Note: We look for all 40 wire detects at this point. Any 4699 * 80 wire detect is taken to be 80 wire cable because 4700 * - in many setups only the one drive (slave if present) will 4701 * give a valid detect 4702 * - if you have a non detect capable drive you don't want it 4703 * to colour the choice 4704 */ 4705 ata_for_each_link(link, ap, EDGE) { 4706 ata_for_each_dev(dev, link, ENABLED) { 4707 if (!ata_is_40wire(dev)) 4708 return 0; 4709 } 4710 } 4711 return 1; 4712 } 4713 4714 /** 4715 * ata_dev_xfermask - Compute supported xfermask of the given device 4716 * @dev: Device to compute xfermask for 4717 * 4718 * Compute supported xfermask of @dev and store it in 4719 * dev->*_mask. This function is responsible for applying all 4720 * known limits including host controller limits, device 4721 * blacklist, etc... 4722 * 4723 * LOCKING: 4724 * None. 4725 */ 4726 static void ata_dev_xfermask(struct ata_device *dev) 4727 { 4728 struct ata_link *link = dev->link; 4729 struct ata_port *ap = link->ap; 4730 struct ata_host *host = ap->host; 4731 unsigned long xfer_mask; 4732 4733 /* controller modes available */ 4734 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4735 ap->mwdma_mask, ap->udma_mask); 4736 4737 /* drive modes available */ 4738 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4739 dev->mwdma_mask, dev->udma_mask); 4740 xfer_mask &= ata_id_xfermask(dev->id); 4741 4742 /* 4743 * CFA Advanced TrueIDE timings are not allowed on a shared 4744 * cable 4745 */ 4746 if (ata_dev_pair(dev)) { 4747 /* No PIO5 or PIO6 */ 4748 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4749 /* No MWDMA3 or MWDMA 4 */ 4750 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4751 } 4752 4753 if (ata_dma_blacklisted(dev)) { 4754 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4755 ata_dev_warn(dev, 4756 "device is on DMA blacklist, disabling DMA\n"); 4757 } 4758 4759 if ((host->flags & ATA_HOST_SIMPLEX) && 4760 host->simplex_claimed && host->simplex_claimed != ap) { 4761 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4762 ata_dev_warn(dev, 4763 "simplex DMA is claimed by other device, disabling DMA\n"); 4764 } 4765 4766 if (ap->flags & ATA_FLAG_NO_IORDY) 4767 xfer_mask &= ata_pio_mask_no_iordy(dev); 4768 4769 if (ap->ops->mode_filter) 4770 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4771 4772 /* Apply cable rule here. Don't apply it early because when 4773 * we handle hot plug the cable type can itself change. 4774 * Check this last so that we know if the transfer rate was 4775 * solely limited by the cable. 4776 * Unknown or 80 wire cables reported host side are checked 4777 * drive side as well. Cases where we know a 40wire cable 4778 * is used safely for 80 are not checked here. 
4779 */ 4780 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4781 /* UDMA/44 or higher would be available */ 4782 if (cable_is_40wire(ap)) { 4783 ata_dev_warn(dev, 4784 "limited to UDMA/33 due to 40-wire cable\n"); 4785 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4786 } 4787 4788 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4789 &dev->mwdma_mask, &dev->udma_mask); 4790 } 4791 4792 /** 4793 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4794 * @dev: Device to which command will be sent 4795 * 4796 * Issue SET FEATURES - XFER MODE command to device @dev 4797 * on the port it is attached to. 4798 * 4799 * LOCKING: 4800 * PCI/etc. bus probe sem. 4801 * 4802 * RETURNS: 4803 * 0 on success, AC_ERR_* mask otherwise. 4804 */ 4805 4806 static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4807 { 4808 struct ata_taskfile tf; 4809 unsigned int err_mask; 4810 4811 /* set up set-features taskfile */ 4812 DPRINTK("set features - xfer mode\n"); 4813 4814 /* Some controllers and ATAPI devices show flaky interrupt 4815 * behavior after setting xfer mode. Use polling instead. 4816 */ 4817 ata_tf_init(dev, &tf); 4818 tf.command = ATA_CMD_SET_FEATURES; 4819 tf.feature = SETFEATURES_XFER; 4820 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4821 tf.protocol = ATA_PROT_NODATA; 4822 /* If we are using IORDY we must send the mode setting command */ 4823 if (ata_pio_need_iordy(dev)) 4824 tf.nsect = dev->xfer_mode; 4825 /* If the device has IORDY and the controller does not - turn it off */ 4826 else if (ata_id_has_iordy(dev->id)) 4827 tf.nsect = 0x01; 4828 else /* In the ancient relic department - skip all of this */ 4829 return 0; 4830 4831 /* On some disks, this command causes spin-up, so we need longer timeout */ 4832 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000); 4833 4834 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4835 return err_mask; 4836 } 4837 4838 /** 4839 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 4840 * @dev: Device to which command will be sent 4841 * @enable: Whether to enable or disable the feature 4842 * @feature: The feature to set, passed in the sector count field 4843 * 4844 * Issue SET FEATURES - SATA FEATURES command to device @dev 4845 * with the sector count field set to @feature. 4846 * 4847 * LOCKING: 4848 * PCI/etc. bus probe sem. 4849 * 4850 * RETURNS: 4851 * 0 on success, AC_ERR_* mask otherwise. 4852 */ 4853 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) 4854 { 4855 struct ata_taskfile tf; 4856 unsigned int err_mask; 4857 unsigned long timeout = 0; 4858 4859 /* set up set-features taskfile */ 4860 DPRINTK("set features - SATA features\n"); 4861 4862 ata_tf_init(dev, &tf); 4863 tf.command = ATA_CMD_SET_FEATURES; 4864 tf.feature = enable; 4865 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4866 tf.protocol = ATA_PROT_NODATA; 4867 tf.nsect = feature; 4868 4869 if (enable == SETFEATURES_SPINUP) 4870 timeout = ata_probe_timeout ?
4871 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT; 4872 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout); 4873 4874 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4875 return err_mask; 4876 } 4877 EXPORT_SYMBOL_GPL(ata_dev_set_feature); 4878 4879 /** 4880 * ata_dev_init_params - Issue INIT DEV PARAMS command 4881 * @dev: Device to which command will be sent 4882 * @heads: Number of heads (taskfile parameter) 4883 * @sectors: Number of sectors (taskfile parameter) 4884 * 4885 * LOCKING: 4886 * Kernel thread context (may sleep) 4887 * 4888 * RETURNS: 4889 * 0 on success, AC_ERR_* mask otherwise. 4890 */ 4891 static unsigned int ata_dev_init_params(struct ata_device *dev, 4892 u16 heads, u16 sectors) 4893 { 4894 struct ata_taskfile tf; 4895 unsigned int err_mask; 4896 4897 /* Number of sectors per track 1-255. Number of heads 1-16 */ 4898 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4899 return AC_ERR_INVALID; 4900 4901 /* set up init dev params taskfile */ 4902 DPRINTK("init dev params \n"); 4903 4904 ata_tf_init(dev, &tf); 4905 tf.command = ATA_CMD_INIT_DEV_PARAMS; 4906 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4907 tf.protocol = ATA_PROT_NODATA; 4908 tf.nsect = sectors; 4909 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 4910 4911 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4912 /* A clean abort indicates an original or just out of spec drive 4913 and we should continue as we issue the setup based on the 4914 drive reported working geometry */ 4915 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4916 err_mask = 0; 4917 4918 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4919 return err_mask; 4920 } 4921 4922 /** 4923 * atapi_check_dma - Check whether ATAPI DMA can be supported 4924 * @qc: Metadata associated with taskfile to check 4925 * 4926 * Allow low-level driver to filter ATA PACKET commands, returning 4927 * a status indicating whether or not it is OK to use DMA for the 4928 * supplied PACKET command. 4929 * 4930 * LOCKING: 4931 * spin_lock_irqsave(host lock) 4932 * 4933 * RETURNS: 0 when ATAPI DMA can be used 4934 * nonzero otherwise 4935 */ 4936 int atapi_check_dma(struct ata_queued_cmd *qc) 4937 { 4938 struct ata_port *ap = qc->ap; 4939 4940 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4941 * few ATAPI devices choke on such DMA requests. 4942 */ 4943 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4944 unlikely(qc->nbytes & 15)) 4945 return 1; 4946 4947 if (ap->ops->check_atapi_dma) 4948 return ap->ops->check_atapi_dma(qc); 4949 4950 return 0; 4951 } 4952 4953 /** 4954 * ata_std_qc_defer - Check whether a qc needs to be deferred 4955 * @qc: ATA command in question 4956 * 4957 * Non-NCQ commands cannot run with any other command, NCQ or 4958 * not. As upper layer only knows the queue depth, we are 4959 * responsible for maintaining exclusion. This function checks 4960 * whether a new command @qc can be issued. 4961 * 4962 * LOCKING: 4963 * spin_lock_irqsave(host lock) 4964 * 4965 * RETURNS: 4966 * ATA_DEFER_* if deferring is needed, 0 otherwise. 
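 *
 * Drivers with additional queueing constraints typically apply their
 * own checks first and then fall back to this helper.  A rough sketch
 * (my_qc_defer and my_port_busy are hypothetical, not libata symbols):
 *
 *	static int my_qc_defer(struct ata_queued_cmd *qc)
 *	{
 *		if (my_port_busy(qc->ap))
 *			return ATA_DEFER_PORT;
 *
 *		return ata_std_qc_defer(qc);
 *	}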
4967 */ 4968 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4969 { 4970 struct ata_link *link = qc->dev->link; 4971 4972 if (ata_is_ncq(qc->tf.protocol)) { 4973 if (!ata_tag_valid(link->active_tag)) 4974 return 0; 4975 } else { 4976 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4977 return 0; 4978 } 4979 4980 return ATA_DEFER_LINK; 4981 } 4982 4983 enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc) 4984 { 4985 return AC_ERR_OK; 4986 } 4987 4988 /** 4989 * ata_sg_init - Associate command with scatter-gather table. 4990 * @qc: Command to be associated 4991 * @sg: Scatter-gather table. 4992 * @n_elem: Number of elements in s/g table. 4993 * 4994 * Initialize the data-related elements of queued_cmd @qc 4995 * to point to a scatter-gather table @sg, containing @n_elem 4996 * elements. 4997 * 4998 * LOCKING: 4999 * spin_lock_irqsave(host lock) 5000 */ 5001 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 5002 unsigned int n_elem) 5003 { 5004 qc->sg = sg; 5005 qc->n_elem = n_elem; 5006 qc->cursg = qc->sg; 5007 } 5008 5009 #ifdef CONFIG_HAS_DMA 5010 5011 /** 5012 * ata_sg_clean - Unmap DMA memory associated with command 5013 * @qc: Command containing DMA memory to be released 5014 * 5015 * Unmap all mapped DMA memory associated with this command. 5016 * 5017 * LOCKING: 5018 * spin_lock_irqsave(host lock) 5019 */ 5020 static void ata_sg_clean(struct ata_queued_cmd *qc) 5021 { 5022 struct ata_port *ap = qc->ap; 5023 struct scatterlist *sg = qc->sg; 5024 int dir = qc->dma_dir; 5025 5026 WARN_ON_ONCE(sg == NULL); 5027 5028 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 5029 5030 if (qc->n_elem) 5031 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 5032 5033 qc->flags &= ~ATA_QCFLAG_DMAMAP; 5034 qc->sg = NULL; 5035 } 5036 5037 /** 5038 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 5039 * @qc: Command with scatter-gather table to be mapped. 5040 * 5041 * DMA-map the scatter-gather table associated with queued_cmd @qc. 5042 * 5043 * LOCKING: 5044 * spin_lock_irqsave(host lock) 5045 * 5046 * RETURNS: 5047 * Zero on success, negative on error. 5048 * 5049 */ 5050 static int ata_sg_setup(struct ata_queued_cmd *qc) 5051 { 5052 struct ata_port *ap = qc->ap; 5053 unsigned int n_elem; 5054 5055 VPRINTK("ENTER, ata%u\n", ap->print_id); 5056 5057 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 5058 if (n_elem < 1) 5059 return -1; 5060 5061 VPRINTK("%d sg elements mapped\n", n_elem); 5062 qc->orig_n_elem = qc->n_elem; 5063 qc->n_elem = n_elem; 5064 qc->flags |= ATA_QCFLAG_DMAMAP; 5065 5066 return 0; 5067 } 5068 5069 #else /* !CONFIG_HAS_DMA */ 5070 5071 static inline void ata_sg_clean(struct ata_queued_cmd *qc) {} 5072 static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; } 5073 5074 #endif /* !CONFIG_HAS_DMA */ 5075 5076 /** 5077 * swap_buf_le16 - swap halves of 16-bit words in place 5078 * @buf: Buffer to swap 5079 * @buf_words: Number of 16-bit words in buffer. 5080 * 5081 * Swap halves of 16-bit words if needed to convert from 5082 * little-endian byte order to native cpu byte order, or 5083 * vice-versa. 5084 * 5085 * LOCKING: 5086 * Inherited from caller. 
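 *
 * Typical use is fixing up raw IDENTIFY DEVICE data, which is
 * little-endian on the wire, e.g.:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);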
5087 */ 5088 void swap_buf_le16(u16 *buf, unsigned int buf_words) 5089 { 5090 #ifdef __BIG_ENDIAN 5091 unsigned int i; 5092 5093 for (i = 0; i < buf_words; i++) 5094 buf[i] = le16_to_cpu(buf[i]); 5095 #endif /* __BIG_ENDIAN */ 5096 } 5097 5098 /** 5099 * ata_qc_new_init - Request an available ATA command, and initialize it 5100 * @dev: Device from whom we request an available command structure 5101 * @tag: tag 5102 * 5103 * LOCKING: 5104 * None. 5105 */ 5106 5107 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag) 5108 { 5109 struct ata_port *ap = dev->link->ap; 5110 struct ata_queued_cmd *qc; 5111 5112 /* no command while frozen */ 5113 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 5114 return NULL; 5115 5116 /* libsas case */ 5117 if (ap->flags & ATA_FLAG_SAS_HOST) { 5118 tag = ata_sas_allocate_tag(ap); 5119 if (tag < 0) 5120 return NULL; 5121 } 5122 5123 qc = __ata_qc_from_tag(ap, tag); 5124 qc->tag = qc->hw_tag = tag; 5125 qc->scsicmd = NULL; 5126 qc->ap = ap; 5127 qc->dev = dev; 5128 5129 ata_qc_reinit(qc); 5130 5131 return qc; 5132 } 5133 5134 /** 5135 * ata_qc_free - free unused ata_queued_cmd 5136 * @qc: Command to complete 5137 * 5138 * Designed to free unused ata_queued_cmd object 5139 * in case something prevents using it. 5140 * 5141 * LOCKING: 5142 * spin_lock_irqsave(host lock) 5143 */ 5144 void ata_qc_free(struct ata_queued_cmd *qc) 5145 { 5146 struct ata_port *ap; 5147 unsigned int tag; 5148 5149 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 5150 ap = qc->ap; 5151 5152 qc->flags = 0; 5153 tag = qc->tag; 5154 if (ata_tag_valid(tag)) { 5155 qc->tag = ATA_TAG_POISON; 5156 if (ap->flags & ATA_FLAG_SAS_HOST) 5157 ata_sas_free_tag(tag, ap); 5158 } 5159 } 5160 5161 void __ata_qc_complete(struct ata_queued_cmd *qc) 5162 { 5163 struct ata_port *ap; 5164 struct ata_link *link; 5165 5166 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 5167 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 5168 ap = qc->ap; 5169 link = qc->dev->link; 5170 5171 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 5172 ata_sg_clean(qc); 5173 5174 /* command should be marked inactive atomically with qc completion */ 5175 if (ata_is_ncq(qc->tf.protocol)) { 5176 link->sactive &= ~(1 << qc->hw_tag); 5177 if (!link->sactive) 5178 ap->nr_active_links--; 5179 } else { 5180 link->active_tag = ATA_TAG_POISON; 5181 ap->nr_active_links--; 5182 } 5183 5184 /* clear exclusive status */ 5185 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 5186 ap->excl_link == link)) 5187 ap->excl_link = NULL; 5188 5189 /* atapi: mark qc as inactive to prevent the interrupt handler 5190 * from completing the command twice later, before the error handler 5191 * is called. 
(when rc != 0 and atapi request sense is needed) 5192 */ 5193 qc->flags &= ~ATA_QCFLAG_ACTIVE; 5194 ap->qc_active &= ~(1ULL << qc->tag); 5195 5196 /* call completion callback */ 5197 qc->complete_fn(qc); 5198 } 5199 5200 static void fill_result_tf(struct ata_queued_cmd *qc) 5201 { 5202 struct ata_port *ap = qc->ap; 5203 5204 qc->result_tf.flags = qc->tf.flags; 5205 ap->ops->qc_fill_rtf(qc); 5206 } 5207 5208 static void ata_verify_xfer(struct ata_queued_cmd *qc) 5209 { 5210 struct ata_device *dev = qc->dev; 5211 5212 if (!ata_is_data(qc->tf.protocol)) 5213 return; 5214 5215 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 5216 return; 5217 5218 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 5219 } 5220 5221 /** 5222 * ata_qc_complete - Complete an active ATA command 5223 * @qc: Command to complete 5224 * 5225 * Indicate to the mid and upper layers that an ATA command has 5226 * completed, with either an ok or not-ok status. 5227 * 5228 * Refrain from calling this function multiple times when 5229 * successfully completing multiple NCQ commands. 5230 * ata_qc_complete_multiple() should be used instead, which will 5231 * properly update IRQ expect state. 5232 * 5233 * LOCKING: 5234 * spin_lock_irqsave(host lock) 5235 */ 5236 void ata_qc_complete(struct ata_queued_cmd *qc) 5237 { 5238 struct ata_port *ap = qc->ap; 5239 5240 /* Trigger the LED (if available) */ 5241 ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE)); 5242 5243 /* XXX: New EH and old EH use different mechanisms to 5244 * synchronize EH with regular execution path. 5245 * 5246 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 5247 * Normal execution path is responsible for not accessing a 5248 * failed qc. libata core enforces the rule by returning NULL 5249 * from ata_qc_from_tag() for failed qcs. 5250 * 5251 * Old EH depends on ata_qc_complete() nullifying completion 5252 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 5253 * not synchronize with interrupt handler. Only PIO task is 5254 * taken care of. 5255 */ 5256 if (ap->ops->error_handler) { 5257 struct ata_device *dev = qc->dev; 5258 struct ata_eh_info *ehi = &dev->link->eh_info; 5259 5260 if (unlikely(qc->err_mask)) 5261 qc->flags |= ATA_QCFLAG_FAILED; 5262 5263 /* 5264 * Finish internal commands without any further processing 5265 * and always with the result TF filled. 5266 */ 5267 if (unlikely(ata_tag_internal(qc->tag))) { 5268 fill_result_tf(qc); 5269 trace_ata_qc_complete_internal(qc); 5270 __ata_qc_complete(qc); 5271 return; 5272 } 5273 5274 /* 5275 * Non-internal qc has failed. Fill the result TF and 5276 * summon EH. 5277 */ 5278 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 5279 fill_result_tf(qc); 5280 trace_ata_qc_complete_failed(qc); 5281 ata_qc_schedule_eh(qc); 5282 return; 5283 } 5284 5285 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 5286 5287 /* read result TF if requested */ 5288 if (qc->flags & ATA_QCFLAG_RESULT_TF) 5289 fill_result_tf(qc); 5290 5291 trace_ata_qc_complete_done(qc); 5292 /* Some commands need post-processing after successful 5293 * completion. 
5294 */ 5295 switch (qc->tf.command) { 5296 case ATA_CMD_SET_FEATURES: 5297 if (qc->tf.feature != SETFEATURES_WC_ON && 5298 qc->tf.feature != SETFEATURES_WC_OFF && 5299 qc->tf.feature != SETFEATURES_RA_ON && 5300 qc->tf.feature != SETFEATURES_RA_OFF) 5301 break; 5302 /* fall through */ 5303 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 5304 case ATA_CMD_SET_MULTI: /* multi_count changed */ 5305 /* revalidate device */ 5306 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 5307 ata_port_schedule_eh(ap); 5308 break; 5309 5310 case ATA_CMD_SLEEP: 5311 dev->flags |= ATA_DFLAG_SLEEPING; 5312 break; 5313 } 5314 5315 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) 5316 ata_verify_xfer(qc); 5317 5318 __ata_qc_complete(qc); 5319 } else { 5320 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 5321 return; 5322 5323 /* read result TF if failed or requested */ 5324 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 5325 fill_result_tf(qc); 5326 5327 __ata_qc_complete(qc); 5328 } 5329 } 5330 5331 /** 5332 * ata_qc_get_active - get bitmask of active qcs 5333 * @ap: port in question 5334 * 5335 * LOCKING: 5336 * spin_lock_irqsave(host lock) 5337 * 5338 * RETURNS: 5339 * Bitmask of active qcs 5340 */ 5341 u64 ata_qc_get_active(struct ata_port *ap) 5342 { 5343 u64 qc_active = ap->qc_active; 5344 5345 /* ATA_TAG_INTERNAL is sent to hw as tag 0 */ 5346 if (qc_active & (1ULL << ATA_TAG_INTERNAL)) { 5347 qc_active |= (1 << 0); 5348 qc_active &= ~(1ULL << ATA_TAG_INTERNAL); 5349 } 5350 5351 return qc_active; 5352 } 5353 EXPORT_SYMBOL_GPL(ata_qc_get_active); 5354 5355 /** 5356 * ata_qc_complete_multiple - Complete multiple qcs successfully 5357 * @ap: port in question 5358 * @qc_active: new qc_active mask 5359 * 5360 * Complete in-flight commands. This function is meant to be 5361 * called from low-level driver's interrupt routine to complete 5362 * requests normally. ap->qc_active and @qc_active are compared 5363 * and commands are completed accordingly. 5364 * 5365 * Always use this function when completing multiple NCQ commands 5366 * from IRQ handlers instead of calling ata_qc_complete() 5367 * multiple times to keep IRQ expect status properly in sync. 5368 * 5369 * LOCKING: 5370 * spin_lock_irqsave(host lock) 5371 * 5372 * RETURNS: 5373 * Number of completed commands on success, -errno otherwise. 5374 */ 5375 int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active) 5376 { 5377 u64 done_mask, ap_qc_active = ap->qc_active; 5378 int nr_done = 0; 5379 5380 /* 5381 * If the internal tag is set on ap->qc_active, then we care about 5382 * bit0 on the passed in qc_active mask. Move that bit up to match 5383 * the internal tag. 5384 */ 5385 if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) { 5386 qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL; 5387 qc_active ^= qc_active & 0x01; 5388 } 5389 5390 done_mask = ap_qc_active ^ qc_active; 5391 5392 if (unlikely(done_mask & qc_active)) { 5393 ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n", 5394 ap->qc_active, qc_active); 5395 return -EINVAL; 5396 } 5397 5398 while (done_mask) { 5399 struct ata_queued_cmd *qc; 5400 unsigned int tag = __ffs64(done_mask); 5401 5402 qc = ata_qc_from_tag(ap, tag); 5403 if (qc) { 5404 ata_qc_complete(qc); 5405 nr_done++; 5406 } 5407 done_mask &= ~(1ULL << tag); 5408 } 5409 5410 return nr_done; 5411 } 5412 5413 /** 5414 * ata_qc_issue - issue taskfile to device 5415 * @qc: command to issue to device 5416 * 5417 * Prepare an ATA command for submission to the device.
5418 * This includes mapping the data into a DMA-able 5419 * area, filling in the S/G table, and finally 5420 * writing the taskfile to hardware, starting the command. 5421 * 5422 * LOCKING: 5423 * spin_lock_irqsave(host lock) 5424 */ 5425 void ata_qc_issue(struct ata_queued_cmd *qc) 5426 { 5427 struct ata_port *ap = qc->ap; 5428 struct ata_link *link = qc->dev->link; 5429 u8 prot = qc->tf.protocol; 5430 5431 /* Make sure only one non-NCQ command is outstanding. The 5432 * check is skipped for old EH because it reuses active qc to 5433 * request ATAPI sense. 5434 */ 5435 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5436 5437 if (ata_is_ncq(prot)) { 5438 WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag)); 5439 5440 if (!link->sactive) 5441 ap->nr_active_links++; 5442 link->sactive |= 1 << qc->hw_tag; 5443 } else { 5444 WARN_ON_ONCE(link->sactive); 5445 5446 ap->nr_active_links++; 5447 link->active_tag = qc->tag; 5448 } 5449 5450 qc->flags |= ATA_QCFLAG_ACTIVE; 5451 ap->qc_active |= 1ULL << qc->tag; 5452 5453 /* 5454 * We guarantee to LLDs that they will have at least one 5455 * non-zero sg if the command is a data command. 5456 */ 5457 if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)) 5458 goto sys_err; 5459 5460 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5461 (ap->flags & ATA_FLAG_PIO_DMA))) 5462 if (ata_sg_setup(qc)) 5463 goto sys_err; 5464 5465 /* if device is sleeping, schedule reset and abort the link */ 5466 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5467 link->eh_info.action |= ATA_EH_RESET; 5468 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5469 ata_link_abort(link); 5470 return; 5471 } 5472 5473 qc->err_mask |= ap->ops->qc_prep(qc); 5474 if (unlikely(qc->err_mask)) 5475 goto err; 5476 trace_ata_qc_issue(qc); 5477 qc->err_mask |= ap->ops->qc_issue(qc); 5478 if (unlikely(qc->err_mask)) 5479 goto err; 5480 return; 5481 5482 sys_err: 5483 qc->err_mask |= AC_ERR_SYSTEM; 5484 err: 5485 ata_qc_complete(qc); 5486 } 5487 5488 /** 5489 * sata_scr_valid - test whether SCRs are accessible 5490 * @link: ATA link to test SCR accessibility for 5491 * 5492 * Test whether SCRs are accessible for @link. 5493 * 5494 * LOCKING: 5495 * None. 5496 * 5497 * RETURNS: 5498 * 1 if SCRs are accessible, 0 otherwise. 5499 */ 5500 int sata_scr_valid(struct ata_link *link) 5501 { 5502 struct ata_port *ap = link->ap; 5503 5504 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5505 } 5506 5507 /** 5508 * sata_scr_read - read SCR register of the specified port 5509 * @link: ATA link to read SCR for 5510 * @reg: SCR to read 5511 * @val: Place to store read value 5512 * 5513 * Read SCR register @reg of @link into *@val. This function is 5514 * guaranteed to succeed if @link is ap->link, the cable type of 5515 * the port is SATA and the port implements ->scr_read. 5516 * 5517 * LOCKING: 5518 * None if @link is ap->link. Kernel thread context otherwise. 5519 * 5520 * RETURNS: 5521 * 0 on success, negative errno on failure. 5522 */ 5523 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5524 { 5525 if (ata_is_host_link(link)) { 5526 if (sata_scr_valid(link)) 5527 return link->ap->ops->scr_read(link, reg, val); 5528 return -EOPNOTSUPP; 5529 } 5530 5531 return sata_pmp_scr_read(link, reg, val); 5532 } 5533 5534 /** 5535 * sata_scr_write - write SCR register of the specified port 5536 * @link: ATA link to write SCR for 5537 * @reg: SCR to write 5538 * @val: value to write 5539 * 5540 * Write @val to SCR register @reg of @link. 
This function is 5541 * guaranteed to succeed if @link is ap->link, the cable type of 5542 * the port is SATA and the port implements ->scr_read. 5543 * 5544 * LOCKING: 5545 * None if @link is ap->link. Kernel thread context otherwise. 5546 * 5547 * RETURNS: 5548 * 0 on success, negative errno on failure. 5549 */ 5550 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5551 { 5552 if (ata_is_host_link(link)) { 5553 if (sata_scr_valid(link)) 5554 return link->ap->ops->scr_write(link, reg, val); 5555 return -EOPNOTSUPP; 5556 } 5557 5558 return sata_pmp_scr_write(link, reg, val); 5559 } 5560 5561 /** 5562 * sata_scr_write_flush - write SCR register of the specified port and flush 5563 * @link: ATA link to write SCR for 5564 * @reg: SCR to write 5565 * @val: value to write 5566 * 5567 * This function is identical to sata_scr_write() except that this 5568 * function performs flush after writing to the register. 5569 * 5570 * LOCKING: 5571 * None if @link is ap->link. Kernel thread context otherwise. 5572 * 5573 * RETURNS: 5574 * 0 on success, negative errno on failure. 5575 */ 5576 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5577 { 5578 if (ata_is_host_link(link)) { 5579 int rc; 5580 5581 if (sata_scr_valid(link)) { 5582 rc = link->ap->ops->scr_write(link, reg, val); 5583 if (rc == 0) 5584 rc = link->ap->ops->scr_read(link, reg, &val); 5585 return rc; 5586 } 5587 return -EOPNOTSUPP; 5588 } 5589 5590 return sata_pmp_scr_write(link, reg, val); 5591 } 5592 5593 /** 5594 * ata_phys_link_online - test whether the given link is online 5595 * @link: ATA link to test 5596 * 5597 * Test whether @link is online. Note that this function returns 5598 * 0 if online status of @link cannot be obtained, so 5599 * ata_link_online(link) != !ata_link_offline(link). 5600 * 5601 * LOCKING: 5602 * None. 5603 * 5604 * RETURNS: 5605 * True if the port online status is available and online. 5606 */ 5607 bool ata_phys_link_online(struct ata_link *link) 5608 { 5609 u32 sstatus; 5610 5611 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5612 ata_sstatus_online(sstatus)) 5613 return true; 5614 return false; 5615 } 5616 5617 /** 5618 * ata_phys_link_offline - test whether the given link is offline 5619 * @link: ATA link to test 5620 * 5621 * Test whether @link is offline. Note that this function 5622 * returns 0 if offline status of @link cannot be obtained, so 5623 * ata_link_online(link) != !ata_link_offline(link). 5624 * 5625 * LOCKING: 5626 * None. 5627 * 5628 * RETURNS: 5629 * True if the port offline status is available and offline. 5630 */ 5631 bool ata_phys_link_offline(struct ata_link *link) 5632 { 5633 u32 sstatus; 5634 5635 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5636 !ata_sstatus_online(sstatus)) 5637 return true; 5638 return false; 5639 } 5640 5641 /** 5642 * ata_link_online - test whether the given link is online 5643 * @link: ATA link to test 5644 * 5645 * Test whether @link is online. This is identical to 5646 * ata_phys_link_online() when there's no slave link. When 5647 * there's a slave link, this function should only be called on 5648 * the master link and will return true if any of M/S links is 5649 * online. 5650 * 5651 * LOCKING: 5652 * None. 5653 * 5654 * RETURNS: 5655 * True if the port online status is available and online. 
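 *
 * Reset paths typically gate follow-up work on this, for example only
 * waiting for device readiness while the PHY reports the link as
 * online (illustrative sketch, not a fixed recipe):
 *
 *	if (ata_link_online(link))
 *		rc = ata_wait_after_reset(link, deadline, check_ready);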
5656 */ 5657 bool ata_link_online(struct ata_link *link) 5658 { 5659 struct ata_link *slave = link->ap->slave_link; 5660 5661 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5662 5663 return ata_phys_link_online(link) || 5664 (slave && ata_phys_link_online(slave)); 5665 } 5666 5667 /** 5668 * ata_link_offline - test whether the given link is offline 5669 * @link: ATA link to test 5670 * 5671 * Test whether @link is offline. This is identical to 5672 * ata_phys_link_offline() when there's no slave link. When 5673 * there's a slave link, this function should only be called on 5674 * the master link and will return true if both M/S links are 5675 * offline. 5676 * 5677 * LOCKING: 5678 * None. 5679 * 5680 * RETURNS: 5681 * True if the port offline status is available and offline. 5682 */ 5683 bool ata_link_offline(struct ata_link *link) 5684 { 5685 struct ata_link *slave = link->ap->slave_link; 5686 5687 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5688 5689 return ata_phys_link_offline(link) && 5690 (!slave || ata_phys_link_offline(slave)); 5691 } 5692 5693 #ifdef CONFIG_PM 5694 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, 5695 unsigned int action, unsigned int ehi_flags, 5696 bool async) 5697 { 5698 struct ata_link *link; 5699 unsigned long flags; 5700 5701 /* Previous resume operation might still be in 5702 * progress. Wait for PM_PENDING to clear. 5703 */ 5704 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5705 ata_port_wait_eh(ap); 5706 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5707 } 5708 5709 /* request PM ops to EH */ 5710 spin_lock_irqsave(ap->lock, flags); 5711 5712 ap->pm_mesg = mesg; 5713 ap->pflags |= ATA_PFLAG_PM_PENDING; 5714 ata_for_each_link(link, ap, HOST_FIRST) { 5715 link->eh_info.action |= action; 5716 link->eh_info.flags |= ehi_flags; 5717 } 5718 5719 ata_port_schedule_eh(ap); 5720 5721 spin_unlock_irqrestore(ap->lock, flags); 5722 5723 if (!async) { 5724 ata_port_wait_eh(ap); 5725 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5726 } 5727 } 5728 5729 /* 5730 * On some hardware, device fails to respond after spun down for suspend. As 5731 * the device won't be used before being resumed, we don't need to touch the 5732 * device. Ask EH to skip the usual stuff and proceed directly to suspend. 
5733 * 5734 * http://thread.gmane.org/gmane.linux.ide/46764 5735 */ 5736 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET 5737 | ATA_EHI_NO_AUTOPSY 5738 | ATA_EHI_NO_RECOVERY; 5739 5740 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg) 5741 { 5742 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false); 5743 } 5744 5745 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg) 5746 { 5747 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true); 5748 } 5749 5750 static int ata_port_pm_suspend(struct device *dev) 5751 { 5752 struct ata_port *ap = to_ata_port(dev); 5753 5754 if (pm_runtime_suspended(dev)) 5755 return 0; 5756 5757 ata_port_suspend(ap, PMSG_SUSPEND); 5758 return 0; 5759 } 5760 5761 static int ata_port_pm_freeze(struct device *dev) 5762 { 5763 struct ata_port *ap = to_ata_port(dev); 5764 5765 if (pm_runtime_suspended(dev)) 5766 return 0; 5767 5768 ata_port_suspend(ap, PMSG_FREEZE); 5769 return 0; 5770 } 5771 5772 static int ata_port_pm_poweroff(struct device *dev) 5773 { 5774 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE); 5775 return 0; 5776 } 5777 5778 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY 5779 | ATA_EHI_QUIET; 5780 5781 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg) 5782 { 5783 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false); 5784 } 5785 5786 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg) 5787 { 5788 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true); 5789 } 5790 5791 static int ata_port_pm_resume(struct device *dev) 5792 { 5793 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME); 5794 pm_runtime_disable(dev); 5795 pm_runtime_set_active(dev); 5796 pm_runtime_enable(dev); 5797 return 0; 5798 } 5799 5800 /* 5801 * For ODDs, the upper layer will poll for media change every few seconds, 5802 * which will make it enter and leave suspend state every few seconds. And 5803 * as each suspend will cause a hard/soft reset, the gain of runtime suspend 5804 * is very little and the ODD may malfunction after constantly being reset. 5805 * So the idle callback here will not proceed to suspend if a non-ZPODD capable 5806 * ODD is attached to the port. 
5807 */ 5808 static int ata_port_runtime_idle(struct device *dev) 5809 { 5810 struct ata_port *ap = to_ata_port(dev); 5811 struct ata_link *link; 5812 struct ata_device *adev; 5813 5814 ata_for_each_link(link, ap, HOST_FIRST) { 5815 ata_for_each_dev(adev, link, ENABLED) 5816 if (adev->class == ATA_DEV_ATAPI && 5817 !zpodd_dev_enabled(adev)) 5818 return -EBUSY; 5819 } 5820 5821 return 0; 5822 } 5823 5824 static int ata_port_runtime_suspend(struct device *dev) 5825 { 5826 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND); 5827 return 0; 5828 } 5829 5830 static int ata_port_runtime_resume(struct device *dev) 5831 { 5832 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME); 5833 return 0; 5834 } 5835 5836 static const struct dev_pm_ops ata_port_pm_ops = { 5837 .suspend = ata_port_pm_suspend, 5838 .resume = ata_port_pm_resume, 5839 .freeze = ata_port_pm_freeze, 5840 .thaw = ata_port_pm_resume, 5841 .poweroff = ata_port_pm_poweroff, 5842 .restore = ata_port_pm_resume, 5843 5844 .runtime_suspend = ata_port_runtime_suspend, 5845 .runtime_resume = ata_port_runtime_resume, 5846 .runtime_idle = ata_port_runtime_idle, 5847 }; 5848 5849 /* sas ports don't participate in pm runtime management of ata_ports, 5850 * and need to resume ata devices at the domain level, not the per-port 5851 * level. sas suspend/resume is async to allow parallel port recovery 5852 * since sas has multiple ata_port instances per Scsi_Host. 5853 */ 5854 void ata_sas_port_suspend(struct ata_port *ap) 5855 { 5856 ata_port_suspend_async(ap, PMSG_SUSPEND); 5857 } 5858 EXPORT_SYMBOL_GPL(ata_sas_port_suspend); 5859 5860 void ata_sas_port_resume(struct ata_port *ap) 5861 { 5862 ata_port_resume_async(ap, PMSG_RESUME); 5863 } 5864 EXPORT_SYMBOL_GPL(ata_sas_port_resume); 5865 5866 /** 5867 * ata_host_suspend - suspend host 5868 * @host: host to suspend 5869 * @mesg: PM message 5870 * 5871 * Suspend @host. Actual operation is performed by port suspend. 5872 */ 5873 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5874 { 5875 host->dev->power.power_state = mesg; 5876 return 0; 5877 } 5878 5879 /** 5880 * ata_host_resume - resume host 5881 * @host: host to resume 5882 * 5883 * Resume @host. Actual operation is performed by port resume. 5884 */ 5885 void ata_host_resume(struct ata_host *host) 5886 { 5887 host->dev->power.power_state = PMSG_ON; 5888 } 5889 #endif 5890 5891 const struct device_type ata_port_type = { 5892 .name = "ata_port", 5893 #ifdef CONFIG_PM 5894 .pm = &ata_port_pm_ops, 5895 #endif 5896 }; 5897 5898 /** 5899 * ata_dev_init - Initialize an ata_device structure 5900 * @dev: Device structure to initialize 5901 * 5902 * Initialize @dev in preparation for probing. 5903 * 5904 * LOCKING: 5905 * Inherited from caller. 5906 */ 5907 void ata_dev_init(struct ata_device *dev) 5908 { 5909 struct ata_link *link = ata_dev_phys_link(dev); 5910 struct ata_port *ap = link->ap; 5911 unsigned long flags; 5912 5913 /* SATA spd limit is bound to the attached device, reset together */ 5914 link->sata_spd_limit = link->hw_sata_spd_limit; 5915 link->sata_spd = 0; 5916 5917 /* High bits of dev->flags are used to record warm plug 5918 * requests which occur asynchronously. Synchronize using 5919 * host lock. 
5920 */ 5921 spin_lock_irqsave(ap->lock, flags); 5922 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5923 dev->horkage = 0; 5924 spin_unlock_irqrestore(ap->lock, flags); 5925 5926 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5927 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5928 dev->pio_mask = UINT_MAX; 5929 dev->mwdma_mask = UINT_MAX; 5930 dev->udma_mask = UINT_MAX; 5931 } 5932 5933 /** 5934 * ata_link_init - Initialize an ata_link structure 5935 * @ap: ATA port link is attached to 5936 * @link: Link structure to initialize 5937 * @pmp: Port multiplier port number 5938 * 5939 * Initialize @link. 5940 * 5941 * LOCKING: 5942 * Kernel thread context (may sleep) 5943 */ 5944 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5945 { 5946 int i; 5947 5948 /* clear everything except for devices */ 5949 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, 5950 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); 5951 5952 link->ap = ap; 5953 link->pmp = pmp; 5954 link->active_tag = ATA_TAG_POISON; 5955 link->hw_sata_spd_limit = UINT_MAX; 5956 5957 /* can't use iterator, ap isn't initialized yet */ 5958 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5959 struct ata_device *dev = &link->device[i]; 5960 5961 dev->link = link; 5962 dev->devno = dev - link->device; 5963 #ifdef CONFIG_ATA_ACPI 5964 dev->gtf_filter = ata_acpi_gtf_filter; 5965 #endif 5966 ata_dev_init(dev); 5967 } 5968 } 5969 5970 /** 5971 * sata_link_init_spd - Initialize link->sata_spd_limit 5972 * @link: Link to configure sata_spd_limit for 5973 * 5974 * Initialize @link->[hw_]sata_spd_limit to the currently 5975 * configured value. 5976 * 5977 * LOCKING: 5978 * Kernel thread context (may sleep). 5979 * 5980 * RETURNS: 5981 * 0 on success, -errno on failure. 5982 */ 5983 int sata_link_init_spd(struct ata_link *link) 5984 { 5985 u8 spd; 5986 int rc; 5987 5988 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5989 if (rc) 5990 return rc; 5991 5992 spd = (link->saved_scontrol >> 4) & 0xf; 5993 if (spd) 5994 link->hw_sata_spd_limit &= (1 << spd) - 1; 5995 5996 ata_force_link_limits(link); 5997 5998 link->sata_spd_limit = link->hw_sata_spd_limit; 5999 6000 return 0; 6001 } 6002 6003 /** 6004 * ata_port_alloc - allocate and initialize basic ATA port resources 6005 * @host: ATA host this allocated port belongs to 6006 * 6007 * Allocate and initialize basic ATA port resources. 6008 * 6009 * RETURNS: 6010 * Allocate ATA port on success, NULL on failure. 6011 * 6012 * LOCKING: 6013 * Inherited from calling layer (may sleep). 
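 *
 * Minimal usage sketch (this mirrors the allocation loop in
 * ata_host_alloc() further below and is shown only for illustration):
 *
 *	ap = ata_port_alloc(host);
 *	if (!ap)
 *		goto err_out;
 *	ap->port_no = i;
 *	host->ports[i] = ap;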
6014 */ 6015 struct ata_port *ata_port_alloc(struct ata_host *host) 6016 { 6017 struct ata_port *ap; 6018 6019 DPRINTK("ENTER\n"); 6020 6021 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 6022 if (!ap) 6023 return NULL; 6024 6025 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; 6026 ap->lock = &host->lock; 6027 ap->print_id = -1; 6028 ap->local_port_no = -1; 6029 ap->host = host; 6030 ap->dev = host->dev; 6031 6032 #if defined(ATA_VERBOSE_DEBUG) 6033 /* turn on all debugging levels */ 6034 ap->msg_enable = 0x00FF; 6035 #elif defined(ATA_DEBUG) 6036 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 6037 #else 6038 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 6039 #endif 6040 6041 mutex_init(&ap->scsi_scan_mutex); 6042 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 6043 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 6044 INIT_LIST_HEAD(&ap->eh_done_q); 6045 init_waitqueue_head(&ap->eh_wait_q); 6046 init_completion(&ap->park_req_pending); 6047 timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn, 6048 TIMER_DEFERRABLE); 6049 6050 ap->cbl = ATA_CBL_NONE; 6051 6052 ata_link_init(ap, &ap->link, 0); 6053 6054 #ifdef ATA_IRQ_TRAP 6055 ap->stats.unhandled_irq = 1; 6056 ap->stats.idle_irq = 1; 6057 #endif 6058 ata_sff_port_init(ap); 6059 6060 return ap; 6061 } 6062 6063 static void ata_devres_release(struct device *gendev, void *res) 6064 { 6065 struct ata_host *host = dev_get_drvdata(gendev); 6066 int i; 6067 6068 for (i = 0; i < host->n_ports; i++) { 6069 struct ata_port *ap = host->ports[i]; 6070 6071 if (!ap) 6072 continue; 6073 6074 if (ap->scsi_host) 6075 scsi_host_put(ap->scsi_host); 6076 6077 } 6078 6079 dev_set_drvdata(gendev, NULL); 6080 ata_host_put(host); 6081 } 6082 6083 static void ata_host_release(struct kref *kref) 6084 { 6085 struct ata_host *host = container_of(kref, struct ata_host, kref); 6086 int i; 6087 6088 for (i = 0; i < host->n_ports; i++) { 6089 struct ata_port *ap = host->ports[i]; 6090 6091 kfree(ap->pmp_link); 6092 kfree(ap->slave_link); 6093 kfree(ap); 6094 host->ports[i] = NULL; 6095 } 6096 kfree(host); 6097 } 6098 6099 void ata_host_get(struct ata_host *host) 6100 { 6101 kref_get(&host->kref); 6102 } 6103 6104 void ata_host_put(struct ata_host *host) 6105 { 6106 kref_put(&host->kref, ata_host_release); 6107 } 6108 6109 /** 6110 * ata_host_alloc - allocate and init basic ATA host resources 6111 * @dev: generic device this host is associated with 6112 * @max_ports: maximum number of ATA ports associated with this host 6113 * 6114 * Allocate and initialize basic ATA host resources. LLD calls 6115 * this function to allocate a host, initializes it fully and 6116 * attaches it using ata_host_register(). 6117 * 6118 * @max_ports ports are allocated and host->n_ports is 6119 * initialized to @max_ports. The caller is allowed to decrease 6120 * host->n_ports before calling ata_host_register(). The unused 6121 * ports will be automatically freed on registration. 6122 * 6123 * RETURNS: 6124 * Allocate ATA host on success, NULL on failure. 6125 * 6126 * LOCKING: 6127 * Inherited from calling layer (may sleep). 
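 *
 * Illustrative LLD sketch (hypothetical driver code; "my_port_ops" is a
 * made-up ata_port_operations table, not something defined here):
 *
 *	host = ata_host_alloc(&pdev->dev, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	for (i = 0; i < host->n_ports; i++)
 *		host->ports[i]->ops = &my_port_ops;
 *
 * Drivers that start from an ata_port_info array normally use
 * ata_host_alloc_pinfo() below instead.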
6128 */ 6129 struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 6130 { 6131 struct ata_host *host; 6132 size_t sz; 6133 int i; 6134 void *dr; 6135 6136 DPRINTK("ENTER\n"); 6137 6138 /* alloc a container for our list of ATA ports (buses) */ 6139 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 6140 host = kzalloc(sz, GFP_KERNEL); 6141 if (!host) 6142 return NULL; 6143 6144 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 6145 goto err_free; 6146 6147 dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL); 6148 if (!dr) 6149 goto err_out; 6150 6151 devres_add(dev, dr); 6152 dev_set_drvdata(dev, host); 6153 6154 spin_lock_init(&host->lock); 6155 mutex_init(&host->eh_mutex); 6156 host->dev = dev; 6157 host->n_ports = max_ports; 6158 kref_init(&host->kref); 6159 6160 /* allocate ports bound to this host */ 6161 for (i = 0; i < max_ports; i++) { 6162 struct ata_port *ap; 6163 6164 ap = ata_port_alloc(host); 6165 if (!ap) 6166 goto err_out; 6167 6168 ap->port_no = i; 6169 host->ports[i] = ap; 6170 } 6171 6172 devres_remove_group(dev, NULL); 6173 return host; 6174 6175 err_out: 6176 devres_release_group(dev, NULL); 6177 err_free: 6178 kfree(host); 6179 return NULL; 6180 } 6181 6182 /** 6183 * ata_host_alloc_pinfo - alloc host and init with port_info array 6184 * @dev: generic device this host is associated with 6185 * @ppi: array of ATA port_info to initialize host with 6186 * @n_ports: number of ATA ports attached to this host 6187 * 6188 * Allocate ATA host and initialize with info from @ppi. If NULL 6189 * terminated, @ppi may contain fewer entries than @n_ports. The 6190 * last entry will be used for the remaining ports. 6191 * 6192 * RETURNS: 6193 * Allocate ATA host on success, NULL on failure. 6194 * 6195 * LOCKING: 6196 * Inherited from calling layer (may sleep). 6197 */ 6198 struct ata_host *ata_host_alloc_pinfo(struct device *dev, 6199 const struct ata_port_info * const * ppi, 6200 int n_ports) 6201 { 6202 const struct ata_port_info *pi; 6203 struct ata_host *host; 6204 int i, j; 6205 6206 host = ata_host_alloc(dev, n_ports); 6207 if (!host) 6208 return NULL; 6209 6210 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 6211 struct ata_port *ap = host->ports[i]; 6212 6213 if (ppi[j]) 6214 pi = ppi[j++]; 6215 6216 ap->pio_mask = pi->pio_mask; 6217 ap->mwdma_mask = pi->mwdma_mask; 6218 ap->udma_mask = pi->udma_mask; 6219 ap->flags |= pi->flags; 6220 ap->link.flags |= pi->link_flags; 6221 ap->ops = pi->port_ops; 6222 6223 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 6224 host->ops = pi->port_ops; 6225 } 6226 6227 return host; 6228 } 6229 6230 /** 6231 * ata_slave_link_init - initialize slave link 6232 * @ap: port to initialize slave link for 6233 * 6234 * Create and initialize slave link for @ap. This enables slave 6235 * link handling on the port. 6236 * 6237 * In libata, a port contains links and a link contains devices. 6238 * There is single host link but if a PMP is attached to it, 6239 * there can be multiple fan-out links. On SATA, there's usually 6240 * a single device connected to a link but PATA and SATA 6241 * controllers emulating TF based interface can have two - master 6242 * and slave. 6243 * 6244 * However, there are a few controllers which don't fit into this 6245 * abstraction too well - SATA controllers which emulate TF 6246 * interface with both master and slave devices but also have 6247 * separate SCR register sets for each device. These controllers 6248 * need separate links for physical link handling 6249 * (e.g. 
onlineness, link speed) but should be treated like a 6250 * traditional M/S controller for everything else (e.g. command 6251 * issue, softreset). 6252 * 6253 * slave_link is libata's way of handling this class of 6254 * controllers without impacting core layer too much. For 6255 * anything other than physical link handling, the default host 6256 * link is used for both master and slave. For physical link 6257 * handling, separate @ap->slave_link is used. All dirty details 6258 * are implemented inside libata core layer. From LLD's POV, the 6259 * only difference is that prereset, hardreset and postreset are 6260 * called once more for the slave link, so the reset sequence 6261 * looks like the following. 6262 * 6263 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> 6264 * softreset(M) -> postreset(M) -> postreset(S) 6265 * 6266 * Note that softreset is called only for the master. Softreset 6267 * resets both M/S by definition, so SRST on master should handle 6268 * both (the standard method will work just fine). 6269 * 6270 * LOCKING: 6271 * Should be called before host is registered. 6272 * 6273 * RETURNS: 6274 * 0 on success, -errno on failure. 6275 */ 6276 int ata_slave_link_init(struct ata_port *ap) 6277 { 6278 struct ata_link *link; 6279 6280 WARN_ON(ap->slave_link); 6281 WARN_ON(ap->flags & ATA_FLAG_PMP); 6282 6283 link = kzalloc(sizeof(*link), GFP_KERNEL); 6284 if (!link) 6285 return -ENOMEM; 6286 6287 ata_link_init(ap, link, 1); 6288 ap->slave_link = link; 6289 return 0; 6290 } 6291 6292 static void ata_host_stop(struct device *gendev, void *res) 6293 { 6294 struct ata_host *host = dev_get_drvdata(gendev); 6295 int i; 6296 6297 WARN_ON(!(host->flags & ATA_HOST_STARTED)); 6298 6299 for (i = 0; i < host->n_ports; i++) { 6300 struct ata_port *ap = host->ports[i]; 6301 6302 if (ap->ops->port_stop) 6303 ap->ops->port_stop(ap); 6304 } 6305 6306 if (host->ops->host_stop) 6307 host->ops->host_stop(host); 6308 } 6309 6310 /** 6311 * ata_finalize_port_ops - finalize ata_port_operations 6312 * @ops: ata_port_operations to finalize 6313 * 6314 * An ata_port_operations can inherit from another ops and that 6315 * ops can again inherit from another. This can go on as many 6316 * times as necessary as long as there is no loop in the 6317 * inheritance chain. 6318 * 6319 * Ops tables are finalized when the host is started. NULL or 6320 * unspecified entries are inherited from the closet ancestor 6321 * which has the method and the entry is populated with it. 6322 * After finalization, the ops table directly points to all the 6323 * methods and ->inherits is no longer necessary and cleared. 6324 * 6325 * Using ATA_OP_NULL, inheriting ops can force a method to NULL. 6326 * 6327 * LOCKING: 6328 * None. 
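 *
 * Illustration (a hypothetical LLD ops table, not taken from this file):
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.qc_issue	= my_qc_issue,
 *		.hardreset	= ATA_OP_NULL,
 *	};
 *
 * After finalization, the methods that ata_base_port_ops provides and
 * my_port_ops leaves unspecified (prereset, error_handler, ...) point at
 * the base implementations, .hardreset is forced to NULL by ATA_OP_NULL,
 * and .inherits is cleared.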
6329 */ 6330 static void ata_finalize_port_ops(struct ata_port_operations *ops) 6331 { 6332 static DEFINE_SPINLOCK(lock); 6333 const struct ata_port_operations *cur; 6334 void **begin = (void **)ops; 6335 void **end = (void **)&ops->inherits; 6336 void **pp; 6337 6338 if (!ops || !ops->inherits) 6339 return; 6340 6341 spin_lock(&lock); 6342 6343 for (cur = ops->inherits; cur; cur = cur->inherits) { 6344 void **inherit = (void **)cur; 6345 6346 for (pp = begin; pp < end; pp++, inherit++) 6347 if (!*pp) 6348 *pp = *inherit; 6349 } 6350 6351 for (pp = begin; pp < end; pp++) 6352 if (IS_ERR(*pp)) 6353 *pp = NULL; 6354 6355 ops->inherits = NULL; 6356 6357 spin_unlock(&lock); 6358 } 6359 6360 /** 6361 * ata_host_start - start and freeze ports of an ATA host 6362 * @host: ATA host to start ports for 6363 * 6364 * Start and then freeze ports of @host. Started status is 6365 * recorded in host->flags, so this function can be called 6366 * multiple times. Ports are guaranteed to get started only 6367 * once. If host->ops isn't initialized yet, its set to the 6368 * first non-dummy port ops. 6369 * 6370 * LOCKING: 6371 * Inherited from calling layer (may sleep). 6372 * 6373 * RETURNS: 6374 * 0 if all ports are started successfully, -errno otherwise. 6375 */ 6376 int ata_host_start(struct ata_host *host) 6377 { 6378 int have_stop = 0; 6379 void *start_dr = NULL; 6380 int i, rc; 6381 6382 if (host->flags & ATA_HOST_STARTED) 6383 return 0; 6384 6385 ata_finalize_port_ops(host->ops); 6386 6387 for (i = 0; i < host->n_ports; i++) { 6388 struct ata_port *ap = host->ports[i]; 6389 6390 ata_finalize_port_ops(ap->ops); 6391 6392 if (!host->ops && !ata_port_is_dummy(ap)) 6393 host->ops = ap->ops; 6394 6395 if (ap->ops->port_stop) 6396 have_stop = 1; 6397 } 6398 6399 if (host->ops->host_stop) 6400 have_stop = 1; 6401 6402 if (have_stop) { 6403 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 6404 if (!start_dr) 6405 return -ENOMEM; 6406 } 6407 6408 for (i = 0; i < host->n_ports; i++) { 6409 struct ata_port *ap = host->ports[i]; 6410 6411 if (ap->ops->port_start) { 6412 rc = ap->ops->port_start(ap); 6413 if (rc) { 6414 if (rc != -ENODEV) 6415 dev_err(host->dev, 6416 "failed to start port %d (errno=%d)\n", 6417 i, rc); 6418 goto err_out; 6419 } 6420 } 6421 ata_eh_freeze_port(ap); 6422 } 6423 6424 if (start_dr) 6425 devres_add(host->dev, start_dr); 6426 host->flags |= ATA_HOST_STARTED; 6427 return 0; 6428 6429 err_out: 6430 while (--i >= 0) { 6431 struct ata_port *ap = host->ports[i]; 6432 6433 if (ap->ops->port_stop) 6434 ap->ops->port_stop(ap); 6435 } 6436 devres_free(start_dr); 6437 return rc; 6438 } 6439 6440 /** 6441 * ata_sas_host_init - Initialize a host struct for sas (ipr, libsas) 6442 * @host: host to initialize 6443 * @dev: device host is attached to 6444 * @ops: port_ops 6445 * 6446 */ 6447 void ata_host_init(struct ata_host *host, struct device *dev, 6448 struct ata_port_operations *ops) 6449 { 6450 spin_lock_init(&host->lock); 6451 mutex_init(&host->eh_mutex); 6452 host->n_tags = ATA_MAX_QUEUE; 6453 host->dev = dev; 6454 host->ops = ops; 6455 kref_init(&host->kref); 6456 } 6457 6458 void __ata_port_probe(struct ata_port *ap) 6459 { 6460 struct ata_eh_info *ehi = &ap->link.eh_info; 6461 unsigned long flags; 6462 6463 /* kick EH for boot probing */ 6464 spin_lock_irqsave(ap->lock, flags); 6465 6466 ehi->probe_mask |= ATA_ALL_DEVICES; 6467 ehi->action |= ATA_EH_RESET; 6468 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 6469 6470 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 6471 ap->pflags |= 
ATA_PFLAG_LOADING; 6472 ata_port_schedule_eh(ap); 6473 6474 spin_unlock_irqrestore(ap->lock, flags); 6475 } 6476 6477 int ata_port_probe(struct ata_port *ap) 6478 { 6479 int rc = 0; 6480 6481 if (ap->ops->error_handler) { 6482 __ata_port_probe(ap); 6483 ata_port_wait_eh(ap); 6484 } else { 6485 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 6486 rc = ata_bus_probe(ap); 6487 DPRINTK("ata%u: bus probe end\n", ap->print_id); 6488 } 6489 return rc; 6490 } 6491 6492 6493 static void async_port_probe(void *data, async_cookie_t cookie) 6494 { 6495 struct ata_port *ap = data; 6496 6497 /* 6498 * If we're not allowed to scan this host in parallel, 6499 * we need to wait until all previous scans have completed 6500 * before going further. 6501 * Jeff Garzik says this is only within a controller, so we 6502 * don't need to wait for port 0, only for later ports. 6503 */ 6504 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) 6505 async_synchronize_cookie(cookie); 6506 6507 (void)ata_port_probe(ap); 6508 6509 /* in order to keep device order, we need to synchronize at this point */ 6510 async_synchronize_cookie(cookie); 6511 6512 ata_scsi_scan_host(ap, 1); 6513 } 6514 6515 /** 6516 * ata_host_register - register initialized ATA host 6517 * @host: ATA host to register 6518 * @sht: template for SCSI host 6519 * 6520 * Register initialized ATA host. @host is allocated using 6521 * ata_host_alloc() and fully initialized by LLD. This function 6522 * starts ports, registers @host with ATA and SCSI layers and 6523 * probe registered devices. 6524 * 6525 * LOCKING: 6526 * Inherited from calling layer (may sleep). 6527 * 6528 * RETURNS: 6529 * 0 on success, -errno otherwise. 6530 */ 6531 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 6532 { 6533 int i, rc; 6534 6535 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE); 6536 6537 /* host must have been started */ 6538 if (!(host->flags & ATA_HOST_STARTED)) { 6539 dev_err(host->dev, "BUG: trying to register unstarted host\n"); 6540 WARN_ON(1); 6541 return -EINVAL; 6542 } 6543 6544 /* Blow away unused ports. This happens when LLD can't 6545 * determine the exact number of ports to allocate at 6546 * allocation time. 6547 */ 6548 for (i = host->n_ports; host->ports[i]; i++) 6549 kfree(host->ports[i]); 6550 6551 /* give ports names and add SCSI hosts */ 6552 for (i = 0; i < host->n_ports; i++) { 6553 host->ports[i]->print_id = atomic_inc_return(&ata_print_id); 6554 host->ports[i]->local_port_no = i + 1; 6555 } 6556 6557 /* Create associated sysfs transport objects */ 6558 for (i = 0; i < host->n_ports; i++) { 6559 rc = ata_tport_add(host->dev,host->ports[i]); 6560 if (rc) { 6561 goto err_tadd; 6562 } 6563 } 6564 6565 rc = ata_scsi_add_hosts(host, sht); 6566 if (rc) 6567 goto err_tadd; 6568 6569 /* set cable, sata_spd_limit and report */ 6570 for (i = 0; i < host->n_ports; i++) { 6571 struct ata_port *ap = host->ports[i]; 6572 unsigned long xfer_mask; 6573 6574 /* set SATA cable type if still unset */ 6575 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 6576 ap->cbl = ATA_CBL_SATA; 6577 6578 /* init sata_spd_limit to the current value */ 6579 sata_link_init_spd(&ap->link); 6580 if (ap->slave_link) 6581 sata_link_init_spd(ap->slave_link); 6582 6583 /* print per-port info to dmesg */ 6584 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6585 ap->udma_mask); 6586 6587 if (!ata_port_is_dummy(ap)) { 6588 ata_port_info(ap, "%cATA max %s %s\n", 6589 (ap->flags & ATA_FLAG_SATA) ? 
'S' : 'P', 6590 ata_mode_string(xfer_mask), 6591 ap->link.eh_info.desc); 6592 ata_ehi_clear_desc(&ap->link.eh_info); 6593 } else 6594 ata_port_info(ap, "DUMMY\n"); 6595 } 6596 6597 /* perform each probe asynchronously */ 6598 for (i = 0; i < host->n_ports; i++) { 6599 struct ata_port *ap = host->ports[i]; 6600 async_schedule(async_port_probe, ap); 6601 } 6602 6603 return 0; 6604 6605 err_tadd: 6606 while (--i >= 0) { 6607 ata_tport_delete(host->ports[i]); 6608 } 6609 return rc; 6610 6611 } 6612 6613 /** 6614 * ata_host_activate - start host, request IRQ and register it 6615 * @host: target ATA host 6616 * @irq: IRQ to request 6617 * @irq_handler: irq_handler used when requesting IRQ 6618 * @irq_flags: irq_flags used when requesting IRQ 6619 * @sht: scsi_host_template to use when registering the host 6620 * 6621 * After allocating an ATA host and initializing it, most libata 6622 * LLDs perform three steps to activate the host - start host, 6623 * request IRQ and register it. This helper takes necessary 6624 * arguments and performs the three steps in one go. 6625 * 6626 * An invalid IRQ skips the IRQ registration and expects the host to 6627 * have set polling mode on the port. In this case, @irq_handler 6628 * should be NULL. 6629 * 6630 * LOCKING: 6631 * Inherited from calling layer (may sleep). 6632 * 6633 * RETURNS: 6634 * 0 on success, -errno otherwise. 6635 */ 6636 int ata_host_activate(struct ata_host *host, int irq, 6637 irq_handler_t irq_handler, unsigned long irq_flags, 6638 struct scsi_host_template *sht) 6639 { 6640 int i, rc; 6641 char *irq_desc; 6642 6643 rc = ata_host_start(host); 6644 if (rc) 6645 return rc; 6646 6647 /* Special case for polling mode */ 6648 if (!irq) { 6649 WARN_ON(irq_handler); 6650 return ata_host_register(host, sht); 6651 } 6652 6653 irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]", 6654 dev_driver_string(host->dev), 6655 dev_name(host->dev)); 6656 if (!irq_desc) 6657 return -ENOMEM; 6658 6659 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 6660 irq_desc, host); 6661 if (rc) 6662 return rc; 6663 6664 for (i = 0; i < host->n_ports; i++) 6665 ata_port_desc(host->ports[i], "irq %d", irq); 6666 6667 rc = ata_host_register(host, sht); 6668 /* if failed, just free the IRQ and leave ports alone */ 6669 if (rc) 6670 devm_free_irq(host->dev, irq, host); 6671 6672 return rc; 6673 } 6674 6675 /** 6676 * ata_port_detach - Detach ATA port in preparation of device removal 6677 * @ap: ATA port to be detached 6678 * 6679 * Detach all ATA devices and the associated SCSI devices of @ap; 6680 * then, remove the associated SCSI host. @ap is guaranteed to 6681 * be quiescent on return from this function. 6682 * 6683 * LOCKING: 6684 * Kernel thread context (may sleep). 
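 *
 * (For illustration: a PCI LLD's ->remove() normally only needs the
 * host-level helper, e.g. a hypothetical
 *
 *	static void my_remove(struct pci_dev *pdev)
 *	{
 *		ata_host_detach(pci_get_drvdata(pdev));
 *	}
 *
 * which is exactly what ata_pci_remove_one() further below does; that in
 * turn reaches every port through this function.)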
6685 */ 6686 static void ata_port_detach(struct ata_port *ap) 6687 { 6688 unsigned long flags; 6689 struct ata_link *link; 6690 struct ata_device *dev; 6691 6692 if (!ap->ops->error_handler) 6693 goto skip_eh; 6694 6695 /* tell EH we're leaving & flush EH */ 6696 spin_lock_irqsave(ap->lock, flags); 6697 ap->pflags |= ATA_PFLAG_UNLOADING; 6698 ata_port_schedule_eh(ap); 6699 spin_unlock_irqrestore(ap->lock, flags); 6700 6701 /* wait till EH commits suicide */ 6702 ata_port_wait_eh(ap); 6703 6704 /* it better be dead now */ 6705 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); 6706 6707 cancel_delayed_work_sync(&ap->hotplug_task); 6708 6709 skip_eh: 6710 /* clean up zpodd on port removal */ 6711 ata_for_each_link(link, ap, HOST_FIRST) { 6712 ata_for_each_dev(dev, link, ALL) { 6713 if (zpodd_dev_enabled(dev)) 6714 zpodd_exit(dev); 6715 } 6716 } 6717 if (ap->pmp_link) { 6718 int i; 6719 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 6720 ata_tlink_delete(&ap->pmp_link[i]); 6721 } 6722 /* remove the associated SCSI host */ 6723 scsi_remove_host(ap->scsi_host); 6724 ata_tport_delete(ap); 6725 } 6726 6727 /** 6728 * ata_host_detach - Detach all ports of an ATA host 6729 * @host: Host to detach 6730 * 6731 * Detach all ports of @host. 6732 * 6733 * LOCKING: 6734 * Kernel thread context (may sleep). 6735 */ 6736 void ata_host_detach(struct ata_host *host) 6737 { 6738 int i; 6739 6740 /* Ensure ata_port probe has completed */ 6741 async_synchronize_full(); 6742 6743 for (i = 0; i < host->n_ports; i++) 6744 ata_port_detach(host->ports[i]); 6745 6746 /* the host is dead now, dissociate ACPI */ 6747 ata_acpi_dissociate(host); 6748 } 6749 6750 #ifdef CONFIG_PCI 6751 6752 /** 6753 * ata_pci_remove_one - PCI layer callback for device removal 6754 * @pdev: PCI device that was removed 6755 * 6756 * PCI layer indicates to libata via this hook that hot-unplug or 6757 * module unload event has occurred. Detach all ports. Resource 6758 * release is handled via devres. 6759 * 6760 * LOCKING: 6761 * Inherited from PCI layer (may sleep). 6762 */ 6763 void ata_pci_remove_one(struct pci_dev *pdev) 6764 { 6765 struct ata_host *host = pci_get_drvdata(pdev); 6766 6767 ata_host_detach(host); 6768 } 6769 6770 void ata_pci_shutdown_one(struct pci_dev *pdev) 6771 { 6772 struct ata_host *host = pci_get_drvdata(pdev); 6773 int i; 6774 6775 for (i = 0; i < host->n_ports; i++) { 6776 struct ata_port *ap = host->ports[i]; 6777 6778 ap->pflags |= ATA_PFLAG_FROZEN; 6779 6780 /* Disable port interrupts */ 6781 if (ap->ops->freeze) 6782 ap->ops->freeze(ap); 6783 6784 /* Stop the port DMA engines */ 6785 if (ap->ops->port_stop) 6786 ap->ops->port_stop(ap); 6787 } 6788 } 6789 6790 /* move to PCI subsystem */ 6791 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 6792 { 6793 unsigned long tmp = 0; 6794 6795 switch (bits->width) { 6796 case 1: { 6797 u8 tmp8 = 0; 6798 pci_read_config_byte(pdev, bits->reg, &tmp8); 6799 tmp = tmp8; 6800 break; 6801 } 6802 case 2: { 6803 u16 tmp16 = 0; 6804 pci_read_config_word(pdev, bits->reg, &tmp16); 6805 tmp = tmp16; 6806 break; 6807 } 6808 case 4: { 6809 u32 tmp32 = 0; 6810 pci_read_config_dword(pdev, bits->reg, &tmp32); 6811 tmp = tmp32; 6812 break; 6813 } 6814 6815 default: 6816 return -EINVAL; 6817 } 6818 6819 tmp &= bits->mask; 6820 6821 return (tmp == bits->val) ? 
1 : 0; 6822 } 6823 6824 #ifdef CONFIG_PM 6825 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 6826 { 6827 pci_save_state(pdev); 6828 pci_disable_device(pdev); 6829 6830 if (mesg.event & PM_EVENT_SLEEP) 6831 pci_set_power_state(pdev, PCI_D3hot); 6832 } 6833 6834 int ata_pci_device_do_resume(struct pci_dev *pdev) 6835 { 6836 int rc; 6837 6838 pci_set_power_state(pdev, PCI_D0); 6839 pci_restore_state(pdev); 6840 6841 rc = pcim_enable_device(pdev); 6842 if (rc) { 6843 dev_err(&pdev->dev, 6844 "failed to enable device after resume (%d)\n", rc); 6845 return rc; 6846 } 6847 6848 pci_set_master(pdev); 6849 return 0; 6850 } 6851 6852 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 6853 { 6854 struct ata_host *host = pci_get_drvdata(pdev); 6855 int rc = 0; 6856 6857 rc = ata_host_suspend(host, mesg); 6858 if (rc) 6859 return rc; 6860 6861 ata_pci_device_do_suspend(pdev, mesg); 6862 6863 return 0; 6864 } 6865 6866 int ata_pci_device_resume(struct pci_dev *pdev) 6867 { 6868 struct ata_host *host = pci_get_drvdata(pdev); 6869 int rc; 6870 6871 rc = ata_pci_device_do_resume(pdev); 6872 if (rc == 0) 6873 ata_host_resume(host); 6874 return rc; 6875 } 6876 #endif /* CONFIG_PM */ 6877 6878 #endif /* CONFIG_PCI */ 6879 6880 /** 6881 * ata_platform_remove_one - Platform layer callback for device removal 6882 * @pdev: Platform device that was removed 6883 * 6884 * Platform layer indicates to libata via this hook that hot-unplug or 6885 * module unload event has occurred. Detach all ports. Resource 6886 * release is handled via devres. 6887 * 6888 * LOCKING: 6889 * Inherited from platform layer (may sleep). 6890 */ 6891 int ata_platform_remove_one(struct platform_device *pdev) 6892 { 6893 struct ata_host *host = platform_get_drvdata(pdev); 6894 6895 ata_host_detach(host); 6896 6897 return 0; 6898 } 6899 6900 static int __init ata_parse_force_one(char **cur, 6901 struct ata_force_ent *force_ent, 6902 const char **reason) 6903 { 6904 static const struct ata_force_param force_tbl[] __initconst = { 6905 { "40c", .cbl = ATA_CBL_PATA40 }, 6906 { "80c", .cbl = ATA_CBL_PATA80 }, 6907 { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, 6908 { "unk", .cbl = ATA_CBL_PATA_UNK }, 6909 { "ign", .cbl = ATA_CBL_PATA_IGN }, 6910 { "sata", .cbl = ATA_CBL_SATA }, 6911 { "1.5Gbps", .spd_limit = 1 }, 6912 { "3.0Gbps", .spd_limit = 2 }, 6913 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 6914 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 6915 { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM }, 6916 { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM }, 6917 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, 6918 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 6919 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 6920 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 6921 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, 6922 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, 6923 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, 6924 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, 6925 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, 6926 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, 6927 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, 6928 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, 6929 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, 6930 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6931 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6932 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6933 { "udma1", .xfer_mask = 1 << 
(ATA_SHIFT_UDMA + 1) }, 6934 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6935 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6936 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6937 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6938 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6939 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6940 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6941 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6942 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6943 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6944 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6945 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6946 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6947 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6948 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6949 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6950 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6951 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, 6952 { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, 6953 { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, 6954 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6955 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, 6956 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, 6957 { "disable", .horkage_on = ATA_HORKAGE_DISABLE }, 6958 }; 6959 char *start = *cur, *p = *cur; 6960 char *id, *val, *endp; 6961 const struct ata_force_param *match_fp = NULL; 6962 int nr_matches = 0, i; 6963 6964 /* find where this param ends and update *cur */ 6965 while (*p != '\0' && *p != ',') 6966 p++; 6967 6968 if (*p == '\0') 6969 *cur = p; 6970 else 6971 *cur = p + 1; 6972 6973 *p = '\0'; 6974 6975 /* parse */ 6976 p = strchr(start, ':'); 6977 if (!p) { 6978 val = strstrip(start); 6979 goto parse_val; 6980 } 6981 *p = '\0'; 6982 6983 id = strstrip(start); 6984 val = strstrip(p + 1); 6985 6986 /* parse id */ 6987 p = strchr(id, '.'); 6988 if (p) { 6989 *p++ = '\0'; 6990 force_ent->device = simple_strtoul(p, &endp, 10); 6991 if (p == endp || *endp != '\0') { 6992 *reason = "invalid device"; 6993 return -EINVAL; 6994 } 6995 } 6996 6997 force_ent->port = simple_strtoul(id, &endp, 10); 6998 if (id == endp || *endp != '\0') { 6999 *reason = "invalid port/link"; 7000 return -EINVAL; 7001 } 7002 7003 parse_val: 7004 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */ 7005 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { 7006 const struct ata_force_param *fp = &force_tbl[i]; 7007 7008 if (strncasecmp(val, fp->name, strlen(val))) 7009 continue; 7010 7011 nr_matches++; 7012 match_fp = fp; 7013 7014 if (strcasecmp(val, fp->name) == 0) { 7015 nr_matches = 1; 7016 break; 7017 } 7018 } 7019 7020 if (!nr_matches) { 7021 *reason = "unknown value"; 7022 return -EINVAL; 7023 } 7024 if (nr_matches > 1) { 7025 *reason = "ambiguous value"; 7026 return -EINVAL; 7027 } 7028 7029 force_ent->param = *match_fp; 7030 7031 return 0; 7032 } 7033 7034 static void __init ata_parse_force_param(void) 7035 { 7036 int idx = 0, size = 1; 7037 int last_port = -1, last_device = -1; 7038 char *p, *cur, *next; 7039 7040 /* calculate maximum number of params and allocate force_tbl */ 7041 for (p = ata_force_param_buf; *p; p++) 7042 if (*p == ',') 7043 size++; 7044 7045 ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL); 7046 if (!ata_force_tbl) { 7047 printk(KERN_WARNING "ata: failed to extend force table, " 7048 "libata.force ignored\n"); 7049 
return; 7050 } 7051 7052 /* parse and populate the table */ 7053 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { 7054 const char *reason = ""; 7055 struct ata_force_ent te = { .port = -1, .device = -1 }; 7056 7057 next = cur; 7058 if (ata_parse_force_one(&next, &te, &reason)) { 7059 printk(KERN_WARNING "ata: failed to parse force " 7060 "parameter \"%s\" (%s)\n", 7061 cur, reason); 7062 continue; 7063 } 7064 7065 if (te.port == -1) { 7066 te.port = last_port; 7067 te.device = last_device; 7068 } 7069 7070 ata_force_tbl[idx++] = te; 7071 7072 last_port = te.port; 7073 last_device = te.device; 7074 } 7075 7076 ata_force_tbl_size = idx; 7077 } 7078 7079 static int __init ata_init(void) 7080 { 7081 int rc; 7082 7083 ata_parse_force_param(); 7084 7085 rc = ata_sff_init(); 7086 if (rc) { 7087 kfree(ata_force_tbl); 7088 return rc; 7089 } 7090 7091 libata_transport_init(); 7092 ata_scsi_transport_template = ata_attach_transport(); 7093 if (!ata_scsi_transport_template) { 7094 ata_sff_exit(); 7095 rc = -ENOMEM; 7096 goto err_out; 7097 } 7098 7099 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 7100 return 0; 7101 7102 err_out: 7103 return rc; 7104 } 7105 7106 static void __exit ata_exit(void) 7107 { 7108 ata_release_transport(ata_scsi_transport_template); 7109 libata_transport_exit(); 7110 ata_sff_exit(); 7111 kfree(ata_force_tbl); 7112 } 7113 7114 subsys_initcall(ata_init); 7115 module_exit(ata_exit); 7116 7117 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1); 7118 7119 int ata_ratelimit(void) 7120 { 7121 return __ratelimit(&ratelimit); 7122 } 7123 7124 /** 7125 * ata_msleep - ATA EH owner aware msleep 7126 * @ap: ATA port to attribute the sleep to 7127 * @msecs: duration to sleep in milliseconds 7128 * 7129 * Sleeps @msecs. If the current task is owner of @ap's EH, the 7130 * ownership is released before going to sleep and reacquired 7131 * after the sleep is complete. IOW, other ports sharing the 7132 * @ap->host will be allowed to own the EH while this task is 7133 * sleeping. 7134 * 7135 * LOCKING: 7136 * Might sleep. 7137 */ 7138 void ata_msleep(struct ata_port *ap, unsigned int msecs) 7139 { 7140 bool owns_eh = ap && ap->host->eh_owner == current; 7141 7142 if (owns_eh) 7143 ata_eh_release(ap); 7144 7145 if (msecs < 20) { 7146 unsigned long usecs = msecs * USEC_PER_MSEC; 7147 usleep_range(usecs, usecs + 50); 7148 } else { 7149 msleep(msecs); 7150 } 7151 7152 if (owns_eh) 7153 ata_eh_acquire(ap); 7154 } 7155 7156 /** 7157 * ata_wait_register - wait until register value changes 7158 * @ap: ATA port to wait register for, can be NULL 7159 * @reg: IO-mapped register 7160 * @mask: Mask to apply to read register value 7161 * @val: Wait condition 7162 * @interval: polling interval in milliseconds 7163 * @timeout: timeout in milliseconds 7164 * 7165 * Waiting for some bits of register to change is a common 7166 * operation for ATA controllers. This function reads 32bit LE 7167 * IO-mapped register @reg and tests for the following condition. 7168 * 7169 * (*@reg & mask) != val 7170 * 7171 * If the condition is met, it returns; otherwise, the process is 7172 * repeated after @interval_msec until timeout. 7173 * 7174 * LOCKING: 7175 * Kernel thread context (may sleep) 7176 * 7177 * RETURNS: 7178 * The final register value. 
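 *
 * Illustrative call (a sketch; "status_reg" stands for some ioremapped
 * controller register): wait up to one second, polling every 10 ms, for
 * BSY to clear:
 *
 *	tmp = ata_wait_register(ap, status_reg, ATA_BUSY, ATA_BUSY, 10, 1000);
 *	if (tmp & ATA_BUSY)
 *		ata_port_warn(ap, "device still busy\n");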
7179 */ 7180 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, 7181 unsigned long interval, unsigned long timeout) 7182 { 7183 unsigned long deadline; 7184 u32 tmp; 7185 7186 tmp = ioread32(reg); 7187 7188 /* Calculate timeout _after_ the first read to make sure 7189 * preceding writes reach the controller before starting to 7190 * eat away the timeout. 7191 */ 7192 deadline = ata_deadline(jiffies, timeout); 7193 7194 while ((tmp & mask) == val && time_before(jiffies, deadline)) { 7195 ata_msleep(ap, interval); 7196 tmp = ioread32(reg); 7197 } 7198 7199 return tmp; 7200 } 7201 7202 /** 7203 * sata_lpm_ignore_phy_events - test if PHY event should be ignored 7204 * @link: Link receiving the event 7205 * 7206 * Test whether the received PHY event has to be ignored or not. 7207 * 7208 * LOCKING: 7209 * None: 7210 * 7211 * RETURNS: 7212 * True if the event has to be ignored. 7213 */ 7214 bool sata_lpm_ignore_phy_events(struct ata_link *link) 7215 { 7216 unsigned long lpm_timeout = link->last_lpm_change + 7217 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY); 7218 7219 /* if LPM is enabled, PHYRDY doesn't mean anything */ 7220 if (link->lpm_policy > ATA_LPM_MAX_POWER) 7221 return true; 7222 7223 /* ignore the first PHY event after the LPM policy changed 7224 * as it is might be spurious 7225 */ 7226 if ((link->flags & ATA_LFLAG_CHANGED) && 7227 time_before(jiffies, lpm_timeout)) 7228 return true; 7229 7230 return false; 7231 } 7232 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events); 7233 7234 /* 7235 * Dummy port_ops 7236 */ 7237 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 7238 { 7239 return AC_ERR_SYSTEM; 7240 } 7241 7242 static void ata_dummy_error_handler(struct ata_port *ap) 7243 { 7244 /* truly dummy */ 7245 } 7246 7247 struct ata_port_operations ata_dummy_port_ops = { 7248 .qc_prep = ata_noop_qc_prep, 7249 .qc_issue = ata_dummy_qc_issue, 7250 .error_handler = ata_dummy_error_handler, 7251 .sched_eh = ata_std_sched_eh, 7252 .end_eh = ata_std_end_eh, 7253 }; 7254 7255 const struct ata_port_info ata_dummy_port_info = { 7256 .port_ops = &ata_dummy_port_ops, 7257 }; 7258 7259 /* 7260 * Utility print functions 7261 */ 7262 void ata_port_printk(const struct ata_port *ap, const char *level, 7263 const char *fmt, ...) 7264 { 7265 struct va_format vaf; 7266 va_list args; 7267 7268 va_start(args, fmt); 7269 7270 vaf.fmt = fmt; 7271 vaf.va = &args; 7272 7273 printk("%sata%u: %pV", level, ap->print_id, &vaf); 7274 7275 va_end(args); 7276 } 7277 EXPORT_SYMBOL(ata_port_printk); 7278 7279 void ata_link_printk(const struct ata_link *link, const char *level, 7280 const char *fmt, ...) 7281 { 7282 struct va_format vaf; 7283 va_list args; 7284 7285 va_start(args, fmt); 7286 7287 vaf.fmt = fmt; 7288 vaf.va = &args; 7289 7290 if (sata_pmp_attached(link->ap) || link->ap->slave_link) 7291 printk("%sata%u.%02u: %pV", 7292 level, link->ap->print_id, link->pmp, &vaf); 7293 else 7294 printk("%sata%u: %pV", 7295 level, link->ap->print_id, &vaf); 7296 7297 va_end(args); 7298 } 7299 EXPORT_SYMBOL(ata_link_printk); 7300 7301 void ata_dev_printk(const struct ata_device *dev, const char *level, 7302 const char *fmt, ...) 
7303 { 7304 struct va_format vaf; 7305 va_list args; 7306 7307 va_start(args, fmt); 7308 7309 vaf.fmt = fmt; 7310 vaf.va = &args; 7311 7312 printk("%sata%u.%02u: %pV", 7313 level, dev->link->ap->print_id, dev->link->pmp + dev->devno, 7314 &vaf); 7315 7316 va_end(args); 7317 } 7318 EXPORT_SYMBOL(ata_dev_printk); 7319 7320 void ata_print_version(const struct device *dev, const char *version) 7321 { 7322 dev_printk(KERN_DEBUG, dev, "version %s\n", version); 7323 } 7324 EXPORT_SYMBOL(ata_print_version); 7325 7326 /* 7327 * libata is essentially a library of internal helper functions for 7328 * low-level ATA host controller drivers. As such, the API/ABI is 7329 * likely to change as new drivers are added and updated. 7330 * Do not depend on ABI/API stability. 7331 */ 7332 EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 7333 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 7334 EXPORT_SYMBOL_GPL(sata_deb_timing_long); 7335 EXPORT_SYMBOL_GPL(ata_base_port_ops); 7336 EXPORT_SYMBOL_GPL(sata_port_ops); 7337 EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 7338 EXPORT_SYMBOL_GPL(ata_dummy_port_info); 7339 EXPORT_SYMBOL_GPL(ata_link_next); 7340 EXPORT_SYMBOL_GPL(ata_dev_next); 7341 EXPORT_SYMBOL_GPL(ata_std_bios_param); 7342 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); 7343 EXPORT_SYMBOL_GPL(ata_host_init); 7344 EXPORT_SYMBOL_GPL(ata_host_alloc); 7345 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 7346 EXPORT_SYMBOL_GPL(ata_slave_link_init); 7347 EXPORT_SYMBOL_GPL(ata_host_start); 7348 EXPORT_SYMBOL_GPL(ata_host_register); 7349 EXPORT_SYMBOL_GPL(ata_host_activate); 7350 EXPORT_SYMBOL_GPL(ata_host_detach); 7351 EXPORT_SYMBOL_GPL(ata_sg_init); 7352 EXPORT_SYMBOL_GPL(ata_qc_complete); 7353 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 7354 EXPORT_SYMBOL_GPL(atapi_cmd_type); 7355 EXPORT_SYMBOL_GPL(ata_tf_to_fis); 7356 EXPORT_SYMBOL_GPL(ata_tf_from_fis); 7357 EXPORT_SYMBOL_GPL(ata_pack_xfermask); 7358 EXPORT_SYMBOL_GPL(ata_unpack_xfermask); 7359 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); 7360 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); 7361 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 7362 EXPORT_SYMBOL_GPL(ata_mode_string); 7363 EXPORT_SYMBOL_GPL(ata_id_xfermask); 7364 EXPORT_SYMBOL_GPL(ata_do_set_mode); 7365 EXPORT_SYMBOL_GPL(ata_std_qc_defer); 7366 EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 7367 EXPORT_SYMBOL_GPL(ata_dev_disable); 7368 EXPORT_SYMBOL_GPL(sata_set_spd); 7369 EXPORT_SYMBOL_GPL(ata_wait_after_reset); 7370 EXPORT_SYMBOL_GPL(sata_link_debounce); 7371 EXPORT_SYMBOL_GPL(sata_link_resume); 7372 EXPORT_SYMBOL_GPL(sata_link_scr_lpm); 7373 EXPORT_SYMBOL_GPL(ata_std_prereset); 7374 EXPORT_SYMBOL_GPL(sata_link_hardreset); 7375 EXPORT_SYMBOL_GPL(sata_std_hardreset); 7376 EXPORT_SYMBOL_GPL(ata_std_postreset); 7377 EXPORT_SYMBOL_GPL(ata_dev_classify); 7378 EXPORT_SYMBOL_GPL(ata_dev_pair); 7379 EXPORT_SYMBOL_GPL(ata_ratelimit); 7380 EXPORT_SYMBOL_GPL(ata_msleep); 7381 EXPORT_SYMBOL_GPL(ata_wait_register); 7382 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 7383 EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 7384 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 7385 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 7386 EXPORT_SYMBOL_GPL(__ata_change_queue_depth); 7387 EXPORT_SYMBOL_GPL(sata_scr_valid); 7388 EXPORT_SYMBOL_GPL(sata_scr_read); 7389 EXPORT_SYMBOL_GPL(sata_scr_write); 7390 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 7391 EXPORT_SYMBOL_GPL(ata_link_online); 7392 EXPORT_SYMBOL_GPL(ata_link_offline); 7393 #ifdef CONFIG_PM 7394 EXPORT_SYMBOL_GPL(ata_host_suspend); 7395 EXPORT_SYMBOL_GPL(ata_host_resume); 7396 #endif /* CONFIG_PM */ 7397 
EXPORT_SYMBOL_GPL(ata_id_string); 7398 EXPORT_SYMBOL_GPL(ata_id_c_string); 7399 EXPORT_SYMBOL_GPL(ata_do_dev_read_id); 7400 EXPORT_SYMBOL_GPL(ata_scsi_simulate); 7401 7402 EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 7403 EXPORT_SYMBOL_GPL(ata_timing_find_mode); 7404 EXPORT_SYMBOL_GPL(ata_timing_compute); 7405 EXPORT_SYMBOL_GPL(ata_timing_merge); 7406 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); 7407 7408 #ifdef CONFIG_PCI 7409 EXPORT_SYMBOL_GPL(pci_test_config_bits); 7410 EXPORT_SYMBOL_GPL(ata_pci_shutdown_one); 7411 EXPORT_SYMBOL_GPL(ata_pci_remove_one); 7412 #ifdef CONFIG_PM 7413 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 7414 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 7415 EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 7416 EXPORT_SYMBOL_GPL(ata_pci_device_resume); 7417 #endif /* CONFIG_PM */ 7418 #endif /* CONFIG_PCI */ 7419 7420 EXPORT_SYMBOL_GPL(ata_platform_remove_one); 7421 7422 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 7423 EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 7424 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 7425 EXPORT_SYMBOL_GPL(ata_port_desc); 7426 #ifdef CONFIG_PCI 7427 EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 7428 #endif /* CONFIG_PCI */ 7429 EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 7430 EXPORT_SYMBOL_GPL(ata_link_abort); 7431 EXPORT_SYMBOL_GPL(ata_port_abort); 7432 EXPORT_SYMBOL_GPL(ata_port_freeze); 7433 EXPORT_SYMBOL_GPL(sata_async_notification); 7434 EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 7435 EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 7436 EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 7437 EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 7438 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); 7439 EXPORT_SYMBOL_GPL(ata_do_eh); 7440 EXPORT_SYMBOL_GPL(ata_std_error_handler); 7441 7442 EXPORT_SYMBOL_GPL(ata_cable_40wire); 7443 EXPORT_SYMBOL_GPL(ata_cable_80wire); 7444 EXPORT_SYMBOL_GPL(ata_cable_unknown); 7445 EXPORT_SYMBOL_GPL(ata_cable_ignore); 7446 EXPORT_SYMBOL_GPL(ata_cable_sata); 7447 EXPORT_SYMBOL_GPL(ata_host_get); 7448 EXPORT_SYMBOL_GPL(ata_host_put); 7449
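/*
 * Editorial usage sketch (illustrative only, not part of the original file):
 * the typical "allocate, start, register" sequence most libata LLDs follow,
 * using the helpers exported above.  Names prefixed with "my_" are
 * hypothetical driver objects.
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &my_port_ops,
 *	};
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		... map BARs, set up host->iomap and port addresses ...
 *
 *		return ata_host_activate(host, pdev->irq, my_interrupt,
 *					 IRQF_SHARED, &my_sht);
 *	}
 */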