/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/log2.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>

#include "libata.h"


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};
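
/*
 * Illustrative note (annotation, not original code): each entry parsed
 * out of the libata.force= boot parameter fills one ata_force_param.
 * For example, "libata.force=1.00:40c,udma4" would yield an entry whose
 * cbl forces a 40-wire cable and whose xfer_mask caps the device at
 * UDMA/66.  See Documentation/kernel-parameters.txt for the accepted
 * names.
 */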

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}
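
/*
 * Sketch of why the 0x3 test above works (assumption based on the SATA
 * SStatus register layout): bits 3:0 of SStatus form the DET field, and
 * DET == 0x3 means "device presence detected and Phy communication
 * established", i.e. the link is online.  Any other DET value is
 * treated as offline.
 */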

/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}

/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}

/**
 *	ata_dev_phys_link - find physical link for a device
 *	@dev: ATA device to look up physical link for
 *
 *	Look up physical link which @dev is attached to.  Note that
 *	this is different from @dev->link only when @dev is on slave
 *	link.  For all other cases, it's the same as @dev->link.
 *
 *	LOCKING:
 *	Don't care.
 *
 *	RETURNS:
 *	Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}
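
/*
 * Usage sketch (hedged): callers normally go through the
 * ata_for_each_link()/ata_for_each_dev() wrappers from libata.h, which
 * are built on the two iterators above and are used elsewhere in this
 * file (see ata_lpm_enable() below):
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			handle(dev);	// hypothetical per-device hook
 */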

/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has matching port number is used, so it
 *	can be specified as part of device force parameters.  For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_printk(ap, KERN_NOTICE,
				"FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link flags and SATA spd limit according to libata.force
 *	and whine about it.  When only the port part is specified
 *	(e.g. 1:), the limit applies to all links connected to both
 *	the host link and all fan-out ports connected via PMP.  If the
 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 *	first fan-out link not the host link.  Device number 15 always
 *	points to the host link whether PMP is attached or not.  If the
 *	controller has slave link, device number 16 points to it.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_printk(link, KERN_NOTICE,
					"FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_printk(link, KERN_NOTICE,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
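
/*
 * Worked example for the spd_limit math above (annotation, not original
 * code): a forced spd_limit of 1 gives hw_sata_spd_limit = (1 << 1) - 1
 * = 0x1, i.e. 1.5 Gbps only; spd_limit 2 gives 0x3, allowing up to
 * 3.0 Gbps.  The mask holds one bit per permitted link generation.
 */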

/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: xfer_mask set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: horkage modified (%s)\n", fe->param.name);
	}
}

/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall thru */
	default:
		return ATAPI_MISC;
	}
}

/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will be output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
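
/*
 * Byte layout produced above (summary of the assignments, for quick
 * reference):
 *
 *	fis[0]		0x27 (Register - Host to Device)
 *	fis[1]		bit 7 = C (command) bit, bits 3:0 = PM port
 *	fis[2..3]	command, features
 *	fis[4..7]	LBA low/mid/high, device
 *	fis[8..11]	LBA low/mid/high (exp), features (exp)
 *	fis[12..13]	sector count, sector count (exp)
 *	fis[15]		control
 *	remaining bytes are reserved and cleared
 */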

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
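
/*
 * Worked example for the table lookup above (annotation): the index is
 * base (0 = PIO multi, 8 = PIO, 16 = DMA) + 4 if FUA + 2 if LBA48 + 1
 * if write.  A FUA write in LBA48 DMA mode therefore picks entry
 * 16 + 4 + 2 + 1 = 23, ATA_CMD_WRITE_FUA_EXT; a zero entry (e.g. FUA
 * without LBA48) makes ata_rwcmd_protocol() fail with -1.
 */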

/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		 * Cylinder: 0-65535
		 * Head: 0-15
		 * Sector: 1-255
		 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
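
/*
 * NCQ encoding note (annotation of the branch above): for FPDMA
 * READ/WRITE the sector count lives in feature/hob_feature and the tag
 * is carried in bits 7:3 of nsect (tf->nsect = tag << 3), per the NCQ
 * command definition; e.g. tag 5 yields nsect 0x28.
 */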

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
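
/*
 * Round-trip sketch (illustrative): ata_pack_xfermask() shifts the
 * three per-class masks into disjoint bit ranges of one word and
 * ata_unpack_xfermask() undoes it, so
 *
 *	mask = ata_pack_xfermask(pio, mwdma, udma);
 *	ata_unpack_xfermask(mask, &pio, &mwdma, &udma);
 *
 * is an identity as long as each input fits its ATA_MASK_* range.
 * ata_xfer_mask2mode() then maps the top set bit of such a mask to an
 * XFER_* mode, e.g. a mask whose highest bit is UDMA mode 2 yields
 * XFER_UDMA_2.
 */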

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
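
/*
 * Background note for the SControl manipulation below (assumption based
 * on the SATA spec): bits 11:8 of SControl form the IPM field, which
 * restricts the interface power states the link may enter.  Setting
 * bit 8 forbids transitions to PARTIAL and bit 9 forbids SLUMBER;
 * writing 0x3 << 8 therefore disables interface power management
 * entirely.
 */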

static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because Disks are too stupid to know that
	 * if the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL, and instead it
	 * just would give up.  So, for medium_power to
	 * work at all, we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disable DIPM if it does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions which effectively
		 * disable DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void) err_mask;

	return 0;
}

/**
 *	ata_dev_enable_pm - enable SATA interface power management
 *	@dev:  device to enable power management
 *	@policy: the link power management policy
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power
 *	policy, and then call driver specific callbacks for
 *	enabling Host Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: -EINVAL if IPM is not supported, 0 otherwise.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
	int rc = 0;
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
	if (rc)
		goto enable_pm_out;
	rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
	if (rc)
		ap->pm_policy = MAX_PERFORMANCE;
	else
		ap->pm_policy = policy;
	return /* rc */;	/* hopefully we can use 'rc' eventually */
}

#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	the policy, then call driver-specific callbacks to disable
 *	Host Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */

void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EH_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}

#ifdef CONFIG_PM
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL)
				ata_dev_disable_pm(dev);
		}
	}
}

static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 shortly thereafter and described them as reserved
	 * for SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
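
/*
 * Usage sketch (the ATA_ID_PROD/ATA_ID_PROD_LEN constants come from
 * <linux/ata.h>; this mirrors how other parts of libata pull strings
 * out of the IDENTIFY page):
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */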

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
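
/*
 * HPA note (annotation): READ NATIVE MAX ADDRESS (EXT) returns the last
 * addressable LBA, hence the "+ 1" above to convert it into a sector
 * count.  A drive with a Host Protected Area reports fewer sectors in
 * IDENTIFY than this native maximum; ata_hpa_resize() below compares
 * the two and can unlock the difference.
 */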

/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media.  The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
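
/*
 * Usage sketch (hedged): a typical caller derives the supported modes
 * from IDENTIFY data and splits them back into per-class masks:
 *
 *	unsigned long xfer_mask = ata_id_xfermask(dev->id);
 *	unsigned long pio, mwdma, udma;
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 */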

/**
 *	ata_pio_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@data: data for @fn to use
 *	@delay: delay time in msecs for workqueue function
 *
 *	Schedule port_task for execution with @data after @delay
 *	msecs.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
{
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}
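
/*
 * Usage sketch (hedged; this mirrors ata_do_dev_read_id() further down,
 * which issues IDENTIFY through this wrapper):
 *
 *	struct ata_taskfile tf;
 *	u16 id[ATA_ID_WORDS];
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.protocol = ATA_PROT_PIO;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 *				     id, sizeof(id), 0);
 */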

/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}

/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy. Return
 *	-1 if no iordy mode is available.
 */

static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 *	ata_do_dev_read_id		-	default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data. For some RAID controllers and for pre ATA devices
 *	this function is wrapped or replaced by the driver
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}
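
/*
 * Note (annotation): ata_do_dev_read_id() is the stock ID-read method;
 * ata_dev_read_id() below calls it unless the port provides its own
 * ap->ops->read_id, which RAID-style controllers use to wrap or replace
 * the plain IDENTIFY.
 */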
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		/* fall through */
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_printk(dev, KERN_DEBUG,
				       "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
				       "device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_printk(dev, KERN_DEBUG,
				       "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

static int ata_do_link_spd_horkage(struct ata_device *dev)
{
	struct ata_link *plink = ata_dev_phys_link(dev);
	u32 target, target_limit;

	if (!sata_scr_valid(plink))
		return 0;

	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
		target = 1;
	else
		return 0;

	target_limit = (1 << target) - 1;

	/* if already on stricter limit, no need to push further */
	if (plink->sata_spd_limit <= target_limit)
		return 0;

	plink->sata_spd_limit = target_limit;

	/* Request another EH round by returning -EAGAIN if link is
	 * going faster than the target speed.  Forward progress is
	 * guaranteed by setting sata_spd_limit to target_limit above.
	 */
	if (plink->sata_spd > target) {
		ata_dev_printk(dev, KERN_INFO,
			       "applying link speed limit horkage to %s\n",
			       sata_spd_string(target));
		return -EAGAIN;
	}
	return 0;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
		return 0;

	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_printk(dev, KERN_INFO,
			       "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_WARNING,
			       "WARNING: ATAPI is %s, device ignored.\n",
			       atapi_enabled ?
"not supported with this driver" 2371 : "disabled"); 2372 ata_dev_disable(dev); 2373 return 0; 2374 } 2375 2376 rc = ata_do_link_spd_horkage(dev); 2377 if (rc) 2378 return rc; 2379 2380 /* let ACPI work its magic */ 2381 rc = ata_acpi_on_devcfg(dev); 2382 if (rc) 2383 return rc; 2384 2385 /* massage HPA, do it early as it might change IDENTIFY data */ 2386 rc = ata_hpa_resize(dev); 2387 if (rc) 2388 return rc; 2389 2390 /* print device capabilities */ 2391 if (ata_msg_probe(ap)) 2392 ata_dev_printk(dev, KERN_DEBUG, 2393 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2394 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2395 __func__, 2396 id[49], id[82], id[83], id[84], 2397 id[85], id[86], id[87], id[88]); 2398 2399 /* initialize to-be-configured parameters */ 2400 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2401 dev->max_sectors = 0; 2402 dev->cdb_len = 0; 2403 dev->n_sectors = 0; 2404 dev->cylinders = 0; 2405 dev->heads = 0; 2406 dev->sectors = 0; 2407 dev->multi_count = 0; 2408 2409 /* 2410 * common ATA, ATAPI feature tests 2411 */ 2412 2413 /* find max transfer mode; for printk only */ 2414 xfer_mask = ata_id_xfermask(id); 2415 2416 if (ata_msg_probe(ap)) 2417 ata_dump_id(id); 2418 2419 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2420 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2421 sizeof(fwrevbuf)); 2422 2423 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2424 sizeof(modelbuf)); 2425 2426 /* ATA-specific feature tests */ 2427 if (dev->class == ATA_DEV_ATA) { 2428 if (ata_id_is_cfa(id)) { 2429 /* CPRM may make this media unusable */ 2430 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2431 ata_dev_printk(dev, KERN_WARNING, 2432 "supports DRM functions and may " 2433 "not be fully accessable.\n"); 2434 snprintf(revbuf, 7, "CFA"); 2435 } else { 2436 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2437 /* Warn the user if the device has TPM extensions */ 2438 if (ata_id_has_tpm(id)) 2439 ata_dev_printk(dev, KERN_WARNING, 2440 "supports DRM functions and may " 2441 "not be fully accessable.\n"); 2442 } 2443 2444 dev->n_sectors = ata_id_n_sectors(id); 2445 2446 /* get current R/W Multiple count setting */ 2447 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { 2448 unsigned int max = dev->id[47] & 0xff; 2449 unsigned int cnt = dev->id[59] & 0xff; 2450 /* only recognize/allow powers of two here */ 2451 if (is_power_of_2(max) && is_power_of_2(cnt)) 2452 if (cnt <= max) 2453 dev->multi_count = cnt; 2454 } 2455 2456 if (ata_id_has_lba(id)) { 2457 const char *lba_desc; 2458 char ncq_desc[20]; 2459 2460 lba_desc = "LBA"; 2461 dev->flags |= ATA_DFLAG_LBA; 2462 if (ata_id_has_lba48(id)) { 2463 dev->flags |= ATA_DFLAG_LBA48; 2464 lba_desc = "LBA48"; 2465 2466 if (dev->n_sectors >= (1UL << 28) && 2467 ata_id_has_flush_ext(id)) 2468 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2469 } 2470 2471 /* config NCQ */ 2472 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2473 2474 /* print device info to dmesg */ 2475 if (ata_msg_drv(ap) && print_info) { 2476 ata_dev_printk(dev, KERN_INFO, 2477 "%s: %s, %s, max %s\n", 2478 revbuf, modelbuf, fwrevbuf, 2479 ata_mode_string(xfer_mask)); 2480 ata_dev_printk(dev, KERN_INFO, 2481 "%Lu sectors, multi %u: %s %s\n", 2482 (unsigned long long)dev->n_sectors, 2483 dev->multi_count, lba_desc, ncq_desc); 2484 } 2485 } else { 2486 /* CHS */ 2487 2488 /* Default translation */ 2489 dev->cylinders = id[1]; 2490 dev->heads = id[3]; 2491 dev->sectors = id[6]; 2492 2493 if (ata_id_current_chs_valid(id)) { 2494 /* Current CHS translation is valid. 
				 */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					       "%s: %s, %s, max %s\n",
					       revbuf, modelbuf, fwrevbuf,
					       ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					       "%Lu sectors, multi %u, CHS %u/%u/%u\n",
					       (unsigned long long)dev->n_sectors,
					       dev->multi_count, dev->cylinders,
					       dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					       "failed to enable ATAPI AN "
					       "(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string,
				       dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know.  We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot.  Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
				"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
				"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
			       "firmware update to be fully functional.\n");
		ata_dev_printk(dev, KERN_WARNING, " contact the vendor "
			       "or visit http://ata.wiki.kernel.org.\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __func__);
	return rc;
}

/**
 *	ata_cable_40wire - return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}

/**
 *	ata_cable_80wire - return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}

/**
 *	ata_cable_unknown - return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}

/**
 *	ata_cable_ignore - return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}

/**
 *	ata_cable_sata - return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
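/*
 * Illustrative sketch (not from the original source): these helpers are
 * meant to be plugged straight into a driver's port operations when the
 * cable type is fixed by the board rather than detected.  A
 * hypothetical PATA LLD hardwiring a 40-wire cable would do:
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 *
 * ata_bus_probe() below invokes ->cable_detect once PDIAG- has been
 * released by the slave device.
 */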
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing.  If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI.  Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights.  Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}

/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}

/**
 *	sata_print_link_status - Print SATA link status
 *	@link: SATA link to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_link *link)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (ata_phys_link_online(link)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_link_printk(link, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_link_printk(link, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}

/**
 *	ata_dev_pair - return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or NULL if none
 *	is present.
 */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_link *link = adev->link;
	struct ata_device *pair = &link->device[1 - adev->devno];

	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}

/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
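/*
 * Illustrative sketch (not from the original source): ata_dev_pair() is
 * typically used by PATA LLDs whose timing registers are shared between
 * master and slave, e.g. to clamp to the slower of the two devices:
 *
 *	static void foo_set_piomode(struct ata_port *ap,
 *				    struct ata_device *adev)
 *	{
 *		struct ata_device *pair = ata_dev_pair(adev);
 *		u8 mode = adev->pio_mode;
 *
 *		if (pair && pair->pio_mode < mode)
 *			mode = pair->pio_mode;	- share the slower timing
 *		foo_program_pio_timings(ap, adev, mode);
 *	}
 *
 * foo_program_pio_timings() stands in for a hypothetical
 * chipset-specific register-programming helper.
 */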
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}

static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit, target, spd;

	limit = link->sata_spd_limit;

	/* Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
	 */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	if (limit == UINT_MAX)
		target = 0;
	else
		target = fls(limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return spd != target;
}

/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
static int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 1;

	return __sata_set_spd_needed(link, &scontrol);
}

/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@link: Link to set SATA spd for
 *
 *	Set SATA spd of @link according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
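/*
 * Illustrative sketch (not from the original source): the limit/apply
 * split above means a caller lowers the limit first and then makes it
 * take effect, usually via a hardreset since the SPD limit lives in
 * SControl and the PHY only renegotiates on reset:
 *
 *	if (sata_down_spd_limit(link, 0) == 0 && sata_set_spd(link) == 1)
 *		- a hardreset is now needed for the new speed to apply
 *
 * This is the pattern used when a flaky link keeps failing; see the
 * -EIO handling in ata_bus_probe() above.
 */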
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },

	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },

	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
	{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },

/*	{ XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },

	{ 0xFF }
};

#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->dmack_hold = EZ(t->dmack_hold * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP) m->setup = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B) m->act8b = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B) m->rec8b = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B) m->cyc8b = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE) m->active = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
	if (what & ATA_TIMING_CYCLE) m->cycle = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA) m->udma = max(a->udma, b->udma);
}

const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
{
	const struct ata_timing *t = ata_timing;

	while (xfer_mode > t->mode)
		t++;

	if (xfer_mode == t->mode)
		return t;
	return NULL;
}
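/*
 * Worked example (not from the original source) of the rounding above:
 * ENOUGH() is a divide that rounds up, so quantizing the 25 ns PIO4
 * setup time to a 30 ns bus clock (T = 30000 after the * 1000 scaling
 * in ata_timing_quantize()) gives ENOUGH(25000, 30000) = 1 cycle,
 * while the 120 ns cycle time needs ENOUGH(120000, 30000) = 4 cycles.
 * EZ() keeps zero entries (fields a mode doesn't use) at zero instead
 * of rounding them up to one cycle.
 */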
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}

/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
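/*
 * Illustrative sketch (not from the original source): a PATA LLD's
 * ->set_piomode hook typically feeds ata_timing_compute() its bus clock
 * period in ns and writes the quantized counts to chipset registers.
 * With a hypothetical 30 ns (33 MHz) clock:
 *
 *	static void foo_set_piomode(struct ata_port *ap,
 *				    struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *
 *		if (ata_timing_compute(adev, adev->pio_mode, &t, 30, 0))
 *			return;
 *		foo_write_timings(ap, adev, t.setup, t.active, t.recover);
 *	}
 *
 * foo_write_timings() stands in for the chipset-specific register
 * programming.
 */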
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}

static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
		    dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}
	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		       dev_err_whine);

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
		       "(err_mask=0x%x)\n", err_mask);
	return -EIO;
}

/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);

		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status.
	 * If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}

/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
	int warned = 0;

	/* Slave readiness can't be tested separately from master.  On
	 * M/S emulation configuration, this function should be called
	 * only on the master and it will handle both master and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/* -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  For example,
		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
		 * GoVault needs even more than that.  Wait for
		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_printk(link, KERN_WARNING,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		msleep(50);
	}
}
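/*
 * Illustrative sketch (not from the original source): @check_ready is
 * usually a thin wrapper around the controller's idea of BSY.  A
 * hypothetical native driver polling its own status register might
 * pass:
 *
 *	static int foo_check_ready(struct ata_link *link)
 *	{
 *		u32 status = foo_read_port_status(link->ap);
 *
 *		if (status & FOO_STAT_NODEV)	- hypothetical bits
 *			return -ENODEV;
 *		return !(status & FOO_STAT_BSY);
 *	}
 *
 *	rc = ata_wait_ready(link, deadline, foo_check_ready);
 */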
/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	msleep(ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}

/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constrains the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
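/*
 * Illustrative note (not from the original source): callers pick one of
 * the sata_deb_timing_* tables declared at the top of this file to
 * match how noisy the PHY is expected to be, e.g. after a hotplug
 * event:
 *
 *	rc = sata_link_debounce(link, sata_deb_timing_hotplug, deadline);
 *
 * sata_link_resume() below does the same internally via @params.
 */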
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	return rc != -EINVAL ? rc : 0;
}

/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* no point in trying softreset on offline link */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}

/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback to check link readiness
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *	After hardreset, link readiness is waited upon using
 *	ata_wait_ready() if @check_ready is specified.  LLDs are
 *	allowed to not specify @check_ready and wait by themselves
 *	after this function returns.  Device classification is LLD's
 *	responsibility.
 *
 *	*@online is set to one iff reset succeeded and @link is online
 *	after reset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for
		 * ATA_TMOUT_PMP_SRST_WAIT.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}

/**
 *	sata_std_hardreset - COMRESET w/o waiting or classification
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	Standard SATA COMRESET w/o waiting or classification.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 if link offline, -EAGAIN if link online, -errno on errors.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;
	int rc;

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
	return online ? -EAGAIN : rc;
}

/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* reset complete, clear SError */
	if (!sata_scr_read(link, SCR_ERROR, &serror))
		sata_scr_write(link, SCR_ERROR, serror);

	/* print link status */
	sata_print_link_status(link);

	DPRINTK("EXIT\n");
}
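/*
 * Illustrative sketch (not from the original source): an LLD with a
 * reliable readiness test composes its hardreset from the pieces
 * above, waiting and classifying itself instead of returning -EAGAIN
 * the way sata_std_hardreset() does:
 *
 *	static int foo_hardreset(struct ata_link *link, unsigned int *class,
 *				 unsigned long deadline)
 *	{
 *		const unsigned long *timing =
 *			sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *		int rc;
 *
 *		rc = sata_link_hardreset(link, timing, deadline, &online,
 *					 foo_check_ready);
 *		if (online)
 *			*class = foo_dev_classify(link->ap);	- hypothetical
 *		return rc;
 *	}
 */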
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	return 1;
}

/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
4076 * 4077 * LOCKING: 4078 * Kernel thread context (may sleep) 4079 * 4080 * RETURNS: 4081 * 0 on success, negative errno otherwise 4082 */ 4083 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 4084 unsigned int readid_flags) 4085 { 4086 u64 n_sectors = dev->n_sectors; 4087 int rc; 4088 4089 if (!ata_dev_enabled(dev)) 4090 return -ENODEV; 4091 4092 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 4093 if (ata_class_enabled(new_class) && 4094 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) { 4095 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n", 4096 dev->class, new_class); 4097 rc = -ENODEV; 4098 goto fail; 4099 } 4100 4101 /* re-read ID */ 4102 rc = ata_dev_reread_id(dev, readid_flags); 4103 if (rc) 4104 goto fail; 4105 4106 /* configure device according to the new ID */ 4107 rc = ata_dev_configure(dev); 4108 if (rc) 4109 goto fail; 4110 4111 /* verify n_sectors hasn't changed */ 4112 if (dev->class == ATA_DEV_ATA && n_sectors && 4113 dev->n_sectors != n_sectors) { 4114 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch " 4115 "%llu != %llu\n", 4116 (unsigned long long)n_sectors, 4117 (unsigned long long)dev->n_sectors); 4118 4119 /* restore original n_sectors */ 4120 dev->n_sectors = n_sectors; 4121 4122 rc = -ENODEV; 4123 goto fail; 4124 } 4125 4126 return 0; 4127 4128 fail: 4129 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc); 4130 return rc; 4131 } 4132 4133 struct ata_blacklist_entry { 4134 const char *model_num; 4135 const char *model_rev; 4136 unsigned long horkage; 4137 }; 4138 4139 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4140 /* Devices with DMA related problems under Linux */ 4141 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4142 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4143 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4144 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4145 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4146 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4147 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4148 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4149 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4150 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA }, 4151 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA }, 4152 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4153 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4154 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4155 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4156 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4157 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA }, 4158 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA }, 4159 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4160 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4161 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4162 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4163 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4164 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4165 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4166 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4167 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4168 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4169 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4170 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4171 /* Odd clown on sil3726/4726 PMPs */ 4172 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4173 4174 /* Weird ATAPI devices */ 4175 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4176 { "QUANTUM DAT DAT72-000", 
NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4177 4178 /* Devices we expect to fail diagnostics */ 4179 4180 /* Devices where NCQ should be avoided */ 4181 /* NCQ is slow */ 4182 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4183 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4184 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4185 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4186 /* NCQ is broken */ 4187 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4188 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4189 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4190 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4191 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4192 4193 /* Seagate NCQ + FLUSH CACHE firmware bug */ 4194 { "ST31500341AS", "SD15", ATA_HORKAGE_NONCQ | 4195 ATA_HORKAGE_FIRMWARE_WARN }, 4196 { "ST31500341AS", "SD16", ATA_HORKAGE_NONCQ | 4197 ATA_HORKAGE_FIRMWARE_WARN }, 4198 { "ST31500341AS", "SD17", ATA_HORKAGE_NONCQ | 4199 ATA_HORKAGE_FIRMWARE_WARN }, 4200 { "ST31500341AS", "SD18", ATA_HORKAGE_NONCQ | 4201 ATA_HORKAGE_FIRMWARE_WARN }, 4202 { "ST31500341AS", "SD19", ATA_HORKAGE_NONCQ | 4203 ATA_HORKAGE_FIRMWARE_WARN }, 4204 4205 { "ST31000333AS", "SD15", ATA_HORKAGE_NONCQ | 4206 ATA_HORKAGE_FIRMWARE_WARN }, 4207 { "ST31000333AS", "SD16", ATA_HORKAGE_NONCQ | 4208 ATA_HORKAGE_FIRMWARE_WARN }, 4209 { "ST31000333AS", "SD17", ATA_HORKAGE_NONCQ | 4210 ATA_HORKAGE_FIRMWARE_WARN }, 4211 { "ST31000333AS", "SD18", ATA_HORKAGE_NONCQ | 4212 ATA_HORKAGE_FIRMWARE_WARN }, 4213 { "ST31000333AS", "SD19", ATA_HORKAGE_NONCQ | 4214 ATA_HORKAGE_FIRMWARE_WARN }, 4215 4216 { "ST3640623AS", "SD15", ATA_HORKAGE_NONCQ | 4217 ATA_HORKAGE_FIRMWARE_WARN }, 4218 { "ST3640623AS", "SD16", ATA_HORKAGE_NONCQ | 4219 ATA_HORKAGE_FIRMWARE_WARN }, 4220 { "ST3640623AS", "SD17", ATA_HORKAGE_NONCQ | 4221 ATA_HORKAGE_FIRMWARE_WARN }, 4222 { "ST3640623AS", "SD18", ATA_HORKAGE_NONCQ | 4223 ATA_HORKAGE_FIRMWARE_WARN }, 4224 { "ST3640623AS", "SD19", ATA_HORKAGE_NONCQ | 4225 ATA_HORKAGE_FIRMWARE_WARN }, 4226 4227 { "ST3640323AS", "SD15", ATA_HORKAGE_NONCQ | 4228 ATA_HORKAGE_FIRMWARE_WARN }, 4229 { "ST3640323AS", "SD16", ATA_HORKAGE_NONCQ | 4230 ATA_HORKAGE_FIRMWARE_WARN }, 4231 { "ST3640323AS", "SD17", ATA_HORKAGE_NONCQ | 4232 ATA_HORKAGE_FIRMWARE_WARN }, 4233 { "ST3640323AS", "SD18", ATA_HORKAGE_NONCQ | 4234 ATA_HORKAGE_FIRMWARE_WARN }, 4235 { "ST3640323AS", "SD19", ATA_HORKAGE_NONCQ | 4236 ATA_HORKAGE_FIRMWARE_WARN }, 4237 4238 { "ST3320813AS", "SD15", ATA_HORKAGE_NONCQ | 4239 ATA_HORKAGE_FIRMWARE_WARN }, 4240 { "ST3320813AS", "SD16", ATA_HORKAGE_NONCQ | 4241 ATA_HORKAGE_FIRMWARE_WARN }, 4242 { "ST3320813AS", "SD17", ATA_HORKAGE_NONCQ | 4243 ATA_HORKAGE_FIRMWARE_WARN }, 4244 { "ST3320813AS", "SD18", ATA_HORKAGE_NONCQ | 4245 ATA_HORKAGE_FIRMWARE_WARN }, 4246 { "ST3320813AS", "SD19", ATA_HORKAGE_NONCQ | 4247 ATA_HORKAGE_FIRMWARE_WARN }, 4248 4249 { "ST3320613AS", "SD15", ATA_HORKAGE_NONCQ | 4250 ATA_HORKAGE_FIRMWARE_WARN }, 4251 { "ST3320613AS", "SD16", ATA_HORKAGE_NONCQ | 4252 ATA_HORKAGE_FIRMWARE_WARN }, 4253 { "ST3320613AS", "SD17", ATA_HORKAGE_NONCQ | 4254 ATA_HORKAGE_FIRMWARE_WARN }, 4255 { "ST3320613AS", "SD18", ATA_HORKAGE_NONCQ | 4256 ATA_HORKAGE_FIRMWARE_WARN }, 4257 { "ST3320613AS", "SD19", ATA_HORKAGE_NONCQ | 4258 ATA_HORKAGE_FIRMWARE_WARN }, 4259 4260 /* Blacklist entries taken from Silicon Image 3124/3132 4261 Windows driver .inf file - also several Linux problem reports */ 4262 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4263 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4264 { 
"HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4265 4266 /* devices which puke on READ_NATIVE_MAX */ 4267 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4268 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4269 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4270 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4271 4272 /* Devices which report 1 sector over size HPA */ 4273 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4274 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4275 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4276 4277 /* Devices which get the IVB wrong */ 4278 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4279 /* Maybe we should just blacklist TSSTcorp... */ 4280 { "TSSTcorp CDDVDW SH-S202H", "SB00", ATA_HORKAGE_IVB, }, 4281 { "TSSTcorp CDDVDW SH-S202H", "SB01", ATA_HORKAGE_IVB, }, 4282 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, }, 4283 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, }, 4284 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, }, 4285 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, }, 4286 4287 /* Devices that do not need bridging limits applied */ 4288 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4289 4290 /* Devices which aren't very happy with higher link speeds */ 4291 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4292 4293 /* End Marker */ 4294 { } 4295 }; 4296 4297 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar) 4298 { 4299 const char *p; 4300 int len; 4301 4302 /* 4303 * check for trailing wildcard: *\0 4304 */ 4305 p = strchr(patt, wildchar); 4306 if (p && ((*(p + 1)) == 0)) 4307 len = p - patt; 4308 else { 4309 len = strlen(name); 4310 if (!len) { 4311 if (!*patt) 4312 return 0; 4313 return -1; 4314 } 4315 } 4316 4317 return strncmp(patt, name, len); 4318 } 4319 4320 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4321 { 4322 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4323 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4324 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4325 4326 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4327 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4328 4329 while (ad->model_num) { 4330 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) { 4331 if (ad->model_rev == NULL) 4332 return ad->horkage; 4333 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*')) 4334 return ad->horkage; 4335 } 4336 ad++; 4337 } 4338 return 0; 4339 } 4340 4341 static int ata_dma_blacklisted(const struct ata_device *dev) 4342 { 4343 /* We don't support polling DMA. 4344 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4345 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4346 */ 4347 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4348 (dev->flags & ATA_DFLAG_CDB_INTR)) 4349 return 1; 4350 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4351 } 4352 4353 /** 4354 * ata_is_40wire - check drive side detection 4355 * @dev: device 4356 * 4357 * Perform drive side detection decoding, allowing for device vendors 4358 * who can't follow the documentation. 
4359 */ 4360 4361 static int ata_is_40wire(struct ata_device *dev) 4362 { 4363 if (dev->horkage & ATA_HORKAGE_IVB) 4364 return ata_drive_40wire_relaxed(dev->id); 4365 return ata_drive_40wire(dev->id); 4366 } 4367 4368 /** 4369 * cable_is_40wire - 40/80/SATA decider 4370 * @ap: port to consider 4371 * 4372 * This function encapsulates the policy for speed management 4373 * in one place. At the moment we don't cache the result but 4374 * there is a good case for setting ap->cbl to the result when 4375 * we are called with unknown cables (and figuring out if it 4376 * impacts hotplug at all). 4377 * 4378 * Return 1 if the cable appears to be 40 wire. 4379 */ 4380 4381 static int cable_is_40wire(struct ata_port *ap) 4382 { 4383 struct ata_link *link; 4384 struct ata_device *dev; 4385 4386 /* If the controller thinks we are 40 wire, we are. */ 4387 if (ap->cbl == ATA_CBL_PATA40) 4388 return 1; 4389 4390 /* If the controller thinks we are 80 wire, we are. */ 4391 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4392 return 0; 4393 4394 /* If the system is known to be 40 wire short cable (eg 4395 * laptop), then we allow 80 wire modes even if the drive 4396 * isn't sure. 4397 */ 4398 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4399 return 0; 4400 4401 /* If the controller doesn't know, we scan. 4402 * 4403 * Note: We look for all 40 wire detects at this point. Any 4404 * 80 wire detect is taken to be 80 wire cable because 4405 * - in many setups only the one drive (slave if present) will 4406 * give a valid detect 4407 * - if you have a non detect capable drive you don't want it 4408 * to colour the choice 4409 */ 4410 ata_for_each_link(link, ap, EDGE) { 4411 ata_for_each_dev(dev, link, ENABLED) { 4412 if (!ata_is_40wire(dev)) 4413 return 0; 4414 } 4415 } 4416 return 1; 4417 } 4418 4419 /** 4420 * ata_dev_xfermask - Compute supported xfermask of the given device 4421 * @dev: Device to compute xfermask for 4422 * 4423 * Compute supported xfermask of @dev and store it in 4424 * dev->*_mask. This function is responsible for applying all 4425 * known limits including host controller limits, device 4426 * blacklist, etc... 4427 * 4428 * LOCKING: 4429 * None. 
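 *
 * Example (an illustrative sketch only): the packed mask keeps the
 * PIO, MWDMA and UDMA bits in disjoint fields of one word, so limits
 * like the ones applied here reduce to plain bit operations.  Dropping
 * all UDMA modes from a mask built from the common ATA_PIO4/ATA_MWDMA2/
 * ATA_UDMA5 limits might look like
 *
 *	unsigned long pio, mwdma, udma;
 *	unsigned long mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2,
 *					       ATA_UDMA5);
 *
 *	mask &= ~ATA_MASK_UDMA;
 *	ata_unpack_xfermask(mask, &pio, &mwdma, &udma);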
4430 */ 4431 static void ata_dev_xfermask(struct ata_device *dev) 4432 { 4433 struct ata_link *link = dev->link; 4434 struct ata_port *ap = link->ap; 4435 struct ata_host *host = ap->host; 4436 unsigned long xfer_mask; 4437 4438 /* controller modes available */ 4439 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4440 ap->mwdma_mask, ap->udma_mask); 4441 4442 /* drive modes available */ 4443 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4444 dev->mwdma_mask, dev->udma_mask); 4445 xfer_mask &= ata_id_xfermask(dev->id); 4446 4447 /* 4448 * CFA Advanced TrueIDE timings are not allowed on a shared 4449 * cable 4450 */ 4451 if (ata_dev_pair(dev)) { 4452 /* No PIO5 or PIO6 */ 4453 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4454 /* No MWDMA3 or MWDMA 4 */ 4455 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4456 } 4457 4458 if (ata_dma_blacklisted(dev)) { 4459 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4460 ata_dev_printk(dev, KERN_WARNING, 4461 "device is on DMA blacklist, disabling DMA\n"); 4462 } 4463 4464 if ((host->flags & ATA_HOST_SIMPLEX) && 4465 host->simplex_claimed && host->simplex_claimed != ap) { 4466 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4467 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " 4468 "other device, disabling DMA\n"); 4469 } 4470 4471 if (ap->flags & ATA_FLAG_NO_IORDY) 4472 xfer_mask &= ata_pio_mask_no_iordy(dev); 4473 4474 if (ap->ops->mode_filter) 4475 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4476 4477 /* Apply cable rule here. Don't apply it early because when 4478 * we handle hot plug the cable type can itself change. 4479 * Check this last so that we know if the transfer rate was 4480 * solely limited by the cable. 4481 * Unknown or 80 wire cables reported host side are checked 4482 * drive side as well. Cases where we know a 40wire cable 4483 * is used safely for 80 are not checked here. 4484 */ 4485 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4486 /* UDMA/44 or higher would be available */ 4487 if (cable_is_40wire(ap)) { 4488 ata_dev_printk(dev, KERN_WARNING, 4489 "limited to UDMA/33 due to 40-wire cable\n"); 4490 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4491 } 4492 4493 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4494 &dev->mwdma_mask, &dev->udma_mask); 4495 } 4496 4497 /** 4498 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4499 * @dev: Device to which command will be sent 4500 * 4501 * Issue SET FEATURES - XFER MODE command to device @dev 4502 * on port @ap. 4503 * 4504 * LOCKING: 4505 * PCI/etc. bus probe sem. 4506 * 4507 * RETURNS: 4508 * 0 on success, AC_ERR_* mask otherwise. 4509 */ 4510 4511 static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4512 { 4513 struct ata_taskfile tf; 4514 unsigned int err_mask; 4515 4516 /* set up set-features taskfile */ 4517 DPRINTK("set features - xfer mode\n"); 4518 4519 /* Some controllers and ATAPI devices show flaky interrupt 4520 * behavior after setting xfer mode. Use polling instead. 
*/ 4522 ata_tf_init(dev, &tf); 4523 tf.command = ATA_CMD_SET_FEATURES; 4524 tf.feature = SETFEATURES_XFER; 4525 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4526 tf.protocol = ATA_PROT_NODATA; 4527 /* If we are using IORDY we must send the mode setting command */ 4528 if (ata_pio_need_iordy(dev)) 4529 tf.nsect = dev->xfer_mode; 4530 /* If the device has IORDY and the controller does not - turn it off */ 4531 else if (ata_id_has_iordy(dev->id)) 4532 tf.nsect = 0x01; 4533 else /* In the ancient relic department - skip all of this */ 4534 return 0; 4535 4536 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4537 4538 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4539 return err_mask; 4540 } 4541 /** 4542 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 4543 * @dev: Device to which command will be sent 4544 * @enable: Whether to enable or disable the feature 4545 * @feature: The feature to set, passed in the sector count field 4546 * 4547 * Issue SET FEATURES - SATA FEATURES command to device @dev 4548 * on port @ap with the sector count set to @feature. 4549 * 4550 * LOCKING: 4551 * PCI/etc. bus probe sem. 4552 * 4553 * RETURNS: 4554 * 0 on success, AC_ERR_* mask otherwise. 4555 */ 4556 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, 4557 u8 feature) 4558 { 4559 struct ata_taskfile tf; 4560 unsigned int err_mask; 4561 4562 /* set up set-features taskfile */ 4563 DPRINTK("set features - SATA features\n"); 4564 4565 ata_tf_init(dev, &tf); 4566 tf.command = ATA_CMD_SET_FEATURES; 4567 tf.feature = enable; 4568 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4569 tf.protocol = ATA_PROT_NODATA; 4570 tf.nsect = feature; 4571 4572 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4573 4574 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4575 return err_mask; 4576 } 4577 4578 /** 4579 * ata_dev_init_params - Issue INIT DEV PARAMS command 4580 * @dev: Device to which command will be sent 4581 * @heads: Number of heads (taskfile parameter) 4582 * @sectors: Number of sectors (taskfile parameter) 4583 * 4584 * LOCKING: 4585 * Kernel thread context (may sleep) 4586 * 4587 * RETURNS: 4588 * 0 on success, AC_ERR_* mask otherwise. 4589 */ 4590 static unsigned int ata_dev_init_params(struct ata_device *dev, 4591 u16 heads, u16 sectors) 4592 { 4593 struct ata_taskfile tf; 4594 unsigned int err_mask; 4595 4596 /* Number of sectors per track 1-255. Number of heads 1-16 */ 4597 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4598 return AC_ERR_INVALID; 4599 4600 /* set up init dev params taskfile */ 4601 DPRINTK("init dev params\n"); 4602 4603 ata_tf_init(dev, &tf); 4604 tf.command = ATA_CMD_INIT_DEV_PARAMS; 4605 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4606 tf.protocol = ATA_PROT_NODATA; 4607 tf.nsect = sectors; 4608 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 4609 4610 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4611 /* A clean abort indicates an original or just-out-of-spec drive, 4612 and we should continue as we issue the setup based on the 4613 drive-reported working geometry */ 4614 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4615 err_mask = 0; 4616 4617 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4618 return err_mask; 4619 } 4620 4621 /** 4622 * ata_sg_clean - Unmap DMA memory associated with command 4623 * @qc: Command containing DMA memory to be released 4624 * 4625 * Unmap all mapped DMA memory associated with this command.
* 4627 * LOCKING: 4628 * spin_lock_irqsave(host lock) 4629 */ 4630 void ata_sg_clean(struct ata_queued_cmd *qc) 4631 { 4632 struct ata_port *ap = qc->ap; 4633 struct scatterlist *sg = qc->sg; 4634 int dir = qc->dma_dir; 4635 4636 WARN_ON_ONCE(sg == NULL); 4637 4638 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4639 4640 if (qc->n_elem) 4641 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 4642 4643 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4644 qc->sg = NULL; 4645 } 4646 4647 /** 4648 * atapi_check_dma - Check whether ATAPI DMA can be supported 4649 * @qc: Metadata associated with taskfile to check 4650 * 4651 * Allow low-level driver to filter ATA PACKET commands, returning 4652 * a status indicating whether or not it is OK to use DMA for the 4653 * supplied PACKET command. 4654 * 4655 * LOCKING: 4656 * spin_lock_irqsave(host lock) 4657 * 4658 * RETURNS: 0 when ATAPI DMA can be used 4659 * nonzero otherwise 4660 */ 4661 int atapi_check_dma(struct ata_queued_cmd *qc) 4662 { 4663 struct ata_port *ap = qc->ap; 4664 4665 /* Don't allow DMA if the transfer length isn't a multiple of 16 4666 * bytes. Quite a few ATAPI devices choke on such DMA requests. 4667 */ 4668 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4669 unlikely(qc->nbytes & 15)) 4670 return 1; 4671 4672 if (ap->ops->check_atapi_dma) 4673 return ap->ops->check_atapi_dma(qc); 4674 4675 return 0; 4676 } 4677 4678 /** 4679 * ata_std_qc_defer - Check whether a qc needs to be deferred 4680 * @qc: ATA command in question 4681 * 4682 * Non-NCQ commands cannot run with any other command, NCQ or 4683 * not. As the upper layer only knows the queue depth, we are 4684 * responsible for maintaining exclusion. This function checks 4685 * whether a new command @qc can be issued. 4686 * 4687 * LOCKING: 4688 * spin_lock_irqsave(host lock) 4689 * 4690 * RETURNS: 4691 * ATA_DEFER_* if deferring is needed, 0 otherwise. 4692 */ 4693 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4694 { 4695 struct ata_link *link = qc->dev->link; 4696 4697 if (qc->tf.protocol == ATA_PROT_NCQ) { 4698 if (!ata_tag_valid(link->active_tag)) 4699 return 0; 4700 } else { 4701 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4702 return 0; 4703 } 4704 4705 return ATA_DEFER_LINK; 4706 } 4707 4708 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4709 4710 /** 4711 * ata_sg_init - Associate command with scatter-gather table. 4712 * @qc: Command to be associated 4713 * @sg: Scatter-gather table. 4714 * @n_elem: Number of elements in s/g table. 4715 * 4716 * Initialize the data-related elements of queued_cmd @qc 4717 * to point to a scatter-gather table @sg, containing @n_elem 4718 * elements. 4719 * 4720 * LOCKING: 4721 * spin_lock_irqsave(host lock) 4722 */ 4723 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4724 unsigned int n_elem) 4725 { 4726 qc->sg = sg; 4727 qc->n_elem = n_elem; 4728 qc->cursg = qc->sg; 4729 } 4730 4731 /** 4732 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4733 * @qc: Command with scatter-gather table to be mapped. 4734 * 4735 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4736 * 4737 * LOCKING: 4738 * spin_lock_irqsave(host lock) 4739 * 4740 * RETURNS: 4741 * Zero on success, negative on error.
* 4743 */ 4744 static int ata_sg_setup(struct ata_queued_cmd *qc) 4745 { 4746 struct ata_port *ap = qc->ap; 4747 unsigned int n_elem; 4748 4749 VPRINTK("ENTER, ata%u\n", ap->print_id); 4750 4751 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 4752 if (n_elem < 1) 4753 return -1; 4754 4755 DPRINTK("%d sg elements mapped\n", n_elem); 4756 qc->orig_n_elem = qc->n_elem; 4757 qc->n_elem = n_elem; 4758 qc->flags |= ATA_QCFLAG_DMAMAP; 4759 4760 return 0; 4761 } 4762 4763 /** 4764 * swap_buf_le16 - swap halves of 16-bit words in place 4765 * @buf: Buffer to swap 4766 * @buf_words: Number of 16-bit words in buffer. 4767 * 4768 * Swap halves of 16-bit words if needed to convert from 4769 * little-endian byte order to native cpu byte order, or 4770 * vice-versa. 4771 * 4772 * LOCKING: 4773 * Inherited from caller. 4774 */ 4775 void swap_buf_le16(u16 *buf, unsigned int buf_words) 4776 { 4777 #ifdef __BIG_ENDIAN 4778 unsigned int i; 4779 4780 for (i = 0; i < buf_words; i++) 4781 buf[i] = le16_to_cpu(buf[i]); 4782 #endif /* __BIG_ENDIAN */ 4783 } 4784 4785 /** 4786 * ata_qc_new - Request an available ATA command for queueing 4787 * @ap: target port 4788 * 4789 * LOCKING: 4790 * None. 4791 */ 4792 4793 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4794 { 4795 struct ata_queued_cmd *qc = NULL; 4796 unsigned int i; 4797 4798 /* no command while frozen */ 4799 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4800 return NULL; 4801 4802 /* the last tag is reserved for the internal command. */ 4803 for (i = 0; i < ATA_MAX_QUEUE - 1; i++) 4804 if (!test_and_set_bit(i, &ap->qc_allocated)) { 4805 qc = __ata_qc_from_tag(ap, i); 4806 break; 4807 } 4808 4809 if (qc) 4810 qc->tag = i; 4811 4812 return qc; 4813 } 4814 4815 /** 4816 * ata_qc_new_init - Request an available ATA command, and initialize it 4817 * @dev: Device from which we request an available command structure 4818 * 4819 * LOCKING: 4820 * None. 4821 */ 4822 4823 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) 4824 { 4825 struct ata_port *ap = dev->link->ap; 4826 struct ata_queued_cmd *qc; 4827 4828 qc = ata_qc_new(ap); 4829 if (qc) { 4830 qc->scsicmd = NULL; 4831 qc->ap = ap; 4832 qc->dev = dev; 4833 4834 ata_qc_reinit(qc); 4835 } 4836 4837 return qc; 4838 } 4839 4840 /** 4841 * ata_qc_free - free unused ata_queued_cmd 4842 * @qc: Command to free 4843 * 4844 * Designed to free an unused ata_queued_cmd object 4845 * in case something prevents using it.
4846 * 4847 * LOCKING: 4848 * spin_lock_irqsave(host lock) 4849 */ 4850 void ata_qc_free(struct ata_queued_cmd *qc) 4851 { 4852 struct ata_port *ap = qc->ap; 4853 unsigned int tag; 4854 4855 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4856 4857 qc->flags = 0; 4858 tag = qc->tag; 4859 if (likely(ata_tag_valid(tag))) { 4860 qc->tag = ATA_TAG_POISON; 4861 clear_bit(tag, &ap->qc_allocated); 4862 } 4863 } 4864 4865 void __ata_qc_complete(struct ata_queued_cmd *qc) 4866 { 4867 struct ata_port *ap = qc->ap; 4868 struct ata_link *link = qc->dev->link; 4869 4870 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4871 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4872 4873 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4874 ata_sg_clean(qc); 4875 4876 /* command should be marked inactive atomically with qc completion */ 4877 if (qc->tf.protocol == ATA_PROT_NCQ) { 4878 link->sactive &= ~(1 << qc->tag); 4879 if (!link->sactive) 4880 ap->nr_active_links--; 4881 } else { 4882 link->active_tag = ATA_TAG_POISON; 4883 ap->nr_active_links--; 4884 } 4885 4886 /* clear exclusive status */ 4887 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 4888 ap->excl_link == link)) 4889 ap->excl_link = NULL; 4890 4891 /* atapi: mark qc as inactive to prevent the interrupt handler 4892 * from completing the command twice later, before the error handler 4893 * is called. (when rc != 0 and atapi request sense is needed) 4894 */ 4895 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4896 ap->qc_active &= ~(1 << qc->tag); 4897 4898 /* call completion callback */ 4899 qc->complete_fn(qc); 4900 } 4901 4902 static void fill_result_tf(struct ata_queued_cmd *qc) 4903 { 4904 struct ata_port *ap = qc->ap; 4905 4906 qc->result_tf.flags = qc->tf.flags; 4907 ap->ops->qc_fill_rtf(qc); 4908 } 4909 4910 static void ata_verify_xfer(struct ata_queued_cmd *qc) 4911 { 4912 struct ata_device *dev = qc->dev; 4913 4914 if (ata_tag_internal(qc->tag)) 4915 return; 4916 4917 if (ata_is_nodata(qc->tf.protocol)) 4918 return; 4919 4920 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 4921 return; 4922 4923 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 4924 } 4925 4926 /** 4927 * ata_qc_complete - Complete an active ATA command 4928 * @qc: Command to complete 4929 * 4930 * Indicate to the mid and upper layers that an ATA 4931 * command has completed, with either an ok or not-ok status. 4932 * 4933 * LOCKING: 4934 * spin_lock_irqsave(host lock) 4935 */ 4936 void ata_qc_complete(struct ata_queued_cmd *qc) 4937 { 4938 struct ata_port *ap = qc->ap; 4939 4940 /* XXX: New EH and old EH use different mechanisms to 4941 * synchronize EH with regular execution path. 4942 * 4943 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 4944 * Normal execution path is responsible for not accessing a 4945 * failed qc. libata core enforces the rule by returning NULL 4946 * from ata_qc_from_tag() for failed qcs. 4947 * 4948 * Old EH depends on ata_qc_complete() nullifying completion 4949 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 4950 * not synchronize with interrupt handler. Only PIO task is 4951 * taken care of. 
*/ 4953 if (ap->ops->error_handler) { 4954 struct ata_device *dev = qc->dev; 4955 struct ata_eh_info *ehi = &dev->link->eh_info; 4956 4957 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 4958 4959 if (unlikely(qc->err_mask)) 4960 qc->flags |= ATA_QCFLAG_FAILED; 4961 4962 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4963 if (!ata_tag_internal(qc->tag)) { 4964 /* always fill result TF for failed qc */ 4965 fill_result_tf(qc); 4966 ata_qc_schedule_eh(qc); 4967 return; 4968 } 4969 } 4970 4971 /* read result TF if requested */ 4972 if (qc->flags & ATA_QCFLAG_RESULT_TF) 4973 fill_result_tf(qc); 4974 4975 /* Some commands need post-processing after successful 4976 * completion. 4977 */ 4978 switch (qc->tf.command) { 4979 case ATA_CMD_SET_FEATURES: 4980 if (qc->tf.feature != SETFEATURES_WC_ON && 4981 qc->tf.feature != SETFEATURES_WC_OFF) 4982 break; 4983 /* fall through */ 4984 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 4985 case ATA_CMD_SET_MULTI: /* multi_count changed */ 4986 /* revalidate device */ 4987 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 4988 ata_port_schedule_eh(ap); 4989 break; 4990 4991 case ATA_CMD_SLEEP: 4992 dev->flags |= ATA_DFLAG_SLEEPING; 4993 break; 4994 } 4995 4996 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) 4997 ata_verify_xfer(qc); 4998 4999 __ata_qc_complete(qc); 5000 } else { 5001 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 5002 return; 5003 5004 /* read result TF if failed or requested */ 5005 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 5006 fill_result_tf(qc); 5007 5008 __ata_qc_complete(qc); 5009 } 5010 } 5011 5012 /** 5013 * ata_qc_complete_multiple - Complete multiple qcs successfully 5014 * @ap: port in question 5015 * @qc_active: new qc_active mask 5016 * 5017 * Complete in-flight commands. This function is meant to be 5018 * called from the low-level driver's interrupt routine to complete 5019 * requests normally. ap->qc_active and @qc_active are compared 5020 * and commands are completed accordingly. 5021 * 5022 * LOCKING: 5023 * spin_lock_irqsave(host lock) 5024 * 5025 * RETURNS: 5026 * Number of completed commands on success, -errno otherwise. 5027 */ 5028 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) 5029 { 5030 int nr_done = 0; 5031 u32 done_mask; 5032 int i; 5033 5034 done_mask = ap->qc_active ^ qc_active; 5035 5036 if (unlikely(done_mask & qc_active)) { 5037 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition " 5038 "(%08x->%08x)\n", ap->qc_active, qc_active); 5039 return -EINVAL; 5040 } 5041 5042 for (i = 0; i < ATA_MAX_QUEUE; i++) { 5043 struct ata_queued_cmd *qc; 5044 5045 if (!(done_mask & (1 << i))) 5046 continue; 5047 5048 if ((qc = ata_qc_from_tag(ap, i))) { 5049 ata_qc_complete(qc); 5050 nr_done++; 5051 } 5052 } 5053 5054 return nr_done; 5055 } 5056 5057 /** 5058 * ata_qc_issue - issue taskfile to device 5059 * @qc: command to issue to device 5060 * 5061 * Prepare an ATA command for submission to the device. 5062 * This includes mapping the data into a DMA-able 5063 * area, filling in the S/G table, and finally 5064 * writing the taskfile to hardware, starting the command. 5065 * 5066 * LOCKING: 5067 * spin_lock_irqsave(host lock) 5068 */ 5069 void ata_qc_issue(struct ata_queued_cmd *qc) 5070 { 5071 struct ata_port *ap = qc->ap; 5072 struct ata_link *link = qc->dev->link; 5073 u8 prot = qc->tf.protocol; 5074 5075 /* Make sure only one non-NCQ command is outstanding. The 5076 * check is skipped for old EH because it reuses the active qc to 5077 * request ATAPI sense.
*/ 5079 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5080 5081 if (ata_is_ncq(prot)) { 5082 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); 5083 5084 if (!link->sactive) 5085 ap->nr_active_links++; 5086 link->sactive |= 1 << qc->tag; 5087 } else { 5088 WARN_ON_ONCE(link->sactive); 5089 5090 ap->nr_active_links++; 5091 link->active_tag = qc->tag; 5092 } 5093 5094 qc->flags |= ATA_QCFLAG_ACTIVE; 5095 ap->qc_active |= 1 << qc->tag; 5096 5097 /* We guarantee to LLDs that they will have at least one 5098 * non-zero sg if the command is a data command. 5099 */ 5100 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)); 5101 5102 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5103 (ap->flags & ATA_FLAG_PIO_DMA))) 5104 if (ata_sg_setup(qc)) 5105 goto sg_err; 5106 5107 /* if device is sleeping, schedule reset and abort the link */ 5108 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5109 link->eh_info.action |= ATA_EH_RESET; 5110 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5111 ata_link_abort(link); 5112 return; 5113 } 5114 5115 ap->ops->qc_prep(qc); 5116 5117 qc->err_mask |= ap->ops->qc_issue(qc); 5118 if (unlikely(qc->err_mask)) 5119 goto err; 5120 return; 5121 5122 sg_err: 5123 qc->err_mask |= AC_ERR_SYSTEM; 5124 err: 5125 ata_qc_complete(qc); 5126 } 5127 5128 /** 5129 * sata_scr_valid - test whether SCRs are accessible 5130 * @link: ATA link to test SCR accessibility for 5131 * 5132 * Test whether SCRs are accessible for @link. 5133 * 5134 * LOCKING: 5135 * None. 5136 * 5137 * RETURNS: 5138 * 1 if SCRs are accessible, 0 otherwise. 5139 */ 5140 int sata_scr_valid(struct ata_link *link) 5141 { 5142 struct ata_port *ap = link->ap; 5143 5144 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5145 } 5146 5147 /** 5148 * sata_scr_read - read SCR register of the specified port 5149 * @link: ATA link to read SCR for 5150 * @reg: SCR to read 5151 * @val: Place to store read value 5152 * 5153 * Read SCR register @reg of @link into *@val. This function is 5154 * guaranteed to succeed if @link is ap->link, the cable type of 5155 * the port is SATA and the port implements ->scr_read. 5156 * 5157 * LOCKING: 5158 * None if @link is ap->link. Kernel thread context otherwise. 5159 * 5160 * RETURNS: 5161 * 0 on success, negative errno on failure. 5162 */ 5163 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5164 { 5165 if (ata_is_host_link(link)) { 5166 if (sata_scr_valid(link)) 5167 return link->ap->ops->scr_read(link, reg, val); 5168 return -EOPNOTSUPP; 5169 } 5170 5171 return sata_pmp_scr_read(link, reg, val); 5172 } 5173 5174 /** 5175 * sata_scr_write - write SCR register of the specified port 5176 * @link: ATA link to write SCR for 5177 * @reg: SCR to write 5178 * @val: value to write 5179 * 5180 * Write @val to SCR register @reg of @link. This function is 5181 * guaranteed to succeed if @link is ap->link, the cable type of 5182 * the port is SATA and the port implements ->scr_write. 5183 * 5184 * LOCKING: 5185 * None if @link is ap->link. Kernel thread context otherwise. 5186 * 5187 * RETURNS: 5188 * 0 on success, negative errno on failure.
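 *
 * Example (an illustrative sketch, not from the original source): a
 * typical read-modify-write of SControl, here limiting the link to
 * 1.5 Gbps by setting the SPD field (bits 7:4) to 1:
 *
 *	u32 scontrol;
 *
 *	if (sata_scr_read(link, SCR_CONTROL, &scontrol) == 0) {
 *		scontrol = (scontrol & ~0xf0) | 0x10;
 *		sata_scr_write(link, SCR_CONTROL, scontrol);
 *	}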
5189 */ 5190 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5191 { 5192 if (ata_is_host_link(link)) { 5193 if (sata_scr_valid(link)) 5194 return link->ap->ops->scr_write(link, reg, val); 5195 return -EOPNOTSUPP; 5196 } 5197 5198 return sata_pmp_scr_write(link, reg, val); 5199 } 5200 5201 /** 5202 * sata_scr_write_flush - write SCR register of the specified port and flush 5203 * @link: ATA link to write SCR for 5204 * @reg: SCR to write 5205 * @val: value to write 5206 * 5207 * This function is identical to sata_scr_write() except that this 5208 * function performs flush after writing to the register. 5209 * 5210 * LOCKING: 5211 * None if @link is ap->link. Kernel thread context otherwise. 5212 * 5213 * RETURNS: 5214 * 0 on success, negative errno on failure. 5215 */ 5216 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5217 { 5218 if (ata_is_host_link(link)) { 5219 int rc; 5220 5221 if (sata_scr_valid(link)) { 5222 rc = link->ap->ops->scr_write(link, reg, val); 5223 if (rc == 0) 5224 rc = link->ap->ops->scr_read(link, reg, &val); 5225 return rc; 5226 } 5227 return -EOPNOTSUPP; 5228 } 5229 5230 return sata_pmp_scr_write(link, reg, val); 5231 } 5232 5233 /** 5234 * ata_phys_link_online - test whether the given link is online 5235 * @link: ATA link to test 5236 * 5237 * Test whether @link is online. Note that this function returns 5238 * 0 if online status of @link cannot be obtained, so 5239 * ata_link_online(link) != !ata_link_offline(link). 5240 * 5241 * LOCKING: 5242 * None. 5243 * 5244 * RETURNS: 5245 * True if the port online status is available and online. 5246 */ 5247 bool ata_phys_link_online(struct ata_link *link) 5248 { 5249 u32 sstatus; 5250 5251 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5252 ata_sstatus_online(sstatus)) 5253 return true; 5254 return false; 5255 } 5256 5257 /** 5258 * ata_phys_link_offline - test whether the given link is offline 5259 * @link: ATA link to test 5260 * 5261 * Test whether @link is offline. Note that this function 5262 * returns 0 if offline status of @link cannot be obtained, so 5263 * ata_link_online(link) != !ata_link_offline(link). 5264 * 5265 * LOCKING: 5266 * None. 5267 * 5268 * RETURNS: 5269 * True if the port offline status is available and offline. 5270 */ 5271 bool ata_phys_link_offline(struct ata_link *link) 5272 { 5273 u32 sstatus; 5274 5275 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5276 !ata_sstatus_online(sstatus)) 5277 return true; 5278 return false; 5279 } 5280 5281 /** 5282 * ata_link_online - test whether the given link is online 5283 * @link: ATA link to test 5284 * 5285 * Test whether @link is online. This is identical to 5286 * ata_phys_link_online() when there's no slave link. When 5287 * there's a slave link, this function should only be called on 5288 * the master link and will return true if any of M/S links is 5289 * online. 5290 * 5291 * LOCKING: 5292 * None. 5293 * 5294 * RETURNS: 5295 * True if the port online status is available and online. 5296 */ 5297 bool ata_link_online(struct ata_link *link) 5298 { 5299 struct ata_link *slave = link->ap->slave_link; 5300 5301 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5302 5303 return ata_phys_link_online(link) || 5304 (slave && ata_phys_link_online(slave)); 5305 } 5306 5307 /** 5308 * ata_link_offline - test whether the given link is offline 5309 * @link: ATA link to test 5310 * 5311 * Test whether @link is offline. This is identical to 5312 * ata_phys_link_offline() when there's no slave link. 
When 5313 * there's a slave link, this function should only be called on 5314 * the master link and will return true if both M/S links are 5315 * offline. 5316 * 5317 * LOCKING: 5318 * None. 5319 * 5320 * RETURNS: 5321 * True if the port offline status is available and offline. 5322 */ 5323 bool ata_link_offline(struct ata_link *link) 5324 { 5325 struct ata_link *slave = link->ap->slave_link; 5326 5327 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5328 5329 return ata_phys_link_offline(link) && 5330 (!slave || ata_phys_link_offline(slave)); 5331 } 5332 5333 #ifdef CONFIG_PM 5334 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, 5335 unsigned int action, unsigned int ehi_flags, 5336 int wait) 5337 { 5338 unsigned long flags; 5339 int i, rc; 5340 5341 for (i = 0; i < host->n_ports; i++) { 5342 struct ata_port *ap = host->ports[i]; 5343 struct ata_link *link; 5344 5345 /* Previous resume operation might still be in 5346 * progress. Wait for PM_PENDING to clear. 5347 */ 5348 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5349 ata_port_wait_eh(ap); 5350 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5351 } 5352 5353 /* request PM ops to EH */ 5354 spin_lock_irqsave(ap->lock, flags); 5355 5356 ap->pm_mesg = mesg; 5357 if (wait) { 5358 rc = 0; 5359 ap->pm_result = &rc; 5360 } 5361 5362 ap->pflags |= ATA_PFLAG_PM_PENDING; 5363 ata_for_each_link(link, ap, HOST_FIRST) { 5364 link->eh_info.action |= action; 5365 link->eh_info.flags |= ehi_flags; 5366 } 5367 5368 ata_port_schedule_eh(ap); 5369 5370 spin_unlock_irqrestore(ap->lock, flags); 5371 5372 /* wait and check result */ 5373 if (wait) { 5374 ata_port_wait_eh(ap); 5375 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5376 if (rc) 5377 return rc; 5378 } 5379 } 5380 5381 return 0; 5382 } 5383 5384 /** 5385 * ata_host_suspend - suspend host 5386 * @host: host to suspend 5387 * @mesg: PM message 5388 * 5389 * Suspend @host. Actual operation is performed by EH. This 5390 * function requests EH to perform PM operations and waits for EH 5391 * to finish. 5392 * 5393 * LOCKING: 5394 * Kernel thread context (may sleep). 5395 * 5396 * RETURNS: 5397 * 0 on success, -errno on failure. 5398 */ 5399 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5400 { 5401 int rc; 5402 5403 /* 5404 * disable link pm on all ports before requesting 5405 * any pm activity 5406 */ 5407 ata_lpm_enable(host); 5408 5409 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); 5410 if (rc == 0) 5411 host->dev->power.power_state = mesg; 5412 return rc; 5413 } 5414 5415 /** 5416 * ata_host_resume - resume host 5417 * @host: host to resume 5418 * 5419 * Resume @host. Actual operation is performed by EH. This 5420 * function requests EH to perform PM operations and returns. 5421 * Note that all resume operations are performed in parallel. 5422 * 5423 * LOCKING: 5424 * Kernel thread context (may sleep). 5425 */ 5426 void ata_host_resume(struct ata_host *host) 5427 { 5428 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET, 5429 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 5430 host->dev->power.power_state = PMSG_ON; 5431 5432 /* reenable link pm */ 5433 ata_lpm_disable(host); 5434 } 5435 #endif 5436 5437 /** 5438 * ata_port_start - Set port up for DMA. 5439 * @ap: Port to initialize 5440 * 5441 * Called just after data structures for each port are 5442 * initialized. Allocates space for the PRD table. 5443 * 5444 * May be used as the port_start() entry in ata_port_operations. 5445 * 5446 * LOCKING: 5447 * Inherited from caller.
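 *
 * Illustrative use in an LLD ops table (a sketch; my_port_ops is a
 * hypothetical name):
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.port_start	= ata_port_start,
 *	};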
*/ 5449 int ata_port_start(struct ata_port *ap) 5450 { 5451 struct device *dev = ap->dev; 5452 5453 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, 5454 GFP_KERNEL); 5455 if (!ap->prd) 5456 return -ENOMEM; 5457 5458 return 0; 5459 } 5460 5461 /** 5462 * ata_dev_init - Initialize an ata_device structure 5463 * @dev: Device structure to initialize 5464 * 5465 * Initialize @dev in preparation for probing. 5466 * 5467 * LOCKING: 5468 * Inherited from caller. 5469 */ 5470 void ata_dev_init(struct ata_device *dev) 5471 { 5472 struct ata_link *link = ata_dev_phys_link(dev); 5473 struct ata_port *ap = link->ap; 5474 unsigned long flags; 5475 5476 /* SATA spd limit is bound to the attached device, reset together */ 5477 link->sata_spd_limit = link->hw_sata_spd_limit; 5478 link->sata_spd = 0; 5479 5480 /* High bits of dev->flags are used to record warm plug 5481 * requests which occur asynchronously. Synchronize using 5482 * host lock. 5483 */ 5484 spin_lock_irqsave(ap->lock, flags); 5485 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5486 dev->horkage = 0; 5487 spin_unlock_irqrestore(ap->lock, flags); 5488 5489 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5490 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5491 dev->pio_mask = UINT_MAX; 5492 dev->mwdma_mask = UINT_MAX; 5493 dev->udma_mask = UINT_MAX; 5494 } 5495 5496 /** 5497 * ata_link_init - Initialize an ata_link structure 5498 * @ap: ATA port link is attached to 5499 * @link: Link structure to initialize 5500 * @pmp: Port multiplier port number 5501 * 5502 * Initialize @link. 5503 * 5504 * LOCKING: 5505 * Kernel thread context (may sleep) 5506 */ 5507 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5508 { 5509 int i; 5510 5511 /* clear everything except for devices */ 5512 memset(link, 0, offsetof(struct ata_link, device[0])); 5513 5514 link->ap = ap; 5515 link->pmp = pmp; 5516 link->active_tag = ATA_TAG_POISON; 5517 link->hw_sata_spd_limit = UINT_MAX; 5518 5519 /* can't use iterator, ap isn't initialized yet */ 5520 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5521 struct ata_device *dev = &link->device[i]; 5522 5523 dev->link = link; 5524 dev->devno = dev - link->device; 5525 ata_dev_init(dev); 5526 } 5527 } 5528 5529 /** 5530 * sata_link_init_spd - Initialize link->sata_spd_limit 5531 * @link: Link to configure sata_spd_limit for 5532 * 5533 * Initialize @link->[hw_]sata_spd_limit to the currently 5534 * configured value. 5535 * 5536 * LOCKING: 5537 * Kernel thread context (may sleep). 5538 * 5539 * RETURNS: 5540 * 0 on success, -errno on failure. 5541 */ 5542 int sata_link_init_spd(struct ata_link *link) 5543 { 5544 u8 spd; 5545 int rc; 5546 5547 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5548 if (rc) 5549 return rc; 5550 5551 spd = (link->saved_scontrol >> 4) & 0xf; 5552 if (spd) 5553 link->hw_sata_spd_limit &= (1 << spd) - 1; 5554 5555 ata_force_link_limits(link); 5556 5557 link->sata_spd_limit = link->hw_sata_spd_limit; 5558 5559 return 0; 5560 } 5561 5562 /** 5563 * ata_port_alloc - allocate and initialize basic ATA port resources 5564 * @host: ATA host this allocated port belongs to 5565 * 5566 * Allocate and initialize basic ATA port resources. 5567 * 5568 * RETURNS: 5569 * Allocated ATA port on success, NULL on failure. 5570 * 5571 * LOCKING: 5572 * Inherited from calling layer (may sleep).
*/ 5574 struct ata_port *ata_port_alloc(struct ata_host *host) 5575 { 5576 struct ata_port *ap; 5577 5578 DPRINTK("ENTER\n"); 5579 5580 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5581 if (!ap) 5582 return NULL; 5583 5584 ap->pflags |= ATA_PFLAG_INITIALIZING; 5585 ap->lock = &host->lock; 5586 ap->flags = ATA_FLAG_DISABLED; 5587 ap->print_id = -1; 5588 ap->ctl = ATA_DEVCTL_OBS; 5589 ap->host = host; 5590 ap->dev = host->dev; 5591 ap->last_ctl = 0xFF; 5592 5593 #if defined(ATA_VERBOSE_DEBUG) 5594 /* turn on all debugging levels */ 5595 ap->msg_enable = 0x00FF; 5596 #elif defined(ATA_DEBUG) 5597 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5598 #else 5599 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5600 #endif 5601 5602 #ifdef CONFIG_ATA_SFF 5603 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task); 5604 #else 5605 INIT_DELAYED_WORK(&ap->port_task, NULL); 5606 #endif 5607 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5608 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5609 INIT_LIST_HEAD(&ap->eh_done_q); 5610 init_waitqueue_head(&ap->eh_wait_q); 5611 init_completion(&ap->park_req_pending); 5612 init_timer_deferrable(&ap->fastdrain_timer); 5613 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 5614 ap->fastdrain_timer.data = (unsigned long)ap; 5615 5616 ap->cbl = ATA_CBL_NONE; 5617 5618 ata_link_init(ap, &ap->link, 0); 5619 5620 #ifdef ATA_IRQ_TRAP 5621 ap->stats.unhandled_irq = 1; 5622 ap->stats.idle_irq = 1; 5623 #endif 5624 return ap; 5625 } 5626 5627 static void ata_host_release(struct device *gendev, void *res) 5628 { 5629 struct ata_host *host = dev_get_drvdata(gendev); 5630 int i; 5631 5632 for (i = 0; i < host->n_ports; i++) { 5633 struct ata_port *ap = host->ports[i]; 5634 5635 if (!ap) 5636 continue; 5637 5638 if (ap->scsi_host) 5639 scsi_host_put(ap->scsi_host); 5640 5641 kfree(ap->pmp_link); 5642 kfree(ap->slave_link); 5643 kfree(ap); 5644 host->ports[i] = NULL; 5645 } 5646 5647 dev_set_drvdata(gendev, NULL); 5648 } 5649 5650 /** 5651 * ata_host_alloc - allocate and init basic ATA host resources 5652 * @dev: generic device this host is associated with 5653 * @max_ports: maximum number of ATA ports associated with this host 5654 * 5655 * Allocate and initialize basic ATA host resources. The LLD calls 5656 * this function to allocate a host, initializes it fully and 5657 * attaches it using ata_host_register(). 5658 * 5659 * @max_ports ports are allocated and host->n_ports is 5660 * initialized to @max_ports. The caller is allowed to decrease 5661 * host->n_ports before calling ata_host_register(). The unused 5662 * ports will be automatically freed on registration. 5663 * 5664 * RETURNS: 5665 * Allocated ATA host on success, NULL on failure. 5666 * 5667 * LOCKING: 5668 * Inherited from calling layer (may sleep).
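 *
 * Typical LLD flow (an illustrative sketch; my_sht is a hypothetical
 * scsi_host_template):
 *
 *	host = ata_host_alloc(&pdev->dev, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	(set up host->ports[i]->ops, transfer masks, iomaps, ...)
 *	rc = ata_host_start(host);
 *	if (rc == 0)
 *		rc = ata_host_register(host, &my_sht);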
*/ 5670 struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 5671 { 5672 struct ata_host *host; 5673 size_t sz; 5674 int i; 5675 5676 DPRINTK("ENTER\n"); 5677 5678 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 5679 return NULL; 5680 5681 /* alloc a container for our list of ATA ports (buses) */ 5682 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 5684 host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 5685 if (!host) 5686 goto err_out; 5687 5688 devres_add(dev, host); 5689 dev_set_drvdata(dev, host); 5690 5691 spin_lock_init(&host->lock); 5692 host->dev = dev; 5693 host->n_ports = max_ports; 5694 5695 /* allocate ports bound to this host */ 5696 for (i = 0; i < max_ports; i++) { 5697 struct ata_port *ap; 5698 5699 ap = ata_port_alloc(host); 5700 if (!ap) 5701 goto err_out; 5702 5703 ap->port_no = i; 5704 host->ports[i] = ap; 5705 } 5706 5707 devres_remove_group(dev, NULL); 5708 return host; 5709 5710 err_out: 5711 devres_release_group(dev, NULL); 5712 return NULL; 5713 } 5714 5715 /** 5716 * ata_host_alloc_pinfo - alloc host and init with port_info array 5717 * @dev: generic device this host is associated with 5718 * @ppi: array of ATA port_info to initialize host with 5719 * @n_ports: number of ATA ports attached to this host 5720 * 5721 * Allocate ATA host and initialize with info from @ppi. If NULL 5722 * terminated, @ppi may contain fewer entries than @n_ports. The 5723 * last entry will be used for the remaining ports. 5724 * 5725 * RETURNS: 5726 * Allocated ATA host on success, NULL on failure. 5727 * 5728 * LOCKING: 5729 * Inherited from calling layer (may sleep). 5730 */ 5731 struct ata_host *ata_host_alloc_pinfo(struct device *dev, 5732 const struct ata_port_info * const * ppi, 5733 int n_ports) 5734 { 5735 const struct ata_port_info *pi; 5736 struct ata_host *host; 5737 int i, j; 5738 5739 host = ata_host_alloc(dev, n_ports); 5740 if (!host) 5741 return NULL; 5742 5743 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 5744 struct ata_port *ap = host->ports[i]; 5745 5746 if (ppi[j]) 5747 pi = ppi[j++]; 5748 5749 ap->pio_mask = pi->pio_mask; 5750 ap->mwdma_mask = pi->mwdma_mask; 5751 ap->udma_mask = pi->udma_mask; 5752 ap->flags |= pi->flags; 5753 ap->link.flags |= pi->link_flags; 5754 ap->ops = pi->port_ops; 5755 5756 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 5757 host->ops = pi->port_ops; 5758 } 5759 5760 return host; 5761 } 5762 5763 /** 5764 * ata_slave_link_init - initialize slave link 5765 * @ap: port to initialize slave link for 5766 * 5767 * Create and initialize slave link for @ap. This enables slave 5768 * link handling on the port. 5769 * 5770 * In libata, a port contains links and a link contains devices. 5771 * There is a single host link but if a PMP is attached to it, 5772 * there can be multiple fan-out links. On SATA, there's usually 5773 * a single device connected to a link but PATA and SATA 5774 * controllers emulating a TF-based interface can have two - master 5775 * and slave. 5776 * 5777 * However, there are a few controllers which don't fit into this 5778 * abstraction too well - SATA controllers which emulate TF 5779 * interface with both master and slave devices but also have 5780 * separate SCR register sets for each device. These controllers 5781 * need separate links for physical link handling 5782 * (e.g. onlineness, link speed) but should be treated like a 5783 * traditional M/S controller for everything else (e.g.
command 5784 * issue, softreset). 5785 * 5786 * slave_link is libata's way of handling this class of 5787 * controllers without impacting the core layer too much. For 5788 * anything other than physical link handling, the default host 5789 * link is used for both master and slave. For physical link 5790 * handling, separate @ap->slave_link is used. All dirty details 5791 * are implemented inside libata core layer. From LLD's POV, the 5792 * only difference is that prereset, hardreset and postreset are 5793 * called once more for the slave link, so the reset sequence 5794 * looks like the following. 5795 * 5796 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> 5797 * softreset(M) -> postreset(M) -> postreset(S) 5798 * 5799 * Note that softreset is called only for the master. Softreset 5800 * resets both M/S by definition, so SRST on master should handle 5801 * both (the standard method will work just fine). 5802 * 5803 * LOCKING: 5804 * Should be called before host is registered. 5805 * 5806 * RETURNS: 5807 * 0 on success, -errno on failure. 5808 */ 5809 int ata_slave_link_init(struct ata_port *ap) 5810 { 5811 struct ata_link *link; 5812 5813 WARN_ON(ap->slave_link); 5814 WARN_ON(ap->flags & ATA_FLAG_PMP); 5815 5816 link = kzalloc(sizeof(*link), GFP_KERNEL); 5817 if (!link) 5818 return -ENOMEM; 5819 5820 ata_link_init(ap, link, 1); 5821 ap->slave_link = link; 5822 return 0; 5823 } 5824 5825 static void ata_host_stop(struct device *gendev, void *res) 5826 { 5827 struct ata_host *host = dev_get_drvdata(gendev); 5828 int i; 5829 5830 WARN_ON(!(host->flags & ATA_HOST_STARTED)); 5831 5832 for (i = 0; i < host->n_ports; i++) { 5833 struct ata_port *ap = host->ports[i]; 5834 5835 if (ap->ops->port_stop) 5836 ap->ops->port_stop(ap); 5837 } 5838 5839 if (host->ops->host_stop) 5840 host->ops->host_stop(host); 5841 } 5842 5843 /** 5844 * ata_finalize_port_ops - finalize ata_port_operations 5845 * @ops: ata_port_operations to finalize 5846 * 5847 * An ata_port_operations can inherit from another ops and that 5848 * ops can again inherit from another. This can go on as many 5849 * times as necessary as long as there is no loop in the 5850 * inheritance chain. 5851 * 5852 * Ops tables are finalized when the host is started. NULL or 5853 * unspecified entries are inherited from the closest ancestor 5854 * which has the method and the entry is populated with it. 5855 * After finalization, the ops table directly points to all the 5856 * methods and ->inherits is no longer necessary and cleared. 5857 * 5858 * Using ATA_OP_NULL, inheriting ops can force a method to NULL. 5859 * 5860 * LOCKING: 5861 * None. 5862 */ 5863 static void ata_finalize_port_ops(struct ata_port_operations *ops) 5864 { 5865 static DEFINE_SPINLOCK(lock); 5866 const struct ata_port_operations *cur; 5867 void **begin = (void **)ops; 5868 void **end = (void **)&ops->inherits; 5869 void **pp; 5870 5871 if (!ops || !ops->inherits) 5872 return; 5873 5874 spin_lock(&lock); 5875 5876 for (cur = ops->inherits; cur; cur = cur->inherits) { 5877 void **inherit = (void **)cur; 5878 5879 for (pp = begin; pp < end; pp++, inherit++) 5880 if (!*pp) 5881 *pp = *inherit; 5882 } 5883 5884 for (pp = begin; pp < end; pp++) 5885 if (IS_ERR(*pp)) 5886 *pp = NULL; 5887 5888 ops->inherits = NULL; 5889 5890 spin_unlock(&lock); 5891 } 5892 5893 /** 5894 * ata_host_start - start and freeze ports of an ATA host 5895 * @host: ATA host to start ports for 5896 * 5897 * Start and then freeze ports of @host.
Started status is 5898 * recorded in host->flags, so this function can be called 5899 * multiple times. Ports are guaranteed to get started only 5900 * once. If host->ops isn't initialized yet, it's set to the 5901 * first non-dummy port ops. 5902 * 5903 * LOCKING: 5904 * Inherited from calling layer (may sleep). 5905 * 5906 * RETURNS: 5907 * 0 if all ports are started successfully, -errno otherwise. 5908 */ 5909 int ata_host_start(struct ata_host *host) 5910 { 5911 int have_stop = 0; 5912 void *start_dr = NULL; 5913 int i, rc; 5914 5915 if (host->flags & ATA_HOST_STARTED) 5916 return 0; 5917 5918 ata_finalize_port_ops(host->ops); 5919 5920 for (i = 0; i < host->n_ports; i++) { 5921 struct ata_port *ap = host->ports[i]; 5922 5923 ata_finalize_port_ops(ap->ops); 5924 5925 if (!host->ops && !ata_port_is_dummy(ap)) 5926 host->ops = ap->ops; 5927 5928 if (ap->ops->port_stop) 5929 have_stop = 1; 5930 } 5931 5932 if (host->ops->host_stop) 5933 have_stop = 1; 5934 5935 if (have_stop) { 5936 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 5937 if (!start_dr) 5938 return -ENOMEM; 5939 } 5940 5941 for (i = 0; i < host->n_ports; i++) { 5942 struct ata_port *ap = host->ports[i]; 5943 5944 if (ap->ops->port_start) { 5945 rc = ap->ops->port_start(ap); 5946 if (rc) { 5947 if (rc != -ENODEV) 5948 dev_printk(KERN_ERR, host->dev, 5949 "failed to start port %d " 5950 "(errno=%d)\n", i, rc); 5951 goto err_out; 5952 } 5953 } 5954 ata_eh_freeze_port(ap); 5955 } 5956 5957 if (start_dr) 5958 devres_add(host->dev, start_dr); 5959 host->flags |= ATA_HOST_STARTED; 5960 return 0; 5961 5962 err_out: 5963 while (--i >= 0) { 5964 struct ata_port *ap = host->ports[i]; 5965 5966 if (ap->ops->port_stop) 5967 ap->ops->port_stop(ap); 5968 } 5969 devres_free(start_dr); 5970 return rc; 5971 } 5972 5973 /** 5974 * ata_host_init - Initialize a host struct 5975 * @host: host to initialize 5976 * @dev: device host is attached to 5977 * @flags: host flags 5978 * @ops: port_ops 5979 * 5980 * LOCKING: 5981 * PCI/etc. bus probe sem. 5982 * 5983 */ 5984 /* KILLME - the only user left is ipr */ 5985 void ata_host_init(struct ata_host *host, struct device *dev, 5986 unsigned long flags, struct ata_port_operations *ops) 5987 { 5988 spin_lock_init(&host->lock); 5989 host->dev = dev; 5990 host->flags = flags; 5991 host->ops = ops; 5992 } 5993 5994 5995 static void async_port_probe(void *data, async_cookie_t cookie) 5996 { 5997 int rc; 5998 struct ata_port *ap = data; 5999 6000 /* 6001 * If we're not allowed to scan this host in parallel, 6002 * we need to wait until all previous scans have completed 6003 * before going further. 6004 * Jeff Garzik says this is only within a controller, so we 6005 * don't need to wait for port 0, only for later ports.
static void async_port_probe(void *data, async_cookie_t cookie)
{
	int rc;
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel,
	 * we need to wait until all previous scans have completed
	 * before going further.
	 * Jeff Garzik says this is only within a controller, so we
	 * don't need to wait for port 0, only for later ports.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	/* probe */
	if (ap->ops->error_handler) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		unsigned long flags;

		ata_port_probe(ap);

		/* kick EH for boot probing */
		spin_lock_irqsave(ap->lock, flags);

		ehi->probe_mask |= ATA_ALL_DEVICES;
		ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
		ap->pflags |= ATA_PFLAG_LOADING;
		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait for EH to finish */
		ata_port_wait_eh(ap);
	} else {
		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->print_id);

		if (rc) {
			/* FIXME: do something useful here?
			 * Current libata behavior will
			 * tear down everything when
			 * the module is removed
			 * or the h/w is unplugged.
			 */
		}
	}

	/* in order to keep device order, we need to synchronize at this point */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}

/**
 * ata_host_register - register initialized ATA host
 * @host: ATA host to register
 * @sht: template for SCSI host
 *
 * Register initialized ATA host.  @host is allocated using
 * ata_host_alloc() and fully initialized by LLD.  This function
 * starts ports, registers @host with ATA and SCSI layers and
 * probes registered devices.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO, "%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;
}
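/*
 * Editorial note (not part of the original source): per the
 * ATA_HOST_PARALLEL_SCAN check in async_port_probe() above, a host
 * whose ports can be probed concurrently may opt in before
 * registering, roughly:
 */
#if 0	/* illustration only */
	host->flags |= ATA_HOST_PARALLEL_SCAN;	/* don't serialize port probes */
	rc = ata_host_register(host, sht);
#endif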
/**
 * ata_host_activate - start host, request IRQ and register it
 * @host: target ATA host
 * @irq: IRQ to request
 * @irq_handler: irq_handler used when requesting IRQ
 * @irq_flags: irq_flags used when requesting IRQ
 * @sht: scsi_host_template to use when registering the host
 *
 * After allocating an ATA host and initializing it, most libata
 * LLDs perform three steps to activate the host - start host,
 * request IRQ and register it.  This helper takes necessary
 * arguments and performs the three steps in one go.
 *
 * An invalid IRQ skips the IRQ registration and expects the host to
 * have set polling mode on the port.  In this case, @irq_handler
 * should be NULL.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
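/*
 * Editorial sketch (not part of the original source): the two ways an
 * LLD would call ata_host_activate(), per the kernel-doc above.
 * "example_interrupt" and "example_sht" are hypothetical names.
 */
#if 0	/* illustration only */
	/* interrupt driven */
	rc = ata_host_activate(host, pdev->irq, example_interrupt,
			       IRQF_SHARED, &example_sht);

	/* polling mode: irq == 0 and a NULL handler */
	rc = ata_host_activate(host, 0, NULL, 0, &example_sht);
#endif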
/**
 * ata_port_detach - Detach ATA port in preparation for device removal
 * @ap: ATA port to be detached
 *
 * Detach all ATA devices and the associated SCSI devices of @ap;
 * then, remove the associated SCSI host.  @ap is guaranteed to
 * be quiescent on return from this function.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}

/**
 * ata_host_detach - Detach all ports of an ATA host
 * @host: Host to detach
 *
 * Detach all ports of @host.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}

#ifdef CONFIG_PCI

/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that a hot-unplug or
 * module unload event has occurred.  Detach all ports.  Resource
 * release is handled via devres.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}

#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}

int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
#endif /* CONFIG_PM */
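/*
 * Editorial sketch (not part of the original source): how a libata PCI
 * driver typically wires the removal and PM helpers above into its
 * struct pci_driver.  "example", "example_pci_tbl" and
 * "example_init_one" are hypothetical names.
 */
#if 0	/* illustration only */
static struct pci_driver example_pci_driver = {
	.name		= "example",
	.id_table	= example_pci_tbl,
	.probe		= example_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};
#endif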
#endif /* CONFIG_PCI */

static int __init ata_parse_force_one(char **cur,
				      struct ata_force_ent *force_ent,
				      const char **reason)
{
	/* FIXME: Currently, there's no way to tag init const data and
	 * using __initdata causes build failure on some versions of
	 * gcc.  Once __initdataconst is implemented, add const to the
	 * following structure.
	 */
	static struct ata_force_param force_tbl[] __initdata = {
		{ "40c",	.cbl		= ATA_CBL_PATA40 },
		{ "80c",	.cbl		= ATA_CBL_PATA80 },
		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
		{ "sata",	.cbl		= ATA_CBL_SATA },
		{ "1.5Gbps",	.spd_limit	= 1 },
		{ "3.0Gbps",	.spd_limit	= 2 },
		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
	};
	char *start = *cur, *p = *cur;
	char *id, *val, *endp;
	const struct ata_force_param *match_fp = NULL;
	int nr_matches = 0, i;

	/* find where this param ends and update *cur */
	while (*p != '\0' && *p != ',')
		p++;

	if (*p == '\0')
		*cur = p;
	else
		*cur = p + 1;

	*p = '\0';

	/* parse */
	p = strchr(start, ':');
	if (!p) {
		val = strstrip(start);
		goto parse_val;
	}
	*p = '\0';

	id = strstrip(start);
	val = strstrip(p + 1);

	/* parse id */
	p = strchr(id, '.');
	if (p) {
		*p++ = '\0';
		force_ent->device = simple_strtoul(p, &endp, 10);
		if (p == endp || *endp != '\0') {
			*reason = "invalid device";
			return -EINVAL;
		}
	}

	force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}
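/*
 * Editorial note (not part of the original source): the table above
 * backs the libata.force= boot parameter parsed by
 * ata_parse_force_one().  Example invocations, syntax per
 * Documentation/kernel-parameters.txt (IDs match the "ataP.DD" names
 * printed on the console; a value with no ID applies everywhere):
 *
 *	libata.force=1.5Gbps		limit every link to 1.5Gbps
 *	libata.force=1:noncq		disable NCQ on port 1
 *	libata.force=1.00:udma/33	cap port 1, device 0 at UDMA/33
 *
 * Values may be abbreviated to any unambiguous prefix, so "1.5" works
 * as well as "1.5Gbps".
 */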
"invalid port/link"; 6459 return -EINVAL; 6460 } 6461 6462 parse_val: 6463 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */ 6464 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { 6465 const struct ata_force_param *fp = &force_tbl[i]; 6466 6467 if (strncasecmp(val, fp->name, strlen(val))) 6468 continue; 6469 6470 nr_matches++; 6471 match_fp = fp; 6472 6473 if (strcasecmp(val, fp->name) == 0) { 6474 nr_matches = 1; 6475 break; 6476 } 6477 } 6478 6479 if (!nr_matches) { 6480 *reason = "unknown value"; 6481 return -EINVAL; 6482 } 6483 if (nr_matches > 1) { 6484 *reason = "ambigious value"; 6485 return -EINVAL; 6486 } 6487 6488 force_ent->param = *match_fp; 6489 6490 return 0; 6491 } 6492 6493 static void __init ata_parse_force_param(void) 6494 { 6495 int idx = 0, size = 1; 6496 int last_port = -1, last_device = -1; 6497 char *p, *cur, *next; 6498 6499 /* calculate maximum number of params and allocate force_tbl */ 6500 for (p = ata_force_param_buf; *p; p++) 6501 if (*p == ',') 6502 size++; 6503 6504 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL); 6505 if (!ata_force_tbl) { 6506 printk(KERN_WARNING "ata: failed to extend force table, " 6507 "libata.force ignored\n"); 6508 return; 6509 } 6510 6511 /* parse and populate the table */ 6512 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { 6513 const char *reason = ""; 6514 struct ata_force_ent te = { .port = -1, .device = -1 }; 6515 6516 next = cur; 6517 if (ata_parse_force_one(&next, &te, &reason)) { 6518 printk(KERN_WARNING "ata: failed to parse force " 6519 "parameter \"%s\" (%s)\n", 6520 cur, reason); 6521 continue; 6522 } 6523 6524 if (te.port == -1) { 6525 te.port = last_port; 6526 te.device = last_device; 6527 } 6528 6529 ata_force_tbl[idx++] = te; 6530 6531 last_port = te.port; 6532 last_device = te.device; 6533 } 6534 6535 ata_force_tbl_size = idx; 6536 } 6537 6538 static int __init ata_init(void) 6539 { 6540 ata_parse_force_param(); 6541 6542 ata_wq = create_workqueue("ata"); 6543 if (!ata_wq) 6544 goto free_force_tbl; 6545 6546 ata_aux_wq = create_singlethread_workqueue("ata_aux"); 6547 if (!ata_aux_wq) 6548 goto free_wq; 6549 6550 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6551 return 0; 6552 6553 free_wq: 6554 destroy_workqueue(ata_wq); 6555 free_force_tbl: 6556 kfree(ata_force_tbl); 6557 return -ENOMEM; 6558 } 6559 6560 static void __exit ata_exit(void) 6561 { 6562 kfree(ata_force_tbl); 6563 destroy_workqueue(ata_wq); 6564 destroy_workqueue(ata_aux_wq); 6565 } 6566 6567 subsys_initcall(ata_init); 6568 module_exit(ata_exit); 6569 6570 static unsigned long ratelimit_time; 6571 static DEFINE_SPINLOCK(ata_ratelimit_lock); 6572 6573 int ata_ratelimit(void) 6574 { 6575 int rc; 6576 unsigned long flags; 6577 6578 spin_lock_irqsave(&ata_ratelimit_lock, flags); 6579 6580 if (time_after(jiffies, ratelimit_time)) { 6581 rc = 1; 6582 ratelimit_time = jiffies + (HZ/5); 6583 } else 6584 rc = 0; 6585 6586 spin_unlock_irqrestore(&ata_ratelimit_lock, flags); 6587 6588 return rc; 6589 } 6590 6591 /** 6592 * ata_wait_register - wait until register value changes 6593 * @reg: IO-mapped register 6594 * @mask: Mask to apply to read register value 6595 * @val: Wait condition 6596 * @interval: polling interval in milliseconds 6597 * @timeout: timeout in milliseconds 6598 * 6599 * Waiting for some bits of register to change is a common 6600 * operation for ATA controllers. This function reads 32bit LE 6601 * IO-mapped register @reg and tests for the following condition. 
/**
 * ata_wait_register - wait until register value changes
 * @reg: IO-mapped register
 * @mask: Mask to apply to read register value
 * @val: Wait condition
 * @interval: polling interval in milliseconds
 * @timeout: timeout in milliseconds
 *
 * Waiting for some bits of register to change is a common
 * operation for ATA controllers.  This function reads 32bit LE
 * IO-mapped register @reg and tests for the following condition.
 *
 * (*@reg & mask) != val
 *
 * If the condition is met, it returns; otherwise, the process is
 * repeated after @interval until timeout.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval, unsigned long timeout)
{
	unsigned long deadline;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	deadline = ata_deadline(jiffies, timeout);

	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		msleep(interval);
		tmp = ioread32(reg);
	}

	return tmp;
}
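/*
 * Editorial sketch (not part of the original source): waiting for a
 * busy bit to clear with ata_wait_register().  "mmio", EXAMPLE_STATUS
 * and EXAMPLE_BUSY are hypothetical; passing val == mask makes the
 * helper loop while the bit is still set.
 */
#if 0	/* illustration only */
	u32 status;

	/* poll every 10ms, give up after 1000ms */
	status = ata_wait_register(mmio + EXAMPLE_STATUS,
				   EXAMPLE_BUSY, EXAMPLE_BUSY, 10, 1000);
	if (status & EXAMPLE_BUSY)
		return -EBUSY;	/* timed out, bit never cleared */
#endif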
/*
 * Dummy port_ops
 */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}

struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.error_handler		= ata_dummy_error_handler,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_base_port_ops);
EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_link_next);
EXPORT_SYMBOL_GPL(ata_dev_next);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_slave_link_init);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_queue_task);
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_std_error_handler);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);