1 /* 2 * libata-eh.c - libata error handling 3 * 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails. 7 * 8 * Copyright 2006 Tejun Heo <htejun@gmail.com> 9 * 10 * 11 * This program is free software; you can redistribute it and/or 12 * modify it under the terms of the GNU General Public License as 13 * published by the Free Software Foundation; either version 2, or 14 * (at your option) any later version. 15 * 16 * This program is distributed in the hope that it will be useful, 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; see the file COPYING. If not, write to 23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, 24 * USA. 25 * 26 * 27 * libata documentation is available via 'make {ps|pdf}docs', 28 * as Documentation/DocBook/libata.* 29 * 30 * Hardware documentation available from http://www.t13.org/ and 31 * http://www.sata-io.org/ 32 * 33 */ 34 35 #include <linux/kernel.h> 36 #include <linux/pci.h> 37 #include <scsi/scsi.h> 38 #include <scsi/scsi_host.h> 39 #include <scsi/scsi_eh.h> 40 #include <scsi/scsi_device.h> 41 #include <scsi/scsi_cmnd.h> 42 #include "../scsi/scsi_transport_api.h" 43 44 #include <linux/libata.h> 45 46 #include "libata.h" 47 48 enum { 49 /* speed down verdicts */ 50 ATA_EH_SPDN_NCQ_OFF = (1 << 0), 51 ATA_EH_SPDN_SPEED_DOWN = (1 << 1), 52 ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), 53 ATA_EH_SPDN_KEEP_ERRORS = (1 << 3), 54 55 /* error flags */ 56 ATA_EFLAG_IS_IO = (1 << 0), 57 ATA_EFLAG_DUBIOUS_XFER = (1 << 1), 58 59 /* error categories */ 60 ATA_ECAT_NONE = 0, 61 ATA_ECAT_ATA_BUS = 1, 62 ATA_ECAT_TOUT_HSM = 2, 63 ATA_ECAT_UNK_DEV = 3, 64 ATA_ECAT_DUBIOUS_NONE = 4, 65 ATA_ECAT_DUBIOUS_ATA_BUS = 5, 66 ATA_ECAT_DUBIOUS_TOUT_HSM = 6, 67 ATA_ECAT_DUBIOUS_UNK_DEV = 7, 68 ATA_ECAT_NR = 8, 69 }; 70 71 /* Waiting in ->prereset can never be reliable. It's sometimes nice 72 * to wait there but it can't be depended upon; otherwise, we wouldn't 73 * be resetting. Just give it enough time for most drives to spin up. 74 */ 75 enum { 76 ATA_EH_PRERESET_TIMEOUT = 10 * HZ, 77 ATA_EH_FASTDRAIN_INTERVAL = 3 * HZ, 78 }; 79 80 /* The following table determines how we sequence resets. Each entry 81 * represents timeout for that try. The first try can be soft or 82 * hardreset. All others are hardreset if available. In most cases 83 * the first reset w/ 10sec timeout should succeed. Following entries 84 * are mostly for error handling, hotplug and retarded devices. 
85 */ 86 static const unsigned long ata_eh_reset_timeouts[] = { 87 10 * HZ, /* most drives spin up by 10sec */ 88 10 * HZ, /* > 99% working drives spin up before 20sec */ 89 35 * HZ, /* give > 30 secs of idleness for retarded devices */ 90 5 * HZ, /* and sweet one last chance */ 91 /* > 1 min has elapsed, give up */ 92 }; 93 94 static void __ata_port_freeze(struct ata_port *ap); 95 #ifdef CONFIG_PM 96 static void ata_eh_handle_port_suspend(struct ata_port *ap); 97 static void ata_eh_handle_port_resume(struct ata_port *ap); 98 #else /* CONFIG_PM */ 99 static void ata_eh_handle_port_suspend(struct ata_port *ap) 100 { } 101 102 static void ata_eh_handle_port_resume(struct ata_port *ap) 103 { } 104 #endif /* CONFIG_PM */ 105 106 static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, 107 va_list args) 108 { 109 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, 110 ATA_EH_DESC_LEN - ehi->desc_len, 111 fmt, args); 112 } 113 114 /** 115 * __ata_ehi_push_desc - push error description without adding separator 116 * @ehi: target EHI 117 * @fmt: printf format string 118 * 119 * Format string according to @fmt and append it to @ehi->desc. 120 * 121 * LOCKING: 122 * spin_lock_irqsave(host lock) 123 */ 124 void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 125 { 126 va_list args; 127 128 va_start(args, fmt); 129 __ata_ehi_pushv_desc(ehi, fmt, args); 130 va_end(args); 131 } 132 133 /** 134 * ata_ehi_push_desc - push error description with separator 135 * @ehi: target EHI 136 * @fmt: printf format string 137 * 138 * Format string according to @fmt and append it to @ehi->desc. 139 * If @ehi->desc is not empty, ", " is added in-between. 140 * 141 * LOCKING: 142 * spin_lock_irqsave(host lock) 143 */ 144 void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 145 { 146 va_list args; 147 148 if (ehi->desc_len) 149 __ata_ehi_push_desc(ehi, ", "); 150 151 va_start(args, fmt); 152 __ata_ehi_pushv_desc(ehi, fmt, args); 153 va_end(args); 154 } 155 156 /** 157 * ata_ehi_clear_desc - clean error description 158 * @ehi: target EHI 159 * 160 * Clear @ehi->desc. 161 * 162 * LOCKING: 163 * spin_lock_irqsave(host lock) 164 */ 165 void ata_ehi_clear_desc(struct ata_eh_info *ehi) 166 { 167 ehi->desc[0] = '\0'; 168 ehi->desc_len = 0; 169 } 170 171 /** 172 * ata_port_desc - append port description 173 * @ap: target ATA port 174 * @fmt: printf format string 175 * 176 * Format string according to @fmt and append it to port 177 * description. If port description is not empty, " " is added 178 * in-between. This function is to be used while initializing 179 * ata_host. The description is printed on host registration. 180 * 181 * LOCKING: 182 * None. 183 */ 184 void ata_port_desc(struct ata_port *ap, const char *fmt, ...) 185 { 186 va_list args; 187 188 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); 189 190 if (ap->link.eh_info.desc_len) 191 __ata_ehi_push_desc(&ap->link.eh_info, " "); 192 193 va_start(args, fmt); 194 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); 195 va_end(args); 196 } 197 198 #ifdef CONFIG_PCI 199 200 /** 201 * ata_port_pbar_desc - append PCI BAR description 202 * @ap: target ATA port 203 * @bar: target PCI BAR 204 * @offset: offset into PCI BAR 205 * @name: name of the area 206 * 207 * If @offset is negative, this function formats a string which 208 * contains the name, address, size and type of the BAR and 209 * appends it to the port description. If @offset is zero or 210 * positive, only name and offsetted address is appended. 
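 *
 *	For example (illustrative values only, not taken from real
 *	hardware): with @name "bmdma", a 16-byte memory BAR starting at
 *	0xfebfb000 and a negative @offset, the appended description would
 *	be "bmdma m16@0xfebfb000"; with @offset 8 it would be
 *	"bmdma 0xfebfb008".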
211 * 212 * LOCKING: 213 * None. 214 */ 215 void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, 216 const char *name) 217 { 218 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 219 char *type = ""; 220 unsigned long long start, len; 221 222 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 223 type = "m"; 224 else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 225 type = "i"; 226 227 start = (unsigned long long)pci_resource_start(pdev, bar); 228 len = (unsigned long long)pci_resource_len(pdev, bar); 229 230 if (offset < 0) 231 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); 232 else 233 ata_port_desc(ap, "%s 0x%llx", name, 234 start + (unsigned long long)offset); 235 } 236 237 #endif /* CONFIG_PCI */ 238 239 static void ata_ering_record(struct ata_ering *ering, unsigned int eflags, 240 unsigned int err_mask) 241 { 242 struct ata_ering_entry *ent; 243 244 WARN_ON(!err_mask); 245 246 ering->cursor++; 247 ering->cursor %= ATA_ERING_SIZE; 248 249 ent = &ering->ring[ering->cursor]; 250 ent->eflags = eflags; 251 ent->err_mask = err_mask; 252 ent->timestamp = get_jiffies_64(); 253 } 254 255 static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering) 256 { 257 struct ata_ering_entry *ent = &ering->ring[ering->cursor]; 258 259 if (ent->err_mask) 260 return ent; 261 return NULL; 262 } 263 264 static void ata_ering_clear(struct ata_ering *ering) 265 { 266 memset(ering, 0, sizeof(*ering)); 267 } 268 269 static int ata_ering_map(struct ata_ering *ering, 270 int (*map_fn)(struct ata_ering_entry *, void *), 271 void *arg) 272 { 273 int idx, rc = 0; 274 struct ata_ering_entry *ent; 275 276 idx = ering->cursor; 277 do { 278 ent = &ering->ring[idx]; 279 if (!ent->err_mask) 280 break; 281 rc = map_fn(ent, arg); 282 if (rc) 283 break; 284 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; 285 } while (idx != ering->cursor); 286 287 return rc; 288 } 289 290 static unsigned int ata_eh_dev_action(struct ata_device *dev) 291 { 292 struct ata_eh_context *ehc = &dev->link->eh_context; 293 294 return ehc->i.action | ehc->i.dev_action[dev->devno]; 295 } 296 297 static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev, 298 struct ata_eh_info *ehi, unsigned int action) 299 { 300 struct ata_device *tdev; 301 302 if (!dev) { 303 ehi->action &= ~action; 304 ata_link_for_each_dev(tdev, link) 305 ehi->dev_action[tdev->devno] &= ~action; 306 } else { 307 /* doesn't make sense for port-wide EH actions */ 308 WARN_ON(!(action & ATA_EH_PERDEV_MASK)); 309 310 /* break ehi->action into ehi->dev_action */ 311 if (ehi->action & action) { 312 ata_link_for_each_dev(tdev, link) 313 ehi->dev_action[tdev->devno] |= 314 ehi->action & action; 315 ehi->action &= ~action; 316 } 317 318 /* turn off the specified per-dev action */ 319 ehi->dev_action[dev->devno] &= ~action; 320 } 321 } 322 323 /** 324 * ata_scsi_timed_out - SCSI layer time out callback 325 * @cmd: timed out SCSI command 326 * 327 * Handles SCSI layer timeout. We race with normal completion of 328 * the qc for @cmd. If the qc is already gone, we lose and let 329 * the scsi command finish (EH_HANDLED). Otherwise, the qc has 330 * timed out and EH should be invoked. Prevent ata_qc_complete() 331 * from finishing it by setting EH_SCHEDULED and return 332 * EH_NOT_HANDLED. 333 * 334 * TODO: kill this function once old EH is gone. 
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum scsi_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = EH_NOT_HANDLED;
		goto out;
	}

	ret = EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout. When normal
	 * completion wins, the qc never reaches EH. When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point. In such cases, both types of
	 * completions are honored. A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown. Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs. This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		__ata_port_for_each_link(link, ap) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_link_for_each_dev(dev, link) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else
			ata_eh_finish(ap);

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* An exception might have happened after ->error_handler
		 * recovered the port but before this point. Repeat
		 * EH in such a case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		__ata_port_for_each_link(link, ap)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
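 *
 *	As implemented below, this waits on ap->eh_wait_q until both
 *	ATA_PFLAG_EH_PENDING and ATA_PFLAG_EH_IN_PROGRESS are clear and
 *	then re-checks that SCSI EH has also finished before returning.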
571 * 572 * LOCKING: 573 * Kernel thread context (may sleep). 574 */ 575 void ata_port_wait_eh(struct ata_port *ap) 576 { 577 unsigned long flags; 578 DEFINE_WAIT(wait); 579 580 retry: 581 spin_lock_irqsave(ap->lock, flags); 582 583 while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { 584 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); 585 spin_unlock_irqrestore(ap->lock, flags); 586 schedule(); 587 spin_lock_irqsave(ap->lock, flags); 588 } 589 finish_wait(&ap->eh_wait_q, &wait); 590 591 spin_unlock_irqrestore(ap->lock, flags); 592 593 /* make sure SCSI EH is complete */ 594 if (scsi_host_in_recovery(ap->scsi_host)) { 595 msleep(10); 596 goto retry; 597 } 598 } 599 600 static int ata_eh_nr_in_flight(struct ata_port *ap) 601 { 602 unsigned int tag; 603 int nr = 0; 604 605 /* count only non-internal commands */ 606 for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) 607 if (ata_qc_from_tag(ap, tag)) 608 nr++; 609 610 return nr; 611 } 612 613 void ata_eh_fastdrain_timerfn(unsigned long arg) 614 { 615 struct ata_port *ap = (void *)arg; 616 unsigned long flags; 617 int cnt; 618 619 spin_lock_irqsave(ap->lock, flags); 620 621 cnt = ata_eh_nr_in_flight(ap); 622 623 /* are we done? */ 624 if (!cnt) 625 goto out_unlock; 626 627 if (cnt == ap->fastdrain_cnt) { 628 unsigned int tag; 629 630 /* No progress during the last interval, tag all 631 * in-flight qcs as timed out and freeze the port. 632 */ 633 for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) { 634 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 635 if (qc) 636 qc->err_mask |= AC_ERR_TIMEOUT; 637 } 638 639 ata_port_freeze(ap); 640 } else { 641 /* some qcs have finished, give it another chance */ 642 ap->fastdrain_cnt = cnt; 643 ap->fastdrain_timer.expires = 644 jiffies + ATA_EH_FASTDRAIN_INTERVAL; 645 add_timer(&ap->fastdrain_timer); 646 } 647 648 out_unlock: 649 spin_unlock_irqrestore(ap->lock, flags); 650 } 651 652 /** 653 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain 654 * @ap: target ATA port 655 * @fastdrain: activate fast drain 656 * 657 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain 658 * is non-zero and EH wasn't pending before. Fast drain ensures 659 * that EH kicks in in timely manner. 660 * 661 * LOCKING: 662 * spin_lock_irqsave(host lock) 663 */ 664 static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) 665 { 666 int cnt; 667 668 /* already scheduled? */ 669 if (ap->pflags & ATA_PFLAG_EH_PENDING) 670 return; 671 672 ap->pflags |= ATA_PFLAG_EH_PENDING; 673 674 if (!fastdrain) 675 return; 676 677 /* do we have in-flight qcs? */ 678 cnt = ata_eh_nr_in_flight(ap); 679 if (!cnt) 680 return; 681 682 /* activate fast drain */ 683 ap->fastdrain_cnt = cnt; 684 ap->fastdrain_timer.expires = jiffies + ATA_EH_FASTDRAIN_INTERVAL; 685 add_timer(&ap->fastdrain_timer); 686 } 687 688 /** 689 * ata_qc_schedule_eh - schedule qc for error handling 690 * @qc: command to schedule error handling for 691 * 692 * Schedule error handling for @qc. EH will kick in as soon as 693 * other commands are drained. 694 * 695 * LOCKING: 696 * spin_lock_irqsave(host lock) 697 */ 698 void ata_qc_schedule_eh(struct ata_queued_cmd *qc) 699 { 700 struct ata_port *ap = qc->ap; 701 702 WARN_ON(!ap->ops->error_handler); 703 704 qc->flags |= ATA_QCFLAG_FAILED; 705 ata_eh_set_pending(ap, 1); 706 707 /* The following will fail if timeout has already expired. 708 * ata_scsi_error() takes care of such scmds on EH entry. 
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	scsi_req_abort_cmd(qc->scsicmd);
}

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap. EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all qc's active on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when an HSM violation or some other
 *	condition disrupts normal operation of the port. A frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine). If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
838 */ 839 int ata_port_freeze(struct ata_port *ap) 840 { 841 int nr_aborted; 842 843 WARN_ON(!ap->ops->error_handler); 844 845 nr_aborted = ata_port_abort(ap); 846 __ata_port_freeze(ap); 847 848 return nr_aborted; 849 } 850 851 /** 852 * sata_async_notification - SATA async notification handler 853 * @ap: ATA port where async notification is received 854 * 855 * Handler to be called when async notification via SDB FIS is 856 * received. This function schedules EH if necessary. 857 * 858 * LOCKING: 859 * spin_lock_irqsave(host lock) 860 * 861 * RETURNS: 862 * 1 if EH is scheduled, 0 otherwise. 863 */ 864 int sata_async_notification(struct ata_port *ap) 865 { 866 u32 sntf; 867 int rc; 868 869 if (!(ap->flags & ATA_FLAG_AN)) 870 return 0; 871 872 rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); 873 if (rc == 0) 874 sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); 875 876 if (!sata_pmp_attached(ap) || rc) { 877 /* PMP is not attached or SNTF is not available */ 878 if (!sata_pmp_attached(ap)) { 879 /* PMP is not attached. Check whether ATAPI 880 * AN is configured. If so, notify media 881 * change. 882 */ 883 struct ata_device *dev = ap->link.device; 884 885 if ((dev->class == ATA_DEV_ATAPI) && 886 (dev->flags & ATA_DFLAG_AN)) 887 ata_scsi_media_change_notify(dev); 888 return 0; 889 } else { 890 /* PMP is attached but SNTF is not available. 891 * ATAPI async media change notification is 892 * not used. The PMP must be reporting PHY 893 * status change, schedule EH. 894 */ 895 ata_port_schedule_eh(ap); 896 return 1; 897 } 898 } else { 899 /* PMP is attached and SNTF is available */ 900 struct ata_link *link; 901 902 /* check and notify ATAPI AN */ 903 ata_port_for_each_link(link, ap) { 904 if (!(sntf & (1 << link->pmp))) 905 continue; 906 907 if ((link->device->class == ATA_DEV_ATAPI) && 908 (link->device->flags & ATA_DFLAG_AN)) 909 ata_scsi_media_change_notify(link->device); 910 } 911 912 /* If PMP is reporting that PHY status of some 913 * downstream ports has changed, schedule EH. 914 */ 915 if (sntf & (1 << SATA_PMP_CTRL_PORT)) { 916 ata_port_schedule_eh(ap); 917 return 1; 918 } 919 920 return 0; 921 } 922 } 923 924 /** 925 * ata_eh_freeze_port - EH helper to freeze port 926 * @ap: ATA port to freeze 927 * 928 * Freeze @ap. 929 * 930 * LOCKING: 931 * None. 932 */ 933 void ata_eh_freeze_port(struct ata_port *ap) 934 { 935 unsigned long flags; 936 937 if (!ap->ops->error_handler) 938 return; 939 940 spin_lock_irqsave(ap->lock, flags); 941 __ata_port_freeze(ap); 942 spin_unlock_irqrestore(ap->lock, flags); 943 } 944 945 /** 946 * ata_port_thaw_port - EH helper to thaw port 947 * @ap: ATA port to thaw 948 * 949 * Thaw frozen port @ap. 950 * 951 * LOCKING: 952 * None. 
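 *
 *	Illustrative pairing (a sketch of how ata_eh_reset() below uses
 *	the freeze/thaw helpers, not a required calling convention):
 *
 *		ata_eh_freeze_port(ap);
 *		rc = ata_do_reset(link, reset, classes, deadline);
 *		ata_eh_thaw_port(ap);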
953 */ 954 void ata_eh_thaw_port(struct ata_port *ap) 955 { 956 unsigned long flags; 957 958 if (!ap->ops->error_handler) 959 return; 960 961 spin_lock_irqsave(ap->lock, flags); 962 963 ap->pflags &= ~ATA_PFLAG_FROZEN; 964 965 if (ap->ops->thaw) 966 ap->ops->thaw(ap); 967 968 spin_unlock_irqrestore(ap->lock, flags); 969 970 DPRINTK("ata%u port thawed\n", ap->print_id); 971 } 972 973 static void ata_eh_scsidone(struct scsi_cmnd *scmd) 974 { 975 /* nada */ 976 } 977 978 static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) 979 { 980 struct ata_port *ap = qc->ap; 981 struct scsi_cmnd *scmd = qc->scsicmd; 982 unsigned long flags; 983 984 spin_lock_irqsave(ap->lock, flags); 985 qc->scsidone = ata_eh_scsidone; 986 __ata_qc_complete(qc); 987 WARN_ON(ata_tag_valid(qc->tag)); 988 spin_unlock_irqrestore(ap->lock, flags); 989 990 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 991 } 992 993 /** 994 * ata_eh_qc_complete - Complete an active ATA command from EH 995 * @qc: Command to complete 996 * 997 * Indicate to the mid and upper layers that an ATA command has 998 * completed. To be used from EH. 999 */ 1000 void ata_eh_qc_complete(struct ata_queued_cmd *qc) 1001 { 1002 struct scsi_cmnd *scmd = qc->scsicmd; 1003 scmd->retries = scmd->allowed; 1004 __ata_eh_qc_complete(qc); 1005 } 1006 1007 /** 1008 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH 1009 * @qc: Command to retry 1010 * 1011 * Indicate to the mid and upper layers that an ATA command 1012 * should be retried. To be used from EH. 1013 * 1014 * SCSI midlayer limits the number of retries to scmd->allowed. 1015 * scmd->retries is decremented for commands which get retried 1016 * due to unrelated failures (qc->err_mask is zero). 1017 */ 1018 void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1019 { 1020 struct scsi_cmnd *scmd = qc->scsicmd; 1021 if (!qc->err_mask && scmd->retries) 1022 scmd->retries--; 1023 __ata_eh_qc_complete(qc); 1024 } 1025 1026 /** 1027 * ata_eh_detach_dev - detach ATA device 1028 * @dev: ATA device to detach 1029 * 1030 * Detach @dev. 1031 * 1032 * LOCKING: 1033 * None. 1034 */ 1035 void ata_eh_detach_dev(struct ata_device *dev) 1036 { 1037 struct ata_link *link = dev->link; 1038 struct ata_port *ap = link->ap; 1039 unsigned long flags; 1040 1041 ata_dev_disable(dev); 1042 1043 spin_lock_irqsave(ap->lock, flags); 1044 1045 dev->flags &= ~ATA_DFLAG_DETACH; 1046 1047 if (ata_scsi_offline_dev(dev)) { 1048 dev->flags |= ATA_DFLAG_DETACHED; 1049 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1050 } 1051 1052 /* clear per-dev EH actions */ 1053 ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1054 ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 1055 1056 spin_unlock_irqrestore(ap->lock, flags); 1057 } 1058 1059 /** 1060 * ata_eh_about_to_do - about to perform eh_action 1061 * @link: target ATA link 1062 * @dev: target ATA dev for per-dev action (can be NULL) 1063 * @action: action about to be performed 1064 * 1065 * Called just before performing EH actions to clear related bits 1066 * in @link->eh_info such that eh actions are not unnecessarily 1067 * repeated. 1068 * 1069 * LOCKING: 1070 * None. 
1071 */ 1072 void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1073 unsigned int action) 1074 { 1075 struct ata_port *ap = link->ap; 1076 struct ata_eh_info *ehi = &link->eh_info; 1077 struct ata_eh_context *ehc = &link->eh_context; 1078 unsigned long flags; 1079 1080 spin_lock_irqsave(ap->lock, flags); 1081 1082 ata_eh_clear_action(link, dev, ehi, action); 1083 1084 if (!(ehc->i.flags & ATA_EHI_QUIET)) 1085 ap->pflags |= ATA_PFLAG_RECOVERED; 1086 1087 spin_unlock_irqrestore(ap->lock, flags); 1088 } 1089 1090 /** 1091 * ata_eh_done - EH action complete 1092 * @ap: target ATA port 1093 * @dev: target ATA dev for per-dev action (can be NULL) 1094 * @action: action just completed 1095 * 1096 * Called right after performing EH actions to clear related bits 1097 * in @link->eh_context. 1098 * 1099 * LOCKING: 1100 * None. 1101 */ 1102 void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1103 unsigned int action) 1104 { 1105 struct ata_eh_context *ehc = &link->eh_context; 1106 1107 ata_eh_clear_action(link, dev, &ehc->i, action); 1108 } 1109 1110 /** 1111 * ata_err_string - convert err_mask to descriptive string 1112 * @err_mask: error mask to convert to string 1113 * 1114 * Convert @err_mask to descriptive string. Errors are 1115 * prioritized according to severity and only the most severe 1116 * error is reported. 1117 * 1118 * LOCKING: 1119 * None. 1120 * 1121 * RETURNS: 1122 * Descriptive string for @err_mask 1123 */ 1124 static const char *ata_err_string(unsigned int err_mask) 1125 { 1126 if (err_mask & AC_ERR_HOST_BUS) 1127 return "host bus error"; 1128 if (err_mask & AC_ERR_ATA_BUS) 1129 return "ATA bus error"; 1130 if (err_mask & AC_ERR_TIMEOUT) 1131 return "timeout"; 1132 if (err_mask & AC_ERR_HSM) 1133 return "HSM violation"; 1134 if (err_mask & AC_ERR_SYSTEM) 1135 return "internal error"; 1136 if (err_mask & AC_ERR_MEDIA) 1137 return "media error"; 1138 if (err_mask & AC_ERR_INVALID) 1139 return "invalid argument"; 1140 if (err_mask & AC_ERR_DEV) 1141 return "device error"; 1142 return "unknown error"; 1143 } 1144 1145 /** 1146 * ata_read_log_page - read a specific log page 1147 * @dev: target device 1148 * @page: page to read 1149 * @buf: buffer to store read page 1150 * @sectors: number of sectors to read 1151 * 1152 * Read log page using READ_LOG_EXT command. 1153 * 1154 * LOCKING: 1155 * Kernel thread context (may sleep). 1156 * 1157 * RETURNS: 1158 * 0 on success, AC_ERR_* mask otherwise. 1159 */ 1160 static unsigned int ata_read_log_page(struct ata_device *dev, 1161 u8 page, void *buf, unsigned int sectors) 1162 { 1163 struct ata_taskfile tf; 1164 unsigned int err_mask; 1165 1166 DPRINTK("read log page - page %d\n", page); 1167 1168 ata_tf_init(dev, &tf); 1169 tf.command = ATA_CMD_READ_LOG_EXT; 1170 tf.lbal = page; 1171 tf.nsect = sectors; 1172 tf.hob_nsect = sectors >> 8; 1173 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 1174 tf.protocol = ATA_PROT_PIO; 1175 1176 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 1177 buf, sectors * ATA_SECT_SIZE, 0); 1178 1179 DPRINTK("EXIT, err_mask=%x\n", err_mask); 1180 return err_mask; 1181 } 1182 1183 /** 1184 * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1185 * @dev: Device to read log page 10h from 1186 * @tag: Resulting tag of the failed command 1187 * @tf: Resulting taskfile registers of the failed command 1188 * 1189 * Read log page 10h to obtain NCQ error details and clear error 1190 * condition. 
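 *
 *	Summary of how the page is interpreted below (not a complete
 *	description of the log page format): if bit 7 of byte 0 is set,
 *	no error is reported and -ENOENT is returned; bits 4:0 of byte 0
 *	carry the failed command's tag; bytes 2-13 carry the result
 *	taskfile registers (status, error, LBA, device and sector count).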
1191 * 1192 * LOCKING: 1193 * Kernel thread context (may sleep). 1194 * 1195 * RETURNS: 1196 * 0 on success, -errno otherwise. 1197 */ 1198 static int ata_eh_read_log_10h(struct ata_device *dev, 1199 int *tag, struct ata_taskfile *tf) 1200 { 1201 u8 *buf = dev->link->ap->sector_buf; 1202 unsigned int err_mask; 1203 u8 csum; 1204 int i; 1205 1206 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1); 1207 if (err_mask) 1208 return -EIO; 1209 1210 csum = 0; 1211 for (i = 0; i < ATA_SECT_SIZE; i++) 1212 csum += buf[i]; 1213 if (csum) 1214 ata_dev_printk(dev, KERN_WARNING, 1215 "invalid checksum 0x%x on log page 10h\n", csum); 1216 1217 if (buf[0] & 0x80) 1218 return -ENOENT; 1219 1220 *tag = buf[0] & 0x1f; 1221 1222 tf->command = buf[2]; 1223 tf->feature = buf[3]; 1224 tf->lbal = buf[4]; 1225 tf->lbam = buf[5]; 1226 tf->lbah = buf[6]; 1227 tf->device = buf[7]; 1228 tf->hob_lbal = buf[8]; 1229 tf->hob_lbam = buf[9]; 1230 tf->hob_lbah = buf[10]; 1231 tf->nsect = buf[12]; 1232 tf->hob_nsect = buf[13]; 1233 1234 return 0; 1235 } 1236 1237 /** 1238 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1239 * @dev: device to perform REQUEST_SENSE to 1240 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 1241 * 1242 * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1243 * SENSE. This function is EH helper. 1244 * 1245 * LOCKING: 1246 * Kernel thread context (may sleep). 1247 * 1248 * RETURNS: 1249 * 0 on success, AC_ERR_* mask on failure 1250 */ 1251 static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc) 1252 { 1253 struct ata_device *dev = qc->dev; 1254 unsigned char *sense_buf = qc->scsicmd->sense_buffer; 1255 struct ata_port *ap = dev->link->ap; 1256 struct ata_taskfile tf; 1257 u8 cdb[ATAPI_CDB_LEN]; 1258 1259 DPRINTK("ATAPI request sense\n"); 1260 1261 /* FIXME: is this needed? */ 1262 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1263 1264 /* initialize sense_buf with the error register, 1265 * for the case where they are -not- overwritten 1266 */ 1267 sense_buf[0] = 0x70; 1268 sense_buf[2] = qc->result_tf.feature >> 4; 1269 1270 /* some devices time out if garbage left in tf */ 1271 ata_tf_init(dev, &tf); 1272 1273 memset(cdb, 0, ATAPI_CDB_LEN); 1274 cdb[0] = REQUEST_SENSE; 1275 cdb[4] = SCSI_SENSE_BUFFERSIZE; 1276 1277 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1278 tf.command = ATA_CMD_PACKET; 1279 1280 /* is it pointless to prefer PIO for "safety reasons"? */ 1281 if (ap->flags & ATA_FLAG_PIO_DMA) { 1282 tf.protocol = ATAPI_PROT_DMA; 1283 tf.feature |= ATAPI_PKT_DMA; 1284 } else { 1285 tf.protocol = ATAPI_PROT_PIO; 1286 tf.lbam = SCSI_SENSE_BUFFERSIZE; 1287 tf.lbah = 0; 1288 } 1289 1290 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 1291 sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1292 } 1293 1294 /** 1295 * ata_eh_analyze_serror - analyze SError for a failed port 1296 * @link: ATA link to analyze SError for 1297 * 1298 * Analyze SError if available and further determine cause of 1299 * failure. 1300 * 1301 * LOCKING: 1302 * None. 
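 *
 *	As a rough map of the checks below: SERR_PERSISTENT/SERR_DATA are
 *	treated as ATA bus errors, SERR_PROTOCOL as an HSM violation and
 *	SERR_INTERNAL as a host internal error, each requesting a reset;
 *	the PHY ready change and device exchange bits are treated as
 *	hotplug events.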
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred. Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links. For disabled PMP links, only the N bit is
	 * considered as the X bit is left at 1 for link plugging.
	 */
	hotplug_mask = 0;

	if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}

/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@link: ATA link to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF. For NCQ device errors, all LLDDs have to do
 *	is set AC_ERR_DEV in ehi->err_mask. This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it an NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	if (!(link->sactive & (1 << tag))) {
		ata_link_printk(link, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure. This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
1416 * 1417 * RETURNS: 1418 * Determined recovery action 1419 */ 1420 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1421 const struct ata_taskfile *tf) 1422 { 1423 unsigned int tmp, action = 0; 1424 u8 stat = tf->command, err = tf->feature; 1425 1426 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1427 qc->err_mask |= AC_ERR_HSM; 1428 return ATA_EH_RESET; 1429 } 1430 1431 if (stat & (ATA_ERR | ATA_DF)) 1432 qc->err_mask |= AC_ERR_DEV; 1433 else 1434 return 0; 1435 1436 switch (qc->dev->class) { 1437 case ATA_DEV_ATA: 1438 if (err & ATA_ICRC) 1439 qc->err_mask |= AC_ERR_ATA_BUS; 1440 if (err & ATA_UNC) 1441 qc->err_mask |= AC_ERR_MEDIA; 1442 if (err & ATA_IDNF) 1443 qc->err_mask |= AC_ERR_INVALID; 1444 break; 1445 1446 case ATA_DEV_ATAPI: 1447 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 1448 tmp = atapi_eh_request_sense(qc); 1449 if (!tmp) { 1450 /* ATA_QCFLAG_SENSE_VALID is used to 1451 * tell atapi_qc_complete() that sense 1452 * data is already valid. 1453 * 1454 * TODO: interpret sense data and set 1455 * appropriate err_mask. 1456 */ 1457 qc->flags |= ATA_QCFLAG_SENSE_VALID; 1458 } else 1459 qc->err_mask |= tmp; 1460 } 1461 } 1462 1463 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1464 action |= ATA_EH_RESET; 1465 1466 return action; 1467 } 1468 1469 static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 1470 int *xfer_ok) 1471 { 1472 int base = 0; 1473 1474 if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 1475 *xfer_ok = 1; 1476 1477 if (!*xfer_ok) 1478 base = ATA_ECAT_DUBIOUS_NONE; 1479 1480 if (err_mask & AC_ERR_ATA_BUS) 1481 return base + ATA_ECAT_ATA_BUS; 1482 1483 if (err_mask & AC_ERR_TIMEOUT) 1484 return base + ATA_ECAT_TOUT_HSM; 1485 1486 if (eflags & ATA_EFLAG_IS_IO) { 1487 if (err_mask & AC_ERR_HSM) 1488 return base + ATA_ECAT_TOUT_HSM; 1489 if ((err_mask & 1490 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 1491 return base + ATA_ECAT_UNK_DEV; 1492 } 1493 1494 return 0; 1495 } 1496 1497 struct speed_down_verdict_arg { 1498 u64 since; 1499 int xfer_ok; 1500 int nr_errors[ATA_ECAT_NR]; 1501 }; 1502 1503 static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1504 { 1505 struct speed_down_verdict_arg *arg = void_arg; 1506 int cat; 1507 1508 if (ent->timestamp < arg->since) 1509 return -1; 1510 1511 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 1512 &arg->xfer_ok); 1513 arg->nr_errors[cat]++; 1514 1515 return 0; 1516 } 1517 1518 /** 1519 * ata_eh_speed_down_verdict - Determine speed down verdict 1520 * @dev: Device of interest 1521 * 1522 * This function examines error ring of @dev and determines 1523 * whether NCQ needs to be turned off, transfer speed should be 1524 * stepped down, or falling back to PIO is necessary. 1525 * 1526 * ECAT_ATA_BUS : ATA_BUS error for any command 1527 * 1528 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 1529 * IO commands 1530 * 1531 * ECAT_UNK_DEV : Unknown DEV error for IO commands 1532 * 1533 * ECAT_DUBIOUS_* : Identical to above three but occurred while 1534 * data transfer hasn't been verified. 1535 * 1536 * Verdicts are 1537 * 1538 * NCQ_OFF : Turn off NCQ. 1539 * 1540 * SPEED_DOWN : Speed down transfer speed but don't fall back 1541 * to PIO. 1542 * 1543 * FALLBACK_TO_PIO : Fall back to PIO. 1544 * 1545 * Even if multiple verdicts are returned, only one action is 1546 * taken per error. An action triggered by non-DUBIOUS errors 1547 * clears ering, while one triggered by DUBIOUS_* errors doesn't. 
 *	This is to expedite speed down decisions right after a device is
 *	initially configured.
 *
 *	The following are the speed down rules. #1 and #2 deal with
 *	DUBIOUS errors.
 *
 *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *	   occurred during last 5 mins, NCQ_OFF.
 *
 *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *	   occurred during last 5 mins, FALLBACK_TO_PIO.
 *
 *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *	   during last 10 mins, NCQ_OFF.
 *
 *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}

/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: Failed device
 *	@eflags: mask of ATA_EFLAG_* flags
 *	@err_mask: err_mask of the error
 *
 *	Record error and examine error history to determine whether
 *	adjusting transmission speed is necessary. It also sets
 *	transmission limits appropriately if such adjustment is
 *	necessary.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action.
1634 */ 1635 static unsigned int ata_eh_speed_down(struct ata_device *dev, 1636 unsigned int eflags, unsigned int err_mask) 1637 { 1638 struct ata_link *link = dev->link; 1639 int xfer_ok = 0; 1640 unsigned int verdict; 1641 unsigned int action = 0; 1642 1643 /* don't bother if Cat-0 error */ 1644 if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 1645 return 0; 1646 1647 /* record error and determine whether speed down is necessary */ 1648 ata_ering_record(&dev->ering, eflags, err_mask); 1649 verdict = ata_eh_speed_down_verdict(dev); 1650 1651 /* turn off NCQ? */ 1652 if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 1653 (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 1654 ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 1655 dev->flags |= ATA_DFLAG_NCQ_OFF; 1656 ata_dev_printk(dev, KERN_WARNING, 1657 "NCQ disabled due to excessive errors\n"); 1658 goto done; 1659 } 1660 1661 /* speed down? */ 1662 if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1663 /* speed down SATA link speed if possible */ 1664 if (sata_down_spd_limit(link) == 0) { 1665 action |= ATA_EH_RESET; 1666 goto done; 1667 } 1668 1669 /* lower transfer mode */ 1670 if (dev->spdn_cnt < 2) { 1671 static const int dma_dnxfer_sel[] = 1672 { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 1673 static const int pio_dnxfer_sel[] = 1674 { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 1675 int sel; 1676 1677 if (dev->xfer_shift != ATA_SHIFT_PIO) 1678 sel = dma_dnxfer_sel[dev->spdn_cnt]; 1679 else 1680 sel = pio_dnxfer_sel[dev->spdn_cnt]; 1681 1682 dev->spdn_cnt++; 1683 1684 if (ata_down_xfermask_limit(dev, sel) == 0) { 1685 action |= ATA_EH_RESET; 1686 goto done; 1687 } 1688 } 1689 } 1690 1691 /* Fall back to PIO? Slowing down to PIO is meaningless for 1692 * SATA ATA devices. Consider it only for PATA and SATAPI. 1693 */ 1694 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1695 (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 1696 (dev->xfer_shift != ATA_SHIFT_PIO)) { 1697 if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 1698 dev->spdn_cnt = 0; 1699 action |= ATA_EH_RESET; 1700 goto done; 1701 } 1702 } 1703 1704 return 0; 1705 done: 1706 /* device has been slowed down, blow error history */ 1707 if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 1708 ata_ering_clear(&dev->ering); 1709 return action; 1710 } 1711 1712 /** 1713 * ata_eh_link_autopsy - analyze error and determine recovery action 1714 * @link: host link to perform autopsy on 1715 * 1716 * Analyze why @link failed and determine which recovery actions 1717 * are needed. This function also sets more detailed AC_ERR_* 1718 * values and fills sense data for ATAPI CHECK SENSE. 1719 * 1720 * LOCKING: 1721 * Kernel thread context (may sleep). 
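 *
 *	For orientation, the analysis below proceeds roughly as: read and
 *	fold in SError, analyze NCQ errors via log page 10h, analyze each
 *	failed qc's result taskfile (requesting ATAPI sense data where
 *	applicable), then derive the default EH actions and a possible
 *	speed down verdict from the accumulated error mask.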
1722 */ 1723 static void ata_eh_link_autopsy(struct ata_link *link) 1724 { 1725 struct ata_port *ap = link->ap; 1726 struct ata_eh_context *ehc = &link->eh_context; 1727 struct ata_device *dev; 1728 unsigned int all_err_mask = 0, eflags = 0; 1729 int tag; 1730 u32 serror; 1731 int rc; 1732 1733 DPRINTK("ENTER\n"); 1734 1735 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 1736 return; 1737 1738 /* obtain and analyze SError */ 1739 rc = sata_scr_read(link, SCR_ERROR, &serror); 1740 if (rc == 0) { 1741 ehc->i.serror |= serror; 1742 ata_eh_analyze_serror(link); 1743 } else if (rc != -EOPNOTSUPP) { 1744 /* SError read failed, force reset and probing */ 1745 ehc->i.probe_mask |= ATA_ALL_DEVICES; 1746 ehc->i.action |= ATA_EH_RESET; 1747 ehc->i.err_mask |= AC_ERR_OTHER; 1748 } 1749 1750 /* analyze NCQ failure */ 1751 ata_eh_analyze_ncq_error(link); 1752 1753 /* any real error trumps AC_ERR_OTHER */ 1754 if (ehc->i.err_mask & ~AC_ERR_OTHER) 1755 ehc->i.err_mask &= ~AC_ERR_OTHER; 1756 1757 all_err_mask |= ehc->i.err_mask; 1758 1759 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1760 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1761 1762 if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link) 1763 continue; 1764 1765 /* inherit upper level err_mask */ 1766 qc->err_mask |= ehc->i.err_mask; 1767 1768 /* analyze TF */ 1769 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 1770 1771 /* DEV errors are probably spurious in case of ATA_BUS error */ 1772 if (qc->err_mask & AC_ERR_ATA_BUS) 1773 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 1774 AC_ERR_INVALID); 1775 1776 /* any real error trumps unknown error */ 1777 if (qc->err_mask & ~AC_ERR_OTHER) 1778 qc->err_mask &= ~AC_ERR_OTHER; 1779 1780 /* SENSE_VALID trumps dev/unknown error and revalidation */ 1781 if (qc->flags & ATA_QCFLAG_SENSE_VALID) 1782 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 1783 1784 /* determine whether the command is worth retrying */ 1785 if (!(qc->err_mask & AC_ERR_INVALID) && 1786 ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV)) 1787 qc->flags |= ATA_QCFLAG_RETRY; 1788 1789 /* accumulate error info */ 1790 ehc->i.dev = qc->dev; 1791 all_err_mask |= qc->err_mask; 1792 if (qc->flags & ATA_QCFLAG_IO) 1793 eflags |= ATA_EFLAG_IS_IO; 1794 } 1795 1796 /* enforce default EH actions */ 1797 if (ap->pflags & ATA_PFLAG_FROZEN || 1798 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 1799 ehc->i.action |= ATA_EH_RESET; 1800 else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 1801 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 1802 ehc->i.action |= ATA_EH_REVALIDATE; 1803 1804 /* If we have offending qcs and the associated failed device, 1805 * perform per-dev EH action only on the offending device. 
1806 */ 1807 if (ehc->i.dev) { 1808 ehc->i.dev_action[ehc->i.dev->devno] |= 1809 ehc->i.action & ATA_EH_PERDEV_MASK; 1810 ehc->i.action &= ~ATA_EH_PERDEV_MASK; 1811 } 1812 1813 /* propagate timeout to host link */ 1814 if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 1815 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 1816 1817 /* record error and consider speeding down */ 1818 dev = ehc->i.dev; 1819 if (!dev && ((ata_link_max_devices(link) == 1 && 1820 ata_dev_enabled(link->device)))) 1821 dev = link->device; 1822 1823 if (dev) { 1824 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 1825 eflags |= ATA_EFLAG_DUBIOUS_XFER; 1826 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 1827 } 1828 1829 DPRINTK("EXIT\n"); 1830 } 1831 1832 /** 1833 * ata_eh_autopsy - analyze error and determine recovery action 1834 * @ap: host port to perform autopsy on 1835 * 1836 * Analyze all links of @ap and determine why they failed and 1837 * which recovery actions are needed. 1838 * 1839 * LOCKING: 1840 * Kernel thread context (may sleep). 1841 */ 1842 void ata_eh_autopsy(struct ata_port *ap) 1843 { 1844 struct ata_link *link; 1845 1846 ata_port_for_each_link(link, ap) 1847 ata_eh_link_autopsy(link); 1848 1849 /* Autopsy of fanout ports can affect host link autopsy. 1850 * Perform host link autopsy last. 1851 */ 1852 if (sata_pmp_attached(ap)) 1853 ata_eh_link_autopsy(&ap->link); 1854 } 1855 1856 /** 1857 * ata_eh_link_report - report error handling to user 1858 * @link: ATA link EH is going on 1859 * 1860 * Report EH to user. 1861 * 1862 * LOCKING: 1863 * None. 1864 */ 1865 static void ata_eh_link_report(struct ata_link *link) 1866 { 1867 struct ata_port *ap = link->ap; 1868 struct ata_eh_context *ehc = &link->eh_context; 1869 const char *frozen, *desc; 1870 char tries_buf[6]; 1871 int tag, nr_failed = 0; 1872 1873 if (ehc->i.flags & ATA_EHI_QUIET) 1874 return; 1875 1876 desc = NULL; 1877 if (ehc->i.desc[0] != '\0') 1878 desc = ehc->i.desc; 1879 1880 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1881 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1882 1883 if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link || 1884 ((qc->flags & ATA_QCFLAG_QUIET) && 1885 qc->err_mask == AC_ERR_DEV)) 1886 continue; 1887 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 1888 continue; 1889 1890 nr_failed++; 1891 } 1892 1893 if (!nr_failed && !ehc->i.err_mask) 1894 return; 1895 1896 frozen = ""; 1897 if (ap->pflags & ATA_PFLAG_FROZEN) 1898 frozen = " frozen"; 1899 1900 memset(tries_buf, 0, sizeof(tries_buf)); 1901 if (ap->eh_tries < ATA_EH_MAX_TRIES) 1902 snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d", 1903 ap->eh_tries); 1904 1905 if (ehc->i.dev) { 1906 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " 1907 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 1908 ehc->i.err_mask, link->sactive, ehc->i.serror, 1909 ehc->i.action, frozen, tries_buf); 1910 if (desc) 1911 ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); 1912 } else { 1913 ata_link_printk(link, KERN_ERR, "exception Emask 0x%x " 1914 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 1915 ehc->i.err_mask, link->sactive, ehc->i.serror, 1916 ehc->i.action, frozen, tries_buf); 1917 if (desc) 1918 ata_link_printk(link, KERN_ERR, "%s\n", desc); 1919 } 1920 1921 if (ehc->i.serror) 1922 ata_port_printk(ap, KERN_ERR, 1923 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 1924 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 1925 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 1926 ehc->i.serror & SERR_DATA ? 
"UnrecovData " : "", 1927 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 1928 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 1929 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 1930 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", 1931 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 1932 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 1933 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 1934 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 1935 ehc->i.serror & SERR_CRC ? "BadCRC " : "", 1936 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 1937 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 1938 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 1939 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 1940 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 1941 1942 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1943 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1944 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 1945 const u8 *cdb = qc->cdb; 1946 char data_buf[20] = ""; 1947 char cdb_buf[70] = ""; 1948 1949 if (!(qc->flags & ATA_QCFLAG_FAILED) || 1950 qc->dev->link != link || !qc->err_mask) 1951 continue; 1952 1953 if (qc->dma_dir != DMA_NONE) { 1954 static const char *dma_str[] = { 1955 [DMA_BIDIRECTIONAL] = "bidi", 1956 [DMA_TO_DEVICE] = "out", 1957 [DMA_FROM_DEVICE] = "in", 1958 }; 1959 static const char *prot_str[] = { 1960 [ATA_PROT_PIO] = "pio", 1961 [ATA_PROT_DMA] = "dma", 1962 [ATA_PROT_NCQ] = "ncq", 1963 [ATAPI_PROT_PIO] = "pio", 1964 [ATAPI_PROT_DMA] = "dma", 1965 }; 1966 1967 snprintf(data_buf, sizeof(data_buf), " %s %u %s", 1968 prot_str[qc->tf.protocol], qc->nbytes, 1969 dma_str[qc->dma_dir]); 1970 } 1971 1972 if (ata_is_atapi(qc->tf.protocol)) 1973 snprintf(cdb_buf, sizeof(cdb_buf), 1974 "cdb %02x %02x %02x %02x %02x %02x %02x %02x " 1975 "%02x %02x %02x %02x %02x %02x %02x %02x\n ", 1976 cdb[0], cdb[1], cdb[2], cdb[3], 1977 cdb[4], cdb[5], cdb[6], cdb[7], 1978 cdb[8], cdb[9], cdb[10], cdb[11], 1979 cdb[12], cdb[13], cdb[14], cdb[15]); 1980 1981 ata_dev_printk(qc->dev, KERN_ERR, 1982 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 1983 "tag %d%s\n %s" 1984 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 1985 "Emask 0x%x (%s)%s\n", 1986 cmd->command, cmd->feature, cmd->nsect, 1987 cmd->lbal, cmd->lbam, cmd->lbah, 1988 cmd->hob_feature, cmd->hob_nsect, 1989 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 1990 cmd->device, qc->tag, data_buf, cdb_buf, 1991 res->command, res->feature, res->nsect, 1992 res->lbal, res->lbam, res->lbah, 1993 res->hob_feature, res->hob_nsect, 1994 res->hob_lbal, res->hob_lbam, res->hob_lbah, 1995 res->device, qc->err_mask, ata_err_string(qc->err_mask), 1996 qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 1997 1998 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 1999 ATA_ERR)) { 2000 if (res->command & ATA_BUSY) 2001 ata_dev_printk(qc->dev, KERN_ERR, 2002 "status: { Busy }\n"); 2003 else 2004 ata_dev_printk(qc->dev, KERN_ERR, 2005 "status: { %s%s%s%s}\n", 2006 res->command & ATA_DRDY ? "DRDY " : "", 2007 res->command & ATA_DF ? "DF " : "", 2008 res->command & ATA_DRQ ? "DRQ " : "", 2009 res->command & ATA_ERR ? "ERR " : ""); 2010 } 2011 2012 if (cmd->command != ATA_CMD_PACKET && 2013 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 2014 ATA_ABORTED))) 2015 ata_dev_printk(qc->dev, KERN_ERR, 2016 "error: { %s%s%s%s}\n", 2017 res->feature & ATA_ICRC ? "ICRC " : "", 2018 res->feature & ATA_UNC ? "UNC " : "", 2019 res->feature & ATA_IDNF ? 
"IDNF " : "", 2020 res->feature & ATA_ABORTED ? "ABRT " : ""); 2021 } 2022 } 2023 2024 /** 2025 * ata_eh_report - report error handling to user 2026 * @ap: ATA port to report EH about 2027 * 2028 * Report EH to user. 2029 * 2030 * LOCKING: 2031 * None. 2032 */ 2033 void ata_eh_report(struct ata_port *ap) 2034 { 2035 struct ata_link *link; 2036 2037 __ata_port_for_each_link(link, ap) 2038 ata_eh_link_report(link); 2039 } 2040 2041 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2042 unsigned int *classes, unsigned long deadline) 2043 { 2044 struct ata_device *dev; 2045 2046 ata_link_for_each_dev(dev, link) 2047 classes[dev->devno] = ATA_DEV_UNKNOWN; 2048 2049 return reset(link, classes, deadline); 2050 } 2051 2052 static int ata_eh_followup_srst_needed(struct ata_link *link, 2053 int rc, int classify, 2054 const unsigned int *classes) 2055 { 2056 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2057 return 0; 2058 if (rc == -EAGAIN) { 2059 if (classify) 2060 return 1; 2061 rc = 0; 2062 } 2063 if (rc != 0) 2064 return 0; 2065 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 2066 return 1; 2067 return 0; 2068 } 2069 2070 int ata_eh_reset(struct ata_link *link, int classify, 2071 ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2072 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2073 { 2074 const int max_tries = ARRAY_SIZE(ata_eh_reset_timeouts); 2075 struct ata_port *ap = link->ap; 2076 struct ata_eh_context *ehc = &link->eh_context; 2077 unsigned int *classes = ehc->classes; 2078 unsigned int lflags = link->flags; 2079 int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2080 int try = 0; 2081 struct ata_device *dev; 2082 unsigned long deadline, now; 2083 ata_reset_fn_t reset; 2084 unsigned long flags; 2085 u32 sstatus; 2086 int nr_known, rc; 2087 2088 /* 2089 * Prepare to reset 2090 */ 2091 spin_lock_irqsave(ap->lock, flags); 2092 ap->pflags |= ATA_PFLAG_RESETTING; 2093 spin_unlock_irqrestore(ap->lock, flags); 2094 2095 ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2096 2097 ata_link_for_each_dev(dev, link) { 2098 /* If we issue an SRST then an ATA drive (not ATAPI) 2099 * may change configuration and be in PIO0 timing. If 2100 * we do a hard reset (or are coming from power on) 2101 * this is true for ATA or ATAPI. Until we've set a 2102 * suitable controller mode we should not touch the 2103 * bus as we may be talking too fast. 2104 */ 2105 dev->pio_mode = XFER_PIO_0; 2106 2107 /* If the controller has a pio mode setup function 2108 * then use it to set the chipset to rights. Don't 2109 * touch the DMA setup as that will be dealt with when 2110 * configuring devices. 2111 */ 2112 if (ap->ops->set_piomode) 2113 ap->ops->set_piomode(ap, dev); 2114 } 2115 2116 /* prefer hardreset */ 2117 reset = NULL; 2118 ehc->i.action &= ~ATA_EH_RESET; 2119 if (hardreset) { 2120 reset = hardreset; 2121 ehc->i.action = ATA_EH_HARDRESET; 2122 } else if (softreset) { 2123 reset = softreset; 2124 ehc->i.action = ATA_EH_SOFTRESET; 2125 } 2126 2127 if (prereset) { 2128 rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT); 2129 if (rc) { 2130 if (rc == -ENOENT) { 2131 ata_link_printk(link, KERN_DEBUG, 2132 "port disabled. ignoring.\n"); 2133 ehc->i.action &= ~ATA_EH_RESET; 2134 2135 ata_link_for_each_dev(dev, link) 2136 classes[dev->devno] = ATA_DEV_NONE; 2137 2138 rc = 0; 2139 } else 2140 ata_link_printk(link, KERN_ERR, 2141 "prereset failed (errno=%d)\n", rc); 2142 goto out; 2143 } 2144 2145 /* prereset() might have cleared ATA_EH_RESET. 
If so, 2146 * bang classes and return. 2147 */ 2148 if (reset && !(ehc->i.action & ATA_EH_RESET)) { 2149 ata_link_for_each_dev(dev, link) 2150 classes[dev->devno] = ATA_DEV_NONE; 2151 rc = 0; 2152 goto out; 2153 } 2154 } 2155 2156 retry: 2157 /* 2158 * Perform reset 2159 */ 2160 if (ata_is_host_link(link)) 2161 ata_eh_freeze_port(ap); 2162 2163 deadline = jiffies + ata_eh_reset_timeouts[try++]; 2164 2165 if (reset) { 2166 if (verbose) 2167 ata_link_printk(link, KERN_INFO, "%s resetting link\n", 2168 reset == softreset ? "soft" : "hard"); 2169 2170 /* mark that this EH session started with reset */ 2171 if (reset == hardreset) 2172 ehc->i.flags |= ATA_EHI_DID_HARDRESET; 2173 else 2174 ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2175 2176 rc = ata_do_reset(link, reset, classes, deadline); 2177 2178 if (reset == hardreset && 2179 ata_eh_followup_srst_needed(link, rc, classify, classes)) { 2180 /* okay, let's do follow-up softreset */ 2181 reset = softreset; 2182 2183 if (!reset) { 2184 ata_link_printk(link, KERN_ERR, 2185 "follow-up softreset required " 2186 "but no softreset available\n"); 2187 rc = -EINVAL; 2188 goto fail; 2189 } 2190 2191 ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2192 rc = ata_do_reset(link, reset, classes, deadline); 2193 } 2194 2195 /* -EAGAIN can happen if we skipped followup SRST */ 2196 if (rc && rc != -EAGAIN) 2197 goto fail; 2198 } else { 2199 if (verbose) 2200 ata_link_printk(link, KERN_INFO, "no reset method " 2201 "available, skipping reset\n"); 2202 if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2203 lflags |= ATA_LFLAG_ASSUME_ATA; 2204 } 2205 2206 /* 2207 * Post-reset processing 2208 */ 2209 ata_link_for_each_dev(dev, link) { 2210 /* After the reset, the device state is PIO 0 and the 2211 * controller state is undefined. Reset also wakes up 2212 * drives from sleeping mode. 2213 */ 2214 dev->pio_mode = XFER_PIO_0; 2215 dev->flags &= ~ATA_DFLAG_SLEEPING; 2216 2217 if (ata_link_offline(link)) 2218 continue; 2219 2220 /* apply class override */ 2221 if (lflags & ATA_LFLAG_ASSUME_ATA) 2222 classes[dev->devno] = ATA_DEV_ATA; 2223 else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2224 classes[dev->devno] = ATA_DEV_SEMB_UNSUP; /* not yet */ 2225 } 2226 2227 /* record current link speed */ 2228 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2229 link->sata_spd = (sstatus >> 4) & 0xf; 2230 2231 /* thaw the port */ 2232 if (ata_is_host_link(link)) 2233 ata_eh_thaw_port(ap); 2234 2235 /* postreset() should clear hardware SError. Although SError 2236 * is cleared during link resume, clearing SError here is 2237 * necessary as some PHYs raise hotplug events after SRST. 2238 * This introduces a race condition where hotplug occurs between 2239 * reset and here. This race is mitigated by cross-checking 2240 * link onlineness and classification result later. 2241 */ 2242 if (postreset) 2243 postreset(link, classes); 2244 2245 /* clear cached SError */ 2246 spin_lock_irqsave(link->ap->lock, flags); 2247 link->eh_info.serror = 0; 2248 spin_unlock_irqrestore(link->ap->lock, flags); 2249 2250 /* Make sure onlineness and classification result correspond. 2251 * Hotplug could have happened during reset and some 2252 * controllers fail to wait while a drive is spinning up after 2253 * being hotplugged, causing misdetection. By cross-checking 2254 * link onlineness and classification result, those conditions 2255 * can be reliably detected and retried.
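* When classification is requested, an online link that yields no known device class is treated as misclassification and retried below.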
2256 */ 2257 nr_known = 0; 2258 ata_link_for_each_dev(dev, link) { 2259 /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */ 2260 if (classes[dev->devno] == ATA_DEV_UNKNOWN) 2261 classes[dev->devno] = ATA_DEV_NONE; 2262 else 2263 nr_known++; 2264 } 2265 2266 if (classify && !nr_known && ata_link_online(link)) { 2267 if (try < max_tries) { 2268 ata_link_printk(link, KERN_WARNING, "link online but " 2269 "device misclassified, retrying\n"); 2270 rc = -EAGAIN; 2271 goto fail; 2272 } 2273 ata_link_printk(link, KERN_WARNING, 2274 "link online but device misclassified, " 2275 "device detection might fail\n"); 2276 } 2277 2278 /* reset successful, schedule revalidation */ 2279 ata_eh_done(link, NULL, ATA_EH_RESET); 2280 ehc->i.action |= ATA_EH_REVALIDATE; 2281 2282 rc = 0; 2283 out: 2284 /* clear hotplug flag */ 2285 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2286 2287 spin_lock_irqsave(ap->lock, flags); 2288 ap->pflags &= ~ATA_PFLAG_RESETTING; 2289 spin_unlock_irqrestore(ap->lock, flags); 2290 2291 return rc; 2292 2293 fail: 2294 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 2295 if (!ata_is_host_link(link) && 2296 sata_scr_read(link, SCR_STATUS, &sstatus)) 2297 rc = -ERESTART; 2298 2299 if (rc == -ERESTART || try >= max_tries) 2300 goto out; 2301 2302 now = jiffies; 2303 if (time_before(now, deadline)) { 2304 unsigned long delta = deadline - now; 2305 2306 ata_link_printk(link, KERN_WARNING, "reset failed " 2307 "(errno=%d), retrying in %u secs\n", 2308 rc, (jiffies_to_msecs(delta) + 999) / 1000); 2309 2310 while (delta) 2311 delta = schedule_timeout_uninterruptible(delta); 2312 } 2313 2314 if (rc == -EPIPE || try == max_tries - 1) 2315 sata_down_spd_limit(link); 2316 if (hardreset) 2317 reset = hardreset; 2318 goto retry; 2319 } 2320 2321 static int ata_eh_revalidate_and_attach(struct ata_link *link, 2322 struct ata_device **r_failed_dev) 2323 { 2324 struct ata_port *ap = link->ap; 2325 struct ata_eh_context *ehc = &link->eh_context; 2326 struct ata_device *dev; 2327 unsigned int new_mask = 0; 2328 unsigned long flags; 2329 int rc = 0; 2330 2331 DPRINTK("ENTER\n"); 2332 2333 /* For PATA drive side cable detection to work, IDENTIFY must 2334 * be done backwards such that PDIAG- is released by the slave 2335 * device before the master device is identified. 2336 */ 2337 ata_link_for_each_dev_reverse(dev, link) { 2338 unsigned int action = ata_eh_dev_action(dev); 2339 unsigned int readid_flags = 0; 2340 2341 if (ehc->i.flags & ATA_EHI_DID_RESET) 2342 readid_flags |= ATA_READID_POSTRESET; 2343 2344 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2345 WARN_ON(dev->class == ATA_DEV_PMP); 2346 2347 if (ata_link_offline(link)) { 2348 rc = -EIO; 2349 goto err; 2350 } 2351 2352 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 2353 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 2354 readid_flags); 2355 if (rc) 2356 goto err; 2357 2358 ata_eh_done(link, dev, ATA_EH_REVALIDATE); 2359 2360 /* Configuration may have changed, reconfigure 2361 * transfer mode. 
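* Setting ATA_EHI_SETMODE here makes ata_eh_recover() reprogram the transfer mode via ata_set_mode() once the link has been handled.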
2362 */ 2363 ehc->i.flags |= ATA_EHI_SETMODE; 2364 2365 /* schedule the scsi_rescan_device() here */ 2366 queue_work(ata_aux_wq, &(ap->scsi_rescan_task)); 2367 } else if (dev->class == ATA_DEV_UNKNOWN && 2368 ehc->tries[dev->devno] && 2369 ata_class_enabled(ehc->classes[dev->devno])) { 2370 dev->class = ehc->classes[dev->devno]; 2371 2372 if (dev->class == ATA_DEV_PMP) 2373 rc = sata_pmp_attach(dev); 2374 else 2375 rc = ata_dev_read_id(dev, &dev->class, 2376 readid_flags, dev->id); 2377 switch (rc) { 2378 case 0: 2379 new_mask |= 1 << dev->devno; 2380 break; 2381 case -ENOENT: 2382 /* IDENTIFY was issued to non-existent 2383 * device. No need to reset. Just 2384 * thaw and kill the device. 2385 */ 2386 ata_eh_thaw_port(ap); 2387 dev->class = ATA_DEV_UNKNOWN; 2388 break; 2389 default: 2390 dev->class = ATA_DEV_UNKNOWN; 2391 goto err; 2392 } 2393 } 2394 } 2395 2396 /* PDIAG- should have been released, ask cable type if post-reset */ 2397 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 2398 if (ap->ops->cable_detect) 2399 ap->cbl = ap->ops->cable_detect(ap); 2400 ata_force_cbl(ap); 2401 } 2402 2403 /* Configure new devices forward such that the user doesn't see 2404 * device detection messages backwards. 2405 */ 2406 ata_link_for_each_dev(dev, link) { 2407 if (!(new_mask & (1 << dev->devno)) || 2408 dev->class == ATA_DEV_PMP) 2409 continue; 2410 2411 ehc->i.flags |= ATA_EHI_PRINTINFO; 2412 rc = ata_dev_configure(dev); 2413 ehc->i.flags &= ~ATA_EHI_PRINTINFO; 2414 if (rc) 2415 goto err; 2416 2417 spin_lock_irqsave(ap->lock, flags); 2418 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 2419 spin_unlock_irqrestore(ap->lock, flags); 2420 2421 /* new device discovered, configure xfermode */ 2422 ehc->i.flags |= ATA_EHI_SETMODE; 2423 } 2424 2425 return 0; 2426 2427 err: 2428 *r_failed_dev = dev; 2429 DPRINTK("EXIT rc=%d\n", rc); 2430 return rc; 2431 } 2432 2433 /** 2434 * ata_set_mode - Program timings and issue SET FEATURES - XFER 2435 * @link: link on which timings will be programmed 2436 * @r_failed_dev: out parameter for failed device 2437 * 2438 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 2439 * ata_set_mode() fails, pointer to the failing device is 2440 * returned in @r_failed_dev. 2441 * 2442 * LOCKING: 2443 * PCI/etc. bus probe sem. 2444 * 2445 * RETURNS: 2446 * 0 on success, negative errno otherwise 2447 */ 2448 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 2449 { 2450 struct ata_port *ap = link->ap; 2451 struct ata_device *dev; 2452 int rc; 2453 2454 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 2455 ata_link_for_each_dev(dev, link) { 2456 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 2457 struct ata_ering_entry *ent; 2458 2459 ent = ata_ering_top(&dev->ering); 2460 if (ent) 2461 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 2462 } 2463 } 2464 2465 /* has private set_mode?
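* Use the LLD's ->set_mode if it provides one, otherwise fall back to the generic ata_do_set_mode().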
*/ 2466 if (ap->ops->set_mode) 2467 rc = ap->ops->set_mode(link, r_failed_dev); 2468 else 2469 rc = ata_do_set_mode(link, r_failed_dev); 2470 2471 /* if transfer mode has changed, set DUBIOUS_XFER on device */ 2472 ata_link_for_each_dev(dev, link) { 2473 struct ata_eh_context *ehc = &link->eh_context; 2474 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 2475 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 2476 2477 if (dev->xfer_mode != saved_xfer_mode || 2478 ata_ncq_enabled(dev) != saved_ncq) 2479 dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 2480 } 2481 2482 return rc; 2483 } 2484 2485 static int ata_link_nr_enabled(struct ata_link *link) 2486 { 2487 struct ata_device *dev; 2488 int cnt = 0; 2489 2490 ata_link_for_each_dev(dev, link) 2491 if (ata_dev_enabled(dev)) 2492 cnt++; 2493 return cnt; 2494 } 2495 2496 static int ata_link_nr_vacant(struct ata_link *link) 2497 { 2498 struct ata_device *dev; 2499 int cnt = 0; 2500 2501 ata_link_for_each_dev(dev, link) 2502 if (dev->class == ATA_DEV_UNKNOWN) 2503 cnt++; 2504 return cnt; 2505 } 2506 2507 static int ata_eh_skip_recovery(struct ata_link *link) 2508 { 2509 struct ata_port *ap = link->ap; 2510 struct ata_eh_context *ehc = &link->eh_context; 2511 struct ata_device *dev; 2512 2513 /* skip disabled links */ 2514 if (link->flags & ATA_LFLAG_DISABLED) 2515 return 1; 2516 2517 /* thaw frozen port and recover failed devices */ 2518 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 2519 return 0; 2520 2521 /* reset at least once if reset is requested */ 2522 if ((ehc->i.action & ATA_EH_RESET) && 2523 !(ehc->i.flags & ATA_EHI_DID_RESET)) 2524 return 0; 2525 2526 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 2527 ata_link_for_each_dev(dev, link) { 2528 if (dev->class == ATA_DEV_UNKNOWN && 2529 ehc->classes[dev->devno] != ATA_DEV_NONE) 2530 return 0; 2531 } 2532 2533 return 1; 2534 } 2535 2536 static int ata_eh_schedule_probe(struct ata_device *dev) 2537 { 2538 struct ata_eh_context *ehc = &dev->link->eh_context; 2539 2540 if (!(ehc->i.probe_mask & (1 << dev->devno)) || 2541 (ehc->did_probe_mask & (1 << dev->devno))) 2542 return 0; 2543 2544 ata_eh_detach_dev(dev); 2545 ata_dev_init(dev); 2546 ehc->did_probe_mask |= (1 << dev->devno); 2547 ehc->i.action |= ATA_EH_RESET; 2548 ehc->saved_xfer_mode[dev->devno] = 0; 2549 ehc->saved_ncq_enabled &= ~(1 << dev->devno); 2550 2551 return 1; 2552 } 2553 2554 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 2555 { 2556 struct ata_eh_context *ehc = &dev->link->eh_context; 2557 2558 ehc->tries[dev->devno]--; 2559 2560 switch (err) { 2561 case -ENODEV: 2562 /* device missing or wrong IDENTIFY data, schedule probing */ 2563 ehc->i.probe_mask |= (1 << dev->devno); 2564 case -EINVAL: 2565 /* give it just one more chance */ 2566 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 2567 case -EIO: 2568 if (ehc->tries[dev->devno] == 1 && dev->pio_mode > XFER_PIO_0) { 2569 /* This is the last chance, better to slow 2570 * down than lose it. 
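* Lower the allowed SATA link speed and reduce the transfer mode limit before that final attempt.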
2571 */ 2572 sata_down_spd_limit(dev->link); 2573 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2574 } 2575 } 2576 2577 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 2578 /* disable device if it has used up all its chances */ 2579 ata_dev_disable(dev); 2580 2581 /* detach if offline */ 2582 if (ata_link_offline(dev->link)) 2583 ata_eh_detach_dev(dev); 2584 2585 /* schedule probe if necessary */ 2586 if (ata_eh_schedule_probe(dev)) 2587 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 2588 2589 return 1; 2590 } else { 2591 ehc->i.action |= ATA_EH_RESET; 2592 return 0; 2593 } 2594 } 2595 2596 /** 2597 * ata_eh_recover - recover host port after error 2598 * @ap: host port to recover 2599 * @prereset: prereset method (can be NULL) 2600 * @softreset: softreset method (can be NULL) 2601 * @hardreset: hardreset method (can be NULL) 2602 * @postreset: postreset method (can be NULL) 2603 * @r_failed_link: out parameter for failed link 2604 * 2605 * This is the alpha and omega, yin and yang, heart and soul of 2606 * libata exception handling. On entry, actions required to 2607 * recover each link and hotplug requests are recorded in the 2608 * link's eh_context. This function executes all the operations 2609 * with appropriate retries and fallbacks to resurrect failed 2610 * devices, detach goners and greet newcomers. 2611 * 2612 * LOCKING: 2613 * Kernel thread context (may sleep). 2614 * 2615 * RETURNS: 2616 * 0 on success, -errno on failure. 2617 */ 2618 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 2619 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 2620 ata_postreset_fn_t postreset, 2621 struct ata_link **r_failed_link) 2622 { 2623 struct ata_link *link; 2624 struct ata_device *dev; 2625 int nr_failed_devs, nr_disabled_devs; 2626 int rc; 2627 unsigned long flags; 2628 2629 DPRINTK("ENTER\n"); 2630 2631 /* prep for recovery */ 2632 ata_port_for_each_link(link, ap) { 2633 struct ata_eh_context *ehc = &link->eh_context; 2634 2635 /* re-enable link? */ 2636 if (ehc->i.action & ATA_EH_ENABLE_LINK) { 2637 ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 2638 spin_lock_irqsave(ap->lock, flags); 2639 link->flags &= ~ATA_LFLAG_DISABLED; 2640 spin_unlock_irqrestore(ap->lock, flags); 2641 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 2642 } 2643 2644 ata_link_for_each_dev(dev, link) { 2645 if (link->flags & ATA_LFLAG_NO_RETRY) 2646 ehc->tries[dev->devno] = 1; 2647 else 2648 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 2649 2650 /* collect port action mask recorded in dev actions */ 2651 ehc->i.action |= ehc->i.dev_action[dev->devno] & 2652 ~ATA_EH_PERDEV_MASK; 2653 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 2654 2655 /* process hotplug request */ 2656 if (dev->flags & ATA_DFLAG_DETACH) 2657 ata_eh_detach_dev(dev); 2658 2659 /* schedule probe if necessary */ 2660 if (!ata_dev_enabled(dev)) 2661 ata_eh_schedule_probe(dev); 2662 } 2663 } 2664 2665 retry: 2666 rc = 0; 2667 nr_failed_devs = 0; 2668 nr_disabled_devs = 0; 2669 2670 /* if UNLOADING, finish immediately */ 2671 if (ap->pflags & ATA_PFLAG_UNLOADING) 2672 goto out; 2673 2674 /* prep for EH */ 2675 ata_port_for_each_link(link, ap) { 2676 struct ata_eh_context *ehc = &link->eh_context; 2677 2678 /* skip EH if possible.
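* ata_eh_skip_recovery() returns 1 for disabled links and for links with nothing left to recover; clearing the action mask below skips them.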
*/ 2679 if (ata_eh_skip_recovery(link)) 2680 ehc->i.action = 0; 2681 2682 ata_link_for_each_dev(dev, link) 2683 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 2684 } 2685 2686 /* reset */ 2687 ata_port_for_each_link(link, ap) { 2688 struct ata_eh_context *ehc = &link->eh_context; 2689 2690 if (!(ehc->i.action & ATA_EH_RESET)) 2691 continue; 2692 2693 rc = ata_eh_reset(link, ata_link_nr_vacant(link), 2694 prereset, softreset, hardreset, postreset); 2695 if (rc) { 2696 ata_link_printk(link, KERN_ERR, 2697 "reset failed, giving up\n"); 2698 goto out; 2699 } 2700 } 2701 2702 /* the rest */ 2703 ata_port_for_each_link(link, ap) { 2704 struct ata_eh_context *ehc = &link->eh_context; 2705 2706 /* revalidate existing devices and attach new ones */ 2707 rc = ata_eh_revalidate_and_attach(link, &dev); 2708 if (rc) 2709 goto dev_fail; 2710 2711 /* if PMP got attached, return, pmp EH will take care of it */ 2712 if (link->device->class == ATA_DEV_PMP) { 2713 ehc->i.action = 0; 2714 return 0; 2715 } 2716 2717 /* configure transfer mode if necessary */ 2718 if (ehc->i.flags & ATA_EHI_SETMODE) { 2719 rc = ata_set_mode(link, &dev); 2720 if (rc) 2721 goto dev_fail; 2722 ehc->i.flags &= ~ATA_EHI_SETMODE; 2723 } 2724 2725 if (ehc->i.action & ATA_EH_LPM) 2726 ata_link_for_each_dev(dev, link) 2727 ata_dev_enable_pm(dev, ap->pm_policy); 2728 2729 /* this link is okay now */ 2730 ehc->i.flags = 0; 2731 continue; 2732 2733 dev_fail: 2734 nr_failed_devs++; 2735 if (ata_eh_handle_dev_fail(dev, rc)) 2736 nr_disabled_devs++; 2737 2738 if (ap->pflags & ATA_PFLAG_FROZEN) { 2739 /* PMP reset requires working host port. 2740 * Can't retry if it's frozen. 2741 */ 2742 if (sata_pmp_attached(ap)) 2743 goto out; 2744 break; 2745 } 2746 } 2747 2748 if (nr_failed_devs) { 2749 if (nr_failed_devs != nr_disabled_devs) { 2750 ata_port_printk(ap, KERN_WARNING, "failed to recover " 2751 "some devices, retrying in 5 secs\n"); 2752 ssleep(5); 2753 } else { 2754 /* no device left to recover, repeat fast */ 2755 msleep(500); 2756 } 2757 2758 goto retry; 2759 } 2760 2761 out: 2762 if (rc && r_failed_link) 2763 *r_failed_link = link; 2764 2765 DPRINTK("EXIT, rc=%d\n", rc); 2766 return rc; 2767 } 2768 2769 /** 2770 * ata_eh_finish - finish up EH 2771 * @ap: host port to finish EH for 2772 * 2773 * Recovery is complete. Clean up EH states and retry or finish 2774 * failed qcs. 2775 * 2776 * LOCKING: 2777 * None. 2778 */ 2779 void ata_eh_finish(struct ata_port *ap) 2780 { 2781 int tag; 2782 2783 /* retry or finish qcs */ 2784 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2785 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2786 2787 if (!(qc->flags & ATA_QCFLAG_FAILED)) 2788 continue; 2789 2790 if (qc->err_mask) { 2791 /* FIXME: Once EH migration is complete, 2792 * generate sense data in this function, 2793 * considering both err_mask and tf. 
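* Until then, such qcs are simply retried or completed below depending on ATA_QCFLAG_RETRY.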
2794 */ 2795 if (qc->flags & ATA_QCFLAG_RETRY) 2796 ata_eh_qc_retry(qc); 2797 else 2798 ata_eh_qc_complete(qc); 2799 } else { 2800 if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 2801 ata_eh_qc_complete(qc); 2802 } else { 2803 /* feed zero TF to sense generation */ 2804 memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 2805 ata_eh_qc_retry(qc); 2806 } 2807 } 2808 } 2809 2810 /* make sure nr_active_links is zero after EH */ 2811 WARN_ON(ap->nr_active_links); 2812 ap->nr_active_links = 0; 2813 } 2814 2815 /** 2816 * ata_do_eh - do standard error handling 2817 * @ap: host port to handle error for 2818 * 2819 * @prereset: prereset method (can be NULL) 2820 * @softreset: softreset method (can be NULL) 2821 * @hardreset: hardreset method (can be NULL) 2822 * @postreset: postreset method (can be NULL) 2823 * 2824 * Perform standard error handling sequence. 2825 * 2826 * LOCKING: 2827 * Kernel thread context (may sleep). 2828 */ 2829 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 2830 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 2831 ata_postreset_fn_t postreset) 2832 { 2833 struct ata_device *dev; 2834 int rc; 2835 2836 ata_eh_autopsy(ap); 2837 ata_eh_report(ap); 2838 2839 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 2840 NULL); 2841 if (rc) { 2842 ata_link_for_each_dev(dev, &ap->link) 2843 ata_dev_disable(dev); 2844 } 2845 2846 ata_eh_finish(ap); 2847 } 2848 2849 /** 2850 * ata_std_error_handler - standard error handler 2851 * @ap: host port to handle error for 2852 * 2853 * Standard error handler 2854 * 2855 * LOCKING: 2856 * Kernel thread context (may sleep). 2857 */ 2858 void ata_std_error_handler(struct ata_port *ap) 2859 { 2860 struct ata_port_operations *ops = ap->ops; 2861 ata_reset_fn_t hardreset = ops->hardreset; 2862 2863 /* ignore built-in hardreset if SCR access is not available */ 2864 if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link)) 2865 hardreset = NULL; 2866 2867 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 2868 } 2869 2870 #ifdef CONFIG_PM 2871 /** 2872 * ata_eh_handle_port_suspend - perform port suspend operation 2873 * @ap: port to suspend 2874 * 2875 * Suspend @ap. 2876 * 2877 * LOCKING: 2878 * Kernel thread context (may sleep). 2879 */ 2880 static void ata_eh_handle_port_suspend(struct ata_port *ap) 2881 { 2882 unsigned long flags; 2883 int rc = 0; 2884 2885 /* are we suspending? 
*/ 2886 spin_lock_irqsave(ap->lock, flags); 2887 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 2888 ap->pm_mesg.event == PM_EVENT_ON) { 2889 spin_unlock_irqrestore(ap->lock, flags); 2890 return; 2891 } 2892 spin_unlock_irqrestore(ap->lock, flags); 2893 2894 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 2895 2896 /* tell ACPI we're suspending */ 2897 rc = ata_acpi_on_suspend(ap); 2898 if (rc) 2899 goto out; 2900 2901 /* suspend */ 2902 ata_eh_freeze_port(ap); 2903 2904 if (ap->ops->port_suspend) 2905 rc = ap->ops->port_suspend(ap, ap->pm_mesg); 2906 2907 ata_acpi_set_state(ap, PMSG_SUSPEND); 2908 out: 2909 /* report result */ 2910 spin_lock_irqsave(ap->lock, flags); 2911 2912 ap->pflags &= ~ATA_PFLAG_PM_PENDING; 2913 if (rc == 0) 2914 ap->pflags |= ATA_PFLAG_SUSPENDED; 2915 else if (ap->pflags & ATA_PFLAG_FROZEN) 2916 ata_port_schedule_eh(ap); 2917 2918 if (ap->pm_result) { 2919 *ap->pm_result = rc; 2920 ap->pm_result = NULL; 2921 } 2922 2923 spin_unlock_irqrestore(ap->lock, flags); 2924 2925 return; 2926 } 2927 2928 /** 2929 * ata_eh_handle_port_resume - perform port resume operation 2930 * @ap: port to resume 2931 * 2932 * Resume @ap. 2933 * 2934 * LOCKING: 2935 * Kernel thread context (may sleep). 2936 */ 2937 static void ata_eh_handle_port_resume(struct ata_port *ap) 2938 { 2939 unsigned long flags; 2940 int rc = 0; 2941 2942 /* are we resuming? */ 2943 spin_lock_irqsave(ap->lock, flags); 2944 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 2945 ap->pm_mesg.event != PM_EVENT_ON) { 2946 spin_unlock_irqrestore(ap->lock, flags); 2947 return; 2948 } 2949 spin_unlock_irqrestore(ap->lock, flags); 2950 2951 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); 2952 2953 ata_acpi_set_state(ap, PMSG_ON); 2954 2955 if (ap->ops->port_resume) 2956 rc = ap->ops->port_resume(ap); 2957 2958 /* tell ACPI that we're resuming */ 2959 ata_acpi_on_resume(ap); 2960 2961 /* report result */ 2962 spin_lock_irqsave(ap->lock, flags); 2963 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 2964 if (ap->pm_result) { 2965 *ap->pm_result = rc; 2966 ap->pm_result = NULL; 2967 } 2968 spin_unlock_irqrestore(ap->lock, flags); 2969 } 2970 #endif /* CONFIG_PM */ 2971