// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SATA specific part of ATA helper library
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 * Copyright 2006 Tejun Heo <htejun@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#include "libata.h"
#include "libata-transport.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_long);

/**
 * sata_scr_valid - test whether SCRs are accessible
 * @link: ATA link to test SCR accessibility for
 *
 * Test whether SCRs are accessible for @link.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_link *link)
{
	struct ata_port *ap = link->ap;

	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
}
EXPORT_SYMBOL_GPL(sata_scr_valid);

/**
 * sata_scr_read - read SCR register of the specified port
 * @link: ATA link to read SCR for
 * @reg: SCR to read
 * @val: Place to store read value
 *
 * Read SCR register @reg of @link into *@val.  This function is
 * guaranteed to succeed if @link is ap->link, the cable type of
 * the port is SATA and the port implements ->scr_read.
 *
 * LOCKING:
 * None if @link is ap->link.  Kernel thread context otherwise.
 *
 * RETURNS:
 * 0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_link *link, int reg, u32 *val)
{
	if (ata_is_host_link(link)) {
		if (sata_scr_valid(link))
			return link->ap->ops->scr_read(link, reg, val);
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_read(link, reg, val);
}
EXPORT_SYMBOL_GPL(sata_scr_read);

/**
 * sata_scr_write - write SCR register of the specified port
 * @link: ATA link to write SCR for
 * @reg: SCR to write
 * @val: value to write
 *
 * Write @val to SCR register @reg of @link.  This function is
 * guaranteed to succeed if @link is ap->link, the cable type of
 * the port is SATA and the port implements ->scr_write.
 *
 * LOCKING:
 * None if @link is ap->link.  Kernel thread context otherwise.
 *
 * RETURNS:
 * 0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		if (sata_scr_valid(link))
			return link->ap->ops->scr_write(link, reg, val);
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_write(link, reg, val);
}
EXPORT_SYMBOL_GPL(sata_scr_write);
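
/*
 * Illustrative sketch (not used by libata itself): reading SStatus through
 * sata_scr_read() and decoding the DET and SPD fields.  The helper name and
 * its caller are hypothetical; only sata_scr_read() and SCR_STATUS come from
 * this library.
 */
static int __maybe_unused example_sstatus_fields(struct ata_link *link,
						 u32 *det, u32 *spd)
{
	u32 sstatus;
	int rc;

	/* Fails with -EOPNOTSUPP if the port has no SCR access. */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	*det = sstatus & 0xf;		/* 0x3 = device present and PHY up */
	*spd = (sstatus >> 4) & 0xf;	/* 1 = 1.5 Gbps, 2 = 3 Gbps, 3 = 6 Gbps */
	return 0;
}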

/**
 * sata_scr_write_flush - write SCR register of the specified port and flush
 * @link: ATA link to write SCR for
 * @reg: SCR to write
 * @val: value to write
 *
 * This function is identical to sata_scr_write() except that this
 * function performs flush after writing to the register.
 *
 * LOCKING:
 * None if @link is ap->link.  Kernel thread context otherwise.
 *
 * RETURNS:
 * 0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		int rc;

		if (sata_scr_valid(link)) {
			rc = link->ap->ops->scr_write(link, reg, val);
			if (rc == 0)
				rc = link->ap->ops->scr_read(link, reg, &val);
			return rc;
		}
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_write(link, reg, val);
}
EXPORT_SYMBOL_GPL(sata_scr_write_flush);

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for command
 * @fis: Buffer into which data will be output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = tf->auxiliary & 0xff;
	fis[17] = (tf->auxiliary >> 8) & 0xff;
	fis[18] = (tf->auxiliary >> 16) & 0xff;
	fis[19] = (tf->auxiliary >> 24) & 0xff;
}
EXPORT_SYMBOL_GPL(ata_tf_to_fis);

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->status = fis[2];
	tf->error = fis[3];

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
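
/*
 * Illustrative sketch (not part of the libata API): filling a taskfile for a
 * 28-bit READ DMA and serializing it into the 20-byte Register - Host to
 * Device FIS a controller would put on the wire.  The function name and the
 * fixed LBA/count values are made up for the example.
 */
static void __maybe_unused example_build_read_fis(u8 pmp, u8 *fis /* 20 bytes */)
{
	struct ata_taskfile tf = { };

	tf.command = ATA_CMD_READ;	/* READ DMA, 28-bit */
	tf.device  = ATA_LBA;		/* LBA addressing */
	tf.nsect   = 8;			/* 8 sectors */
	tf.lbal    = 0x00;		/* LBA 0x1000 split over lbal/lbam/lbah */
	tf.lbam    = 0x10;
	tf.lbah    = 0x00;

	/* is_cmd=1 sets the C bit so the device treats this as a command FIS */
	ata_tf_to_fis(&tf, pmp, 1, fis);
}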

/**
 * sata_link_debounce - debounce SATA phy status
 * @link: ATA link to debounce SATA phy status for
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Make sure SStatus of @link reaches stable state, determined by
 * holding the same value where DET is not 1 for @duration polled
 * every @interval, before @timeout.  Timeout constrains the
 * beginning of the stable state.  Because DET gets stuck at 1 on
 * some controllers after hot unplugging, this function waits
 * until timeout then returns 0 if DET is stable at 1.
 *
 * @timeout is further limited by @deadline.  The sooner of the
 * two is used.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
EXPORT_SYMBOL_GPL(sata_link_debounce);

/**
 * sata_link_resume - resume SATA link
 * @link: ATA link to resume SATA
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Resume SATA phy @link and debounce it.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * controllers (ata_piix SIDPR).  Make sure DET actually is
	 * cleared.
	 */
	do {
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;
		/*
		 * Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * debouncing.
		 */
		if (!(link->flags & ATA_LFLAG_NO_DEBOUNCE_DELAY))
			ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			      scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	return rc != -EINVAL ? rc : 0;
}
EXPORT_SYMBOL_GPL(sata_link_resume);
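
/*
 * Illustrative sketch (hypothetical helper, not used by the kernel): bringing
 * a link back up with the normal debounce timings and a 5 second budget.  In
 * real drivers this happens from EH reset methods, which receive @deadline
 * from the EH core instead of computing it locally.
 */
static int __maybe_unused example_resume_link(struct ata_link *link)
{
	/* params = { interval, duration, timeout } in msec */
	const unsigned long *params = sata_deb_timing_normal;
	unsigned long deadline = ata_deadline(jiffies, 5000);

	/* sets DET back to 0 (no action) and waits for SStatus to stabilize */
	return sata_link_resume(link, params, deadline);
}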

/**
 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 * @link: ATA link to manipulate SControl for
 * @policy: LPM policy to configure
 * @spm_wakeup: initiate LPM transition to active state
 *
 * Manipulate the IPM field of the SControl register of @link
 * according to @policy.  If @policy is ATA_LPM_MAX_POWER and
 * @spm_wakeup is %true, the SPM field is manipulated to wake up
 * the link.  This function also clears PHYRDY_CHG before
 * returning.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions */
		scontrol |= (0x7 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x6 << 8);
		break;
	case ATA_LPM_MED_POWER_WITH_DIPM:
	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0)
			/* no restrictions on LPM transitions */
			scontrol &= ~(0x7 << 8);
		else {
			/* empty port, power off */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
EXPORT_SYMBOL_GPL(sata_link_scr_lpm);

static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit, target, spd;

	limit = link->sata_spd_limit;

	/* Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
	 */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	if (limit == UINT_MAX)
		target = 0;
	else
		target = fls(limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return spd != target;
}

/**
 * sata_set_spd_needed - is SATA spd configuration needed
 * @link: Link in question
 *
 * Test whether the spd limit in SControl matches
 * @link->sata_spd_limit.  This function is used to determine
 * whether hardreset is necessary to apply SATA spd
 * configuration.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 1 if SATA spd configuration is needed, 0 otherwise.
 */
static int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 1;

	return __sata_set_spd_needed(link, &scontrol);
}
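
/*
 * Worked illustration (hypothetical helper): how a sata_spd_limit mask maps
 * to the SControl SPD field, mirroring the fls() logic above.  A limit mask
 * of 0x1 (gen1 only) yields target 1, 0x3 (up to gen2) yields 2, and the
 * "no limit" value UINT_MAX yields 0, i.e. no speed restriction.
 */
static u32 __maybe_unused example_spd_target(u32 spd_limit_mask)
{
	if (spd_limit_mask == UINT_MAX)
		return 0;	/* leave SPD at "no restriction" */

	/* highest allowed generation: fls(0x1) = 1, fls(0x3) = 2, ... */
	return fls(spd_limit_mask) & 0xf;
}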

/**
 * sata_set_spd - set SATA spd according to spd limit
 * @link: Link to set SATA spd for
 *
 * Set SATA spd of @link according to sata_spd_limit.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 if spd doesn't need to be changed, 1 if spd has been
 * changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
EXPORT_SYMBOL_GPL(sata_set_spd);

/**
 * sata_link_hardreset - reset link via SATA phy reset
 * @link: link to reset
 * @timing: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 * @online: optional out parameter indicating link onlineness
 * @check_ready: optional callback to check link readiness
 *
 * SATA phy-reset @link using DET bits of SControl register.
 * After hardreset, link readiness is waited upon using
 * ata_wait_ready() if @check_ready is specified.  LLDs are
 * allowed to not specify @check_ready and wait themselves after
 * this function returns.  Device classification is LLD's
 * responsibility.
 *
 * *@online is set to one iff reset succeeded and @link is online
 * after reset.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	ata_msleep(link->ap, 1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for
		 * ATA_TMOUT_PMP_SRST_WAIT.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(sata_link_hardreset);
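
/*
 * Illustrative sketch (hypothetical LLD reset method, not part of libata): a
 * typical ->hardreset implementation is little more than a wrapper around
 * sata_link_hardreset(); the in-tree sata_std_hardreset() follows the same
 * shape.  The classification step is deliberately left out - decoding the
 * signature FIS is driver specific.
 */
static int __maybe_unused example_hardreset(struct ata_link *link,
					    unsigned int *class,
					    unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;
	int rc;

	/* no check_ready callback: this driver waits for readiness itself */
	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);

	/* offline links carry no device; classification of online links is
	 * the LLD's responsibility (e.g. from the received D2H FIS)
	 */
	if (!online)
		*class = ATA_DEV_NONE;

	return rc;
}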

/**
 * ata_qc_complete_multiple - Complete multiple qcs successfully
 * @ap: port in question
 * @qc_active: new qc_active mask
 *
 * Complete in-flight commands.  This function is meant to be
 * called from low-level driver's interrupt routine to complete
 * requests normally.  ap->qc_active and @qc_active are compared
 * and commands are completed accordingly.
 *
 * Always use this function when completing multiple NCQ commands
 * from IRQ handlers instead of calling ata_qc_complete()
 * multiple times to keep IRQ expect status properly in sync.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
{
	u64 done_mask, ap_qc_active = ap->qc_active;
	int nr_done = 0;

	/*
	 * If the internal tag is set on ap->qc_active, then we care about
	 * bit0 on the passed in qc_active mask.  Move that bit up to match
	 * the internal tag.
	 */
	if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
		qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
		qc_active ^= qc_active & 0x01;
	}

	done_mask = ap_qc_active ^ qc_active;

	if (unlikely(done_mask & qc_active)) {
		ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
			     ap->qc_active, qc_active);
		return -EINVAL;
	}

	if (ap->ops->qc_ncq_fill_rtf)
		ap->ops->qc_ncq_fill_rtf(ap, done_mask);

	while (done_mask) {
		struct ata_queued_cmd *qc;
		unsigned int tag = __ffs64(done_mask);

		qc = ata_qc_from_tag(ap, tag);
		if (qc) {
			ata_qc_complete(qc);
			nr_done++;
		}
		done_mask &= ~(1ULL << tag);
	}

	return nr_done;
}
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
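
/*
 * Illustrative sketch (hypothetical interrupt path): completing NCQ commands
 * from an IRQ handler.  example_read_active_tags() is a stand-in for however
 * the controller reports which tags are still outstanding (e.g. AHCI's
 * PxSACT register); it is not a real libata interface.
 */
static u32 __maybe_unused example_read_active_tags(struct ata_port *ap)
{
	return 0;	/* hypothetical: pretend no commands are outstanding */
}

static void __maybe_unused example_ncq_irq(struct ata_port *ap)
{
	/* bits still set = commands still in flight on the device */
	u64 qc_active = example_read_active_tags(ap);

	/* caller holds the host lock, as ata_qc_complete_multiple() requires */
	ata_qc_complete_multiple(ap, qc_active);
}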

/**
 * ata_slave_link_init - initialize slave link
 * @ap: port to initialize slave link for
 *
 * Create and initialize slave link for @ap.  This enables slave
 * link handling on the port.
 *
 * In libata, a port contains links and a link contains devices.
 * There is a single host link but if a PMP is attached to it,
 * there can be multiple fan-out links.  On SATA, there's usually
 * a single device connected to a link but PATA and SATA
 * controllers emulating TF based interface can have two - master
 * and slave.
 *
 * However, there are a few controllers which don't fit into this
 * abstraction too well - SATA controllers which emulate TF
 * interface with both master and slave devices but also have
 * separate SCR register sets for each device.  These controllers
 * need separate links for physical link handling
 * (e.g. onlineness, link speed) but should be treated like a
 * traditional M/S controller for everything else (e.g. command
 * issue, softreset).
 *
 * slave_link is libata's way of handling this class of
 * controllers without impacting core layer too much.  For
 * anything other than physical link handling, the default host
 * link is used for both master and slave.  For physical link
 * handling, separate @ap->slave_link is used.  All dirty details
 * are implemented inside libata core layer.  From LLD's POV, the
 * only difference is that prereset, hardreset and postreset are
 * called once more for the slave link, so the reset sequence
 * looks like the following.
 *
 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
 * softreset(M) -> postreset(M) -> postreset(S)
 *
 * Note that softreset is called only for the master.  Softreset
 * resets both M/S by definition, so SRST on master should handle
 * both (the standard method will work just fine).
 *
 * LOCKING:
 * Should be called before host is registered.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int ata_slave_link_init(struct ata_port *ap)
{
	struct ata_link *link;

	WARN_ON(ap->slave_link);
	WARN_ON(ap->flags & ATA_FLAG_PMP);

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	ata_link_init(ap, link, 1);
	ap->slave_link = link;
	return 0;
}
EXPORT_SYMBOL_GPL(ata_slave_link_init);

/**
 * sata_lpm_ignore_phy_events - test if PHY event should be ignored
 * @link: Link receiving the event
 *
 * Test whether the received PHY event has to be ignored or not.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * True if the event has to be ignored.
 */
bool sata_lpm_ignore_phy_events(struct ata_link *link)
{
	unsigned long lpm_timeout = link->last_lpm_change +
				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);

	/* if LPM is enabled, PHYRDY doesn't mean anything */
	if (link->lpm_policy > ATA_LPM_MAX_POWER)
		return true;

	/* ignore the first PHY event after the LPM policy changed
	 * as it might be spurious
	 */
	if ((link->flags & ATA_LFLAG_CHANGED) &&
	    time_before(jiffies, lpm_timeout))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);

static const char *ata_lpm_policy_names[] = {
	[ATA_LPM_UNKNOWN]		 = "max_performance",
	[ATA_LPM_MAX_POWER]		 = "max_performance",
	[ATA_LPM_MED_POWER]		 = "medium_power",
	[ATA_LPM_MED_POWER_WITH_DIPM]	 = "med_power_with_dipm",
	[ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial",
	[ATA_LPM_MIN_POWER]		 = "min_power",
};

static ssize_t ata_scsi_lpm_store(struct device *device,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(device);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ata_link *link;
	struct ata_device *dev;
	enum ata_lpm_policy policy;
	unsigned long flags;

	/* UNKNOWN is internal state, iterate from MAX_POWER */
	for (policy = ATA_LPM_MAX_POWER;
	     policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
		const char *name = ata_lpm_policy_names[policy];

		if (strncmp(name, buf, strlen(name)) == 0)
			break;
	}
	if (policy == ARRAY_SIZE(ata_lpm_policy_names))
		return -EINVAL;

	spin_lock_irqsave(ap->lock, flags);

	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, &ap->link, ENABLED) {
			if (dev->horkage & ATA_HORKAGE_NOLPM) {
				count = -EOPNOTSUPP;
				goto out_unlock;
			}
		}
	}

	ap->target_lpm_policy = policy;
	ata_port_schedule_eh(ap);
out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
	return count;
}

static ssize_t ata_scsi_lpm_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);

	if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
		return -EINVAL;

	return sysfs_emit(buf, "%s\n",
			  ata_lpm_policy_names[ap->target_lpm_policy]);
}
DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
	    ata_scsi_lpm_show, ata_scsi_lpm_store);
EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);

static ssize_t ata_ncq_prio_supported_show(struct device *device,
					   struct device_attribute *attr,
					   char *buf)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev;
	bool ncq_prio_supported;
	int rc = 0;

	spin_lock_irq(ap->lock);
	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev)
		rc = -ENODEV;
	else
		ncq_prio_supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
	spin_unlock_irq(ap->lock);

	return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_supported);
}

DEVICE_ATTR(ncq_prio_supported, S_IRUGO, ata_ncq_prio_supported_show, NULL);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_supported);

static ssize_t ata_ncq_prio_enable_show(struct device *device,
					struct device_attribute *attr,
					char *buf)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev;
	bool ncq_prio_enable;
	int rc = 0;

	spin_lock_irq(ap->lock);
	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev)
		rc = -ENODEV;
	else
		ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED;
	spin_unlock_irq(ap->lock);

	return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_enable);
}

static ssize_t ata_ncq_prio_enable_store(struct device *device,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct ata_port *ap;
	struct ata_device *dev;
	long int input;
	int rc = 0;

	rc = kstrtol(buf, 10, &input);
	if (rc)
		return rc;
	if ((input < 0) || (input > 1))
		return -EINVAL;

	ap = ata_shost_to_port(sdev->host);
	dev = ata_scsi_find_dev(ap, sdev);
	if (unlikely(!dev))
		return -ENODEV;

	spin_lock_irq(ap->lock);

	if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (input)
		dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLED;
	else
		dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;

unlock:
	spin_unlock_irq(ap->lock);

	return rc ? rc : len;
}

DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
	    ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);

static struct attribute *ata_ncq_sdev_attrs[] = {
	&dev_attr_unload_heads.attr,
	&dev_attr_ncq_prio_enable.attr,
	&dev_attr_ncq_prio_supported.attr,
	NULL
};

static const struct attribute_group ata_ncq_sdev_attr_group = {
	.attrs = ata_ncq_sdev_attrs
};

const struct attribute_group *ata_ncq_sdev_groups[] = {
	&ata_ncq_sdev_attr_group,
	NULL
};
EXPORT_SYMBOL_GPL(ata_ncq_sdev_groups);

static ssize_t
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);

	if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
		return ap->ops->em_store(ap, buf, count);
	return -EINVAL;
}

static ssize_t
ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);

	if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
		return ap->ops->em_show(ap, buf);
	return -EINVAL;
}
DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
	    ata_scsi_em_message_show, ata_scsi_em_message_store);
EXPORT_SYMBOL_GPL(dev_attr_em_message);

static ssize_t
ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);

	return sysfs_emit(buf, "%d\n", ap->em_message_type);
}
DEVICE_ATTR(em_message_type, S_IRUGO,
	    ata_scsi_em_message_type_show, NULL);
EXPORT_SYMBOL_GPL(dev_attr_em_message_type);

static ssize_t
ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);

	if (atadev && ap->ops->sw_activity_show &&
	    (ap->flags & ATA_FLAG_SW_ACTIVITY))
		return ap->ops->sw_activity_show(atadev, buf);
	return -EINVAL;
}

static ssize_t
ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
	enum sw_activity val;
	int rc;

	if (atadev && ap->ops->sw_activity_store &&
	    (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
		val = simple_strtoul(buf, NULL, 0);
		switch (val) {
		case OFF: case BLINK_ON: case BLINK_OFF:
			rc = ap->ops->sw_activity_store(atadev, val);
			if (!rc)
				return count;
			else
				return rc;
		}
	}
	return -EINVAL;
}
DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
	    ata_scsi_activity_store);
EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
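
/*
 * Illustrative sketch (hypothetical host template): how an NCQ-capable LLD
 * exposes the attributes above on its SCSI devices and wires up queue depth
 * changes.  In-tree drivers normally pick these two members up from the
 * ATA_NCQ_SHT() helper macro rather than spelling them out.
 */
static struct scsi_host_template example_sht __maybe_unused = {
	.name			= "example",		  /* hypothetical driver name */
	.sdev_groups		= ata_ncq_sdev_groups,	  /* unload_heads + ncq_prio_* */
	.change_queue_depth	= ata_scsi_change_queue_depth,
};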

/**
 * ata_change_queue_depth - Set a device maximum queue depth
 * @ap: ATA port of the target device
 * @dev: target ATA device
 * @sdev: SCSI device to configure queue depth for
 * @queue_depth: new queue depth
 *
 * Helper to set a device maximum queue depth, usable with both libsas
 * and libata.
 *
 */
int ata_change_queue_depth(struct ata_port *ap, struct ata_device *dev,
			   struct scsi_device *sdev, int queue_depth)
{
	unsigned long flags;

	if (!dev || !ata_dev_enabled(dev))
		return sdev->queue_depth;

	if (queue_depth < 1 || queue_depth == sdev->queue_depth)
		return sdev->queue_depth;

	/* NCQ enabled? */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_NCQ_OFF;
	if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		queue_depth = 1;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* limit and apply queue depth */
	queue_depth = min(queue_depth, sdev->host->can_queue);
	queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
	queue_depth = min(queue_depth, ATA_MAX_QUEUE);

	if (sdev->queue_depth == queue_depth)
		return -EINVAL;

	return scsi_change_queue_depth(sdev, queue_depth);
}
EXPORT_SYMBOL_GPL(ata_change_queue_depth);

/**
 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
 * @sdev: SCSI device to configure queue depth for
 * @queue_depth: new queue depth
 *
 * This is libata standard hostt->change_queue_depth callback.
 * SCSI will call into this callback when user tries to set queue
 * depth via sysfs.
 *
 * LOCKING:
 * SCSI layer (we don't care)
 *
 * RETURNS:
 * Newly configured queue depth.
 */
int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);

	return ata_change_queue_depth(ap, ata_scsi_find_dev(ap, sdev),
				      sdev, queue_depth);
}
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);

/**
 * ata_sas_port_alloc - Allocate port for a SAS attached SATA device
 * @host: ATA host container for all SAS ports
 * @port_info: Information from low-level host driver
 * @shost: SCSI host that the scsi device is attached to
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * ata_port pointer on success / NULL on failure.
 */
struct ata_port *ata_sas_port_alloc(struct ata_host *host,
				    struct ata_port_info *port_info,
				    struct Scsi_Host *shost)
{
	struct ata_port *ap;

	ap = ata_port_alloc(host);
	if (!ap)
		return NULL;

	ap->port_no = 0;
	ap->lock = &host->lock;
	ap->pio_mask = port_info->pio_mask;
	ap->mwdma_mask = port_info->mwdma_mask;
	ap->udma_mask = port_info->udma_mask;
	ap->flags |= port_info->flags;
	ap->ops = port_info->port_ops;
	ap->cbl = ATA_CBL_SATA;

	return ap;
}
EXPORT_SYMBOL_GPL(ata_sas_port_alloc);

/**
 * ata_sas_port_start - Set port up for dma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
int ata_sas_port_start(struct ata_port *ap)
{
	/*
	 * the port is marked as frozen at allocation time, but if we don't
	 * have new eh, we won't thaw it
	 */
	if (!ap->ops->error_handler)
		ap->pflags &= ~ATA_PFLAG_FROZEN;
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_start);

/**
 * ata_sas_port_stop - Undo ata_sas_port_start()
 * @ap: Port to shut down
 *
 * May be used as the port_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sas_port_stop(struct ata_port *ap)
{
}
EXPORT_SYMBOL_GPL(ata_sas_port_stop);

/**
 * ata_sas_async_probe - simply schedule probing and return
 * @ap: Port to probe
 *
 * For batch scheduling of probe for sas attached ata devices, assumes
 * the port has already been through ata_sas_port_init()
 */
void ata_sas_async_probe(struct ata_port *ap)
{
	__ata_port_probe(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_async_probe);

int ata_sas_sync_probe(struct ata_port *ap)
{
	return ata_port_probe(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_sync_probe);

/**
 * ata_sas_port_init - Initialize a SATA device
 * @ap: SATA port to initialize
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
int ata_sas_port_init(struct ata_port *ap)
{
	int rc = ap->ops->port_start(ap);

	if (rc)
		return rc;
	ap->print_id = atomic_inc_return(&ata_print_id);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_init);

int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
{
	return ata_tport_add(parent, ap);
}
EXPORT_SYMBOL_GPL(ata_sas_tport_add);

void ata_sas_tport_delete(struct ata_port *ap)
{
	ata_tport_delete(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_tport_delete);

/**
 * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
 * @ap: SATA port to destroy
 *
 */
void ata_sas_port_destroy(struct ata_port *ap)
{
	if (ap->ops->port_stop)
		ap->ops->port_stop(ap);
	kfree(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_port_destroy);

/**
 * ata_sas_slave_configure - Default slave_config routine for libata devices
 * @sdev: SCSI device to configure
 * @ap: ATA port to which SCSI device is attached
 *
 * RETURNS:
 * Zero.
 */
int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
{
	ata_scsi_sdev_config(sdev);
	ata_scsi_dev_config(sdev, ap->link.device);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);

/**
 * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
 * @cmd: SCSI command to be sent
 * @ap: ATA port to which the command is being sent
 *
 * RETURNS:
 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
 * 0 otherwise.
 */
int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
{
	int rc = 0;

	if (likely(ata_dev_enabled(ap->link.device)))
		rc = __ata_scsi_queuecmd(cmd, ap->link.device);
	else {
		cmd->result = (DID_BAD_TARGET << 16);
		scsi_done(cmd);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_queuecmd);

/**
 * sata_async_notification - SATA async notification handler
 * @ap: ATA port where async notification is received
 *
 * Handler to be called when async notification via SDB FIS is
 * received.  This function schedules EH if necessary.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}
EXPORT_SYMBOL_GPL(sata_async_notification);
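
/*
 * Illustrative sketch (hypothetical interrupt fragment): a controller that
 * flags reception of a Set Device Bits FIS with the Notification bit set
 * simply forwards the event; sata_async_notification() works out whether it
 * means an ATAPI media change or a PMP PHY event.  @sdb_notify stands in for
 * whatever status bit the hardware provides.
 */
static void __maybe_unused example_an_irq(struct ata_port *ap, bool sdb_notify)
{
	/* called under the host lock, as required by sata_async_notification() */
	if (sdb_notify)
		sata_async_notification(ap);	/* may schedule EH */
}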

/**
 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
 * @dev: Device to read log page 10h from
 * @tag: Resulting tag of the failed command
 * @tf: Resulting taskfile registers of the failed command
 *
 * Read log page 10h to obtain NCQ error details and clear error
 * condition.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
	if (err_mask)
		return -EIO;

	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
			     csum);

	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	tf->status = buf[2];
	tf->error = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];
	if (ata_id_has_ncq_autosense(dev->id) && (tf->status & ATA_SENSE))
		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];

	return 0;
}

/**
 * ata_eh_analyze_ncq_error - analyze NCQ error
 * @link: ATA link to analyze NCQ error for
 *
 * Read log page 10h, determine the offending qc and acquire
 * error status TF.  For NCQ device errors, all the LLDD has to do
 * is set AC_ERR_DEV in ehi->err_mask.  This function takes
 * care of the rest.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ata_port_is_frozen(ap))
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_EH))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
			     rc);
		return;
	}

	if (!(link->sactive & (1 << tag))) {
		ata_link_err(link, "log page 10h reported inactive tag %d\n",
			     tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;

	/*
	 * If the device supports NCQ autosense, ata_eh_read_log_10h() will have
	 * stored the sense data in qc->result_tf.auxiliary.
	 */
	if (qc->result_tf.auxiliary) {
		char sense_key, asc, ascq;

		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
		ascq = qc->result_tf.auxiliary & 0xff;
		if (ata_scsi_sense_is_valid(sense_key, asc, ascq)) {
			ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc,
					   ascq);
			ata_scsi_set_sense_information(dev, qc->scsicmd,
						       &qc->result_tf);
			qc->flags |= ATA_QCFLAG_SENSE_VALID;
		}
	}

	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_EH) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* Skip the single QC which caused the NCQ error. */
		if (qc->err_mask)
			continue;

		/*
		 * For SATA, the STATUS and ERROR fields are shared for all NCQ
		 * commands that were completed with the same SDB FIS.
		 * Therefore, we have to clear the ATA_ERR bit for all QCs
		 * except the one that caused the NCQ error.
		 */
		qc->result_tf.status &= ~ATA_ERR;
		qc->result_tf.error = 0;

		/*
		 * If we get an NCQ error, that means that a single command was
		 * aborted.  All other failed commands for our link should be
		 * retried and have no business going through further scrutiny
		 * by ata_eh_link_autopsy().
		 */
		qc->flags |= ATA_QCFLAG_RETRY;
	}

	ehc->i.err_mask &= ~AC_ERR_DEV;
}
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
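
/*
 * Illustrative sketch (hypothetical LLD error path): when the device signals
 * an NCQ error, the driver only needs to record AC_ERR_DEV on the link and
 * get the failed commands into EH; ata_eh_analyze_ncq_error() then reads log
 * page 10h and pins the error on the right tag.  ata_link_abort() is used
 * here as one way of doing that and must be called with the host lock held.
 */
static void __maybe_unused example_report_ncq_dev_error(struct ata_link *link)
{
	struct ata_eh_info *ehi = &link->eh_info;

	ehi->err_mask |= AC_ERR_DEV;	/* all EH needs from the LLD */
	ata_link_abort(link);		/* fail in-flight qcs and schedule EH */
}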