1 /* 2 * Serial Attached SCSI (SAS) class SCSI Host glue. 3 * 4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved. 5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> 6 * 7 * This file is licensed under GPLv2. 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License as 11 * published by the Free Software Foundation; either version 2 of the 12 * License, or (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, but 15 * WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * General Public License for more details. 18 * 19 * You should have received a copy of the GNU General Public License 20 * along with this program; if not, write to the Free Software 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 22 * USA 23 * 24 */ 25 26 #include <linux/kthread.h> 27 #include <linux/firmware.h> 28 #include <linux/export.h> 29 #include <linux/ctype.h> 30 #include <linux/kernel.h> 31 32 #include "sas_internal.h" 33 34 #include <scsi/scsi_host.h> 35 #include <scsi/scsi_device.h> 36 #include <scsi/scsi_tcq.h> 37 #include <scsi/scsi.h> 38 #include <scsi/scsi_eh.h> 39 #include <scsi/scsi_transport.h> 40 #include <scsi/scsi_transport_sas.h> 41 #include <scsi/sas_ata.h> 42 #include "../scsi_sas_internal.h" 43 #include "../scsi_transport_api.h" 44 #include "../scsi_priv.h" 45 46 #include <linux/err.h> 47 #include <linux/blkdev.h> 48 #include <linux/freezer.h> 49 #include <linux/gfp.h> 50 #include <linux/scatterlist.h> 51 #include <linux/libata.h> 52 53 /* record final status and free the task */ 54 static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task) 55 { 56 struct task_status_struct *ts = &task->task_status; 57 int hs = 0, stat = 0; 58 59 if (ts->resp == SAS_TASK_UNDELIVERED) { 60 /* transport error */ 61 hs = DID_NO_CONNECT; 62 } else { /* ts->resp == SAS_TASK_COMPLETE */ 63 /* task delivered, what happened afterwards? */ 64 switch (ts->stat) { 65 case SAS_DEV_NO_RESPONSE: 66 case SAS_INTERRUPTED: 67 case SAS_PHY_DOWN: 68 case SAS_NAK_R_ERR: 69 case SAS_OPEN_TO: 70 hs = DID_NO_CONNECT; 71 break; 72 case SAS_DATA_UNDERRUN: 73 scsi_set_resid(sc, ts->residual); 74 if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow) 75 hs = DID_ERROR; 76 break; 77 case SAS_DATA_OVERRUN: 78 hs = DID_ERROR; 79 break; 80 case SAS_QUEUE_FULL: 81 hs = DID_SOFT_ERROR; /* retry */ 82 break; 83 case SAS_DEVICE_UNKNOWN: 84 hs = DID_BAD_TARGET; 85 break; 86 case SAS_SG_ERR: 87 hs = DID_PARITY; 88 break; 89 case SAS_OPEN_REJECT: 90 if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY) 91 hs = DID_SOFT_ERROR; /* retry */ 92 else 93 hs = DID_ERROR; 94 break; 95 case SAS_PROTO_RESPONSE: 96 pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n", 97 task->dev->port->ha->sas_ha_name); 98 break; 99 case SAS_ABORTED_TASK: 100 hs = DID_ABORT; 101 break; 102 case SAM_STAT_CHECK_CONDITION: 103 memcpy(sc->sense_buffer, ts->buf, 104 min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size)); 105 stat = SAM_STAT_CHECK_CONDITION; 106 break; 107 default: 108 stat = ts->stat; 109 break; 110 } 111 } 112 113 sc->result = (hs << 16) | stat; 114 ASSIGN_SAS_TASK(sc, NULL); 115 sas_free_task(task); 116 } 117 118 static void sas_scsi_task_done(struct sas_task *task) 119 { 120 struct scsi_cmnd *sc = task->uldd_task; 121 struct domain_device *dev = task->dev; 122 struct sas_ha_struct *ha = dev->port->ha; 123 unsigned long flags; 124 125 spin_lock_irqsave(&dev->done_lock, flags); 126 if (test_bit(SAS_HA_FROZEN, &ha->state)) 127 task = NULL; 128 else 129 ASSIGN_SAS_TASK(sc, NULL); 130 spin_unlock_irqrestore(&dev->done_lock, flags); 131 132 if (unlikely(!task)) { 133 /* task will be completed by the error handler */ 134 pr_debug("task done but aborted\n"); 135 return; 136 } 137 138 if (unlikely(!sc)) { 139 pr_debug("task_done called with non existing SCSI cmnd!\n"); 140 sas_free_task(task); 141 return; 142 } 143 144 sas_end_task(sc, task); 145 sc->scsi_done(sc); 146 } 147 148 static struct sas_task *sas_create_task(struct scsi_cmnd *cmd, 149 struct domain_device *dev, 150 gfp_t gfp_flags) 151 { 152 struct sas_task *task = sas_alloc_task(gfp_flags); 153 struct scsi_lun lun; 154 155 if (!task) 156 return NULL; 157 158 task->uldd_task = cmd; 159 ASSIGN_SAS_TASK(cmd, task); 160 161 task->dev = dev; 162 task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */ 163 164 task->ssp_task.retry_count = 1; 165 int_to_scsilun(cmd->device->lun, &lun); 166 memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8); 167 task->ssp_task.task_attr = TASK_ATTR_SIMPLE; 168 task->ssp_task.cmd = cmd; 169 170 task->scatter = scsi_sglist(cmd); 171 task->num_scatter = scsi_sg_count(cmd); 172 task->total_xfer_len = scsi_bufflen(cmd); 173 task->data_dir = cmd->sc_data_direction; 174 175 task->task_done = sas_scsi_task_done; 176 177 return task; 178 } 179 180 int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 181 { 182 struct sas_internal *i = to_sas_internal(host->transportt); 183 struct domain_device *dev = cmd_to_domain_dev(cmd); 184 struct sas_task *task; 185 int res = 0; 186 187 /* If the device fell off, no sense in issuing commands */ 188 if (test_bit(SAS_DEV_GONE, &dev->state)) { 189 cmd->result = DID_BAD_TARGET << 16; 190 goto out_done; 191 } 192 193 if (dev_is_sata(dev)) { 194 spin_lock_irq(dev->sata_dev.ap->lock); 195 res = ata_sas_queuecmd(cmd, dev->sata_dev.ap); 196 spin_unlock_irq(dev->sata_dev.ap->lock); 197 return res; 198 } 199 200 task = sas_create_task(cmd, dev, GFP_ATOMIC); 201 if (!task) 202 return SCSI_MLQUEUE_HOST_BUSY; 203 204 res = i->dft->lldd_execute_task(task, GFP_ATOMIC); 205 if (res) 206 goto out_free_task; 207 return 0; 208 209 out_free_task: 210 pr_debug("lldd_execute_task returned: %d\n", res); 211 ASSIGN_SAS_TASK(cmd, NULL); 212 sas_free_task(task); 213 if (res == -SAS_QUEUE_FULL) 214 cmd->result = DID_SOFT_ERROR << 16; /* retry */ 215 else 216 cmd->result = DID_ERROR << 16; 217 out_done: 218 cmd->scsi_done(cmd); 219 return 0; 220 } 221 222 static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) 223 { 224 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host); 225 struct domain_device *dev = cmd_to_domain_dev(cmd); 226 struct sas_task *task = TO_SAS_TASK(cmd); 227 228 /* At this point, we only get called following an actual abort 229 * of the task, so we should be guaranteed not to be racing with 230 * any completions from the LLD. Task is freed after this. 231 */ 232 sas_end_task(cmd, task); 233 234 if (dev_is_sata(dev)) { 235 /* defer commands to libata so that libata EH can 236 * handle ata qcs correctly 237 */ 238 list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q); 239 return; 240 } 241 242 /* now finish the command and move it on to the error 243 * handler done list, this also takes it off the 244 * error handler pending list. 245 */ 246 scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q); 247 } 248 249 static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) 250 { 251 struct scsi_cmnd *cmd, *n; 252 253 list_for_each_entry_safe(cmd, n, error_q, eh_entry) { 254 if (cmd->device->sdev_target == my_cmd->device->sdev_target && 255 cmd->device->lun == my_cmd->device->lun) 256 sas_eh_finish_cmd(cmd); 257 } 258 } 259 260 static void sas_scsi_clear_queue_I_T(struct list_head *error_q, 261 struct domain_device *dev) 262 { 263 struct scsi_cmnd *cmd, *n; 264 265 list_for_each_entry_safe(cmd, n, error_q, eh_entry) { 266 struct domain_device *x = cmd_to_domain_dev(cmd); 267 268 if (x == dev) 269 sas_eh_finish_cmd(cmd); 270 } 271 } 272 273 static void sas_scsi_clear_queue_port(struct list_head *error_q, 274 struct asd_sas_port *port) 275 { 276 struct scsi_cmnd *cmd, *n; 277 278 list_for_each_entry_safe(cmd, n, error_q, eh_entry) { 279 struct domain_device *dev = cmd_to_domain_dev(cmd); 280 struct asd_sas_port *x = dev->port; 281 282 if (x == port) 283 sas_eh_finish_cmd(cmd); 284 } 285 } 286 287 enum task_disposition { 288 TASK_IS_DONE, 289 TASK_IS_ABORTED, 290 TASK_IS_AT_LU, 291 TASK_IS_NOT_AT_LU, 292 TASK_ABORT_FAILED, 293 }; 294 295 static enum task_disposition sas_scsi_find_task(struct sas_task *task) 296 { 297 unsigned long flags; 298 int i, res; 299 struct sas_internal *si = 300 to_sas_internal(task->dev->port->ha->core.shost->transportt); 301 302 for (i = 0; i < 5; i++) { 303 pr_notice("%s: aborting task 0x%p\n", __func__, task); 304 res = si->dft->lldd_abort_task(task); 305 306 spin_lock_irqsave(&task->task_state_lock, flags); 307 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 308 spin_unlock_irqrestore(&task->task_state_lock, flags); 309 pr_debug("%s: task 0x%p is done\n", __func__, task); 310 return TASK_IS_DONE; 311 } 312 spin_unlock_irqrestore(&task->task_state_lock, flags); 313 314 if (res == TMF_RESP_FUNC_COMPLETE) { 315 pr_notice("%s: task 0x%p is aborted\n", 316 __func__, task); 317 return TASK_IS_ABORTED; 318 } else if (si->dft->lldd_query_task) { 319 pr_notice("%s: querying task 0x%p\n", __func__, task); 320 res = si->dft->lldd_query_task(task); 321 switch (res) { 322 case TMF_RESP_FUNC_SUCC: 323 pr_notice("%s: task 0x%p at LU\n", __func__, 324 task); 325 return TASK_IS_AT_LU; 326 case TMF_RESP_FUNC_COMPLETE: 327 pr_notice("%s: task 0x%p not at LU\n", 328 __func__, task); 329 return TASK_IS_NOT_AT_LU; 330 case TMF_RESP_FUNC_FAILED: 331 pr_notice("%s: task 0x%p failed to abort\n", 332 __func__, task); 333 return TASK_ABORT_FAILED; 334 } 335 336 } 337 } 338 return res; 339 } 340 341 static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd) 342 { 343 int res = TMF_RESP_FUNC_FAILED; 344 struct scsi_lun lun; 345 struct sas_internal *i = 346 to_sas_internal(dev->port->ha->core.shost->transportt); 347 348 int_to_scsilun(cmd->device->lun, &lun); 349 350 pr_notice("eh: device %llx LUN %llx has the task\n", 351 SAS_ADDR(dev->sas_addr), 352 cmd->device->lun); 353 354 if (i->dft->lldd_abort_task_set) 355 res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun); 356 357 if (res == TMF_RESP_FUNC_FAILED) { 358 if (i->dft->lldd_clear_task_set) 359 res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun); 360 } 361 362 if (res == TMF_RESP_FUNC_FAILED) { 363 if (i->dft->lldd_lu_reset) 364 res = i->dft->lldd_lu_reset(dev, lun.scsi_lun); 365 } 366 367 return res; 368 } 369 370 static int sas_recover_I_T(struct domain_device *dev) 371 { 372 int res = TMF_RESP_FUNC_FAILED; 373 struct sas_internal *i = 374 to_sas_internal(dev->port->ha->core.shost->transportt); 375 376 pr_notice("I_T nexus reset for dev %016llx\n", 377 SAS_ADDR(dev->sas_addr)); 378 379 if (i->dft->lldd_I_T_nexus_reset) 380 res = i->dft->lldd_I_T_nexus_reset(dev); 381 382 return res; 383 } 384 385 /* take a reference on the last known good phy for this device */ 386 struct sas_phy *sas_get_local_phy(struct domain_device *dev) 387 { 388 struct sas_ha_struct *ha = dev->port->ha; 389 struct sas_phy *phy; 390 unsigned long flags; 391 392 /* a published domain device always has a valid phy, it may be 393 * stale, but it is never NULL 394 */ 395 BUG_ON(!dev->phy); 396 397 spin_lock_irqsave(&ha->phy_port_lock, flags); 398 phy = dev->phy; 399 get_device(&phy->dev); 400 spin_unlock_irqrestore(&ha->phy_port_lock, flags); 401 402 return phy; 403 } 404 EXPORT_SYMBOL_GPL(sas_get_local_phy); 405 406 static void sas_wait_eh(struct domain_device *dev) 407 { 408 struct sas_ha_struct *ha = dev->port->ha; 409 DEFINE_WAIT(wait); 410 411 if (dev_is_sata(dev)) { 412 ata_port_wait_eh(dev->sata_dev.ap); 413 return; 414 } 415 retry: 416 spin_lock_irq(&ha->lock); 417 418 while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) { 419 prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); 420 spin_unlock_irq(&ha->lock); 421 schedule(); 422 spin_lock_irq(&ha->lock); 423 } 424 finish_wait(&ha->eh_wait_q, &wait); 425 426 spin_unlock_irq(&ha->lock); 427 428 /* make sure SCSI EH is complete */ 429 if (scsi_host_in_recovery(ha->core.shost)) { 430 msleep(10); 431 goto retry; 432 } 433 } 434 EXPORT_SYMBOL(sas_wait_eh); 435 436 static int sas_queue_reset(struct domain_device *dev, int reset_type, 437 u64 lun, int wait) 438 { 439 struct sas_ha_struct *ha = dev->port->ha; 440 int scheduled = 0, tries = 100; 441 442 /* ata: promote lun reset to bus reset */ 443 if (dev_is_sata(dev)) { 444 sas_ata_schedule_reset(dev); 445 if (wait) 446 sas_ata_wait_eh(dev); 447 return SUCCESS; 448 } 449 450 while (!scheduled && tries--) { 451 spin_lock_irq(&ha->lock); 452 if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) && 453 !test_bit(reset_type, &dev->state)) { 454 scheduled = 1; 455 ha->eh_active++; 456 list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q); 457 set_bit(SAS_DEV_EH_PENDING, &dev->state); 458 set_bit(reset_type, &dev->state); 459 int_to_scsilun(lun, &dev->ssp_dev.reset_lun); 460 scsi_schedule_eh(ha->core.shost); 461 } 462 spin_unlock_irq(&ha->lock); 463 464 if (wait) 465 sas_wait_eh(dev); 466 467 if (scheduled) 468 return SUCCESS; 469 } 470 471 pr_warn("%s reset of %s failed\n", 472 reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus", 473 dev_name(&dev->rphy->dev)); 474 475 return FAILED; 476 } 477 478 int sas_eh_abort_handler(struct scsi_cmnd *cmd) 479 { 480 int res = TMF_RESP_FUNC_FAILED; 481 struct sas_task *task = TO_SAS_TASK(cmd); 482 struct Scsi_Host *host = cmd->device->host; 483 struct domain_device *dev = cmd_to_domain_dev(cmd); 484 struct sas_internal *i = to_sas_internal(host->transportt); 485 unsigned long flags; 486 487 if (!i->dft->lldd_abort_task) 488 return FAILED; 489 490 spin_lock_irqsave(host->host_lock, flags); 491 /* We cannot do async aborts for SATA devices */ 492 if (dev_is_sata(dev) && !host->host_eh_scheduled) { 493 spin_unlock_irqrestore(host->host_lock, flags); 494 return FAILED; 495 } 496 spin_unlock_irqrestore(host->host_lock, flags); 497 498 if (task) 499 res = i->dft->lldd_abort_task(task); 500 else 501 pr_notice("no task to abort\n"); 502 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) 503 return SUCCESS; 504 505 return FAILED; 506 } 507 EXPORT_SYMBOL_GPL(sas_eh_abort_handler); 508 509 /* Attempt to send a LUN reset message to a device */ 510 int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) 511 { 512 int res; 513 struct scsi_lun lun; 514 struct Scsi_Host *host = cmd->device->host; 515 struct domain_device *dev = cmd_to_domain_dev(cmd); 516 struct sas_internal *i = to_sas_internal(host->transportt); 517 518 if (current != host->ehandler) 519 return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0); 520 521 int_to_scsilun(cmd->device->lun, &lun); 522 523 if (!i->dft->lldd_lu_reset) 524 return FAILED; 525 526 res = i->dft->lldd_lu_reset(dev, lun.scsi_lun); 527 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) 528 return SUCCESS; 529 530 return FAILED; 531 } 532 533 int sas_eh_target_reset_handler(struct scsi_cmnd *cmd) 534 { 535 int res; 536 struct Scsi_Host *host = cmd->device->host; 537 struct domain_device *dev = cmd_to_domain_dev(cmd); 538 struct sas_internal *i = to_sas_internal(host->transportt); 539 540 if (current != host->ehandler) 541 return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0); 542 543 if (!i->dft->lldd_I_T_nexus_reset) 544 return FAILED; 545 546 res = i->dft->lldd_I_T_nexus_reset(dev); 547 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE || 548 res == -ENODEV) 549 return SUCCESS; 550 551 return FAILED; 552 } 553 554 /* Try to reset a device */ 555 static int try_to_reset_cmd_device(struct scsi_cmnd *cmd) 556 { 557 int res; 558 struct Scsi_Host *shost = cmd->device->host; 559 560 if (!shost->hostt->eh_device_reset_handler) 561 goto try_target_reset; 562 563 res = shost->hostt->eh_device_reset_handler(cmd); 564 if (res == SUCCESS) 565 return res; 566 567 try_target_reset: 568 if (shost->hostt->eh_target_reset_handler) 569 return shost->hostt->eh_target_reset_handler(cmd); 570 571 return FAILED; 572 } 573 574 static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q) 575 { 576 struct scsi_cmnd *cmd, *n; 577 enum task_disposition res = TASK_IS_DONE; 578 int tmf_resp, need_reset; 579 struct sas_internal *i = to_sas_internal(shost->transportt); 580 unsigned long flags; 581 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); 582 LIST_HEAD(done); 583 584 /* clean out any commands that won the completion vs eh race */ 585 list_for_each_entry_safe(cmd, n, work_q, eh_entry) { 586 struct domain_device *dev = cmd_to_domain_dev(cmd); 587 struct sas_task *task; 588 589 spin_lock_irqsave(&dev->done_lock, flags); 590 /* by this point the lldd has either observed 591 * SAS_HA_FROZEN and is leaving the task alone, or has 592 * won the race with eh and decided to complete it 593 */ 594 task = TO_SAS_TASK(cmd); 595 spin_unlock_irqrestore(&dev->done_lock, flags); 596 597 if (!task) 598 list_move_tail(&cmd->eh_entry, &done); 599 } 600 601 Again: 602 list_for_each_entry_safe(cmd, n, work_q, eh_entry) { 603 struct sas_task *task = TO_SAS_TASK(cmd); 604 605 list_del_init(&cmd->eh_entry); 606 607 spin_lock_irqsave(&task->task_state_lock, flags); 608 need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; 609 spin_unlock_irqrestore(&task->task_state_lock, flags); 610 611 if (need_reset) { 612 pr_notice("%s: task 0x%p requests reset\n", 613 __func__, task); 614 goto reset; 615 } 616 617 pr_debug("trying to find task 0x%p\n", task); 618 res = sas_scsi_find_task(task); 619 620 switch (res) { 621 case TASK_IS_DONE: 622 pr_notice("%s: task 0x%p is done\n", __func__, 623 task); 624 sas_eh_finish_cmd(cmd); 625 continue; 626 case TASK_IS_ABORTED: 627 pr_notice("%s: task 0x%p is aborted\n", 628 __func__, task); 629 sas_eh_finish_cmd(cmd); 630 continue; 631 case TASK_IS_AT_LU: 632 pr_info("task 0x%p is at LU: lu recover\n", task); 633 reset: 634 tmf_resp = sas_recover_lu(task->dev, cmd); 635 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { 636 pr_notice("dev %016llx LU %llx is recovered\n", 637 SAS_ADDR(task->dev), 638 cmd->device->lun); 639 sas_eh_finish_cmd(cmd); 640 sas_scsi_clear_queue_lu(work_q, cmd); 641 goto Again; 642 } 643 /* fallthrough */ 644 case TASK_IS_NOT_AT_LU: 645 case TASK_ABORT_FAILED: 646 pr_notice("task 0x%p is not at LU: I_T recover\n", 647 task); 648 tmf_resp = sas_recover_I_T(task->dev); 649 if (tmf_resp == TMF_RESP_FUNC_COMPLETE || 650 tmf_resp == -ENODEV) { 651 struct domain_device *dev = task->dev; 652 pr_notice("I_T %016llx recovered\n", 653 SAS_ADDR(task->dev->sas_addr)); 654 sas_eh_finish_cmd(cmd); 655 sas_scsi_clear_queue_I_T(work_q, dev); 656 goto Again; 657 } 658 /* Hammer time :-) */ 659 try_to_reset_cmd_device(cmd); 660 if (i->dft->lldd_clear_nexus_port) { 661 struct asd_sas_port *port = task->dev->port; 662 pr_debug("clearing nexus for port:%d\n", 663 port->id); 664 res = i->dft->lldd_clear_nexus_port(port); 665 if (res == TMF_RESP_FUNC_COMPLETE) { 666 pr_notice("clear nexus port:%d succeeded\n", 667 port->id); 668 sas_eh_finish_cmd(cmd); 669 sas_scsi_clear_queue_port(work_q, 670 port); 671 goto Again; 672 } 673 } 674 if (i->dft->lldd_clear_nexus_ha) { 675 pr_debug("clear nexus ha\n"); 676 res = i->dft->lldd_clear_nexus_ha(ha); 677 if (res == TMF_RESP_FUNC_COMPLETE) { 678 pr_notice("clear nexus ha succeeded\n"); 679 sas_eh_finish_cmd(cmd); 680 goto clear_q; 681 } 682 } 683 /* If we are here -- this means that no amount 684 * of effort could recover from errors. Quite 685 * possibly the HA just disappeared. 686 */ 687 pr_err("error from device %llx, LUN %llx couldn't be recovered in any way\n", 688 SAS_ADDR(task->dev->sas_addr), 689 cmd->device->lun); 690 691 sas_eh_finish_cmd(cmd); 692 goto clear_q; 693 } 694 } 695 out: 696 list_splice_tail(&done, work_q); 697 list_splice_tail_init(&ha->eh_ata_q, work_q); 698 return; 699 700 clear_q: 701 pr_debug("--- Exit %s -- clear_q\n", __func__); 702 list_for_each_entry_safe(cmd, n, work_q, eh_entry) 703 sas_eh_finish_cmd(cmd); 704 goto out; 705 } 706 707 static void sas_eh_handle_resets(struct Scsi_Host *shost) 708 { 709 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); 710 struct sas_internal *i = to_sas_internal(shost->transportt); 711 712 /* handle directed resets to sas devices */ 713 spin_lock_irq(&ha->lock); 714 while (!list_empty(&ha->eh_dev_q)) { 715 struct domain_device *dev; 716 struct ssp_device *ssp; 717 718 ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node); 719 list_del_init(&ssp->eh_list_node); 720 dev = container_of(ssp, typeof(*dev), ssp_dev); 721 kref_get(&dev->kref); 722 WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n"); 723 724 spin_unlock_irq(&ha->lock); 725 726 if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state)) 727 i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun); 728 729 if (test_and_clear_bit(SAS_DEV_RESET, &dev->state)) 730 i->dft->lldd_I_T_nexus_reset(dev); 731 732 sas_put_device(dev); 733 spin_lock_irq(&ha->lock); 734 clear_bit(SAS_DEV_EH_PENDING, &dev->state); 735 ha->eh_active--; 736 } 737 spin_unlock_irq(&ha->lock); 738 } 739 740 741 void sas_scsi_recover_host(struct Scsi_Host *shost) 742 { 743 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); 744 LIST_HEAD(eh_work_q); 745 int tries = 0; 746 bool retry; 747 748 retry: 749 tries++; 750 retry = true; 751 spin_lock_irq(shost->host_lock); 752 list_splice_init(&shost->eh_cmd_q, &eh_work_q); 753 spin_unlock_irq(shost->host_lock); 754 755 pr_notice("Enter %s busy: %d failed: %d\n", 756 __func__, scsi_host_busy(shost), shost->host_failed); 757 /* 758 * Deal with commands that still have SAS tasks (i.e. they didn't 759 * complete via the normal sas_task completion mechanism), 760 * SAS_HA_FROZEN gives eh dominion over all sas_task completion. 761 */ 762 set_bit(SAS_HA_FROZEN, &ha->state); 763 sas_eh_handle_sas_errors(shost, &eh_work_q); 764 clear_bit(SAS_HA_FROZEN, &ha->state); 765 if (list_empty(&eh_work_q)) 766 goto out; 767 768 /* 769 * Now deal with SCSI commands that completed ok but have a an error 770 * code (and hopefully sense data) attached. This is roughly what 771 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any 772 * command we see here has no sas_task and is thus unknown to the HA. 773 */ 774 sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q); 775 if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q)) 776 scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q); 777 778 out: 779 sas_eh_handle_resets(shost); 780 781 /* now link into libata eh --- if we have any ata devices */ 782 sas_ata_strategy_handler(shost); 783 784 scsi_eh_flush_done_q(&ha->eh_done_q); 785 786 /* check if any new eh work was scheduled during the last run */ 787 spin_lock_irq(&ha->lock); 788 if (ha->eh_active == 0) { 789 shost->host_eh_scheduled = 0; 790 retry = false; 791 } 792 spin_unlock_irq(&ha->lock); 793 794 if (retry) 795 goto retry; 796 797 pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n", 798 __func__, scsi_host_busy(shost), 799 shost->host_failed, tries); 800 } 801 802 int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg) 803 { 804 struct domain_device *dev = sdev_to_domain_dev(sdev); 805 806 if (dev_is_sata(dev)) 807 return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg); 808 809 return -EINVAL; 810 } 811 812 struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy) 813 { 814 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent); 815 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); 816 struct domain_device *found_dev = NULL; 817 int i; 818 unsigned long flags; 819 820 spin_lock_irqsave(&ha->phy_port_lock, flags); 821 for (i = 0; i < ha->num_phys; i++) { 822 struct asd_sas_port *port = ha->sas_port[i]; 823 struct domain_device *dev; 824 825 spin_lock(&port->dev_list_lock); 826 list_for_each_entry(dev, &port->dev_list, dev_list_node) { 827 if (rphy == dev->rphy) { 828 found_dev = dev; 829 spin_unlock(&port->dev_list_lock); 830 goto found; 831 } 832 } 833 spin_unlock(&port->dev_list_lock); 834 } 835 found: 836 spin_unlock_irqrestore(&ha->phy_port_lock, flags); 837 838 return found_dev; 839 } 840 841 int sas_target_alloc(struct scsi_target *starget) 842 { 843 struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent); 844 struct domain_device *found_dev = sas_find_dev_by_rphy(rphy); 845 846 if (!found_dev) 847 return -ENODEV; 848 849 kref_get(&found_dev->kref); 850 starget->hostdata = found_dev; 851 return 0; 852 } 853 854 #define SAS_DEF_QD 256 855 856 int sas_slave_configure(struct scsi_device *scsi_dev) 857 { 858 struct domain_device *dev = sdev_to_domain_dev(scsi_dev); 859 860 BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE); 861 862 if (dev_is_sata(dev)) { 863 ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap); 864 return 0; 865 } 866 867 sas_read_port_mode_page(scsi_dev); 868 869 if (scsi_dev->tagged_supported) { 870 scsi_change_queue_depth(scsi_dev, SAS_DEF_QD); 871 } else { 872 pr_notice("device %llx, LUN %llx doesn't support TCQ\n", 873 SAS_ADDR(dev->sas_addr), scsi_dev->lun); 874 scsi_change_queue_depth(scsi_dev, 1); 875 } 876 877 scsi_dev->allow_restart = 1; 878 879 return 0; 880 } 881 882 int sas_change_queue_depth(struct scsi_device *sdev, int depth) 883 { 884 struct domain_device *dev = sdev_to_domain_dev(sdev); 885 886 if (dev_is_sata(dev)) 887 return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth); 888 889 if (!sdev->tagged_supported) 890 depth = 1; 891 return scsi_change_queue_depth(sdev, depth); 892 } 893 894 int sas_bios_param(struct scsi_device *scsi_dev, 895 struct block_device *bdev, 896 sector_t capacity, int *hsc) 897 { 898 hsc[0] = 255; 899 hsc[1] = 63; 900 sector_div(capacity, 255*63); 901 hsc[2] = capacity; 902 903 return 0; 904 } 905 906 /* 907 * Tell an upper layer that it needs to initiate an abort for a given task. 908 * This should only ever be called by an LLDD. 909 */ 910 void sas_task_abort(struct sas_task *task) 911 { 912 struct scsi_cmnd *sc = task->uldd_task; 913 914 /* Escape for libsas internal commands */ 915 if (!sc) { 916 struct sas_task_slow *slow = task->slow_task; 917 918 if (!slow) 919 return; 920 if (!del_timer(&slow->timer)) 921 return; 922 slow->timer.function(&slow->timer); 923 return; 924 } 925 926 if (dev_is_sata(task->dev)) 927 sas_ata_task_abort(task); 928 else 929 blk_abort_request(sc->request); 930 } 931 932 void sas_target_destroy(struct scsi_target *starget) 933 { 934 struct domain_device *found_dev = starget->hostdata; 935 936 if (!found_dev) 937 return; 938 939 starget->hostdata = NULL; 940 sas_put_device(found_dev); 941 } 942 943 #define SAS_STRING_ADDR_SIZE 16 944 945 int sas_request_addr(struct Scsi_Host *shost, u8 *addr) 946 { 947 int res; 948 const struct firmware *fw; 949 950 res = request_firmware(&fw, "sas_addr", &shost->shost_gendev); 951 if (res) 952 return res; 953 954 if (fw->size < SAS_STRING_ADDR_SIZE) { 955 res = -ENODEV; 956 goto out; 957 } 958 959 res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2); 960 if (res) 961 goto out; 962 963 out: 964 release_firmware(fw); 965 return res; 966 } 967 EXPORT_SYMBOL_GPL(sas_request_addr); 968 969 EXPORT_SYMBOL_GPL(sas_queuecommand); 970 EXPORT_SYMBOL_GPL(sas_target_alloc); 971 EXPORT_SYMBOL_GPL(sas_slave_configure); 972 EXPORT_SYMBOL_GPL(sas_change_queue_depth); 973 EXPORT_SYMBOL_GPL(sas_bios_param); 974 EXPORT_SYMBOL_GPL(sas_task_abort); 975 EXPORT_SYMBOL_GPL(sas_phy_reset); 976 EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler); 977 EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler); 978 EXPORT_SYMBOL_GPL(sas_target_destroy); 979 EXPORT_SYMBOL_GPL(sas_ioctl); 980