1 /* 2 * Serial Attached SCSI (SAS) class SCSI Host glue. 3 * 4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved. 5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> 6 * 7 * This file is licensed under GPLv2. 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License as 11 * published by the Free Software Foundation; either version 2 of the 12 * License, or (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, but 15 * WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * General Public License for more details. 18 * 19 * You should have received a copy of the GNU General Public License 20 * along with this program; if not, write to the Free Software 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 22 * USA 23 * 24 */ 25 26 #include <linux/kthread.h> 27 #include <linux/firmware.h> 28 #include <linux/export.h> 29 #include <linux/ctype.h> 30 31 #include "sas_internal.h" 32 33 #include <scsi/scsi_host.h> 34 #include <scsi/scsi_device.h> 35 #include <scsi/scsi_tcq.h> 36 #include <scsi/scsi.h> 37 #include <scsi/scsi_eh.h> 38 #include <scsi/scsi_transport.h> 39 #include <scsi/scsi_transport_sas.h> 40 #include <scsi/sas_ata.h> 41 #include "../scsi_sas_internal.h" 42 #include "../scsi_transport_api.h" 43 #include "../scsi_priv.h" 44 45 #include <linux/err.h> 46 #include <linux/blkdev.h> 47 #include <linux/freezer.h> 48 #include <linux/gfp.h> 49 #include <linux/scatterlist.h> 50 #include <linux/libata.h> 51 52 /* record final status and free the task */ 53 static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task) 54 { 55 struct task_status_struct *ts = &task->task_status; 56 int hs = 0, stat = 0; 57 58 if (ts->resp == SAS_TASK_UNDELIVERED) { 59 /* transport error */ 60 hs = DID_NO_CONNECT; 61 } else { /* ts->resp == SAS_TASK_COMPLETE */ 62 /* task delivered, what happened afterwards? */ 63 switch (ts->stat) { 64 case SAS_DEV_NO_RESPONSE: 65 case SAS_INTERRUPTED: 66 case SAS_PHY_DOWN: 67 case SAS_NAK_R_ERR: 68 case SAS_OPEN_TO: 69 hs = DID_NO_CONNECT; 70 break; 71 case SAS_DATA_UNDERRUN: 72 scsi_set_resid(sc, ts->residual); 73 if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow) 74 hs = DID_ERROR; 75 break; 76 case SAS_DATA_OVERRUN: 77 hs = DID_ERROR; 78 break; 79 case SAS_QUEUE_FULL: 80 hs = DID_SOFT_ERROR; /* retry */ 81 break; 82 case SAS_DEVICE_UNKNOWN: 83 hs = DID_BAD_TARGET; 84 break; 85 case SAS_SG_ERR: 86 hs = DID_PARITY; 87 break; 88 case SAS_OPEN_REJECT: 89 if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY) 90 hs = DID_SOFT_ERROR; /* retry */ 91 else 92 hs = DID_ERROR; 93 break; 94 case SAS_PROTO_RESPONSE: 95 SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP " 96 "task; please report this\n", 97 task->dev->port->ha->sas_ha_name); 98 break; 99 case SAS_ABORTED_TASK: 100 hs = DID_ABORT; 101 break; 102 case SAM_STAT_CHECK_CONDITION: 103 memcpy(sc->sense_buffer, ts->buf, 104 min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size)); 105 stat = SAM_STAT_CHECK_CONDITION; 106 break; 107 default: 108 stat = ts->stat; 109 break; 110 } 111 } 112 113 sc->result = (hs << 16) | stat; 114 ASSIGN_SAS_TASK(sc, NULL); 115 list_del_init(&task->list); 116 sas_free_task(task); 117 } 118 119 static void sas_scsi_task_done(struct sas_task *task) 120 { 121 struct scsi_cmnd *sc = task->uldd_task; 122 struct domain_device *dev = task->dev; 123 struct sas_ha_struct *ha = dev->port->ha; 124 unsigned long flags; 125 126 spin_lock_irqsave(&dev->done_lock, flags); 127 if (test_bit(SAS_HA_FROZEN, &ha->state)) 128 task = NULL; 129 else 130 ASSIGN_SAS_TASK(sc, NULL); 131 spin_unlock_irqrestore(&dev->done_lock, flags); 132 133 if (unlikely(!task)) { 134 /* task will be completed by the error handler */ 135 SAS_DPRINTK("task done but aborted\n"); 136 return; 137 } 138 139 if (unlikely(!sc)) { 140 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); 141 list_del_init(&task->list); 142 sas_free_task(task); 143 return; 144 } 145 146 sas_end_task(sc, task); 147 sc->scsi_done(sc); 148 } 149 150 static struct sas_task *sas_create_task(struct scsi_cmnd *cmd, 151 struct domain_device *dev, 152 gfp_t gfp_flags) 153 { 154 struct sas_task *task = sas_alloc_task(gfp_flags); 155 struct scsi_lun lun; 156 157 if (!task) 158 return NULL; 159 160 task->uldd_task = cmd; 161 ASSIGN_SAS_TASK(cmd, task); 162 163 task->dev = dev; 164 task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */ 165 166 task->ssp_task.retry_count = 1; 167 int_to_scsilun(cmd->device->lun, &lun); 168 memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8); 169 task->ssp_task.task_attr = TASK_ATTR_SIMPLE; 170 memcpy(task->ssp_task.cdb, cmd->cmnd, 16); 171 172 task->scatter = scsi_sglist(cmd); 173 task->num_scatter = scsi_sg_count(cmd); 174 task->total_xfer_len = scsi_bufflen(cmd); 175 task->data_dir = cmd->sc_data_direction; 176 177 task->task_done = sas_scsi_task_done; 178 179 return task; 180 } 181 182 int sas_queue_up(struct sas_task *task) 183 { 184 struct sas_ha_struct *sas_ha = task->dev->port->ha; 185 struct scsi_core *core = &sas_ha->core; 186 unsigned long flags; 187 LIST_HEAD(list); 188 189 spin_lock_irqsave(&core->task_queue_lock, flags); 190 if (sas_ha->lldd_queue_size < core->task_queue_size + 1) { 191 spin_unlock_irqrestore(&core->task_queue_lock, flags); 192 return -SAS_QUEUE_FULL; 193 } 194 list_add_tail(&task->list, &core->task_queue); 195 core->task_queue_size += 1; 196 spin_unlock_irqrestore(&core->task_queue_lock, flags); 197 wake_up_process(core->queue_thread); 198 199 return 0; 200 } 201 202 int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 203 { 204 struct sas_internal *i = to_sas_internal(host->transportt); 205 struct domain_device *dev = cmd_to_domain_dev(cmd); 206 struct sas_ha_struct *sas_ha = dev->port->ha; 207 struct sas_task *task; 208 int res = 0; 209 210 /* If the device fell off, no sense in issuing commands */ 211 if (test_bit(SAS_DEV_GONE, &dev->state)) { 212 cmd->result = DID_BAD_TARGET << 16; 213 goto out_done; 214 } 215 216 if (dev_is_sata(dev)) { 217 spin_lock_irq(dev->sata_dev.ap->lock); 218 res = ata_sas_queuecmd(cmd, dev->sata_dev.ap); 219 spin_unlock_irq(dev->sata_dev.ap->lock); 220 return res; 221 } 222 223 task = sas_create_task(cmd, dev, GFP_ATOMIC); 224 if (!task) 225 return SCSI_MLQUEUE_HOST_BUSY; 226 227 /* Queue up, Direct Mode or Task Collector Mode. */ 228 if (sas_ha->lldd_max_execute_num < 2) 229 res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC); 230 else 231 res = sas_queue_up(task); 232 233 if (res) 234 goto out_free_task; 235 return 0; 236 237 out_free_task: 238 SAS_DPRINTK("lldd_execute_task returned: %d\n", res); 239 ASSIGN_SAS_TASK(cmd, NULL); 240 sas_free_task(task); 241 if (res == -SAS_QUEUE_FULL) 242 cmd->result = DID_SOFT_ERROR << 16; /* retry */ 243 else 244 cmd->result = DID_ERROR << 16; 245 out_done: 246 cmd->scsi_done(cmd); 247 return 0; 248 } 249 250 static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) 251 { 252 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host); 253 struct sas_task *task = TO_SAS_TASK(cmd); 254 255 /* At this point, we only get called following an actual abort 256 * of the task, so we should be guaranteed not to be racing with 257 * any completions from the LLD. Task is freed after this. 258 */ 259 sas_end_task(cmd, task); 260 261 /* now finish the command and move it on to the error 262 * handler done list, this also takes it off the 263 * error handler pending list. 264 */ 265 scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q); 266 } 267 268 static void sas_eh_defer_cmd(struct scsi_cmnd *cmd) 269 { 270 struct domain_device *dev = cmd_to_domain_dev(cmd); 271 struct sas_ha_struct *ha = dev->port->ha; 272 struct sas_task *task = TO_SAS_TASK(cmd); 273 274 if (!dev_is_sata(dev)) { 275 sas_eh_finish_cmd(cmd); 276 return; 277 } 278 279 /* report the timeout to libata */ 280 sas_end_task(cmd, task); 281 list_move_tail(&cmd->eh_entry, &ha->eh_ata_q); 282 } 283 284 static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) 285 { 286 struct scsi_cmnd *cmd, *n; 287 288 list_for_each_entry_safe(cmd, n, error_q, eh_entry) { 289 if (cmd->device->sdev_target == my_cmd->device->sdev_target && 290 cmd->device->lun == my_cmd->device->lun) 291 sas_eh_defer_cmd(cmd); 292 } 293 } 294 295 static void sas_scsi_clear_queue_I_T(struct list_head *error_q, 296 struct domain_device *dev) 297 { 298 struct scsi_cmnd *cmd, *n; 299 300 list_for_each_entry_safe(cmd, n, error_q, eh_entry) { 301 struct domain_device *x = cmd_to_domain_dev(cmd); 302 303 if (x == dev) 304 sas_eh_finish_cmd(cmd); 305 } 306 } 307 308 static void sas_scsi_clear_queue_port(struct list_head *error_q, 309 struct asd_sas_port *port) 310 { 311 struct scsi_cmnd *cmd, *n; 312 313 list_for_each_entry_safe(cmd, n, error_q, eh_entry) { 314 struct domain_device *dev = cmd_to_domain_dev(cmd); 315 struct asd_sas_port *x = dev->port; 316 317 if (x == port) 318 sas_eh_finish_cmd(cmd); 319 } 320 } 321 322 enum task_disposition { 323 TASK_IS_DONE, 324 TASK_IS_ABORTED, 325 TASK_IS_AT_LU, 326 TASK_IS_NOT_AT_HA, 327 TASK_IS_NOT_AT_LU, 328 TASK_ABORT_FAILED, 329 }; 330 331 static enum task_disposition sas_scsi_find_task(struct sas_task *task) 332 { 333 struct sas_ha_struct *ha = task->dev->port->ha; 334 unsigned long flags; 335 int i, res; 336 struct sas_internal *si = 337 to_sas_internal(task->dev->port->ha->core.shost->transportt); 338 339 if (ha->lldd_max_execute_num > 1) { 340 struct scsi_core *core = &ha->core; 341 struct sas_task *t, *n; 342 343 mutex_lock(&core->task_queue_flush); 344 spin_lock_irqsave(&core->task_queue_lock, flags); 345 list_for_each_entry_safe(t, n, &core->task_queue, list) 346 if (task == t) { 347 list_del_init(&t->list); 348 break; 349 } 350 spin_unlock_irqrestore(&core->task_queue_lock, flags); 351 mutex_unlock(&core->task_queue_flush); 352 353 if (task == t) 354 return TASK_IS_NOT_AT_HA; 355 } 356 357 for (i = 0; i < 5; i++) { 358 SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task); 359 res = si->dft->lldd_abort_task(task); 360 361 spin_lock_irqsave(&task->task_state_lock, flags); 362 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 363 spin_unlock_irqrestore(&task->task_state_lock, flags); 364 SAS_DPRINTK("%s: task 0x%p is done\n", __func__, 365 task); 366 return TASK_IS_DONE; 367 } 368 spin_unlock_irqrestore(&task->task_state_lock, flags); 369 370 if (res == TMF_RESP_FUNC_COMPLETE) { 371 SAS_DPRINTK("%s: task 0x%p is aborted\n", 372 __func__, task); 373 return TASK_IS_ABORTED; 374 } else if (si->dft->lldd_query_task) { 375 SAS_DPRINTK("%s: querying task 0x%p\n", 376 __func__, task); 377 res = si->dft->lldd_query_task(task); 378 switch (res) { 379 case TMF_RESP_FUNC_SUCC: 380 SAS_DPRINTK("%s: task 0x%p at LU\n", 381 __func__, task); 382 return TASK_IS_AT_LU; 383 case TMF_RESP_FUNC_COMPLETE: 384 SAS_DPRINTK("%s: task 0x%p not at LU\n", 385 __func__, task); 386 return TASK_IS_NOT_AT_LU; 387 case TMF_RESP_FUNC_FAILED: 388 SAS_DPRINTK("%s: task 0x%p failed to abort\n", 389 __func__, task); 390 return TASK_ABORT_FAILED; 391 } 392 393 } 394 } 395 return res; 396 } 397 398 static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd) 399 { 400 int res = TMF_RESP_FUNC_FAILED; 401 struct scsi_lun lun; 402 struct sas_internal *i = 403 to_sas_internal(dev->port->ha->core.shost->transportt); 404 405 int_to_scsilun(cmd->device->lun, &lun); 406 407 SAS_DPRINTK("eh: device %llx LUN %x has the task\n", 408 SAS_ADDR(dev->sas_addr), 409 cmd->device->lun); 410 411 if (i->dft->lldd_abort_task_set) 412 res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun); 413 414 if (res == TMF_RESP_FUNC_FAILED) { 415 if (i->dft->lldd_clear_task_set) 416 res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun); 417 } 418 419 if (res == TMF_RESP_FUNC_FAILED) { 420 if (i->dft->lldd_lu_reset) 421 res = i->dft->lldd_lu_reset(dev, lun.scsi_lun); 422 } 423 424 return res; 425 } 426 427 static int sas_recover_I_T(struct domain_device *dev) 428 { 429 int res = TMF_RESP_FUNC_FAILED; 430 struct sas_internal *i = 431 to_sas_internal(dev->port->ha->core.shost->transportt); 432 433 SAS_DPRINTK("I_T nexus reset for dev %016llx\n", 434 SAS_ADDR(dev->sas_addr)); 435 436 if (i->dft->lldd_I_T_nexus_reset) 437 res = i->dft->lldd_I_T_nexus_reset(dev); 438 439 return res; 440 } 441 442 /* take a reference on the last known good phy for this device */ 443 struct sas_phy *sas_get_local_phy(struct domain_device *dev) 444 { 445 struct sas_ha_struct *ha = dev->port->ha; 446 struct sas_phy *phy; 447 unsigned long flags; 448 449 /* a published domain device always has a valid phy, it may be 450 * stale, but it is never NULL 451 */ 452 BUG_ON(!dev->phy); 453 454 spin_lock_irqsave(&ha->phy_port_lock, flags); 455 phy = dev->phy; 456 get_device(&phy->dev); 457 spin_unlock_irqrestore(&ha->phy_port_lock, flags); 458 459 return phy; 460 } 461 EXPORT_SYMBOL_GPL(sas_get_local_phy); 462 463 /* Attempt to send a LUN reset message to a device */ 464 int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) 465 { 466 struct domain_device *dev = cmd_to_domain_dev(cmd); 467 struct sas_internal *i = 468 to_sas_internal(dev->port->ha->core.shost->transportt); 469 struct scsi_lun lun; 470 int res; 471 472 int_to_scsilun(cmd->device->lun, &lun); 473 474 if (!i->dft->lldd_lu_reset) 475 return FAILED; 476 477 res = i->dft->lldd_lu_reset(dev, lun.scsi_lun); 478 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) 479 return SUCCESS; 480 481 return FAILED; 482 } 483 484 /* Attempt to send a phy (bus) reset */ 485 int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) 486 { 487 struct domain_device *dev = cmd_to_domain_dev(cmd); 488 struct sas_phy *phy = sas_get_local_phy(dev); 489 int res; 490 491 res = sas_phy_reset(phy, 1); 492 if (res) 493 SAS_DPRINTK("Bus reset of %s failed 0x%x\n", 494 kobject_name(&phy->dev.kobj), 495 res); 496 sas_put_local_phy(phy); 497 498 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) 499 return SUCCESS; 500 501 return FAILED; 502 } 503 504 /* Try to reset a device */ 505 static int try_to_reset_cmd_device(struct scsi_cmnd *cmd) 506 { 507 int res; 508 struct Scsi_Host *shost = cmd->device->host; 509 510 if (!shost->hostt->eh_device_reset_handler) 511 goto try_bus_reset; 512 513 res = shost->hostt->eh_device_reset_handler(cmd); 514 if (res == SUCCESS) 515 return res; 516 517 try_bus_reset: 518 if (shost->hostt->eh_bus_reset_handler) 519 return shost->hostt->eh_bus_reset_handler(cmd); 520 521 return FAILED; 522 } 523 524 static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q) 525 { 526 struct scsi_cmnd *cmd, *n; 527 enum task_disposition res = TASK_IS_DONE; 528 int tmf_resp, need_reset; 529 struct sas_internal *i = to_sas_internal(shost->transportt); 530 unsigned long flags; 531 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); 532 LIST_HEAD(done); 533 534 /* clean out any commands that won the completion vs eh race */ 535 list_for_each_entry_safe(cmd, n, work_q, eh_entry) { 536 struct domain_device *dev = cmd_to_domain_dev(cmd); 537 struct sas_task *task; 538 539 spin_lock_irqsave(&dev->done_lock, flags); 540 /* by this point the lldd has either observed 541 * SAS_HA_FROZEN and is leaving the task alone, or has 542 * won the race with eh and decided to complete it 543 */ 544 task = TO_SAS_TASK(cmd); 545 spin_unlock_irqrestore(&dev->done_lock, flags); 546 547 if (!task) 548 list_move_tail(&cmd->eh_entry, &done); 549 } 550 551 Again: 552 list_for_each_entry_safe(cmd, n, work_q, eh_entry) { 553 struct sas_task *task = TO_SAS_TASK(cmd); 554 555 list_del_init(&cmd->eh_entry); 556 557 spin_lock_irqsave(&task->task_state_lock, flags); 558 need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; 559 spin_unlock_irqrestore(&task->task_state_lock, flags); 560 561 if (need_reset) { 562 SAS_DPRINTK("%s: task 0x%p requests reset\n", 563 __func__, task); 564 goto reset; 565 } 566 567 SAS_DPRINTK("trying to find task 0x%p\n", task); 568 res = sas_scsi_find_task(task); 569 570 cmd->eh_eflags = 0; 571 572 switch (res) { 573 case TASK_IS_NOT_AT_HA: 574 SAS_DPRINTK("%s: task 0x%p is not at ha: %s\n", 575 __func__, task, 576 cmd->retries ? "retry" : "aborted"); 577 if (cmd->retries) 578 cmd->retries--; 579 sas_eh_finish_cmd(cmd); 580 continue; 581 case TASK_IS_DONE: 582 SAS_DPRINTK("%s: task 0x%p is done\n", __func__, 583 task); 584 sas_eh_defer_cmd(cmd); 585 continue; 586 case TASK_IS_ABORTED: 587 SAS_DPRINTK("%s: task 0x%p is aborted\n", 588 __func__, task); 589 sas_eh_defer_cmd(cmd); 590 continue; 591 case TASK_IS_AT_LU: 592 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); 593 reset: 594 tmf_resp = sas_recover_lu(task->dev, cmd); 595 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { 596 SAS_DPRINTK("dev %016llx LU %x is " 597 "recovered\n", 598 SAS_ADDR(task->dev), 599 cmd->device->lun); 600 sas_eh_defer_cmd(cmd); 601 sas_scsi_clear_queue_lu(work_q, cmd); 602 goto Again; 603 } 604 /* fallthrough */ 605 case TASK_IS_NOT_AT_LU: 606 case TASK_ABORT_FAILED: 607 SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n", 608 task); 609 tmf_resp = sas_recover_I_T(task->dev); 610 if (tmf_resp == TMF_RESP_FUNC_COMPLETE || 611 tmf_resp == -ENODEV) { 612 struct domain_device *dev = task->dev; 613 SAS_DPRINTK("I_T %016llx recovered\n", 614 SAS_ADDR(task->dev->sas_addr)); 615 sas_eh_finish_cmd(cmd); 616 sas_scsi_clear_queue_I_T(work_q, dev); 617 goto Again; 618 } 619 /* Hammer time :-) */ 620 try_to_reset_cmd_device(cmd); 621 if (i->dft->lldd_clear_nexus_port) { 622 struct asd_sas_port *port = task->dev->port; 623 SAS_DPRINTK("clearing nexus for port:%d\n", 624 port->id); 625 res = i->dft->lldd_clear_nexus_port(port); 626 if (res == TMF_RESP_FUNC_COMPLETE) { 627 SAS_DPRINTK("clear nexus port:%d " 628 "succeeded\n", port->id); 629 sas_eh_finish_cmd(cmd); 630 sas_scsi_clear_queue_port(work_q, 631 port); 632 goto Again; 633 } 634 } 635 if (i->dft->lldd_clear_nexus_ha) { 636 SAS_DPRINTK("clear nexus ha\n"); 637 res = i->dft->lldd_clear_nexus_ha(ha); 638 if (res == TMF_RESP_FUNC_COMPLETE) { 639 SAS_DPRINTK("clear nexus ha " 640 "succeeded\n"); 641 sas_eh_finish_cmd(cmd); 642 goto clear_q; 643 } 644 } 645 /* If we are here -- this means that no amount 646 * of effort could recover from errors. Quite 647 * possibly the HA just disappeared. 648 */ 649 SAS_DPRINTK("error from device %llx, LUN %x " 650 "couldn't be recovered in any way\n", 651 SAS_ADDR(task->dev->sas_addr), 652 cmd->device->lun); 653 654 sas_eh_finish_cmd(cmd); 655 goto clear_q; 656 } 657 } 658 out: 659 list_splice_tail(&done, work_q); 660 list_splice_tail_init(&ha->eh_ata_q, work_q); 661 return; 662 663 clear_q: 664 SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__); 665 list_for_each_entry_safe(cmd, n, work_q, eh_entry) 666 sas_eh_finish_cmd(cmd); 667 goto out; 668 } 669 670 void sas_scsi_recover_host(struct Scsi_Host *shost) 671 { 672 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); 673 unsigned long flags; 674 LIST_HEAD(eh_work_q); 675 676 spin_lock_irqsave(shost->host_lock, flags); 677 list_splice_init(&shost->eh_cmd_q, &eh_work_q); 678 shost->host_eh_scheduled = 0; 679 spin_unlock_irqrestore(shost->host_lock, flags); 680 681 SAS_DPRINTK("Enter %s busy: %d failed: %d\n", 682 __func__, shost->host_busy, shost->host_failed); 683 /* 684 * Deal with commands that still have SAS tasks (i.e. they didn't 685 * complete via the normal sas_task completion mechanism), 686 * SAS_HA_FROZEN gives eh dominion over all sas_task completion. 687 */ 688 set_bit(SAS_HA_FROZEN, &ha->state); 689 sas_eh_handle_sas_errors(shost, &eh_work_q); 690 clear_bit(SAS_HA_FROZEN, &ha->state); 691 if (list_empty(&eh_work_q)) 692 goto out; 693 694 /* 695 * Now deal with SCSI commands that completed ok but have a an error 696 * code (and hopefully sense data) attached. This is roughly what 697 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any 698 * command we see here has no sas_task and is thus unknown to the HA. 699 */ 700 sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q); 701 if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q)) 702 scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q); 703 704 out: 705 if (ha->lldd_max_execute_num > 1) 706 wake_up_process(ha->core.queue_thread); 707 708 /* now link into libata eh --- if we have any ata devices */ 709 sas_ata_strategy_handler(shost); 710 711 scsi_eh_flush_done_q(&ha->eh_done_q); 712 713 SAS_DPRINTK("--- Exit %s: busy: %d failed: %d\n", 714 __func__, shost->host_busy, shost->host_failed); 715 } 716 717 enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) 718 { 719 scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd); 720 721 return BLK_EH_NOT_HANDLED; 722 } 723 724 int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 725 { 726 struct domain_device *dev = sdev_to_domain_dev(sdev); 727 728 if (dev_is_sata(dev)) 729 return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg); 730 731 return -EINVAL; 732 } 733 734 struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy) 735 { 736 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent); 737 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); 738 struct domain_device *found_dev = NULL; 739 int i; 740 unsigned long flags; 741 742 spin_lock_irqsave(&ha->phy_port_lock, flags); 743 for (i = 0; i < ha->num_phys; i++) { 744 struct asd_sas_port *port = ha->sas_port[i]; 745 struct domain_device *dev; 746 747 spin_lock(&port->dev_list_lock); 748 list_for_each_entry(dev, &port->dev_list, dev_list_node) { 749 if (rphy == dev->rphy) { 750 found_dev = dev; 751 spin_unlock(&port->dev_list_lock); 752 goto found; 753 } 754 } 755 spin_unlock(&port->dev_list_lock); 756 } 757 found: 758 spin_unlock_irqrestore(&ha->phy_port_lock, flags); 759 760 return found_dev; 761 } 762 763 int sas_target_alloc(struct scsi_target *starget) 764 { 765 struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent); 766 struct domain_device *found_dev = sas_find_dev_by_rphy(rphy); 767 768 if (!found_dev) 769 return -ENODEV; 770 771 kref_get(&found_dev->kref); 772 starget->hostdata = found_dev; 773 return 0; 774 } 775 776 #define SAS_DEF_QD 256 777 778 int sas_slave_configure(struct scsi_device *scsi_dev) 779 { 780 struct domain_device *dev = sdev_to_domain_dev(scsi_dev); 781 struct sas_ha_struct *sas_ha; 782 783 BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE); 784 785 if (dev_is_sata(dev)) { 786 ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap); 787 return 0; 788 } 789 790 sas_ha = dev->port->ha; 791 792 sas_read_port_mode_page(scsi_dev); 793 794 if (scsi_dev->tagged_supported) { 795 scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG); 796 scsi_activate_tcq(scsi_dev, SAS_DEF_QD); 797 } else { 798 SAS_DPRINTK("device %llx, LUN %x doesn't support " 799 "TCQ\n", SAS_ADDR(dev->sas_addr), 800 scsi_dev->lun); 801 scsi_dev->tagged_supported = 0; 802 scsi_set_tag_type(scsi_dev, 0); 803 scsi_deactivate_tcq(scsi_dev, 1); 804 } 805 806 scsi_dev->allow_restart = 1; 807 808 return 0; 809 } 810 811 int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason) 812 { 813 struct domain_device *dev = sdev_to_domain_dev(sdev); 814 815 if (dev_is_sata(dev)) 816 return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth, 817 reason); 818 819 switch (reason) { 820 case SCSI_QDEPTH_DEFAULT: 821 case SCSI_QDEPTH_RAMP_UP: 822 if (!sdev->tagged_supported) 823 depth = 1; 824 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); 825 break; 826 case SCSI_QDEPTH_QFULL: 827 scsi_track_queue_full(sdev, depth); 828 break; 829 default: 830 return -EOPNOTSUPP; 831 } 832 833 return depth; 834 } 835 836 int sas_change_queue_type(struct scsi_device *scsi_dev, int qt) 837 { 838 struct domain_device *dev = sdev_to_domain_dev(scsi_dev); 839 840 if (dev_is_sata(dev)) 841 return -EINVAL; 842 843 if (!scsi_dev->tagged_supported) 844 return 0; 845 846 scsi_deactivate_tcq(scsi_dev, 1); 847 848 scsi_set_tag_type(scsi_dev, qt); 849 scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth); 850 851 return qt; 852 } 853 854 int sas_bios_param(struct scsi_device *scsi_dev, 855 struct block_device *bdev, 856 sector_t capacity, int *hsc) 857 { 858 hsc[0] = 255; 859 hsc[1] = 63; 860 sector_div(capacity, 255*63); 861 hsc[2] = capacity; 862 863 return 0; 864 } 865 866 /* ---------- Task Collector Thread implementation ---------- */ 867 868 static void sas_queue(struct sas_ha_struct *sas_ha) 869 { 870 struct scsi_core *core = &sas_ha->core; 871 unsigned long flags; 872 LIST_HEAD(q); 873 int can_queue; 874 int res; 875 struct sas_internal *i = to_sas_internal(core->shost->transportt); 876 877 mutex_lock(&core->task_queue_flush); 878 spin_lock_irqsave(&core->task_queue_lock, flags); 879 while (!kthread_should_stop() && 880 !list_empty(&core->task_queue) && 881 !test_bit(SAS_HA_FROZEN, &sas_ha->state)) { 882 883 can_queue = sas_ha->lldd_queue_size - core->task_queue_size; 884 if (can_queue >= 0) { 885 can_queue = core->task_queue_size; 886 list_splice_init(&core->task_queue, &q); 887 } else { 888 struct list_head *a, *n; 889 890 can_queue = sas_ha->lldd_queue_size; 891 list_for_each_safe(a, n, &core->task_queue) { 892 list_move_tail(a, &q); 893 if (--can_queue == 0) 894 break; 895 } 896 can_queue = sas_ha->lldd_queue_size; 897 } 898 core->task_queue_size -= can_queue; 899 spin_unlock_irqrestore(&core->task_queue_lock, flags); 900 { 901 struct sas_task *task = list_entry(q.next, 902 struct sas_task, 903 list); 904 list_del_init(&q); 905 res = i->dft->lldd_execute_task(task, can_queue, 906 GFP_KERNEL); 907 if (unlikely(res)) 908 __list_add(&q, task->list.prev, &task->list); 909 } 910 spin_lock_irqsave(&core->task_queue_lock, flags); 911 if (res) { 912 list_splice_init(&q, &core->task_queue); /*at head*/ 913 core->task_queue_size += can_queue; 914 } 915 } 916 spin_unlock_irqrestore(&core->task_queue_lock, flags); 917 mutex_unlock(&core->task_queue_flush); 918 } 919 920 /** 921 * sas_queue_thread -- The Task Collector thread 922 * @_sas_ha: pointer to struct sas_ha 923 */ 924 static int sas_queue_thread(void *_sas_ha) 925 { 926 struct sas_ha_struct *sas_ha = _sas_ha; 927 928 while (1) { 929 set_current_state(TASK_INTERRUPTIBLE); 930 schedule(); 931 sas_queue(sas_ha); 932 if (kthread_should_stop()) 933 break; 934 } 935 936 return 0; 937 } 938 939 int sas_init_queue(struct sas_ha_struct *sas_ha) 940 { 941 struct scsi_core *core = &sas_ha->core; 942 943 spin_lock_init(&core->task_queue_lock); 944 mutex_init(&core->task_queue_flush); 945 core->task_queue_size = 0; 946 INIT_LIST_HEAD(&core->task_queue); 947 948 core->queue_thread = kthread_run(sas_queue_thread, sas_ha, 949 "sas_queue_%d", core->shost->host_no); 950 if (IS_ERR(core->queue_thread)) 951 return PTR_ERR(core->queue_thread); 952 return 0; 953 } 954 955 void sas_shutdown_queue(struct sas_ha_struct *sas_ha) 956 { 957 unsigned long flags; 958 struct scsi_core *core = &sas_ha->core; 959 struct sas_task *task, *n; 960 961 kthread_stop(core->queue_thread); 962 963 if (!list_empty(&core->task_queue)) 964 SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n", 965 SAS_ADDR(sas_ha->sas_addr)); 966 967 spin_lock_irqsave(&core->task_queue_lock, flags); 968 list_for_each_entry_safe(task, n, &core->task_queue, list) { 969 struct scsi_cmnd *cmd = task->uldd_task; 970 971 list_del_init(&task->list); 972 973 ASSIGN_SAS_TASK(cmd, NULL); 974 sas_free_task(task); 975 cmd->result = DID_ABORT << 16; 976 cmd->scsi_done(cmd); 977 } 978 spin_unlock_irqrestore(&core->task_queue_lock, flags); 979 } 980 981 /* 982 * Tell an upper layer that it needs to initiate an abort for a given task. 983 * This should only ever be called by an LLDD. 984 */ 985 void sas_task_abort(struct sas_task *task) 986 { 987 struct scsi_cmnd *sc = task->uldd_task; 988 989 /* Escape for libsas internal commands */ 990 if (!sc) { 991 if (!del_timer(&task->timer)) 992 return; 993 task->timer.function(task->timer.data); 994 return; 995 } 996 997 if (dev_is_sata(task->dev)) { 998 sas_ata_task_abort(task); 999 } else { 1000 struct request_queue *q = sc->device->request_queue; 1001 unsigned long flags; 1002 1003 spin_lock_irqsave(q->queue_lock, flags); 1004 blk_abort_request(sc->request); 1005 spin_unlock_irqrestore(q->queue_lock, flags); 1006 scsi_schedule_eh(sc->device->host); 1007 } 1008 } 1009 1010 void sas_target_destroy(struct scsi_target *starget) 1011 { 1012 struct domain_device *found_dev = starget->hostdata; 1013 1014 if (!found_dev) 1015 return; 1016 1017 starget->hostdata = NULL; 1018 sas_put_device(found_dev); 1019 } 1020 1021 static void sas_parse_addr(u8 *sas_addr, const char *p) 1022 { 1023 int i; 1024 for (i = 0; i < SAS_ADDR_SIZE; i++) { 1025 u8 h, l; 1026 if (!*p) 1027 break; 1028 h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10; 1029 p++; 1030 l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10; 1031 p++; 1032 sas_addr[i] = (h<<4) | l; 1033 } 1034 } 1035 1036 #define SAS_STRING_ADDR_SIZE 16 1037 1038 int sas_request_addr(struct Scsi_Host *shost, u8 *addr) 1039 { 1040 int res; 1041 const struct firmware *fw; 1042 1043 res = request_firmware(&fw, "sas_addr", &shost->shost_gendev); 1044 if (res) 1045 return res; 1046 1047 if (fw->size < SAS_STRING_ADDR_SIZE) { 1048 res = -ENODEV; 1049 goto out; 1050 } 1051 1052 sas_parse_addr(addr, fw->data); 1053 1054 out: 1055 release_firmware(fw); 1056 return res; 1057 } 1058 EXPORT_SYMBOL_GPL(sas_request_addr); 1059 1060 EXPORT_SYMBOL_GPL(sas_queuecommand); 1061 EXPORT_SYMBOL_GPL(sas_target_alloc); 1062 EXPORT_SYMBOL_GPL(sas_slave_configure); 1063 EXPORT_SYMBOL_GPL(sas_change_queue_depth); 1064 EXPORT_SYMBOL_GPL(sas_change_queue_type); 1065 EXPORT_SYMBOL_GPL(sas_bios_param); 1066 EXPORT_SYMBOL_GPL(sas_task_abort); 1067 EXPORT_SYMBOL_GPL(sas_phy_reset); 1068 EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler); 1069 EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler); 1070 EXPORT_SYMBOL_GPL(sas_target_destroy); 1071 EXPORT_SYMBOL_GPL(sas_ioctl); 1072