/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas.h>
#include "isci.h"
#include "port.h"
#include "remote_device.h"
#include "request.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "task.h"

/**
 * isci_remote_device_not_ready() - This function is called by the ihost when
 *    the remote device is not ready.  We mark the isci device as not ready
 *    for I/O (clear "ready_for_io") and signal any waiting process.
 * @isci_host: This parameter specifies the isci host object.
 * @isci_device: This parameter specifies the remote device.
 *
 * sci_lock is held on entrance to this function.
 */
static void isci_remote_device_not_ready(struct isci_host *ihost,
					 struct isci_remote_device *idev, u32 reason)
{
	struct isci_request *ireq;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	switch (reason) {
	case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
		set_bit(IDEV_GONE, &idev->flags);
		break;
	case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
		set_bit(IDEV_IO_NCQERROR, &idev->flags);

		/* Kill all outstanding requests for the device. */
		list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {

			dev_dbg(&ihost->pdev->dev,
				"%s: isci_device = %p request = %p\n",
				__func__, idev, ireq);

			sci_controller_terminate_request(ihost,
							 idev,
							 ireq);
		}
		/* Fall through into the default case... */
	default:
		clear_bit(IDEV_IO_READY, &idev->flags);
		break;
	}
}

/**
 * isci_remote_device_ready() - This function is called by the ihost when the
 *    remote device is ready.  We mark the isci device as ready and signal any
 *    waiting process.
 * @ihost: our valid isci_host
 * @idev: remote device
 *
 */
static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p\n", __func__, idev);

	clear_bit(IDEV_IO_NCQERROR, &idev->flags);
	set_bit(IDEV_IO_READY, &idev->flags);
	if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
		wake_up(&ihost->eventq);
}

/* called once the remote node context is ready to be freed.
 * The remote device can now report that its stop operation is complete.
 */
static void rnc_destruct_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	BUG_ON(idev->started_request_count != 0);
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
{
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	u32 i;

	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
		struct isci_request *ireq = ihost->reqs[i];
		enum sci_status s;

		if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
		    ireq->target_device != idev)
			continue;

		s = sci_controller_terminate_request(ihost, idev, ireq);
		if (s != SCI_SUCCESS)
			status = s;
	}

	return status;
}

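/* Descriptive comment added for clarity; it summarizes what the function
 * below already does.  Depending on the current state, a stop request either
 * succeeds immediately, begins destruction of the remote node context, or
 * first terminates any requests still in flight.  Note that the @timeout
 * argument is not used by this implementation.
 */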
enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
				       u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_STOPPED:
		return SCI_SUCCESS;
	case SCI_DEV_STARTING:
		/* device not started so there had better be no requests */
		BUG_ON(idev->started_request_count != 0);
		sci_remote_node_context_destruct(&idev->rnc,
						 rnc_destruct_done, idev);
		/* Transition to the stopping state and wait for the
		 * remote node to complete being posted and invalidated.
		 */
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		sci_change_state(sm, SCI_DEV_STOPPING);
		if (idev->started_request_count == 0) {
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done, idev);
			return SCI_SUCCESS;
		} else
			return sci_remote_device_terminate_requests(idev);
		break;
	case SCI_DEV_STOPPING:
		/* All requests should have been terminated, but if there is an
		 * attempt to stop a device already in the stopping state, then
		 * try again to terminate.
		 */
		return sci_remote_device_terminate_requests(idev);
	case SCI_DEV_RESETTING:
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	}
}

enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		sci_change_state(sm, SCI_DEV_RESETTING);
		return SCI_SUCCESS;
	}
}

enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	if (state != SCI_DEV_RESETTING) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCI_DEV_READY);
	return SCI_SUCCESS;
}

enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
					  u32 suspend_type)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	if (state != SCI_STP_DEV_CMD) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	return sci_remote_node_context_suspend(&idev->rnc,
					       suspend_type, NULL, NULL);
}

enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
						u32 frame_index)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		/* Return the frame back to the controller */
		sci_controller_release_frame(ihost, frame_index);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING: {
		struct isci_request *ireq;
		struct ssp_frame_hdr hdr;
		void *frame_header;
		ssize_t word_cnt;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  &frame_header);
		if (status != SCI_SUCCESS)
			return status;

		word_cnt = sizeof(hdr) / sizeof(u32);
		sci_swab32_cpy(&hdr, frame_header, word_cnt);

		ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
		if (ireq && ireq->target_device == idev) {
			/* The IO request is now in charge of releasing the frame */
			status = sci_io_request_frame_handler(ireq, frame_index);
		} else {
			/* We could not map this tag to a valid IO request.
			 * Just toss the frame and continue.
			 */
			sci_controller_release_frame(ihost, frame_index);
		}
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct dev_to_host_fis *hdr;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&hdr);
		if (status != SCI_SUCCESS)
			return status;

		if (hdr->fis_type == FIS_SETDEVBITS &&
		    (hdr->status & ATA_ERR)) {
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;

			/* TODO Check sactive and complete associated IO if any. */
			sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
		} else if (hdr->fis_type == FIS_REGD2H &&
			   (hdr->status & ATA_ERR)) {
			/*
			 * Some devices return a D2H FIS when an NCQ error is detected.
			 * Treat this like an SDB error FIS ready reason.
			 */
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
			sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
		} else
			status = SCI_FAILURE;

		sci_controller_release_frame(ihost, frame_index);
		break;
	}
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device does not process any UF received from the hardware while
		 * in this state.  All unsolicited frames are forwarded to the io request
		 * object.
		 */
		status = sci_io_request_frame_handler(idev->working_request, frame_index);
		break;
	}

	return status;
}

static bool is_remote_device_ready(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		return true;
	default:
		return false;
	}
}

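/* Descriptive comment added for clarity; it summarizes the handler below.
 * RNC operation and suspension events are forwarded to the remote node
 * context state machine, an I_T nexus timeout triggers a software suspension
 * of the RNC, and a suspension event received while the device is idle
 * (SCI_STP_DEV_IDLE) causes the RNC to be resumed right away.
 */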
enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
						u32 event_code)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	enum sci_status status;

	switch (scu_get_event_type(event_code)) {
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
		status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
		break;
	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
			status = SCI_SUCCESS;

			/* Suspend the associated RNC */
			sci_remote_node_context_suspend(&idev->rnc,
							SCI_SOFTWARE_SUSPENSION,
							NULL, NULL);

			dev_dbg(scirdev_to_dev(idev),
				"%s: device: %p event code: %x: %s\n",
				__func__, idev, event_code,
				is_remote_device_ready(idev)
				? "I_T_Nexus_Timeout event"
				: "I_T_Nexus_Timeout event in wrong state");

			break;
		}
		/* Else, fall through and treat as unhandled... */
	default:
		dev_dbg(scirdev_to_dev(idev),
			"%s: device: %p event code: %x: %s\n",
			__func__, idev, event_code,
			is_remote_device_ready(idev)
			? "unexpected event"
			: "unexpected event in wrong state");
		status = SCI_FAILURE_INVALID_STATE;
		break;
	}

	if (status != SCI_SUCCESS)
		return status;

	if (state == SCI_STP_DEV_IDLE) {
		/* We pick up suspension events to handle specifically in this
		 * state.  We resume the RNC right away.
		 */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
			status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
	}

	return status;
}

static void sci_remote_device_start_request(struct isci_remote_device *idev,
					    struct isci_request *ireq,
					    enum sci_status status)
{
	struct isci_port *iport = idev->owning_port;

	/* cleanup requests that failed after starting on the port */
	if (status != SCI_SUCCESS)
		sci_port_complete_io(iport, idev, ireq);
	else {
		kref_get(&idev->kref);
		idev->started_request_count++;
	}
}

enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
		/* attempt to start an io request for this device object.  The remote
		 * device object will issue the start request for the io and if
		 * successful it will start the request for the port object then
		 * increment its own request count.
		 */
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	case SCI_STP_DEV_IDLE: {
		/* handle the start io operation for a sata device that is in
		 * the command idle state:
		 * - Evaluate the type of IO request to be started.
		 * - If it is an NCQ request, change to the NCQ substate.
		 * - If it is any other command, change to the CMD substate.
		 *
		 * If this is a softreset we may want to have a different
		 * substate.
		 */
		enum sci_remote_device_states new_state;
		struct sas_task *task = isci_request_access_task(ireq);

		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		if (task->ata_task.use_ncq)
			new_state = SCI_STP_DEV_NCQ;
		else {
			idev->working_request = ireq;
			new_state = SCI_STP_DEV_CMD;
		}
		sci_change_state(sm, new_state);
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct sas_task *task = isci_request_access_task(ireq);

		if (task->ata_task.use_ncq) {
			status = sci_port_start_io(iport, idev, ireq);
			if (status != SCI_SUCCESS)
				return status;

			status = sci_remote_node_context_start_io(&idev->rnc, ireq);
			if (status != SCI_SUCCESS)
				break;

			status = sci_request_start(ireq);
		} else
			return SCI_FAILURE_INVALID_STATE;
		break;
	}
	case SCI_STP_DEV_AWAIT_RESET:
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	case SCI_SMP_DEV_IDLE:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		idev->working_request = ireq;
		sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device is already handling a command; it cannot accept
		 * new commands until this one is complete.
		 */
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_remote_device_start_request(idev, ireq, status);
	return status;
}

static enum sci_status common_complete_io(struct isci_port *iport,
					  struct isci_remote_device *idev,
					  struct isci_request *ireq)
{
	enum sci_status status;

	status = sci_request_complete(ireq);
	if (status != SCI_SUCCESS)
		return status;

	status = sci_port_complete_io(iport, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	sci_remote_device_decrement_request_count(idev);
	return status;
}

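/* Descriptive comment added for clarity; it summarizes the function below.
 * Complete an I/O request against this device and update the device substate:
 * an STP completion may push the state machine to the idle or await-reset
 * substates, and for a stopping device the remote node context is destructed
 * once the last outstanding request completes.
 */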
enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
					      struct isci_remote_device *idev,
					      struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_RESETTING:
		status = common_complete_io(iport, idev, ireq);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
			/* This request caused a hardware error and the device
			 * needs a LUN reset.  Force the state machine to the
			 * await-reset state so the remaining IOs reach the RNC
			 * state handler and are completed with a status of
			 * "DEVICE_RESET_REQUIRED" instead of "INVALID STATE".
			 */
			sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
		} else if (idev->started_request_count == 0)
			sci_change_state(sm, SCI_STP_DEV_IDLE);
		break;
	case SCI_SMP_DEV_CMD:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;
		sci_change_state(sm, SCI_SMP_DEV_IDLE);
		break;
	case SCI_DEV_STOPPING:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (idev->started_request_count == 0)
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done,
							 idev);
		break;
	}

	if (status != SCI_SUCCESS)
		dev_err(scirdev_to_dev(idev),
			"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
			"could not complete\n", __func__, iport,
			idev, ireq, status);
	else
		isci_put_device(idev);

	return status;
}

static void sci_remote_device_continue_request(void *dev)
{
	struct isci_remote_device *idev = dev;

	/* we need to check if this request is still valid to continue. */
	if (idev->working_request)
		sci_controller_continue_io(idev->working_request);
}

enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
					     struct isci_remote_device *idev,
					     struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_task(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			goto out;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			goto out;

		/* Note: If the remote device state is not IDLE this will
		 * replace the request that probably resulted in the task
		 * management request.
		 */
		idev->working_request = ireq;
		sci_change_state(sm, SCI_STP_DEV_CMD);

		/* The remote node context must cleanup the TCi to NCQ mapping
		 * table.  The only way to do this correctly is to either write
		 * to the TLCR register or to invalidate and repost the RNC.  In
		 * either case the remote node context state machine will take
		 * the correct action when the remote node context is suspended
		 * and later resumed.
		 */
		sci_remote_node_context_suspend(&idev->rnc,
				SCI_SOFTWARE_SUSPENSION, NULL, NULL);
		sci_remote_node_context_resume(&idev->rnc,
				sci_remote_device_continue_request,
				idev);

	out:
		sci_remote_device_start_request(idev, ireq, status);
		/* We need to let the controller start request handler know that
		 * it can't post the TC yet.
		 * We will provide a callback function to post the TC when the
		 * RNC gets resumed.
		 */
		return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
	case SCI_DEV_READY:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_task(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	}
	sci_remote_device_start_request(idev, ireq, status);

	return status;
}

void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
{
	struct isci_port *iport = idev->owning_port;
	u32 context;

	context = request |
		  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
		  (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
		  idev->rnc.remote_node_index;

	sci_controller_post_request(iport->owning_controller, context);
}

/* called once the remote node context has transitioned to a
 * ready state.  This is the indication that the remote device object can also
 * transition to ready.
 */
static void remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	if (is_remote_device_ready(idev))
		return;

	/* go 'ready' if we are not already in a ready state */
	sci_change_state(&idev->sm, SCI_DEV_READY);
}

static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	/* For NCQ operation we do not issue a isci_remote_device_not_ready().
	 * As a result, avoid sending the ready notification.
	 */
	if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
		isci_remote_device_ready(ihost, idev);
}

static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	/* Initial state is a transitional state to the stopped state */
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

/**
 * sci_remote_device_destruct() - free remote node context and destruct
 * @remote_device: This parameter specifies the remote device to be destructed.
 *
 * Remote device objects are a limited resource.  As such, they must be
 * protected.  Thus calls to construct and destruct are mutually exclusive and
 * non-reentrant.  The return value indicates whether the device was
 * successfully destructed: SCI_SUCCESS if the device was destructed, or
 * SCI_FAILURE_INVALID_REMOTE_DEVICE if the supplied device isn't valid
 * (e.g. it has already been destroyed, the handle isn't valid, etc.).
 */
static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	ihost = idev->owning_port->owning_controller;
	sci_controller_free_remote_node_context(ihost, idev,
						idev->rnc.remote_node_index);
	idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
	sci_change_state(sm, SCI_DEV_FINAL);

	return SCI_SUCCESS;
}

/**
 * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device to be freed.
 *
 */
static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	/* There should not be any outstanding io's.  All paths to
	 * here should go through isci_remote_device_nuke_requests.
	 * If we hit this condition, we will need a way to complete
	 * io requests in process */
	BUG_ON(!list_empty(&idev->reqs_in_process));

	sci_remote_device_destruct(idev);
	list_del_init(&idev->node);
	isci_put_device(idev);
}

static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	u32 prev_state;

	/* If we are entering from the stopping state let the SCI User know that
	 * the stop operation has completed.
	 */
	prev_state = idev->sm.previous_state_id;
	if (prev_state == SCI_DEV_STOPPING)
		isci_remote_device_deconstruct(ihost, idev);

	sci_controller_remote_device_stopped(ihost, idev);
}

static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
}

static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	struct domain_device *dev = idev->domain_dev;

	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
		sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
	} else if (dev_is_expander(dev)) {
		sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
	} else
		isci_remote_device_ready(ihost, idev);
}

static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct domain_device *dev = idev->domain_dev;

	if (dev->dev_type == SAS_END_DEV) {
		struct isci_host *ihost = idev->owning_port->owning_controller;

		isci_remote_device_not_ready(ihost, idev,
					     SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
	}
}

static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	sci_remote_node_context_suspend(
		&idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
}

static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
}

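/* Descriptive comment added for clarity; it summarizes the handler below.
 * On entry to the STP idle substate, drop the working request and make sure
 * the RNC is resumed before signalling that the device is ready again.
 */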
static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	idev->working_request = NULL;
	if (sci_remote_node_context_is_ready(&idev->rnc)) {
		/*
		 * Since the RNC is ready, it's alright to finish completion
		 * processing (e.g. signal the remote device is ready).
		 */
		sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
	} else {
		sci_remote_node_context_resume(&idev->rnc,
			sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
			idev);
	}
}

static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	BUG_ON(idev->working_request == NULL);

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
}

static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
		isci_remote_device_not_ready(ihost, idev,
					     idev->not_ready_reason);
}

static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	isci_remote_device_ready(ihost, idev);
}

static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	BUG_ON(idev->working_request == NULL);

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
}

static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	idev->working_request = NULL;
}

static const struct sci_base_state sci_remote_device_state_table[] = {
	[SCI_DEV_INITIAL] = {
		.enter_state = sci_remote_device_initial_state_enter,
	},
	[SCI_DEV_STOPPED] = {
		.enter_state = sci_remote_device_stopped_state_enter,
	},
	[SCI_DEV_STARTING] = {
		.enter_state = sci_remote_device_starting_state_enter,
	},
	[SCI_DEV_READY] = {
		.enter_state = sci_remote_device_ready_state_enter,
		.exit_state  = sci_remote_device_ready_state_exit
	},
	[SCI_STP_DEV_IDLE] = {
		.enter_state = sci_stp_remote_device_ready_idle_substate_enter,
	},
	[SCI_STP_DEV_CMD] = {
		.enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
	},
	[SCI_STP_DEV_NCQ] = { },
	[SCI_STP_DEV_NCQ_ERROR] = {
		.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
	},
	[SCI_STP_DEV_AWAIT_RESET] = { },
	[SCI_SMP_DEV_IDLE] = {
		.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
	},
	[SCI_SMP_DEV_CMD] = {
		.enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
		.exit_state  = sci_smp_remote_device_ready_cmd_substate_exit,
	},
	[SCI_DEV_STOPPING] = { },
	[SCI_DEV_FAILED] = { },
	[SCI_DEV_RESETTING] = {
		.enter_state = sci_remote_device_resetting_state_enter,
		.exit_state  = sci_remote_device_resetting_state_exit
	},
	[SCI_DEV_FINAL] = { },
};

/**
 * sci_remote_device_construct() - common construction
 * @iport: SAS/SATA port through which this device is accessed.
 * @idev: remote device to construct
 *
 * This routine just performs benign initialization and does not
 * allocate the remote_node_context which is left to
 * sci_remote_device_[de]a_construct().  sci_remote_device_destruct()
 * frees the remote_node_context(s) for the device.
 */
static void sci_remote_device_construct(struct isci_port *iport,
					struct isci_remote_device *idev)
{
	idev->owning_port = iport;
	idev->started_request_count = 0;

	sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);

	sci_remote_node_context_construct(&idev->rnc,
					  SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}

/**
 * sci_remote_device_da_construct() - construct direct attached device.
 *
 * The information (e.g. IAF, Signature FIS, etc.) necessary to build
 * the device is known to the SCI Core since it is contained in the
 * sci_phy object.  Remote node context(s) is/are a global resource
 * allocated by this routine, freed by sci_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
						      struct isci_remote_device *idev)
{
	enum sci_status status;
	struct domain_device *dev = idev->domain_dev;

	sci_remote_device_construct(iport, idev);

	/*
	 * This information is requested to determine how many remote node
	 * context entries will be needed to store the remote node.
	 */
	idev->is_direct_attached = true;
	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
							     idev,
							     &idev->rnc.remote_node_index);

	if (status != SCI_SUCCESS)
		return status;

	if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
	    (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
		/* pass */;
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	idev->connection_rate = sci_port_get_max_allowed_speed(iport);

	/* @todo Should I assign the port width by reading all of the phys on the port? */
	idev->device_port_width = 1;

	return SCI_SUCCESS;
}

/**
 * sci_remote_device_ea_construct() - construct expander attached device
 *
 * Remote node context(s) is/are a global resource allocated by this
 * routine, freed by sci_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
						      struct isci_remote_device *idev)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status;

	sci_remote_device_construct(iport, idev);

	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
							     idev,
							     &idev->rnc.remote_node_index);
	if (status != SCI_SUCCESS)
		return status;

	if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
	    (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
		/* pass */;
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	/*
	 * For SAS-2 the physical link rate is actually a logical link
	 * rate that incorporates multiplexing.  The SCU doesn't
	 * incorporate multiplexing and for the purposes of the
	 * connection the logical link rate is the same as the
	 * physical.  Furthermore, the SAS-2 and SAS-1.1 fields overlay
	 * one another, so this code works for both situations.
	 */
	idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
				      dev->linkrate);

	/* @todo Should I assign the port width by reading all of the phys on the port? */
	idev->device_port_width = 1;

	return SCI_SUCCESS;
}

/**
 * sci_remote_device_start() - This method will start the supplied remote
 *    device.  This method enables normal IO requests to flow through to the
 *    remote device.
 * @remote_device: This parameter specifies the device to be started.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    start operation should complete.
 *
 * An indication of whether the device was successfully started.  SCI_SUCCESS
 * is returned if the device was successfully started.  SCI_FAILURE_INVALID_PHY
 * is returned if the user attempts to start the device when there have been
 * no phys added to it.
 */
static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
					       u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	enum sci_status status;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_remote_node_context_resume(&idev->rnc,
						remote_device_resume_done,
						idev);
	if (status != SCI_SUCCESS)
		return status;

	sci_change_state(sm, SCI_DEV_STARTING);

	return SCI_SUCCESS;
}

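/* Descriptive comment added for clarity; it summarizes the function below.
 * Construct the device according to its attachment (expander-attached vs.
 * direct-attached) and then start it; a warning is logged if the start fails
 * and the status is returned to the caller.
 */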
static enum sci_status isci_remote_device_construct(struct isci_port *iport,
						    struct isci_remote_device *idev)
{
	struct isci_host *ihost = iport->isci_host;
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status;

	if (dev->parent && dev_is_expander(dev->parent))
		status = sci_remote_device_ea_construct(iport, idev);
	else
		status = sci_remote_device_da_construct(iport, idev);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
			__func__, status);

		return status;
	}

	/* start the device. */
	status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);

	if (status != SCI_SUCCESS)
		dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
			 status);

	return status;
}

void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
{
	DECLARE_COMPLETION_ONSTACK(aborted_task_completion);

	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p\n", __func__, idev);

	/* Cleanup all requests pending for this device. */
	isci_terminate_pending_requests(ihost, idev);

	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p, done\n", __func__, idev);
}

/**
 * isci_remote_device_alloc() - This function builds the isci_remote_device
 *    when a libsas dev_found message is received.
 * @ihost: This parameter specifies the isci host object.
 * @iport: This parameter specifies the isci_port connected to this device.
 *
 * Returns a pointer to the new isci_remote_device, or NULL if none is
 * available.
 */
static struct isci_remote_device *
isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
{
	struct isci_remote_device *idev;
	int i;

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		idev = &ihost->devices[i];
		if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
			break;
	}

	if (i >= SCI_MAX_REMOTE_DEVICES) {
		dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
		return NULL;
	}

	if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
		return NULL;

	if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
		return NULL;

	return idev;
}

void isci_remote_device_release(struct kref *kref)
{
	struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
	struct isci_host *ihost = idev->isci_port->isci_host;

	idev->domain_dev = NULL;
	idev->isci_port = NULL;
	clear_bit(IDEV_START_PENDING, &idev->flags);
	clear_bit(IDEV_STOP_PENDING, &idev->flags);
	clear_bit(IDEV_IO_READY, &idev->flags);
	clear_bit(IDEV_GONE, &idev->flags);
	clear_bit(IDEV_EH, &idev->flags);
	smp_mb__before_clear_bit();
	clear_bit(IDEV_ALLOCATED, &idev->flags);
	wake_up(&ihost->eventq);
}

/**
 * isci_remote_device_stop() - This function is called internally to stop the
 *    remote device.
 * @isci_host: This parameter specifies the isci host object.
 * @isci_device: This parameter specifies the remote device.
 *
 * The status of the ihost request to stop.
 */
enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
	enum sci_status status;
	unsigned long flags;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
	set_bit(IDEV_GONE, &idev->flags);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Kill all outstanding requests. */
	isci_remote_device_nuke_requests(ihost, idev);

	set_bit(IDEV_STOP_PENDING, &idev->flags);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	status = sci_remote_device_stop(idev, 50);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait for the stop complete callback.
	 */
	if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
		/* nothing to wait for */;
	else
		wait_for_device_stop(ihost, idev);

	return status;
}

/**
 * isci_remote_device_gone() - This function is called by libsas when a domain
 *    device is removed.
 * @domain_device: This parameter specifies the libsas domain device.
 *
 */
void isci_remote_device_gone(struct domain_device *dev)
{
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev = dev->lldd_dev;

	dev_dbg(&ihost->pdev->dev,
		"%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
		__func__, dev, idev, idev->isci_port);

	isci_remote_device_stop(ihost, idev);
}


/**
 * isci_remote_device_found() - This function is called by libsas when a remote
 *    device is discovered.  A remote device object is created and started.  The
 *    function then sleeps until the sci core device started message is
 *    received.
 * @domain_device: This parameter specifies the libsas domain device.
 *
 * status, zero indicates success.
 */
int isci_remote_device_found(struct domain_device *domain_dev)
{
	struct isci_host *isci_host = dev_to_ihost(domain_dev);
	struct isci_port *isci_port;
	struct isci_phy *isci_phy;
	struct asd_sas_port *sas_port;
	struct asd_sas_phy *sas_phy;
	struct isci_remote_device *isci_device;
	enum sci_status status;

	dev_dbg(&isci_host->pdev->dev,
		"%s: domain_device = %p\n", __func__, domain_dev);

	wait_for_start(isci_host);

	sas_port = domain_dev->port;
	sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
				   port_phy_el);
	isci_phy = to_iphy(sas_phy);
	isci_port = isci_phy->isci_port;

	/* we are being called for a device on this port,
	 * so it has to come up eventually
	 */
	wait_for_completion(&isci_port->start_complete);

	if ((isci_stopping == isci_port_get_state(isci_port)) ||
	    (isci_stopped == isci_port_get_state(isci_port)))
		return -ENODEV;

	isci_device = isci_remote_device_alloc(isci_host, isci_port);
	if (!isci_device)
		return -ENODEV;

	kref_init(&isci_device->kref);
	INIT_LIST_HEAD(&isci_device->node);

	spin_lock_irq(&isci_host->scic_lock);
	isci_device->domain_dev = domain_dev;
	isci_device->isci_port = isci_port;
	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);

	set_bit(IDEV_START_PENDING, &isci_device->flags);
	status = isci_remote_device_construct(isci_port, isci_device);

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = %p\n",
		__func__, isci_device);

	if (status == SCI_SUCCESS) {
		/* device came up, advertise it to the world */
		domain_dev->lldd_dev = isci_device;
	} else
		isci_put_device(isci_device);
	spin_unlock_irq(&isci_host->scic_lock);

	/* wait for the device ready callback. */
	wait_for_device_start(isci_host, isci_device);

	return status == SCI_SUCCESS ? 0 : -ENODEV;
}

/**
 * isci_device_is_reset_pending() - This function will check if there is any
 *    pending reset condition on the device.
 * @isci_host: This parameter specifies the isci host object.
 * @isci_device: This parameter is the isci_device object.
 *
 * Returns true if there is a reset pending for the device.
 */
bool isci_device_is_reset_pending(
	struct isci_host *isci_host,
	struct isci_remote_device *isci_device)
{
	struct isci_request *isci_request;
	struct isci_request *tmp_req;
	bool reset_is_pending = false;
	unsigned long flags;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = %p\n", __func__, isci_device);

	spin_lock_irqsave(&isci_host->scic_lock, flags);

	/* Check for reset on all pending requests. */
	list_for_each_entry_safe(isci_request, tmp_req,
				 &isci_device->reqs_in_process, dev_node) {
		dev_dbg(&isci_host->pdev->dev,
			"%s: isci_device = %p request = %p\n",
			__func__, isci_device, isci_request);

		if (isci_request->ttype == io_task) {
			struct sas_task *task = isci_request_access_task(
				isci_request);

			spin_lock(&task->task_state_lock);
			if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
				reset_is_pending = true;
			spin_unlock(&task->task_state_lock);
		}
	}

	spin_unlock_irqrestore(&isci_host->scic_lock, flags);

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = %p reset_is_pending = %d\n",
		__func__, isci_device, reset_is_pending);

	return reset_is_pending;
}

/**
 * isci_device_clear_reset_pending() - This function will clear any pending
 *    reset condition flags on the device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter is the isci_device object.
 *
 */
void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev)
{
	struct isci_request *isci_request;
	struct isci_request *tmp_req;
	unsigned long flags = 0;

	dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n",
		__func__, idev, ihost);

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* Clear reset pending on all pending requests. */
	list_for_each_entry_safe(isci_request, tmp_req,
				 &idev->reqs_in_process, dev_node) {
		dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n",
			__func__, idev, isci_request);

		if (isci_request->ttype == io_task) {

			unsigned long flags2;
			struct sas_task *task = isci_request_access_task(
				isci_request);

			spin_lock_irqsave(&task->task_state_lock, flags2);
			task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
			spin_unlock_irqrestore(&task->task_state_lock, flags2);
		}
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}