/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas.h>
#include <linux/bitops.h>
#include "isci.h"
#include "port.h"
#include "remote_device.h"
#include "request.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "task.h"

#undef C
#define C(a) (#a)
const char *dev_state_name(enum sci_remote_device_states state)
{
	static const char * const strings[] = REMOTE_DEV_STATES;

	return strings[state];
}
#undef C

/**
 * isci_remote_device_not_ready() - This function is called by the ihost when
 *    the remote device is not ready.  We mark the isci device as not ready
 *    for I/O and handle the not-ready reason.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device.
 * @reason: This parameter specifies the reason the device is not ready.
 *
 * sci_lock is held on entrance to this function.
 */
static void isci_remote_device_not_ready(struct isci_host *ihost,
					 struct isci_remote_device *idev, u32 reason)
{
	struct isci_request *ireq;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	switch (reason) {
	case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
		set_bit(IDEV_GONE, &idev->flags);
		break;
	case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
		set_bit(IDEV_IO_NCQERROR, &idev->flags);

		/* Kill all outstanding requests for the device. */
		list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {

			dev_dbg(&ihost->pdev->dev,
				"%s: isci_device = %p request = %p\n",
				__func__, idev, ireq);

			sci_controller_terminate_request(ihost,
							 idev,
							 ireq);
		}
		/* Fall through into the default case... */
	default:
		clear_bit(IDEV_IO_READY, &idev->flags);
		break;
	}
}

/**
 * isci_remote_device_ready() - This function is called by the ihost when the
 *    remote device is ready.  We mark the isci device as ready and signal the
 *    waiting process.
 * @ihost: our valid isci_host
 * @idev: remote device
 *
 */
static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p\n", __func__, idev);

	clear_bit(IDEV_IO_NCQERROR, &idev->flags);
	set_bit(IDEV_IO_READY, &idev->flags);
	if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
		wake_up(&ihost->eventq);
}

/* called once the remote node context is ready to be freed.
 * The remote device can now report that its stop operation is complete.
 */
static void rnc_destruct_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	BUG_ON(idev->started_request_count != 0);
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
{
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	u32 i;

	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
		struct isci_request *ireq = ihost->reqs[i];
		enum sci_status s;

		if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
		    ireq->target_device != idev)
			continue;

		s = sci_controller_terminate_request(ihost, idev, ireq);
		if (s != SCI_SUCCESS)
			status = s;
	}

	return status;
}

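/* Request that the device stop.  Depending on the current state this either
 * terminates any outstanding requests and/or destructs the remote node
 * context; the device reaches SCI_DEV_STOPPED once the RNC destruct
 * completes (see rnc_destruct_done()).
 */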
enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
				       u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_STOPPED:
		return SCI_SUCCESS;
	case SCI_DEV_STARTING:
		/* device not started so there had better be no requests */
		BUG_ON(idev->started_request_count != 0);
		sci_remote_node_context_destruct(&idev->rnc,
						 rnc_destruct_done, idev);
		/* Transition to the stopping state and wait for the
		 * remote node to complete being posted and invalidated.
		 */
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		sci_change_state(sm, SCI_DEV_STOPPING);
		if (idev->started_request_count == 0) {
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done, idev);
			return SCI_SUCCESS;
		} else
			return sci_remote_device_terminate_requests(idev);
		break;
	case SCI_DEV_STOPPING:
		/* All requests should have been terminated, but if there is an
		 * attempt to stop a device already in the stopping state, then
		 * try again to terminate.
		 */
		return sci_remote_device_terminate_requests(idev);
	case SCI_DEV_RESETTING:
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	}
}

enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		sci_change_state(sm, SCI_DEV_RESETTING);
		return SCI_SUCCESS;
	}
}

enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	if (state != SCI_DEV_RESETTING) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCI_DEV_READY);
	return SCI_SUCCESS;
}

enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
					  u32 suspend_type)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	if (state != SCI_STP_DEV_CMD) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	return sci_remote_node_context_suspend(&idev->rnc,
					       suspend_type, NULL, NULL);
}

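/* Dispatch an unsolicited frame received for this device.  Most states match
 * the frame to its originating request by the tag in the frame header; in the
 * NCQ substate an error FIS moves the device to the NCQ error substate.
 * Frames that cannot be matched are released back to the controller.
 */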
enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
						u32 frame_index)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		/* Return the frame back to the controller */
		sci_controller_release_frame(ihost, frame_index);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING: {
		struct isci_request *ireq;
		struct ssp_frame_hdr hdr;
		void *frame_header;
		ssize_t word_cnt;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  &frame_header);
		if (status != SCI_SUCCESS)
			return status;

		word_cnt = sizeof(hdr) / sizeof(u32);
		sci_swab32_cpy(&hdr, frame_header, word_cnt);

		ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
		if (ireq && ireq->target_device == idev) {
			/* The IO request is now in charge of releasing the frame */
			status = sci_io_request_frame_handler(ireq, frame_index);
		} else {
			/* We could not map this tag to a valid IO
			 * request.  Just toss the frame and continue.
			 */
			sci_controller_release_frame(ihost, frame_index);
		}
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct dev_to_host_fis *hdr;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&hdr);
		if (status != SCI_SUCCESS)
			return status;

		if (hdr->fis_type == FIS_SETDEVBITS &&
		    (hdr->status & ATA_ERR)) {
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;

			/* TODO Check sactive and complete associated IO if any. */
			sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
		} else if (hdr->fis_type == FIS_REGD2H &&
			   (hdr->status & ATA_ERR)) {
			/*
			 * Some devices return a D2H FIS when an NCQ error is
			 * detected.  Treat this like an SDB error FIS ready reason.
			 */
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
			sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
		} else
			status = SCI_FAILURE;

		sci_controller_release_frame(ihost, frame_index);
		break;
	}
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device does not process any UF received from the hardware while
		 * in this state.  All unsolicited frames are forwarded to the io request
		 * object.
		 */
		status = sci_io_request_frame_handler(idev->working_request, frame_index);
		break;
	}

	return status;
}

static bool is_remote_device_ready(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		return true;
	default:
		return false;
	}
}

/*
 * called once the remote node context has transitioned to a ready
 * state (after suspending RX and/or TX due to early D2H fis)
 */
static void atapi_remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_request *ireq = idev->working_request;

	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}

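/* Handle an SCU event reported against this device: RNC related events are
 * forwarded to the remote node context, an I_T nexus timeout suspends the
 * RNC, and anything else is logged and rejected.  Suspension events seen in
 * the ATAPI error or STP idle states trigger an immediate RNC resume.
 */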
enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
						u32 event_code)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	enum sci_status status;

	switch (scu_get_event_type(event_code)) {
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
		status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
		break;
	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
			status = SCI_SUCCESS;

			/* Suspend the associated RNC */
			sci_remote_node_context_suspend(&idev->rnc,
							SCI_SOFTWARE_SUSPENSION,
							NULL, NULL);

			dev_dbg(scirdev_to_dev(idev),
				"%s: device: %p event code: %x: %s\n",
				__func__, idev, event_code,
				is_remote_device_ready(idev)
				? "I_T_Nexus_Timeout event"
				: "I_T_Nexus_Timeout event in wrong state");

			break;
		}
		/* Else, fall through and treat as unhandled... */
	default:
		dev_dbg(scirdev_to_dev(idev),
			"%s: device: %p event code: %x: %s\n",
			__func__, idev, event_code,
			is_remote_device_ready(idev)
			? "unexpected event"
			: "unexpected event in wrong state");
		status = SCI_FAILURE_INVALID_STATE;
		break;
	}

	if (status != SCI_SUCCESS)
		return status;

	if (state == SCI_STP_DEV_ATAPI_ERROR) {
		/* For ATAPI error state resume the RNC right away. */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
			return sci_remote_node_context_resume(&idev->rnc,
							      atapi_remote_device_resume_done,
							      idev);
		}
	}

	if (state == SCI_STP_DEV_IDLE) {

		/* We pick up suspension events to handle specifically in this
		 * state.  We resume the RNC right away.
		 */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
			status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
	}

	return status;
}

static void sci_remote_device_start_request(struct isci_remote_device *idev,
					    struct isci_request *ireq,
					    enum sci_status status)
{
	struct isci_port *iport = idev->owning_port;

	/* cleanup requests that failed after starting on the port */
	if (status != SCI_SUCCESS)
		sci_port_complete_io(iport, idev, ireq);
	else {
		kref_get(&idev->kref);
		idev->started_request_count++;
	}
}

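/* Start an I/O request on this device.  The request must first be started on
 * the owning port and on the remote node context; on success the device takes
 * a reference and bumps its started request count, and STP/SMP devices move
 * to the appropriate command substate.
 */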
enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
		/* attempt to start an io request for this device object.  The remote
		 * device object will issue the start request for the io and if
		 * successful it will start the request for the port object then
		 * increment its own request count.
		 */
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	case SCI_STP_DEV_IDLE: {
		/* handle the start io operation for a sata device that is in
		 * the command idle state:
		 * - Evaluate the type of IO request to be started.
		 * - If it is an NCQ request, change to the NCQ substate.
		 * - If it is any other command, change to the CMD substate.
		 *
		 * If this is a softreset we may want to have a different
		 * substate.
		 */
		enum sci_remote_device_states new_state;
		struct sas_task *task = isci_request_access_task(ireq);

		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		if (task->ata_task.use_ncq)
			new_state = SCI_STP_DEV_NCQ;
		else {
			idev->working_request = ireq;
			new_state = SCI_STP_DEV_CMD;
		}
		sci_change_state(sm, new_state);
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct sas_task *task = isci_request_access_task(ireq);

		if (task->ata_task.use_ncq) {
			status = sci_port_start_io(iport, idev, ireq);
			if (status != SCI_SUCCESS)
				return status;

			status = sci_remote_node_context_start_io(&idev->rnc, ireq);
			if (status != SCI_SUCCESS)
				break;

			status = sci_request_start(ireq);
		} else
			return SCI_FAILURE_INVALID_STATE;
		break;
	}
	case SCI_STP_DEV_AWAIT_RESET:
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	case SCI_SMP_DEV_IDLE:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		idev->working_request = ireq;
		sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device is already handling a command; it cannot accept
		 * new commands until this one is complete.
		 */
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_remote_device_start_request(idev, ireq, status);
	return status;
}

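/* Common completion path: complete the request, release its slot on the
 * owning port, and drop the device's started request count.
 */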
static enum sci_status common_complete_io(struct isci_port *iport,
					  struct isci_remote_device *idev,
					  struct isci_request *ireq)
{
	enum sci_status status;

	status = sci_request_complete(ireq);
	if (status != SCI_SUCCESS)
		return status;

	status = sci_port_complete_io(iport, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	sci_remote_device_decrement_request_count(idev);
	return status;
}

enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
					      struct isci_remote_device *idev,
					      struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_RESETTING:
		status = common_complete_io(iport, idev, ireq);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_ATAPI_ERROR:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
			/* This request caused a hardware error; the device
			 * needs a LUN reset.  So here we force the state
			 * machine to the AWAIT_RESET state so the remaining
			 * IOs reach the RNC state handler and are completed
			 * there with a status of "DEVICE_RESET_REQUIRED"
			 * instead of "INVALID STATE".
			 */
			sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
		} else if (idev->started_request_count == 0)
			sci_change_state(sm, SCI_STP_DEV_IDLE);
		break;
	case SCI_SMP_DEV_CMD:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;
		sci_change_state(sm, SCI_SMP_DEV_IDLE);
		break;
	case SCI_DEV_STOPPING:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (idev->started_request_count == 0)
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done,
							 idev);
		break;
	}

	if (status != SCI_SUCCESS)
		dev_err(scirdev_to_dev(idev),
			"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
			"could not complete\n", __func__, iport,
			idev, ireq, status);
	else
		isci_put_device(idev);

	return status;
}

static void sci_remote_device_continue_request(void *dev)
{
	struct isci_remote_device *idev = dev;

	/* we need to check if this request is still valid to continue. */
	if (idev->working_request)
		sci_controller_continue_io(idev->working_request);
}

enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
					     struct isci_remote_device *idev,
					     struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_task(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			goto out;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			goto out;

		/* Note: If the remote device state is not IDLE this will
		 * replace the request that probably resulted in the task
		 * management request.
		 */
		idev->working_request = ireq;
		sci_change_state(sm, SCI_STP_DEV_CMD);

		/* The remote node context must cleanup the TCi to NCQ mapping
		 * table.  The only way to do this correctly is to either write
		 * to the TLCR register or to invalidate and repost the RNC.  In
		 * either case the remote node context state machine will take
		 * the correct action when the remote node context is suspended
		 * and later resumed.
		 */
		sci_remote_node_context_suspend(&idev->rnc,
						SCI_SOFTWARE_SUSPENSION, NULL, NULL);
		sci_remote_node_context_resume(&idev->rnc,
					       sci_remote_device_continue_request,
					       idev);

	out:
		sci_remote_device_start_request(idev, ireq, status);
		/* We need to let the controller start request handler know that
		 * it can't post TC yet.  We will provide a callback function to
		 * post TC when RNC gets resumed.
		 */
		return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
	case SCI_DEV_READY:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_task(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	}
	sci_remote_device_start_request(idev, ireq, status);

	return status;
}

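/* Post a task context request for this device to the hardware.  The request
 * context combines the supplied request type with the protocol engine group,
 * the logical port index, and this device's remote node index.
 */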
void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
{
	struct isci_port *iport = idev->owning_port;
	u32 context;

	context = request |
		  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
		  (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
		  idev->rnc.remote_node_index;

	sci_controller_post_request(iport->owning_controller, context);
}

/* called once the remote node context has transitioned to a
 * ready state.  This is the indication that the remote device object can also
 * transition to ready.
 */
static void remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	if (is_remote_device_ready(idev))
		return;

	/* go 'ready' if we are not already in a ready state */
	sci_change_state(&idev->sm, SCI_DEV_READY);
}

static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	/* For NCQ operation we do not issue an isci_remote_device_not_ready().
	 * As a result, avoid sending the ready notification.
	 */
	if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
		isci_remote_device_ready(ihost, idev);
}

static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	/* Initial state is a transitional state to the stopped state */
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

/**
 * sci_remote_device_destruct() - free remote node context and destruct
 * @idev: This parameter specifies the remote device to be destructed.
 *
 * Remote device objects are a limited resource.  As such, they must be
 * protected.  Thus calls to construct and destruct are mutually exclusive and
 * non-reentrant.  The return value indicates whether the device was
 * successfully destructed or if some failure occurred.  SCI_SUCCESS is
 * returned if the device is successfully destructed.
 * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied device isn't
 * valid (e.g. it's already been destroyed, the handle isn't valid, etc.).
 */
static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	ihost = idev->owning_port->owning_controller;
	sci_controller_free_remote_node_context(ihost, idev,
						idev->rnc.remote_node_index);
	idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
	sci_change_state(sm, SCI_DEV_FINAL);

	return SCI_SUCCESS;
}

/**
 * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device to be freed.
 *
 */
static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	/* There should not be any outstanding io's.  All paths to
	 * here should go through isci_remote_device_nuke_requests.
	 * If we hit this condition, we will need a way to complete
	 * io requests in process.
	 */
	BUG_ON(!list_empty(&idev->reqs_in_process));

	sci_remote_device_destruct(idev);
	list_del_init(&idev->node);
	isci_put_device(idev);
}

static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	u32 prev_state;

	/* If we are entering from the stopping state let the SCI User know that
	 * the stop operation has completed.
	 */
	prev_state = idev->sm.previous_state_id;
	if (prev_state == SCI_DEV_STOPPING)
		isci_remote_device_deconstruct(ihost, idev);

	sci_controller_remote_device_stopped(ihost, idev);
}

static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
}

static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	struct domain_device *dev = idev->domain_dev;

	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
		sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
	} else if (dev_is_expander(dev)) {
		sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
	} else
		isci_remote_device_ready(ihost, idev);
}

static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct domain_device *dev = idev->domain_dev;

	if (dev->dev_type == SAS_END_DEV) {
		struct isci_host *ihost = idev->owning_port->owning_controller;

		isci_remote_device_not_ready(ihost, idev,
					     SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
	}
}

static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	sci_remote_node_context_suspend(
		&idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
}

static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
}

static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	idev->working_request = NULL;
	if (sci_remote_node_context_is_ready(&idev->rnc)) {
		/*
		 * Since the RNC is ready, it's alright to finish completion
		 * processing (e.g. signal the remote device is ready).
		 */
		sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
	} else {
		sci_remote_node_context_resume(&idev->rnc,
			sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
			idev);
	}
}

static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	BUG_ON(idev->working_request == NULL);

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
}

static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
		isci_remote_device_not_ready(ihost, idev,
					     idev->not_ready_reason);
}

static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	isci_remote_device_ready(ihost, idev);
}

static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	BUG_ON(idev->working_request == NULL);

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
}

static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	idev->working_request = NULL;
}

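/* enter/exit handlers for each state in enum sci_remote_device_states;
 * states with empty entries need no state-specific processing.
 */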
static const struct sci_base_state sci_remote_device_state_table[] = {
	[SCI_DEV_INITIAL] = {
		.enter_state = sci_remote_device_initial_state_enter,
	},
	[SCI_DEV_STOPPED] = {
		.enter_state = sci_remote_device_stopped_state_enter,
	},
	[SCI_DEV_STARTING] = {
		.enter_state = sci_remote_device_starting_state_enter,
	},
	[SCI_DEV_READY] = {
		.enter_state = sci_remote_device_ready_state_enter,
		.exit_state = sci_remote_device_ready_state_exit
	},
	[SCI_STP_DEV_IDLE] = {
		.enter_state = sci_stp_remote_device_ready_idle_substate_enter,
	},
	[SCI_STP_DEV_CMD] = {
		.enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
	},
	[SCI_STP_DEV_NCQ] = { },
	[SCI_STP_DEV_NCQ_ERROR] = {
		.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
	},
	[SCI_STP_DEV_ATAPI_ERROR] = { },
	[SCI_STP_DEV_AWAIT_RESET] = { },
	[SCI_SMP_DEV_IDLE] = {
		.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
	},
	[SCI_SMP_DEV_CMD] = {
		.enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
		.exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
	},
	[SCI_DEV_STOPPING] = { },
	[SCI_DEV_FAILED] = { },
	[SCI_DEV_RESETTING] = {
		.enter_state = sci_remote_device_resetting_state_enter,
		.exit_state = sci_remote_device_resetting_state_exit
	},
	[SCI_DEV_FINAL] = { },
};

/**
 * sci_remote_device_construct() - common construction
 * @iport: SAS/SATA port through which this device is accessed.
 * @idev: remote device to construct
 *
 * This routine just performs benign initialization and does not
 * allocate the remote_node_context which is left to
 * sci_remote_device_[de]a_construct().  sci_remote_device_destruct()
 * frees the remote_node_context(s) for the device.
 */
static void sci_remote_device_construct(struct isci_port *iport,
					struct isci_remote_device *idev)
{
	idev->owning_port = iport;
	idev->started_request_count = 0;

	sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);

	sci_remote_node_context_construct(&idev->rnc,
					  SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}

/**
 * sci_remote_device_da_construct() - construct direct attached device.
 *
 * The information (e.g. IAF, Signature FIS, etc.) necessary to build
 * the device is known to the SCI Core since it is contained in the
 * sci_phy object.  Remote node context(s) is/are a global resource
 * allocated by this routine, freed by sci_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
						      struct isci_remote_device *idev)
{
	enum sci_status status;
	struct sci_port_properties properties;
	struct domain_device *dev = idev->domain_dev;

	sci_remote_device_construct(iport, idev);

	/*
	 * This information is requested to determine how many remote node
	 * context entries will be needed to store the remote node.
	 */
	idev->is_direct_attached = true;

	sci_port_get_properties(iport, &properties);
	/* Get accurate port width from port's phy mask for a DA device. */
	idev->device_port_width = hweight32(properties.phy_mask);

	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
							     idev,
							     &idev->rnc.remote_node_index);

	if (status != SCI_SUCCESS)
		return status;

	if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
	    (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
		/* pass */;
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	idev->connection_rate = sci_port_get_max_allowed_speed(iport);

	return SCI_SUCCESS;
}

/**
 * sci_remote_device_ea_construct() - construct expander attached device
 *
 * Remote node context(s) is/are a global resource allocated by this
 * routine, freed by sci_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
						      struct isci_remote_device *idev)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status;

	sci_remote_device_construct(iport, idev);

	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
							     idev,
							     &idev->rnc.remote_node_index);
	if (status != SCI_SUCCESS)
		return status;

	if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
	    (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
		/* pass */;
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	/*
	 * For SAS-2 the physical link rate is actually a logical link
	 * rate that incorporates multiplexing.  The SCU doesn't
	 * incorporate multiplexing and for the purposes of the
	 * connection the logical link rate is the same as the
	 * physical.  Furthermore, the SAS-2 and SAS-1.1 fields overlay
	 * one another, so this code works for both situations.
	 */
	idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
				      dev->linkrate);

	/* @todo Should we assign the port width by reading all of the phys on the port? */
	idev->device_port_width = 1;

	return SCI_SUCCESS;
}

/**
 * sci_remote_device_start() - This method will start the supplied remote
 *    device.  This method enables normal IO requests to flow through to the
 *    remote device.
 * @idev: This parameter specifies the device to be started.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    start operation should complete.
 *
 * An indication of whether the device was successfully started.  SCI_SUCCESS
 * This value is returned if the device was successfully started.
 * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
 * the device when there have been no phys added to it.
 */
static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
					       u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	enum sci_status status;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_remote_node_context_resume(&idev->rnc,
						remote_device_resume_done,
						idev);
	if (status != SCI_SUCCESS)
		return status;

	sci_change_state(sm, SCI_DEV_STARTING);

	return SCI_SUCCESS;
}

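/* Construct and start a remote device reported by libsas: expander attached
 * and direct attached devices use different constructors, after which the
 * device is started.
 */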
static enum sci_status isci_remote_device_construct(struct isci_port *iport,
						    struct isci_remote_device *idev)
{
	struct isci_host *ihost = iport->isci_host;
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status;

	if (dev->parent && dev_is_expander(dev->parent))
		status = sci_remote_device_ea_construct(iport, idev);
	else
		status = sci_remote_device_da_construct(iport, idev);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
			__func__, status);

		return status;
	}

	/* start the device. */
	status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);

	if (status != SCI_SUCCESS)
		dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
			 status);

	return status;
}

void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
{
	DECLARE_COMPLETION_ONSTACK(aborted_task_completion);

	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p\n", __func__, idev);

	/* Cleanup all requests pending for this device. */
	isci_terminate_pending_requests(ihost, idev);

	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p, done\n", __func__, idev);
}

/**
 * isci_remote_device_alloc() - This function allocates an isci_remote_device
 *    when a libsas dev_found message is received.
 * @ihost: This parameter specifies the isci host object.
 * @iport: This parameter specifies the isci_port connected to this device.
 *
 * Returns a pointer to the new isci_remote_device, or NULL on failure.
 */
static struct isci_remote_device *
isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
{
	struct isci_remote_device *idev;
	int i;

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		idev = &ihost->devices[i];
		if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
			break;
	}

	if (i >= SCI_MAX_REMOTE_DEVICES) {
		dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
		return NULL;
	}

	if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
		return NULL;

	if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
		return NULL;

	return idev;
}

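/* kref release callback for an isci_remote_device: clear the device's
 * bookkeeping state and flags, then wake any waiters once the slot is
 * marked unallocated and may be reused.
 */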
void isci_remote_device_release(struct kref *kref)
{
	struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
	struct isci_host *ihost = idev->isci_port->isci_host;

	idev->domain_dev = NULL;
	idev->isci_port = NULL;
	clear_bit(IDEV_START_PENDING, &idev->flags);
	clear_bit(IDEV_STOP_PENDING, &idev->flags);
	clear_bit(IDEV_IO_READY, &idev->flags);
	clear_bit(IDEV_GONE, &idev->flags);
	smp_mb__before_clear_bit();
	clear_bit(IDEV_ALLOCATED, &idev->flags);
	wake_up(&ihost->eventq);
}

/**
 * isci_remote_device_stop() - This function is called internally to stop the
 *    remote device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device.
 *
 * Returns the status of the ihost request to stop.
 */
enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
	enum sci_status status;
	unsigned long flags;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
	set_bit(IDEV_GONE, &idev->flags);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Kill all outstanding requests. */
	isci_remote_device_nuke_requests(ihost, idev);

	set_bit(IDEV_STOP_PENDING, &idev->flags);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	status = sci_remote_device_stop(idev, 50);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait for the stop complete callback. */
	if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
		/* nothing to wait for */;
	else
		wait_for_device_stop(ihost, idev);

	return status;
}

/**
 * isci_remote_device_gone() - This function is called by libsas when a domain
 *    device is removed.
 * @dev: This parameter specifies the libsas domain device.
 *
 */
void isci_remote_device_gone(struct domain_device *dev)
{
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev = dev->lldd_dev;

	dev_dbg(&ihost->pdev->dev,
		"%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
		__func__, dev, idev, idev->isci_port);

	isci_remote_device_stop(ihost, idev);
}

/**
 * isci_remote_device_found() - This function is called by libsas when a remote
 *    device is discovered.  A remote device object is created and started.  The
 *    function then sleeps until the sci core device started message is
 *    received.
 * @dev: This parameter specifies the libsas domain device.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int isci_remote_device_found(struct domain_device *dev)
{
	struct isci_host *isci_host = dev_to_ihost(dev);
	struct isci_port *isci_port = dev->port->lldd_port;
	struct isci_remote_device *isci_device;
	enum sci_status status;

	dev_dbg(&isci_host->pdev->dev,
		"%s: domain_device = %p\n", __func__, dev);

	if (!isci_port)
		return -ENODEV;

	isci_device = isci_remote_device_alloc(isci_host, isci_port);
	if (!isci_device)
		return -ENODEV;

	kref_init(&isci_device->kref);
	INIT_LIST_HEAD(&isci_device->node);

	spin_lock_irq(&isci_host->scic_lock);
	isci_device->domain_dev = dev;
	isci_device->isci_port = isci_port;
	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);

	set_bit(IDEV_START_PENDING, &isci_device->flags);
	status = isci_remote_device_construct(isci_port, isci_device);

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = %p\n",
		__func__, isci_device);

	if (status == SCI_SUCCESS) {
		/* device came up, advertise it to the world */
		dev->lldd_dev = isci_device;
	} else
		isci_put_device(isci_device);
	spin_unlock_irq(&isci_host->scic_lock);

	/* wait for the device ready callback. */
	wait_for_device_start(isci_host, isci_device);

	return status == SCI_SUCCESS ? 0 : -ENODEV;
}