1 /* 2 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * redistributing this file, you may do so under either license. 4 * 5 * GPL LICENSE SUMMARY 6 * 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 21 * The full GNU General Public License is included in this distribution 22 * in the file called LICENSE.GPL. 23 * 24 * BSD LICENSE 25 * 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 27 * All rights reserved. 28 * 29 * Redistribution and use in source and binary forms, with or without 30 * modification, are permitted provided that the following conditions 31 * are met: 32 * 33 * * Redistributions of source code must retain the above copyright 34 * notice, this list of conditions and the following disclaimer. 35 * * Redistributions in binary form must reproduce the above copyright 36 * notice, this list of conditions and the following disclaimer in 37 * the documentation and/or other materials provided with the 38 * distribution. 39 * * Neither the name of Intel Corporation nor the names of its 40 * contributors may be used to endorse or promote products derived 41 * from this software without specific prior written permission. 42 * 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 */ 55 56 #include <scsi/scsi_cmnd.h> 57 #include "isci.h" 58 #include "task.h" 59 #include "request.h" 60 #include "scu_completion_codes.h" 61 #include "scu_event_codes.h" 62 #include "sas.h" 63 64 #undef C 65 #define C(a) (#a) 66 const char *req_state_name(enum sci_base_request_states state) 67 { 68 static const char * const strings[] = REQUEST_STATES; 69 70 return strings[state]; 71 } 72 #undef C 73 74 static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, 75 int idx) 76 { 77 if (idx == 0) 78 return &ireq->tc->sgl_pair_ab; 79 else if (idx == 1) 80 return &ireq->tc->sgl_pair_cd; 81 else if (idx < 0) 82 return NULL; 83 else 84 return &ireq->sg_table[idx - 2]; 85 } 86 87 static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, 88 struct isci_request *ireq, u32 idx) 89 { 90 u32 offset; 91 92 if (idx == 0) { 93 offset = (void *) &ireq->tc->sgl_pair_ab - 94 (void *) &ihost->task_context_table[0]; 95 return ihost->task_context_dma + offset; 96 } else if (idx == 1) { 97 offset = (void *) &ireq->tc->sgl_pair_cd - 98 (void *) &ihost->task_context_table[0]; 99 return ihost->task_context_dma + offset; 100 } 101 102 return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); 103 } 104 105 static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) 106 { 107 e->length = sg_dma_len(sg); 108 e->address_upper = upper_32_bits(sg_dma_address(sg)); 109 e->address_lower = lower_32_bits(sg_dma_address(sg)); 110 e->address_modifier = 0; 111 } 112 113 static void sci_request_build_sgl(struct isci_request *ireq) 114 { 115 struct isci_host *ihost = ireq->isci_host; 116 struct sas_task *task = isci_request_access_task(ireq); 117 struct scatterlist *sg = NULL; 118 dma_addr_t dma_addr; 119 u32 sg_idx = 0; 120 struct scu_sgl_element_pair *scu_sg = NULL; 121 struct scu_sgl_element_pair *prev_sg = NULL; 122 123 if (task->num_scatter > 0) { 124 sg = task->scatter; 125 126 while (sg) { 127 scu_sg = to_sgl_element_pair(ireq, sg_idx); 128 init_sgl_element(&scu_sg->A, sg); 129 sg = sg_next(sg); 130 if (sg) { 131 init_sgl_element(&scu_sg->B, sg); 132 sg = sg_next(sg); 133 } else 134 memset(&scu_sg->B, 0, sizeof(scu_sg->B)); 135 136 if (prev_sg) { 137 dma_addr = to_sgl_element_pair_dma(ihost, 138 ireq, 139 sg_idx); 140 141 prev_sg->next_pair_upper = 142 upper_32_bits(dma_addr); 143 prev_sg->next_pair_lower = 144 lower_32_bits(dma_addr); 145 } 146 147 prev_sg = scu_sg; 148 sg_idx++; 149 } 150 } else { /* handle when no sg */ 151 scu_sg = to_sgl_element_pair(ireq, sg_idx); 152 153 dma_addr = dma_map_single(&ihost->pdev->dev, 154 task->scatter, 155 task->total_xfer_len, 156 task->data_dir); 157 158 ireq->zero_scatter_daddr = dma_addr; 159 160 scu_sg->A.length = task->total_xfer_len; 161 scu_sg->A.address_upper = upper_32_bits(dma_addr); 162 scu_sg->A.address_lower = lower_32_bits(dma_addr); 163 } 164 165 if (scu_sg) { 166 scu_sg->next_pair_upper = 0; 167 scu_sg->next_pair_lower = 0; 168 } 169 } 170 171 static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) 172 { 173 struct ssp_cmd_iu *cmd_iu; 174 struct sas_task *task = isci_request_access_task(ireq); 175 176 cmd_iu = &ireq->ssp.cmd; 177 178 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); 179 cmd_iu->add_cdb_len = 0; 180 cmd_iu->_r_a = 0; 181 cmd_iu->_r_b = 0; 182 cmd_iu->en_fburst = 0; /* unsupported */ 183 cmd_iu->task_prio = task->ssp_task.task_prio; 184 cmd_iu->task_attr = task->ssp_task.task_attr; 185 cmd_iu->_r_c = 0; 186 187 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb, 188 sizeof(task->ssp_task.cdb) / sizeof(u32)); 189 } 190 191 static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) 192 { 193 struct ssp_task_iu *task_iu; 194 struct sas_task *task = isci_request_access_task(ireq); 195 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 196 197 task_iu = &ireq->ssp.tmf; 198 199 memset(task_iu, 0, sizeof(struct ssp_task_iu)); 200 201 memcpy(task_iu->LUN, task->ssp_task.LUN, 8); 202 203 task_iu->task_func = isci_tmf->tmf_code; 204 task_iu->task_tag = 205 (test_bit(IREQ_TMF, &ireq->flags)) ? 206 isci_tmf->io_tag : 207 SCI_CONTROLLER_INVALID_IO_TAG; 208 } 209 210 /** 211 * This method is will fill in the SCU Task Context for any type of SSP request. 212 * @sci_req: 213 * @task_context: 214 * 215 */ 216 static void scu_ssp_reqeust_construct_task_context( 217 struct isci_request *ireq, 218 struct scu_task_context *task_context) 219 { 220 dma_addr_t dma_addr; 221 struct isci_remote_device *idev; 222 struct isci_port *iport; 223 224 idev = ireq->target_device; 225 iport = idev->owning_port; 226 227 /* Fill in the TC with the its required data */ 228 task_context->abort = 0; 229 task_context->priority = 0; 230 task_context->initiator_request = 1; 231 task_context->connection_rate = idev->connection_rate; 232 task_context->protocol_engine_index = ISCI_PEG; 233 task_context->logical_port_index = iport->physical_port_index; 234 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; 235 task_context->valid = SCU_TASK_CONTEXT_VALID; 236 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 237 238 task_context->remote_node_index = idev->rnc.remote_node_index; 239 task_context->command_code = 0; 240 241 task_context->link_layer_control = 0; 242 task_context->do_not_dma_ssp_good_response = 1; 243 task_context->strict_ordering = 0; 244 task_context->control_frame = 0; 245 task_context->timeout_enable = 0; 246 task_context->block_guard_enable = 0; 247 248 task_context->address_modifier = 0; 249 250 /* task_context->type.ssp.tag = ireq->io_tag; */ 251 task_context->task_phase = 0x01; 252 253 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 254 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 255 (iport->physical_port_index << 256 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 257 ISCI_TAG_TCI(ireq->io_tag)); 258 259 /* 260 * Copy the physical address for the command buffer to the 261 * SCU Task Context 262 */ 263 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); 264 265 task_context->command_iu_upper = upper_32_bits(dma_addr); 266 task_context->command_iu_lower = lower_32_bits(dma_addr); 267 268 /* 269 * Copy the physical address for the response buffer to the 270 * SCU Task Context 271 */ 272 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); 273 274 task_context->response_iu_upper = upper_32_bits(dma_addr); 275 task_context->response_iu_lower = lower_32_bits(dma_addr); 276 } 277 278 static u8 scu_bg_blk_size(struct scsi_device *sdp) 279 { 280 switch (sdp->sector_size) { 281 case 512: 282 return 0; 283 case 1024: 284 return 1; 285 case 4096: 286 return 3; 287 default: 288 return 0xff; 289 } 290 } 291 292 static u32 scu_dif_bytes(u32 len, u32 sector_size) 293 { 294 return (len >> ilog2(sector_size)) * 8; 295 } 296 297 static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op) 298 { 299 struct scu_task_context *tc = ireq->tc; 300 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; 301 u8 blk_sz = scu_bg_blk_size(scmd->device); 302 303 tc->block_guard_enable = 1; 304 tc->blk_prot_en = 1; 305 tc->blk_sz = blk_sz; 306 /* DIF write insert */ 307 tc->blk_prot_func = 0x2; 308 309 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, 310 scmd->device->sector_size); 311 312 /* always init to 0, used by hw */ 313 tc->interm_crc_val = 0; 314 315 tc->init_crc_seed = 0; 316 tc->app_tag_verify = 0; 317 tc->app_tag_gen = 0; 318 tc->ref_tag_seed_verify = 0; 319 320 /* always init to same as bg_blk_sz */ 321 tc->UD_bytes_immed_val = scmd->device->sector_size; 322 323 tc->reserved_DC_0 = 0; 324 325 /* always init to 8 */ 326 tc->DIF_bytes_immed_val = 8; 327 328 tc->reserved_DC_1 = 0; 329 tc->bgc_blk_sz = scmd->device->sector_size; 330 tc->reserved_E0_0 = 0; 331 tc->app_tag_gen_mask = 0; 332 333 /** setup block guard control **/ 334 tc->bgctl = 0; 335 336 /* DIF write insert */ 337 tc->bgctl_f.op = 0x2; 338 339 tc->app_tag_verify_mask = 0; 340 341 /* must init to 0 for hw */ 342 tc->blk_guard_err = 0; 343 344 tc->reserved_E8_0 = 0; 345 346 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) 347 tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff; 348 else if (type & SCSI_PROT_DIF_TYPE3) 349 tc->ref_tag_seed_gen = 0; 350 } 351 352 static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op) 353 { 354 struct scu_task_context *tc = ireq->tc; 355 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; 356 u8 blk_sz = scu_bg_blk_size(scmd->device); 357 358 tc->block_guard_enable = 1; 359 tc->blk_prot_en = 1; 360 tc->blk_sz = blk_sz; 361 /* DIF read strip */ 362 tc->blk_prot_func = 0x1; 363 364 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, 365 scmd->device->sector_size); 366 367 /* always init to 0, used by hw */ 368 tc->interm_crc_val = 0; 369 370 tc->init_crc_seed = 0; 371 tc->app_tag_verify = 0; 372 tc->app_tag_gen = 0; 373 374 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) 375 tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff; 376 else if (type & SCSI_PROT_DIF_TYPE3) 377 tc->ref_tag_seed_verify = 0; 378 379 /* always init to same as bg_blk_sz */ 380 tc->UD_bytes_immed_val = scmd->device->sector_size; 381 382 tc->reserved_DC_0 = 0; 383 384 /* always init to 8 */ 385 tc->DIF_bytes_immed_val = 8; 386 387 tc->reserved_DC_1 = 0; 388 tc->bgc_blk_sz = scmd->device->sector_size; 389 tc->reserved_E0_0 = 0; 390 tc->app_tag_gen_mask = 0; 391 392 /** setup block guard control **/ 393 tc->bgctl = 0; 394 395 /* DIF read strip */ 396 tc->bgctl_f.crc_verify = 1; 397 tc->bgctl_f.op = 0x1; 398 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) { 399 tc->bgctl_f.ref_tag_chk = 1; 400 tc->bgctl_f.app_f_detect = 1; 401 } else if (type & SCSI_PROT_DIF_TYPE3) 402 tc->bgctl_f.app_ref_f_detect = 1; 403 404 tc->app_tag_verify_mask = 0; 405 406 /* must init to 0 for hw */ 407 tc->blk_guard_err = 0; 408 409 tc->reserved_E8_0 = 0; 410 tc->ref_tag_seed_gen = 0; 411 } 412 413 /** 414 * This method is will fill in the SCU Task Context for a SSP IO request. 415 * @sci_req: 416 * 417 */ 418 static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, 419 enum dma_data_direction dir, 420 u32 len) 421 { 422 struct scu_task_context *task_context = ireq->tc; 423 struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr; 424 struct scsi_cmnd *scmd = sas_task->uldd_task; 425 u8 prot_type = scsi_get_prot_type(scmd); 426 u8 prot_op = scsi_get_prot_op(scmd); 427 428 scu_ssp_reqeust_construct_task_context(ireq, task_context); 429 430 task_context->ssp_command_iu_length = 431 sizeof(struct ssp_cmd_iu) / sizeof(u32); 432 task_context->type.ssp.frame_type = SSP_COMMAND; 433 434 switch (dir) { 435 case DMA_FROM_DEVICE: 436 case DMA_NONE: 437 default: 438 task_context->task_type = SCU_TASK_TYPE_IOREAD; 439 break; 440 case DMA_TO_DEVICE: 441 task_context->task_type = SCU_TASK_TYPE_IOWRITE; 442 break; 443 } 444 445 task_context->transfer_length_bytes = len; 446 447 if (task_context->transfer_length_bytes > 0) 448 sci_request_build_sgl(ireq); 449 450 if (prot_type != SCSI_PROT_DIF_TYPE0) { 451 if (prot_op == SCSI_PROT_READ_STRIP) 452 scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op); 453 else if (prot_op == SCSI_PROT_WRITE_INSERT) 454 scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op); 455 } 456 } 457 458 /** 459 * This method will fill in the SCU Task Context for a SSP Task request. The 460 * following important settings are utilized: -# priority == 461 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued 462 * ahead of other task destined for the same Remote Node. -# task_type == 463 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type 464 * (i.e. non-raw frame) is being utilized to perform task management. -# 465 * control_frame == 1. This ensures that the proper endianess is set so 466 * that the bytes are transmitted in the right order for a task frame. 467 * @sci_req: This parameter specifies the task request object being 468 * constructed. 469 * 470 */ 471 static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq) 472 { 473 struct scu_task_context *task_context = ireq->tc; 474 475 scu_ssp_reqeust_construct_task_context(ireq, task_context); 476 477 task_context->control_frame = 1; 478 task_context->priority = SCU_TASK_PRIORITY_HIGH; 479 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME; 480 task_context->transfer_length_bytes = 0; 481 task_context->type.ssp.frame_type = SSP_TASK; 482 task_context->ssp_command_iu_length = 483 sizeof(struct ssp_task_iu) / sizeof(u32); 484 } 485 486 /** 487 * This method is will fill in the SCU Task Context for any type of SATA 488 * request. This is called from the various SATA constructors. 489 * @sci_req: The general IO request object which is to be used in 490 * constructing the SCU task context. 491 * @task_context: The buffer pointer for the SCU task context which is being 492 * constructed. 493 * 494 * The general io request construction is complete. The buffer assignment for 495 * the command buffer is complete. none Revisit task context construction to 496 * determine what is common for SSP/SMP/STP task context structures. 497 */ 498 static void scu_sata_reqeust_construct_task_context( 499 struct isci_request *ireq, 500 struct scu_task_context *task_context) 501 { 502 dma_addr_t dma_addr; 503 struct isci_remote_device *idev; 504 struct isci_port *iport; 505 506 idev = ireq->target_device; 507 iport = idev->owning_port; 508 509 /* Fill in the TC with the its required data */ 510 task_context->abort = 0; 511 task_context->priority = SCU_TASK_PRIORITY_NORMAL; 512 task_context->initiator_request = 1; 513 task_context->connection_rate = idev->connection_rate; 514 task_context->protocol_engine_index = ISCI_PEG; 515 task_context->logical_port_index = iport->physical_port_index; 516 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; 517 task_context->valid = SCU_TASK_CONTEXT_VALID; 518 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 519 520 task_context->remote_node_index = idev->rnc.remote_node_index; 521 task_context->command_code = 0; 522 523 task_context->link_layer_control = 0; 524 task_context->do_not_dma_ssp_good_response = 1; 525 task_context->strict_ordering = 0; 526 task_context->control_frame = 0; 527 task_context->timeout_enable = 0; 528 task_context->block_guard_enable = 0; 529 530 task_context->address_modifier = 0; 531 task_context->task_phase = 0x01; 532 533 task_context->ssp_command_iu_length = 534 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); 535 536 /* Set the first word of the H2D REG FIS */ 537 task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; 538 539 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 540 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 541 (iport->physical_port_index << 542 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 543 ISCI_TAG_TCI(ireq->io_tag)); 544 /* 545 * Copy the physical address for the command buffer to the SCU Task 546 * Context. We must offset the command buffer by 4 bytes because the 547 * first 4 bytes are transfered in the body of the TC. 548 */ 549 dma_addr = sci_io_request_get_dma_addr(ireq, 550 ((char *) &ireq->stp.cmd) + 551 sizeof(u32)); 552 553 task_context->command_iu_upper = upper_32_bits(dma_addr); 554 task_context->command_iu_lower = lower_32_bits(dma_addr); 555 556 /* SATA Requests do not have a response buffer */ 557 task_context->response_iu_upper = 0; 558 task_context->response_iu_lower = 0; 559 } 560 561 static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq) 562 { 563 struct scu_task_context *task_context = ireq->tc; 564 565 scu_sata_reqeust_construct_task_context(ireq, task_context); 566 567 task_context->control_frame = 0; 568 task_context->priority = SCU_TASK_PRIORITY_NORMAL; 569 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; 570 task_context->type.stp.fis_type = FIS_REGH2D; 571 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); 572 } 573 574 static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq, 575 bool copy_rx_frame) 576 { 577 struct isci_stp_request *stp_req = &ireq->stp.req; 578 579 scu_stp_raw_request_construct_task_context(ireq); 580 581 stp_req->status = 0; 582 stp_req->sgl.offset = 0; 583 stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; 584 585 if (copy_rx_frame) { 586 sci_request_build_sgl(ireq); 587 stp_req->sgl.index = 0; 588 } else { 589 /* The user does not want the data copied to the SGL buffer location */ 590 stp_req->sgl.index = -1; 591 } 592 593 return SCI_SUCCESS; 594 } 595 596 /** 597 * 598 * @sci_req: This parameter specifies the request to be constructed as an 599 * optimized request. 600 * @optimized_task_type: This parameter specifies whether the request is to be 601 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A 602 * value of 1 indicates NCQ. 603 * 604 * This method will perform request construction common to all types of STP 605 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method 606 * returns an indication as to whether the construction was successful. 607 */ 608 static void sci_stp_optimized_request_construct(struct isci_request *ireq, 609 u8 optimized_task_type, 610 u32 len, 611 enum dma_data_direction dir) 612 { 613 struct scu_task_context *task_context = ireq->tc; 614 615 /* Build the STP task context structure */ 616 scu_sata_reqeust_construct_task_context(ireq, task_context); 617 618 /* Copy over the SGL elements */ 619 sci_request_build_sgl(ireq); 620 621 /* Copy over the number of bytes to be transfered */ 622 task_context->transfer_length_bytes = len; 623 624 if (dir == DMA_TO_DEVICE) { 625 /* 626 * The difference between the DMA IN and DMA OUT request task type 627 * values are consistent with the difference between FPDMA READ 628 * and FPDMA WRITE values. Add the supplied task type parameter 629 * to this difference to set the task type properly for this 630 * DATA OUT (WRITE) case. */ 631 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT 632 - SCU_TASK_TYPE_DMA_IN); 633 } else { 634 /* 635 * For the DATA IN (READ) case, simply save the supplied 636 * optimized task type. */ 637 task_context->task_type = optimized_task_type; 638 } 639 } 640 641 static void sci_atapi_construct(struct isci_request *ireq) 642 { 643 struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd; 644 struct sas_task *task; 645 646 /* To simplify the implementation we take advantage of the 647 * silicon's partial acceleration of atapi protocol (dma data 648 * transfers), so we promote all commands to dma protocol. This 649 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives. 650 */ 651 h2d_fis->features |= ATAPI_PKT_DMA; 652 653 scu_stp_raw_request_construct_task_context(ireq); 654 655 task = isci_request_access_task(ireq); 656 if (task->data_dir == DMA_NONE) 657 task->total_xfer_len = 0; 658 659 /* clear the response so we can detect arrivial of an 660 * unsolicited h2d fis 661 */ 662 ireq->stp.rsp.fis_type = 0; 663 } 664 665 static enum sci_status 666 sci_io_request_construct_sata(struct isci_request *ireq, 667 u32 len, 668 enum dma_data_direction dir, 669 bool copy) 670 { 671 enum sci_status status = SCI_SUCCESS; 672 struct sas_task *task = isci_request_access_task(ireq); 673 struct domain_device *dev = ireq->target_device->domain_dev; 674 675 /* check for management protocols */ 676 if (test_bit(IREQ_TMF, &ireq->flags)) { 677 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 678 679 dev_err(&ireq->owning_controller->pdev->dev, 680 "%s: Request 0x%p received un-handled SAT " 681 "management protocol 0x%x.\n", 682 __func__, ireq, tmf->tmf_code); 683 684 return SCI_FAILURE; 685 } 686 687 if (!sas_protocol_ata(task->task_proto)) { 688 dev_err(&ireq->owning_controller->pdev->dev, 689 "%s: Non-ATA protocol in SATA path: 0x%x\n", 690 __func__, 691 task->task_proto); 692 return SCI_FAILURE; 693 694 } 695 696 /* ATAPI */ 697 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && 698 task->ata_task.fis.command == ATA_CMD_PACKET) { 699 sci_atapi_construct(ireq); 700 return SCI_SUCCESS; 701 } 702 703 /* non data */ 704 if (task->data_dir == DMA_NONE) { 705 scu_stp_raw_request_construct_task_context(ireq); 706 return SCI_SUCCESS; 707 } 708 709 /* NCQ */ 710 if (task->ata_task.use_ncq) { 711 sci_stp_optimized_request_construct(ireq, 712 SCU_TASK_TYPE_FPDMAQ_READ, 713 len, dir); 714 return SCI_SUCCESS; 715 } 716 717 /* DMA */ 718 if (task->ata_task.dma_xfer) { 719 sci_stp_optimized_request_construct(ireq, 720 SCU_TASK_TYPE_DMA_IN, 721 len, dir); 722 return SCI_SUCCESS; 723 } else /* PIO */ 724 return sci_stp_pio_request_construct(ireq, copy); 725 726 return status; 727 } 728 729 static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq) 730 { 731 struct sas_task *task = isci_request_access_task(ireq); 732 733 ireq->protocol = SCIC_SSP_PROTOCOL; 734 735 scu_ssp_io_request_construct_task_context(ireq, 736 task->data_dir, 737 task->total_xfer_len); 738 739 sci_io_request_build_ssp_command_iu(ireq); 740 741 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 742 743 return SCI_SUCCESS; 744 } 745 746 enum sci_status sci_task_request_construct_ssp( 747 struct isci_request *ireq) 748 { 749 /* Construct the SSP Task SCU Task Context */ 750 scu_ssp_task_request_construct_task_context(ireq); 751 752 /* Fill in the SSP Task IU */ 753 sci_task_request_build_ssp_task_iu(ireq); 754 755 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 756 757 return SCI_SUCCESS; 758 } 759 760 static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq) 761 { 762 enum sci_status status; 763 bool copy = false; 764 struct sas_task *task = isci_request_access_task(ireq); 765 766 ireq->protocol = SCIC_STP_PROTOCOL; 767 768 copy = (task->data_dir == DMA_NONE) ? false : true; 769 770 status = sci_io_request_construct_sata(ireq, 771 task->total_xfer_len, 772 task->data_dir, 773 copy); 774 775 if (status == SCI_SUCCESS) 776 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 777 778 return status; 779 } 780 781 /** 782 * sci_req_tx_bytes - bytes transferred when reply underruns request 783 * @ireq: request that was terminated early 784 */ 785 #define SCU_TASK_CONTEXT_SRAM 0x200000 786 static u32 sci_req_tx_bytes(struct isci_request *ireq) 787 { 788 struct isci_host *ihost = ireq->owning_controller; 789 u32 ret_val = 0; 790 791 if (readl(&ihost->smu_registers->address_modifier) == 0) { 792 void __iomem *scu_reg_base = ihost->scu_registers; 793 794 /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where 795 * BAR1 is the scu_registers 796 * 0x20002C = 0x200000 + 0x2c 797 * = start of task context SRAM + offset of (type.ssp.data_offset) 798 * TCi is the io_tag of struct sci_request 799 */ 800 ret_val = readl(scu_reg_base + 801 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + 802 ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag))); 803 } 804 805 return ret_val; 806 } 807 808 enum sci_status sci_request_start(struct isci_request *ireq) 809 { 810 enum sci_base_request_states state; 811 struct scu_task_context *tc = ireq->tc; 812 struct isci_host *ihost = ireq->owning_controller; 813 814 state = ireq->sm.current_state_id; 815 if (state != SCI_REQ_CONSTRUCTED) { 816 dev_warn(&ihost->pdev->dev, 817 "%s: SCIC IO Request requested to start while in wrong " 818 "state %d\n", __func__, state); 819 return SCI_FAILURE_INVALID_STATE; 820 } 821 822 tc->task_index = ISCI_TAG_TCI(ireq->io_tag); 823 824 switch (tc->protocol_type) { 825 case SCU_TASK_CONTEXT_PROTOCOL_SMP: 826 case SCU_TASK_CONTEXT_PROTOCOL_SSP: 827 /* SSP/SMP Frame */ 828 tc->type.ssp.tag = ireq->io_tag; 829 tc->type.ssp.target_port_transfer_tag = 0xFFFF; 830 break; 831 832 case SCU_TASK_CONTEXT_PROTOCOL_STP: 833 /* STP/SATA Frame 834 * tc->type.stp.ncq_tag = ireq->ncq_tag; 835 */ 836 break; 837 838 case SCU_TASK_CONTEXT_PROTOCOL_NONE: 839 /* / @todo When do we set no protocol type? */ 840 break; 841 842 default: 843 /* This should never happen since we build the IO 844 * requests */ 845 break; 846 } 847 848 /* Add to the post_context the io tag value */ 849 ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag); 850 851 /* Everything is good go ahead and change state */ 852 sci_change_state(&ireq->sm, SCI_REQ_STARTED); 853 854 return SCI_SUCCESS; 855 } 856 857 enum sci_status 858 sci_io_request_terminate(struct isci_request *ireq) 859 { 860 enum sci_base_request_states state; 861 862 state = ireq->sm.current_state_id; 863 864 switch (state) { 865 case SCI_REQ_CONSTRUCTED: 866 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; 867 ireq->sci_status = SCI_FAILURE_IO_TERMINATED; 868 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 869 return SCI_SUCCESS; 870 case SCI_REQ_STARTED: 871 case SCI_REQ_TASK_WAIT_TC_COMP: 872 case SCI_REQ_SMP_WAIT_RESP: 873 case SCI_REQ_SMP_WAIT_TC_COMP: 874 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 875 case SCI_REQ_STP_UDMA_WAIT_D2H: 876 case SCI_REQ_STP_NON_DATA_WAIT_H2D: 877 case SCI_REQ_STP_NON_DATA_WAIT_D2H: 878 case SCI_REQ_STP_PIO_WAIT_H2D: 879 case SCI_REQ_STP_PIO_WAIT_FRAME: 880 case SCI_REQ_STP_PIO_DATA_IN: 881 case SCI_REQ_STP_PIO_DATA_OUT: 882 case SCI_REQ_ATAPI_WAIT_H2D: 883 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: 884 case SCI_REQ_ATAPI_WAIT_D2H: 885 case SCI_REQ_ATAPI_WAIT_TC_COMP: 886 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 887 return SCI_SUCCESS; 888 case SCI_REQ_TASK_WAIT_TC_RESP: 889 /* The task frame was already confirmed to have been 890 * sent by the SCU HW. Since the state machine is 891 * now only waiting for the task response itself, 892 * abort the request and complete it immediately 893 * and don't wait for the task response. 894 */ 895 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 896 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 897 return SCI_SUCCESS; 898 case SCI_REQ_ABORTING: 899 /* If a request has a termination requested twice, return 900 * a failure indication, since HW confirmation of the first 901 * abort is still outstanding. 902 */ 903 case SCI_REQ_COMPLETED: 904 default: 905 dev_warn(&ireq->owning_controller->pdev->dev, 906 "%s: SCIC IO Request requested to abort while in wrong " 907 "state %d\n", 908 __func__, 909 ireq->sm.current_state_id); 910 break; 911 } 912 913 return SCI_FAILURE_INVALID_STATE; 914 } 915 916 enum sci_status sci_request_complete(struct isci_request *ireq) 917 { 918 enum sci_base_request_states state; 919 struct isci_host *ihost = ireq->owning_controller; 920 921 state = ireq->sm.current_state_id; 922 if (WARN_ONCE(state != SCI_REQ_COMPLETED, 923 "isci: request completion from wrong state (%s)\n", 924 req_state_name(state))) 925 return SCI_FAILURE_INVALID_STATE; 926 927 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) 928 sci_controller_release_frame(ihost, 929 ireq->saved_rx_frame_index); 930 931 /* XXX can we just stop the machine and remove the 'final' state? */ 932 sci_change_state(&ireq->sm, SCI_REQ_FINAL); 933 return SCI_SUCCESS; 934 } 935 936 enum sci_status sci_io_request_event_handler(struct isci_request *ireq, 937 u32 event_code) 938 { 939 enum sci_base_request_states state; 940 struct isci_host *ihost = ireq->owning_controller; 941 942 state = ireq->sm.current_state_id; 943 944 if (state != SCI_REQ_STP_PIO_DATA_IN) { 945 dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n", 946 __func__, event_code, req_state_name(state)); 947 948 return SCI_FAILURE_INVALID_STATE; 949 } 950 951 switch (scu_get_event_specifier(event_code)) { 952 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: 953 /* We are waiting for data and the SCU has R_ERR the data frame. 954 * Go back to waiting for the D2H Register FIS 955 */ 956 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 957 return SCI_SUCCESS; 958 default: 959 dev_err(&ihost->pdev->dev, 960 "%s: pio request unexpected event %#x\n", 961 __func__, event_code); 962 963 /* TODO Should we fail the PIO request when we get an 964 * unexpected event? 965 */ 966 return SCI_FAILURE; 967 } 968 } 969 970 /* 971 * This function copies response data for requests returning response data 972 * instead of sense data. 973 * @sci_req: This parameter specifies the request object for which to copy 974 * the response data. 975 */ 976 static void sci_io_request_copy_response(struct isci_request *ireq) 977 { 978 void *resp_buf; 979 u32 len; 980 struct ssp_response_iu *ssp_response; 981 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 982 983 ssp_response = &ireq->ssp.rsp; 984 985 resp_buf = &isci_tmf->resp.resp_iu; 986 987 len = min_t(u32, 988 SSP_RESP_IU_MAX_SIZE, 989 be32_to_cpu(ssp_response->response_data_len)); 990 991 memcpy(resp_buf, ssp_response->resp_data, len); 992 } 993 994 static enum sci_status 995 request_started_state_tc_event(struct isci_request *ireq, 996 u32 completion_code) 997 { 998 struct ssp_response_iu *resp_iu; 999 u8 datapres; 1000 1001 /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000 1002 * to determine SDMA status 1003 */ 1004 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1005 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1006 ireq->scu_status = SCU_TASK_DONE_GOOD; 1007 ireq->sci_status = SCI_SUCCESS; 1008 break; 1009 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): { 1010 /* There are times when the SCU hardware will return an early 1011 * response because the io request specified more data than is 1012 * returned by the target device (mode pages, inquiry data, 1013 * etc.). We must check the response stats to see if this is 1014 * truly a failed request or a good request that just got 1015 * completed early. 1016 */ 1017 struct ssp_response_iu *resp = &ireq->ssp.rsp; 1018 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1019 1020 sci_swab32_cpy(&ireq->ssp.rsp, 1021 &ireq->ssp.rsp, 1022 word_cnt); 1023 1024 if (resp->status == 0) { 1025 ireq->scu_status = SCU_TASK_DONE_GOOD; 1026 ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; 1027 } else { 1028 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1029 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1030 } 1031 break; 1032 } 1033 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): { 1034 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1035 1036 sci_swab32_cpy(&ireq->ssp.rsp, 1037 &ireq->ssp.rsp, 1038 word_cnt); 1039 1040 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1041 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1042 break; 1043 } 1044 1045 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): 1046 /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame 1047 * guaranteed to be received before this completion status is 1048 * posted? 1049 */ 1050 resp_iu = &ireq->ssp.rsp; 1051 datapres = resp_iu->datapres; 1052 1053 if (datapres == 1 || datapres == 2) { 1054 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1055 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1056 } else { 1057 ireq->scu_status = SCU_TASK_DONE_GOOD; 1058 ireq->sci_status = SCI_SUCCESS; 1059 } 1060 break; 1061 /* only stp device gets suspended. */ 1062 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 1063 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR): 1064 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR): 1065 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR): 1066 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR): 1067 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN): 1068 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): 1069 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP): 1070 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): 1071 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 1072 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): 1073 if (ireq->protocol == SCIC_STP_PROTOCOL) { 1074 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1075 SCU_COMPLETION_TL_STATUS_SHIFT; 1076 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 1077 } else { 1078 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1079 SCU_COMPLETION_TL_STATUS_SHIFT; 1080 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1081 } 1082 break; 1083 1084 /* both stp/ssp device gets suspended */ 1085 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR): 1086 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION): 1087 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1): 1088 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2): 1089 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3): 1090 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION): 1091 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION): 1092 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): 1093 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): 1094 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): 1095 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1096 SCU_COMPLETION_TL_STATUS_SHIFT; 1097 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 1098 break; 1099 1100 /* neither ssp nor stp gets suspended. */ 1101 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR): 1102 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR): 1103 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR): 1104 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR): 1105 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR): 1106 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA): 1107 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 1108 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): 1109 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): 1110 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): 1111 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA): 1112 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL): 1113 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV): 1114 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): 1115 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): 1116 default: 1117 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1118 SCU_COMPLETION_TL_STATUS_SHIFT; 1119 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1120 break; 1121 } 1122 1123 /* 1124 * TODO: This is probably wrong for ACK/NAK timeout conditions 1125 */ 1126 1127 /* In all cases we will treat this as the completion of the IO req. */ 1128 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1129 return SCI_SUCCESS; 1130 } 1131 1132 static enum sci_status 1133 request_aborting_state_tc_event(struct isci_request *ireq, 1134 u32 completion_code) 1135 { 1136 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1137 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): 1138 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): 1139 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; 1140 ireq->sci_status = SCI_FAILURE_IO_TERMINATED; 1141 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1142 break; 1143 1144 default: 1145 /* Unless we get some strange error wait for the task abort to complete 1146 * TODO: Should there be a state change for this completion? 1147 */ 1148 break; 1149 } 1150 1151 return SCI_SUCCESS; 1152 } 1153 1154 static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq, 1155 u32 completion_code) 1156 { 1157 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1158 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1159 ireq->scu_status = SCU_TASK_DONE_GOOD; 1160 ireq->sci_status = SCI_SUCCESS; 1161 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); 1162 break; 1163 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 1164 /* Currently, the decision is to simply allow the task request 1165 * to timeout if the task IU wasn't received successfully. 1166 * There is a potential for receiving multiple task responses if 1167 * we decide to send the task IU again. 1168 */ 1169 dev_warn(&ireq->owning_controller->pdev->dev, 1170 "%s: TaskRequest:0x%p CompletionCode:%x - " 1171 "ACK/NAK timeout\n", __func__, ireq, 1172 completion_code); 1173 1174 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); 1175 break; 1176 default: 1177 /* 1178 * All other completion status cause the IO to be complete. 1179 * If a NAK was received, then it is up to the user to retry 1180 * the request. 1181 */ 1182 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1183 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1184 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1185 break; 1186 } 1187 1188 return SCI_SUCCESS; 1189 } 1190 1191 static enum sci_status 1192 smp_request_await_response_tc_event(struct isci_request *ireq, 1193 u32 completion_code) 1194 { 1195 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1196 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1197 /* In the AWAIT RESPONSE state, any TC completion is 1198 * unexpected. but if the TC has success status, we 1199 * complete the IO anyway. 1200 */ 1201 ireq->scu_status = SCU_TASK_DONE_GOOD; 1202 ireq->sci_status = SCI_SUCCESS; 1203 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1204 break; 1205 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 1206 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): 1207 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): 1208 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): 1209 /* These status has been seen in a specific LSI 1210 * expander, which sometimes is not able to send smp 1211 * response within 2 ms. This causes our hardware break 1212 * the connection and set TC completion with one of 1213 * these SMP_XXX_XX_ERR status. For these type of error, 1214 * we ask ihost user to retry the request. 1215 */ 1216 ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR; 1217 ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED; 1218 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1219 break; 1220 default: 1221 /* All other completion status cause the IO to be complete. If a NAK 1222 * was received, then it is up to the user to retry the request 1223 */ 1224 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1225 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1226 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1227 break; 1228 } 1229 1230 return SCI_SUCCESS; 1231 } 1232 1233 static enum sci_status 1234 smp_request_await_tc_event(struct isci_request *ireq, 1235 u32 completion_code) 1236 { 1237 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1238 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1239 ireq->scu_status = SCU_TASK_DONE_GOOD; 1240 ireq->sci_status = SCI_SUCCESS; 1241 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1242 break; 1243 default: 1244 /* All other completion status cause the IO to be 1245 * complete. If a NAK was received, then it is up to 1246 * the user to retry the request. 1247 */ 1248 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1249 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1250 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1251 break; 1252 } 1253 1254 return SCI_SUCCESS; 1255 } 1256 1257 static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) 1258 { 1259 struct scu_sgl_element *sgl; 1260 struct scu_sgl_element_pair *sgl_pair; 1261 struct isci_request *ireq = to_ireq(stp_req); 1262 struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl; 1263 1264 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); 1265 if (!sgl_pair) 1266 sgl = NULL; 1267 else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) { 1268 if (sgl_pair->B.address_lower == 0 && 1269 sgl_pair->B.address_upper == 0) { 1270 sgl = NULL; 1271 } else { 1272 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B; 1273 sgl = &sgl_pair->B; 1274 } 1275 } else { 1276 if (sgl_pair->next_pair_lower == 0 && 1277 sgl_pair->next_pair_upper == 0) { 1278 sgl = NULL; 1279 } else { 1280 pio_sgl->index++; 1281 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A; 1282 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); 1283 sgl = &sgl_pair->A; 1284 } 1285 } 1286 1287 return sgl; 1288 } 1289 1290 static enum sci_status 1291 stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, 1292 u32 completion_code) 1293 { 1294 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1295 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1296 ireq->scu_status = SCU_TASK_DONE_GOOD; 1297 ireq->sci_status = SCI_SUCCESS; 1298 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); 1299 break; 1300 1301 default: 1302 /* All other completion status cause the IO to be 1303 * complete. If a NAK was received, then it is up to 1304 * the user to retry the request. 1305 */ 1306 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1307 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1308 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1309 break; 1310 } 1311 1312 return SCI_SUCCESS; 1313 } 1314 1315 #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ 1316 1317 /* transmit DATA_FIS from (current sgl + offset) for input 1318 * parameter length. current sgl and offset is alreay stored in the IO request 1319 */ 1320 static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( 1321 struct isci_request *ireq, 1322 u32 length) 1323 { 1324 struct isci_stp_request *stp_req = &ireq->stp.req; 1325 struct scu_task_context *task_context = ireq->tc; 1326 struct scu_sgl_element_pair *sgl_pair; 1327 struct scu_sgl_element *current_sgl; 1328 1329 /* Recycle the TC and reconstruct it for sending out DATA FIS containing 1330 * for the data from current_sgl+offset for the input length 1331 */ 1332 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); 1333 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) 1334 current_sgl = &sgl_pair->A; 1335 else 1336 current_sgl = &sgl_pair->B; 1337 1338 /* update the TC */ 1339 task_context->command_iu_upper = current_sgl->address_upper; 1340 task_context->command_iu_lower = current_sgl->address_lower; 1341 task_context->transfer_length_bytes = length; 1342 task_context->type.stp.fis_type = FIS_DATA; 1343 1344 /* send the new TC out. */ 1345 return sci_controller_continue_io(ireq); 1346 } 1347 1348 static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) 1349 { 1350 struct isci_stp_request *stp_req = &ireq->stp.req; 1351 struct scu_sgl_element_pair *sgl_pair; 1352 enum sci_status status = SCI_SUCCESS; 1353 struct scu_sgl_element *sgl; 1354 u32 offset; 1355 u32 len = 0; 1356 1357 offset = stp_req->sgl.offset; 1358 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); 1359 if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__)) 1360 return SCI_FAILURE; 1361 1362 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) { 1363 sgl = &sgl_pair->A; 1364 len = sgl_pair->A.length - offset; 1365 } else { 1366 sgl = &sgl_pair->B; 1367 len = sgl_pair->B.length - offset; 1368 } 1369 1370 if (stp_req->pio_len == 0) 1371 return SCI_SUCCESS; 1372 1373 if (stp_req->pio_len >= len) { 1374 status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len); 1375 if (status != SCI_SUCCESS) 1376 return status; 1377 stp_req->pio_len -= len; 1378 1379 /* update the current sgl, offset and save for future */ 1380 sgl = pio_sgl_next(stp_req); 1381 offset = 0; 1382 } else if (stp_req->pio_len < len) { 1383 sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); 1384 1385 /* Sgl offset will be adjusted and saved for future */ 1386 offset += stp_req->pio_len; 1387 sgl->address_lower += stp_req->pio_len; 1388 stp_req->pio_len = 0; 1389 } 1390 1391 stp_req->sgl.offset = offset; 1392 1393 return status; 1394 } 1395 1396 /** 1397 * 1398 * @stp_request: The request that is used for the SGL processing. 1399 * @data_buffer: The buffer of data to be copied. 1400 * @length: The length of the data transfer. 1401 * 1402 * Copy the data from the buffer for the length specified to the IO reqeust SGL 1403 * specified data region. enum sci_status 1404 */ 1405 static enum sci_status 1406 sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, 1407 u8 *data_buf, u32 len) 1408 { 1409 struct isci_request *ireq; 1410 u8 *src_addr; 1411 int copy_len; 1412 struct sas_task *task; 1413 struct scatterlist *sg; 1414 void *kaddr; 1415 int total_len = len; 1416 1417 ireq = to_ireq(stp_req); 1418 task = isci_request_access_task(ireq); 1419 src_addr = data_buf; 1420 1421 if (task->num_scatter > 0) { 1422 sg = task->scatter; 1423 1424 while (total_len > 0) { 1425 struct page *page = sg_page(sg); 1426 1427 copy_len = min_t(int, total_len, sg_dma_len(sg)); 1428 kaddr = kmap_atomic(page); 1429 memcpy(kaddr + sg->offset, src_addr, copy_len); 1430 kunmap_atomic(kaddr); 1431 total_len -= copy_len; 1432 src_addr += copy_len; 1433 sg = sg_next(sg); 1434 } 1435 } else { 1436 BUG_ON(task->total_xfer_len < total_len); 1437 memcpy(task->scatter, src_addr, total_len); 1438 } 1439 1440 return SCI_SUCCESS; 1441 } 1442 1443 /** 1444 * 1445 * @sci_req: The PIO DATA IN request that is to receive the data. 1446 * @data_buffer: The buffer to copy from. 1447 * 1448 * Copy the data buffer to the io request data region. enum sci_status 1449 */ 1450 static enum sci_status sci_stp_request_pio_data_in_copy_data( 1451 struct isci_stp_request *stp_req, 1452 u8 *data_buffer) 1453 { 1454 enum sci_status status; 1455 1456 /* 1457 * If there is less than 1K remaining in the transfer request 1458 * copy just the data for the transfer */ 1459 if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { 1460 status = sci_stp_request_pio_data_in_copy_data_buffer( 1461 stp_req, data_buffer, stp_req->pio_len); 1462 1463 if (status == SCI_SUCCESS) 1464 stp_req->pio_len = 0; 1465 } else { 1466 /* We are transfering the whole frame so copy */ 1467 status = sci_stp_request_pio_data_in_copy_data_buffer( 1468 stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); 1469 1470 if (status == SCI_SUCCESS) 1471 stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE; 1472 } 1473 1474 return status; 1475 } 1476 1477 static enum sci_status 1478 stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, 1479 u32 completion_code) 1480 { 1481 enum sci_status status = SCI_SUCCESS; 1482 1483 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1484 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1485 ireq->scu_status = SCU_TASK_DONE_GOOD; 1486 ireq->sci_status = SCI_SUCCESS; 1487 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1488 break; 1489 1490 default: 1491 /* All other completion status cause the IO to be 1492 * complete. If a NAK was received, then it is up to 1493 * the user to retry the request. 1494 */ 1495 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1496 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1497 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1498 break; 1499 } 1500 1501 return status; 1502 } 1503 1504 static enum sci_status 1505 pio_data_out_tx_done_tc_event(struct isci_request *ireq, 1506 u32 completion_code) 1507 { 1508 enum sci_status status = SCI_SUCCESS; 1509 bool all_frames_transferred = false; 1510 struct isci_stp_request *stp_req = &ireq->stp.req; 1511 1512 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1513 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1514 /* Transmit data */ 1515 if (stp_req->pio_len != 0) { 1516 status = sci_stp_request_pio_data_out_transmit_data(ireq); 1517 if (status == SCI_SUCCESS) { 1518 if (stp_req->pio_len == 0) 1519 all_frames_transferred = true; 1520 } 1521 } else if (stp_req->pio_len == 0) { 1522 /* 1523 * this will happen if the all data is written at the 1524 * first time after the pio setup fis is received 1525 */ 1526 all_frames_transferred = true; 1527 } 1528 1529 /* all data transferred. */ 1530 if (all_frames_transferred) { 1531 /* 1532 * Change the state to SCI_REQ_STP_PIO_DATA_IN 1533 * and wait for PIO_SETUP fis / or D2H REg fis. */ 1534 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1535 } 1536 break; 1537 1538 default: 1539 /* 1540 * All other completion status cause the IO to be complete. 1541 * If a NAK was received, then it is up to the user to retry 1542 * the request. 1543 */ 1544 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1545 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1546 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1547 break; 1548 } 1549 1550 return status; 1551 } 1552 1553 static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, 1554 u32 frame_index) 1555 { 1556 struct isci_host *ihost = ireq->owning_controller; 1557 struct dev_to_host_fis *frame_header; 1558 enum sci_status status; 1559 u32 *frame_buffer; 1560 1561 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1562 frame_index, 1563 (void **)&frame_header); 1564 1565 if ((status == SCI_SUCCESS) && 1566 (frame_header->fis_type == FIS_REGD2H)) { 1567 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1568 frame_index, 1569 (void **)&frame_buffer); 1570 1571 sci_controller_copy_sata_response(&ireq->stp.rsp, 1572 frame_header, 1573 frame_buffer); 1574 } 1575 1576 sci_controller_release_frame(ihost, frame_index); 1577 1578 return status; 1579 } 1580 1581 static enum sci_status process_unsolicited_fis(struct isci_request *ireq, 1582 u32 frame_index) 1583 { 1584 struct isci_host *ihost = ireq->owning_controller; 1585 enum sci_status status; 1586 struct dev_to_host_fis *frame_header; 1587 u32 *frame_buffer; 1588 1589 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1590 frame_index, 1591 (void **)&frame_header); 1592 1593 if (status != SCI_SUCCESS) 1594 return status; 1595 1596 if (frame_header->fis_type != FIS_REGD2H) { 1597 dev_err(&ireq->isci_host->pdev->dev, 1598 "%s ERROR: invalid fis type 0x%X\n", 1599 __func__, frame_header->fis_type); 1600 return SCI_FAILURE; 1601 } 1602 1603 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1604 frame_index, 1605 (void **)&frame_buffer); 1606 1607 sci_controller_copy_sata_response(&ireq->stp.rsp, 1608 (u32 *)frame_header, 1609 frame_buffer); 1610 1611 /* Frame has been decoded return it to the controller */ 1612 sci_controller_release_frame(ihost, frame_index); 1613 1614 return status; 1615 } 1616 1617 static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq, 1618 u32 frame_index) 1619 { 1620 struct sas_task *task = isci_request_access_task(ireq); 1621 enum sci_status status; 1622 1623 status = process_unsolicited_fis(ireq, frame_index); 1624 1625 if (status == SCI_SUCCESS) { 1626 if (ireq->stp.rsp.status & ATA_ERR) 1627 status = SCI_IO_FAILURE_RESPONSE_VALID; 1628 } else { 1629 status = SCI_IO_FAILURE_RESPONSE_VALID; 1630 } 1631 1632 if (status != SCI_SUCCESS) { 1633 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1634 ireq->sci_status = status; 1635 } else { 1636 ireq->scu_status = SCU_TASK_DONE_GOOD; 1637 ireq->sci_status = SCI_SUCCESS; 1638 } 1639 1640 /* the d2h ufi is the end of non-data commands */ 1641 if (task->data_dir == DMA_NONE) 1642 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1643 1644 return status; 1645 } 1646 1647 static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq) 1648 { 1649 struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); 1650 void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet; 1651 struct scu_task_context *task_context = ireq->tc; 1652 1653 /* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame 1654 * type. The TC for previous Packet fis was already there, we only need to 1655 * change the H2D fis content. 1656 */ 1657 memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis)); 1658 memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN); 1659 memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context)); 1660 task_context->type.stp.fis_type = FIS_DATA; 1661 task_context->transfer_length_bytes = dev->cdb_len; 1662 } 1663 1664 static void scu_atapi_construct_task_context(struct isci_request *ireq) 1665 { 1666 struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); 1667 struct sas_task *task = isci_request_access_task(ireq); 1668 struct scu_task_context *task_context = ireq->tc; 1669 int cdb_len = dev->cdb_len; 1670 1671 /* reference: SSTL 1.13.4.2 1672 * task_type, sata_direction 1673 */ 1674 if (task->data_dir == DMA_TO_DEVICE) { 1675 task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT; 1676 task_context->sata_direction = 0; 1677 } else { 1678 /* todo: for NO_DATA command, we need to send out raw frame. */ 1679 task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN; 1680 task_context->sata_direction = 1; 1681 } 1682 1683 memset(&task_context->type.stp, 0, sizeof(task_context->type.stp)); 1684 task_context->type.stp.fis_type = FIS_DATA; 1685 1686 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 1687 memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len); 1688 task_context->ssp_command_iu_length = cdb_len / sizeof(u32); 1689 1690 /* task phase is set to TX_CMD */ 1691 task_context->task_phase = 0x1; 1692 1693 /* retry counter */ 1694 task_context->stp_retry_count = 0; 1695 1696 /* data transfer size. */ 1697 task_context->transfer_length_bytes = task->total_xfer_len; 1698 1699 /* setup sgl */ 1700 sci_request_build_sgl(ireq); 1701 } 1702 1703 enum sci_status 1704 sci_io_request_frame_handler(struct isci_request *ireq, 1705 u32 frame_index) 1706 { 1707 struct isci_host *ihost = ireq->owning_controller; 1708 struct isci_stp_request *stp_req = &ireq->stp.req; 1709 enum sci_base_request_states state; 1710 enum sci_status status; 1711 ssize_t word_cnt; 1712 1713 state = ireq->sm.current_state_id; 1714 switch (state) { 1715 case SCI_REQ_STARTED: { 1716 struct ssp_frame_hdr ssp_hdr; 1717 void *frame_header; 1718 1719 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1720 frame_index, 1721 &frame_header); 1722 1723 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); 1724 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); 1725 1726 if (ssp_hdr.frame_type == SSP_RESPONSE) { 1727 struct ssp_response_iu *resp_iu; 1728 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1729 1730 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1731 frame_index, 1732 (void **)&resp_iu); 1733 1734 sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt); 1735 1736 resp_iu = &ireq->ssp.rsp; 1737 1738 if (resp_iu->datapres == 0x01 || 1739 resp_iu->datapres == 0x02) { 1740 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1741 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1742 } else { 1743 ireq->scu_status = SCU_TASK_DONE_GOOD; 1744 ireq->sci_status = SCI_SUCCESS; 1745 } 1746 } else { 1747 /* not a response frame, why did it get forwarded? */ 1748 dev_err(&ihost->pdev->dev, 1749 "%s: SCIC IO Request 0x%p received unexpected " 1750 "frame %d type 0x%02x\n", __func__, ireq, 1751 frame_index, ssp_hdr.frame_type); 1752 } 1753 1754 /* 1755 * In any case we are done with this frame buffer return it to 1756 * the controller 1757 */ 1758 sci_controller_release_frame(ihost, frame_index); 1759 1760 return SCI_SUCCESS; 1761 } 1762 1763 case SCI_REQ_TASK_WAIT_TC_RESP: 1764 sci_io_request_copy_response(ireq); 1765 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1766 sci_controller_release_frame(ihost, frame_index); 1767 return SCI_SUCCESS; 1768 1769 case SCI_REQ_SMP_WAIT_RESP: { 1770 struct sas_task *task = isci_request_access_task(ireq); 1771 struct scatterlist *sg = &task->smp_task.smp_resp; 1772 void *frame_header, *kaddr; 1773 u8 *rsp; 1774 1775 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1776 frame_index, 1777 &frame_header); 1778 kaddr = kmap_atomic(sg_page(sg)); 1779 rsp = kaddr + sg->offset; 1780 sci_swab32_cpy(rsp, frame_header, 1); 1781 1782 if (rsp[0] == SMP_RESPONSE) { 1783 void *smp_resp; 1784 1785 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1786 frame_index, 1787 &smp_resp); 1788 1789 word_cnt = (sg->length/4)-1; 1790 if (word_cnt > 0) 1791 word_cnt = min_t(unsigned int, word_cnt, 1792 SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4); 1793 sci_swab32_cpy(rsp + 4, smp_resp, word_cnt); 1794 1795 ireq->scu_status = SCU_TASK_DONE_GOOD; 1796 ireq->sci_status = SCI_SUCCESS; 1797 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); 1798 } else { 1799 /* 1800 * This was not a response frame why did it get 1801 * forwarded? 1802 */ 1803 dev_err(&ihost->pdev->dev, 1804 "%s: SCIC SMP Request 0x%p received unexpected " 1805 "frame %d type 0x%02x\n", 1806 __func__, 1807 ireq, 1808 frame_index, 1809 rsp[0]); 1810 1811 ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; 1812 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1813 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1814 } 1815 kunmap_atomic(kaddr); 1816 1817 sci_controller_release_frame(ihost, frame_index); 1818 1819 return SCI_SUCCESS; 1820 } 1821 1822 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 1823 return sci_stp_request_udma_general_frame_handler(ireq, 1824 frame_index); 1825 1826 case SCI_REQ_STP_UDMA_WAIT_D2H: 1827 /* Use the general frame handler to copy the resposne data */ 1828 status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); 1829 1830 if (status != SCI_SUCCESS) 1831 return status; 1832 1833 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1834 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1835 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1836 return SCI_SUCCESS; 1837 1838 case SCI_REQ_STP_NON_DATA_WAIT_D2H: { 1839 struct dev_to_host_fis *frame_header; 1840 u32 *frame_buffer; 1841 1842 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1843 frame_index, 1844 (void **)&frame_header); 1845 1846 if (status != SCI_SUCCESS) { 1847 dev_err(&ihost->pdev->dev, 1848 "%s: SCIC IO Request 0x%p could not get frame " 1849 "header for frame index %d, status %x\n", 1850 __func__, 1851 stp_req, 1852 frame_index, 1853 status); 1854 1855 return status; 1856 } 1857 1858 switch (frame_header->fis_type) { 1859 case FIS_REGD2H: 1860 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1861 frame_index, 1862 (void **)&frame_buffer); 1863 1864 sci_controller_copy_sata_response(&ireq->stp.rsp, 1865 frame_header, 1866 frame_buffer); 1867 1868 /* The command has completed with error */ 1869 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1870 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1871 break; 1872 1873 default: 1874 dev_warn(&ihost->pdev->dev, 1875 "%s: IO Request:0x%p Frame Id:%d protocol " 1876 "violation occurred\n", __func__, stp_req, 1877 frame_index); 1878 1879 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; 1880 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; 1881 break; 1882 } 1883 1884 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1885 1886 /* Frame has been decoded return it to the controller */ 1887 sci_controller_release_frame(ihost, frame_index); 1888 1889 return status; 1890 } 1891 1892 case SCI_REQ_STP_PIO_WAIT_FRAME: { 1893 struct sas_task *task = isci_request_access_task(ireq); 1894 struct dev_to_host_fis *frame_header; 1895 u32 *frame_buffer; 1896 1897 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1898 frame_index, 1899 (void **)&frame_header); 1900 1901 if (status != SCI_SUCCESS) { 1902 dev_err(&ihost->pdev->dev, 1903 "%s: SCIC IO Request 0x%p could not get frame " 1904 "header for frame index %d, status %x\n", 1905 __func__, stp_req, frame_index, status); 1906 return status; 1907 } 1908 1909 switch (frame_header->fis_type) { 1910 case FIS_PIO_SETUP: 1911 /* Get from the frame buffer the PIO Setup Data */ 1912 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1913 frame_index, 1914 (void **)&frame_buffer); 1915 1916 /* Get the data from the PIO Setup The SCU Hardware 1917 * returns first word in the frame_header and the rest 1918 * of the data is in the frame buffer so we need to 1919 * back up one dword 1920 */ 1921 1922 /* transfer_count: first 16bits in the 4th dword */ 1923 stp_req->pio_len = frame_buffer[3] & 0xffff; 1924 1925 /* status: 4th byte in the 3rd dword */ 1926 stp_req->status = (frame_buffer[2] >> 24) & 0xff; 1927 1928 sci_controller_copy_sata_response(&ireq->stp.rsp, 1929 frame_header, 1930 frame_buffer); 1931 1932 ireq->stp.rsp.status = stp_req->status; 1933 1934 /* The next state is dependent on whether the 1935 * request was PIO Data-in or Data out 1936 */ 1937 if (task->data_dir == DMA_FROM_DEVICE) { 1938 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); 1939 } else if (task->data_dir == DMA_TO_DEVICE) { 1940 /* Transmit data */ 1941 status = sci_stp_request_pio_data_out_transmit_data(ireq); 1942 if (status != SCI_SUCCESS) 1943 break; 1944 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); 1945 } 1946 break; 1947 1948 case FIS_SETDEVBITS: 1949 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1950 break; 1951 1952 case FIS_REGD2H: 1953 if (frame_header->status & ATA_BUSY) { 1954 /* 1955 * Now why is the drive sending a D2H Register 1956 * FIS when it is still busy? Do nothing since 1957 * we are still in the right state. 1958 */ 1959 dev_dbg(&ihost->pdev->dev, 1960 "%s: SCIC PIO Request 0x%p received " 1961 "D2H Register FIS with BSY status " 1962 "0x%x\n", 1963 __func__, 1964 stp_req, 1965 frame_header->status); 1966 break; 1967 } 1968 1969 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1970 frame_index, 1971 (void **)&frame_buffer); 1972 1973 sci_controller_copy_sata_response(&ireq->stp.req, 1974 frame_header, 1975 frame_buffer); 1976 1977 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1978 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1979 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1980 break; 1981 1982 default: 1983 /* FIXME: what do we do here? */ 1984 break; 1985 } 1986 1987 /* Frame is decoded return it to the controller */ 1988 sci_controller_release_frame(ihost, frame_index); 1989 1990 return status; 1991 } 1992 1993 case SCI_REQ_STP_PIO_DATA_IN: { 1994 struct dev_to_host_fis *frame_header; 1995 struct sata_fis_data *frame_buffer; 1996 1997 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1998 frame_index, 1999 (void **)&frame_header); 2000 2001 if (status != SCI_SUCCESS) { 2002 dev_err(&ihost->pdev->dev, 2003 "%s: SCIC IO Request 0x%p could not get frame " 2004 "header for frame index %d, status %x\n", 2005 __func__, 2006 stp_req, 2007 frame_index, 2008 status); 2009 return status; 2010 } 2011 2012 if (frame_header->fis_type != FIS_DATA) { 2013 dev_err(&ihost->pdev->dev, 2014 "%s: SCIC PIO Request 0x%p received frame %d " 2015 "with fis type 0x%02x when expecting a data " 2016 "fis.\n", 2017 __func__, 2018 stp_req, 2019 frame_index, 2020 frame_header->fis_type); 2021 2022 ireq->scu_status = SCU_TASK_DONE_GOOD; 2023 ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; 2024 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2025 2026 /* Frame is decoded return it to the controller */ 2027 sci_controller_release_frame(ihost, frame_index); 2028 return status; 2029 } 2030 2031 if (stp_req->sgl.index < 0) { 2032 ireq->saved_rx_frame_index = frame_index; 2033 stp_req->pio_len = 0; 2034 } else { 2035 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 2036 frame_index, 2037 (void **)&frame_buffer); 2038 2039 status = sci_stp_request_pio_data_in_copy_data(stp_req, 2040 (u8 *)frame_buffer); 2041 2042 /* Frame is decoded return it to the controller */ 2043 sci_controller_release_frame(ihost, frame_index); 2044 } 2045 2046 /* Check for the end of the transfer, are there more 2047 * bytes remaining for this data transfer 2048 */ 2049 if (status != SCI_SUCCESS || stp_req->pio_len != 0) 2050 return status; 2051 2052 if ((stp_req->status & ATA_BUSY) == 0) { 2053 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 2054 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2055 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2056 } else { 2057 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 2058 } 2059 return status; 2060 } 2061 2062 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: { 2063 struct sas_task *task = isci_request_access_task(ireq); 2064 2065 sci_controller_release_frame(ihost, frame_index); 2066 ireq->target_device->working_request = ireq; 2067 if (task->data_dir == DMA_NONE) { 2068 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP); 2069 scu_atapi_reconstruct_raw_frame_task_context(ireq); 2070 } else { 2071 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); 2072 scu_atapi_construct_task_context(ireq); 2073 } 2074 2075 sci_controller_continue_io(ireq); 2076 return SCI_SUCCESS; 2077 } 2078 case SCI_REQ_ATAPI_WAIT_D2H: 2079 return atapi_d2h_reg_frame_handler(ireq, frame_index); 2080 case SCI_REQ_ABORTING: 2081 /* 2082 * TODO: Is it even possible to get an unsolicited frame in the 2083 * aborting state? 2084 */ 2085 sci_controller_release_frame(ihost, frame_index); 2086 return SCI_SUCCESS; 2087 2088 default: 2089 dev_warn(&ihost->pdev->dev, 2090 "%s: SCIC IO Request given unexpected frame %x while " 2091 "in state %d\n", 2092 __func__, 2093 frame_index, 2094 state); 2095 2096 sci_controller_release_frame(ihost, frame_index); 2097 return SCI_FAILURE_INVALID_STATE; 2098 } 2099 } 2100 2101 static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq, 2102 u32 completion_code) 2103 { 2104 enum sci_status status = SCI_SUCCESS; 2105 2106 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2107 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 2108 ireq->scu_status = SCU_TASK_DONE_GOOD; 2109 ireq->sci_status = SCI_SUCCESS; 2110 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2111 break; 2112 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): 2113 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 2114 /* We must check ther response buffer to see if the D2H 2115 * Register FIS was received before we got the TC 2116 * completion. 2117 */ 2118 if (ireq->stp.rsp.fis_type == FIS_REGD2H) { 2119 sci_remote_device_suspend(ireq->target_device, 2120 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 2121 2122 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 2123 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2124 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2125 } else { 2126 /* If we have an error completion status for the 2127 * TC then we can expect a D2H register FIS from 2128 * the device so we must change state to wait 2129 * for it 2130 */ 2131 sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H); 2132 } 2133 break; 2134 2135 /* TODO Check to see if any of these completion status need to 2136 * wait for the device to host register fis. 2137 */ 2138 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR 2139 * - this comes only for B0 2140 */ 2141 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN): 2142 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): 2143 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): 2144 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): 2145 sci_remote_device_suspend(ireq->target_device, 2146 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 2147 /* Fall through to the default case */ 2148 default: 2149 /* All other completion status cause the IO to be complete. */ 2150 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 2151 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 2152 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2153 break; 2154 } 2155 2156 return status; 2157 } 2158 2159 static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code, 2160 enum sci_base_request_states next) 2161 { 2162 enum sci_status status = SCI_SUCCESS; 2163 2164 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2165 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 2166 ireq->scu_status = SCU_TASK_DONE_GOOD; 2167 ireq->sci_status = SCI_SUCCESS; 2168 sci_change_state(&ireq->sm, next); 2169 break; 2170 default: 2171 /* All other completion status cause the IO to be complete. 2172 * If a NAK was received, then it is up to the user to retry 2173 * the request. 2174 */ 2175 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 2176 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 2177 2178 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2179 break; 2180 } 2181 2182 return status; 2183 } 2184 2185 static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq, 2186 u32 completion_code) 2187 { 2188 struct isci_remote_device *idev = ireq->target_device; 2189 struct dev_to_host_fis *d2h = &ireq->stp.rsp; 2190 enum sci_status status = SCI_SUCCESS; 2191 2192 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2193 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): 2194 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2195 break; 2196 2197 case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): { 2198 u16 len = sci_req_tx_bytes(ireq); 2199 2200 /* likely non-error data underrrun, workaround missing 2201 * d2h frame from the controller 2202 */ 2203 if (d2h->fis_type != FIS_REGD2H) { 2204 d2h->fis_type = FIS_REGD2H; 2205 d2h->flags = (1 << 6); 2206 d2h->status = 0x50; 2207 d2h->error = 0; 2208 d2h->lbal = 0; 2209 d2h->byte_count_low = len & 0xff; 2210 d2h->byte_count_high = len >> 8; 2211 d2h->device = 0xa0; 2212 d2h->lbal_exp = 0; 2213 d2h->lbam_exp = 0; 2214 d2h->lbah_exp = 0; 2215 d2h->_r_a = 0; 2216 d2h->sector_count = 0x3; 2217 d2h->sector_count_exp = 0; 2218 d2h->_r_b = 0; 2219 d2h->_r_c = 0; 2220 d2h->_r_d = 0; 2221 } 2222 2223 ireq->scu_status = SCU_TASK_DONE_GOOD; 2224 ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; 2225 status = ireq->sci_status; 2226 2227 /* the hw will have suspended the rnc, so complete the 2228 * request upon pending resume 2229 */ 2230 sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); 2231 break; 2232 } 2233 case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT): 2234 /* In this case, there is no UF coming after. 2235 * compelte the IO now. 2236 */ 2237 ireq->scu_status = SCU_TASK_DONE_GOOD; 2238 ireq->sci_status = SCI_SUCCESS; 2239 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2240 break; 2241 2242 default: 2243 if (d2h->fis_type == FIS_REGD2H) { 2244 /* UF received change the device state to ATAPI_ERROR */ 2245 status = ireq->sci_status; 2246 sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); 2247 } else { 2248 /* If receiving any non-sucess TC status, no UF 2249 * received yet, then an UF for the status fis 2250 * is coming after (XXX: suspect this is 2251 * actually a protocol error or a bug like the 2252 * DONE_UNEXP_FIS case) 2253 */ 2254 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 2255 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2256 2257 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); 2258 } 2259 break; 2260 } 2261 2262 return status; 2263 } 2264 2265 enum sci_status 2266 sci_io_request_tc_completion(struct isci_request *ireq, 2267 u32 completion_code) 2268 { 2269 enum sci_base_request_states state; 2270 struct isci_host *ihost = ireq->owning_controller; 2271 2272 state = ireq->sm.current_state_id; 2273 2274 switch (state) { 2275 case SCI_REQ_STARTED: 2276 return request_started_state_tc_event(ireq, completion_code); 2277 2278 case SCI_REQ_TASK_WAIT_TC_COMP: 2279 return ssp_task_request_await_tc_event(ireq, 2280 completion_code); 2281 2282 case SCI_REQ_SMP_WAIT_RESP: 2283 return smp_request_await_response_tc_event(ireq, 2284 completion_code); 2285 2286 case SCI_REQ_SMP_WAIT_TC_COMP: 2287 return smp_request_await_tc_event(ireq, completion_code); 2288 2289 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 2290 return stp_request_udma_await_tc_event(ireq, 2291 completion_code); 2292 2293 case SCI_REQ_STP_NON_DATA_WAIT_H2D: 2294 return stp_request_non_data_await_h2d_tc_event(ireq, 2295 completion_code); 2296 2297 case SCI_REQ_STP_PIO_WAIT_H2D: 2298 return stp_request_pio_await_h2d_completion_tc_event(ireq, 2299 completion_code); 2300 2301 case SCI_REQ_STP_PIO_DATA_OUT: 2302 return pio_data_out_tx_done_tc_event(ireq, completion_code); 2303 2304 case SCI_REQ_ABORTING: 2305 return request_aborting_state_tc_event(ireq, 2306 completion_code); 2307 2308 case SCI_REQ_ATAPI_WAIT_H2D: 2309 return atapi_raw_completion(ireq, completion_code, 2310 SCI_REQ_ATAPI_WAIT_PIO_SETUP); 2311 2312 case SCI_REQ_ATAPI_WAIT_TC_COMP: 2313 return atapi_raw_completion(ireq, completion_code, 2314 SCI_REQ_ATAPI_WAIT_D2H); 2315 2316 case SCI_REQ_ATAPI_WAIT_D2H: 2317 return atapi_data_tc_completion_handler(ireq, completion_code); 2318 2319 default: 2320 dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n", 2321 __func__, completion_code, req_state_name(state)); 2322 return SCI_FAILURE_INVALID_STATE; 2323 } 2324 } 2325 2326 /** 2327 * isci_request_process_response_iu() - This function sets the status and 2328 * response iu, in the task struct, from the request object for the upper 2329 * layer driver. 2330 * @sas_task: This parameter is the task struct from the upper layer driver. 2331 * @resp_iu: This parameter points to the response iu of the completed request. 2332 * @dev: This parameter specifies the linux device struct. 2333 * 2334 * none. 2335 */ 2336 static void isci_request_process_response_iu( 2337 struct sas_task *task, 2338 struct ssp_response_iu *resp_iu, 2339 struct device *dev) 2340 { 2341 dev_dbg(dev, 2342 "%s: resp_iu = %p " 2343 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " 2344 "resp_iu->response_data_len = %x, " 2345 "resp_iu->sense_data_len = %x\nrepsonse data: ", 2346 __func__, 2347 resp_iu, 2348 resp_iu->status, 2349 resp_iu->datapres, 2350 resp_iu->response_data_len, 2351 resp_iu->sense_data_len); 2352 2353 task->task_status.stat = resp_iu->status; 2354 2355 /* libsas updates the task status fields based on the response iu. */ 2356 sas_ssp_task_response(dev, task, resp_iu); 2357 } 2358 2359 /** 2360 * isci_request_set_open_reject_status() - This function prepares the I/O 2361 * completion for OPEN_REJECT conditions. 2362 * @request: This parameter is the completed isci_request object. 2363 * @response_ptr: This parameter specifies the service response for the I/O. 2364 * @status_ptr: This parameter specifies the exec status for the I/O. 2365 * @complete_to_host_ptr: This parameter specifies the action to be taken by 2366 * the LLDD with respect to completing this request or forcing an abort 2367 * condition on the I/O. 2368 * @open_rej_reason: This parameter specifies the encoded reason for the 2369 * abandon-class reject. 2370 * 2371 * none. 2372 */ 2373 static void isci_request_set_open_reject_status( 2374 struct isci_request *request, 2375 struct sas_task *task, 2376 enum service_response *response_ptr, 2377 enum exec_status *status_ptr, 2378 enum isci_completion_selection *complete_to_host_ptr, 2379 enum sas_open_rej_reason open_rej_reason) 2380 { 2381 /* Task in the target is done. */ 2382 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2383 *response_ptr = SAS_TASK_UNDELIVERED; 2384 *status_ptr = SAS_OPEN_REJECT; 2385 *complete_to_host_ptr = isci_perform_normal_io_completion; 2386 task->task_status.open_rej_reason = open_rej_reason; 2387 } 2388 2389 /** 2390 * isci_request_handle_controller_specific_errors() - This function decodes 2391 * controller-specific I/O completion error conditions. 2392 * @request: This parameter is the completed isci_request object. 2393 * @response_ptr: This parameter specifies the service response for the I/O. 2394 * @status_ptr: This parameter specifies the exec status for the I/O. 2395 * @complete_to_host_ptr: This parameter specifies the action to be taken by 2396 * the LLDD with respect to completing this request or forcing an abort 2397 * condition on the I/O. 2398 * 2399 * none. 2400 */ 2401 static void isci_request_handle_controller_specific_errors( 2402 struct isci_remote_device *idev, 2403 struct isci_request *request, 2404 struct sas_task *task, 2405 enum service_response *response_ptr, 2406 enum exec_status *status_ptr, 2407 enum isci_completion_selection *complete_to_host_ptr) 2408 { 2409 unsigned int cstatus; 2410 2411 cstatus = request->scu_status; 2412 2413 dev_dbg(&request->isci_host->pdev->dev, 2414 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " 2415 "- controller status = 0x%x\n", 2416 __func__, request, cstatus); 2417 2418 /* Decode the controller-specific errors; most 2419 * important is to recognize those conditions in which 2420 * the target may still have a task outstanding that 2421 * must be aborted. 2422 * 2423 * Note that there are SCU completion codes being 2424 * named in the decode below for which SCIC has already 2425 * done work to handle them in a way other than as 2426 * a controller-specific completion code; these are left 2427 * in the decode below for completeness sake. 2428 */ 2429 switch (cstatus) { 2430 case SCU_TASK_DONE_DMASETUP_DIRERR: 2431 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ 2432 case SCU_TASK_DONE_XFERCNT_ERR: 2433 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ 2434 if (task->task_proto == SAS_PROTOCOL_SMP) { 2435 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ 2436 *response_ptr = SAS_TASK_COMPLETE; 2437 2438 /* See if the device has been/is being stopped. Note 2439 * that we ignore the quiesce state, since we are 2440 * concerned about the actual device state. 2441 */ 2442 if (!idev) 2443 *status_ptr = SAS_DEVICE_UNKNOWN; 2444 else 2445 *status_ptr = SAS_ABORTED_TASK; 2446 2447 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2448 2449 *complete_to_host_ptr = 2450 isci_perform_normal_io_completion; 2451 } else { 2452 /* Task in the target is not done. */ 2453 *response_ptr = SAS_TASK_UNDELIVERED; 2454 2455 if (!idev) 2456 *status_ptr = SAS_DEVICE_UNKNOWN; 2457 else 2458 *status_ptr = SAM_STAT_TASK_ABORTED; 2459 2460 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2461 2462 *complete_to_host_ptr = 2463 isci_perform_error_io_completion; 2464 } 2465 2466 break; 2467 2468 case SCU_TASK_DONE_CRC_ERR: 2469 case SCU_TASK_DONE_NAK_CMD_ERR: 2470 case SCU_TASK_DONE_EXCESS_DATA: 2471 case SCU_TASK_DONE_UNEXP_FIS: 2472 /* Also SCU_TASK_DONE_UNEXP_RESP: */ 2473 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */ 2474 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ 2475 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */ 2476 /* These are conditions in which the target 2477 * has completed the task, so that no cleanup 2478 * is necessary. 2479 */ 2480 *response_ptr = SAS_TASK_COMPLETE; 2481 2482 /* See if the device has been/is being stopped. Note 2483 * that we ignore the quiesce state, since we are 2484 * concerned about the actual device state. 2485 */ 2486 if (!idev) 2487 *status_ptr = SAS_DEVICE_UNKNOWN; 2488 else 2489 *status_ptr = SAS_ABORTED_TASK; 2490 2491 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2492 2493 *complete_to_host_ptr = isci_perform_normal_io_completion; 2494 break; 2495 2496 2497 /* Note that the only open reject completion codes seen here will be 2498 * abandon-class codes; all others are automatically retried in the SCU. 2499 */ 2500 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: 2501 2502 isci_request_set_open_reject_status( 2503 request, task, response_ptr, status_ptr, 2504 complete_to_host_ptr, SAS_OREJ_WRONG_DEST); 2505 break; 2506 2507 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 2508 2509 /* Note - the return of AB0 will change when 2510 * libsas implements detection of zone violations. 2511 */ 2512 isci_request_set_open_reject_status( 2513 request, task, response_ptr, status_ptr, 2514 complete_to_host_ptr, SAS_OREJ_RESV_AB0); 2515 break; 2516 2517 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 2518 2519 isci_request_set_open_reject_status( 2520 request, task, response_ptr, status_ptr, 2521 complete_to_host_ptr, SAS_OREJ_RESV_AB1); 2522 break; 2523 2524 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 2525 2526 isci_request_set_open_reject_status( 2527 request, task, response_ptr, status_ptr, 2528 complete_to_host_ptr, SAS_OREJ_RESV_AB2); 2529 break; 2530 2531 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 2532 2533 isci_request_set_open_reject_status( 2534 request, task, response_ptr, status_ptr, 2535 complete_to_host_ptr, SAS_OREJ_RESV_AB3); 2536 break; 2537 2538 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 2539 2540 isci_request_set_open_reject_status( 2541 request, task, response_ptr, status_ptr, 2542 complete_to_host_ptr, SAS_OREJ_BAD_DEST); 2543 break; 2544 2545 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 2546 2547 isci_request_set_open_reject_status( 2548 request, task, response_ptr, status_ptr, 2549 complete_to_host_ptr, SAS_OREJ_STP_NORES); 2550 break; 2551 2552 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 2553 2554 isci_request_set_open_reject_status( 2555 request, task, response_ptr, status_ptr, 2556 complete_to_host_ptr, SAS_OREJ_EPROTO); 2557 break; 2558 2559 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 2560 2561 isci_request_set_open_reject_status( 2562 request, task, response_ptr, status_ptr, 2563 complete_to_host_ptr, SAS_OREJ_CONN_RATE); 2564 break; 2565 2566 case SCU_TASK_DONE_LL_R_ERR: 2567 /* Also SCU_TASK_DONE_ACK_NAK_TO: */ 2568 case SCU_TASK_DONE_LL_PERR: 2569 case SCU_TASK_DONE_LL_SY_TERM: 2570 /* Also SCU_TASK_DONE_NAK_ERR:*/ 2571 case SCU_TASK_DONE_LL_LF_TERM: 2572 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ 2573 case SCU_TASK_DONE_LL_ABORT_ERR: 2574 case SCU_TASK_DONE_SEQ_INV_TYPE: 2575 /* Also SCU_TASK_DONE_UNEXP_XR: */ 2576 case SCU_TASK_DONE_XR_IU_LEN_ERR: 2577 case SCU_TASK_DONE_INV_FIS_LEN: 2578 /* Also SCU_TASK_DONE_XR_WD_LEN: */ 2579 case SCU_TASK_DONE_SDMA_ERR: 2580 case SCU_TASK_DONE_OFFSET_ERR: 2581 case SCU_TASK_DONE_MAX_PLD_ERR: 2582 case SCU_TASK_DONE_LF_ERR: 2583 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */ 2584 case SCU_TASK_DONE_SMP_LL_RX_ERR: 2585 case SCU_TASK_DONE_UNEXP_DATA: 2586 case SCU_TASK_DONE_UNEXP_SDBFIS: 2587 case SCU_TASK_DONE_REG_ERR: 2588 case SCU_TASK_DONE_SDB_ERR: 2589 case SCU_TASK_DONE_TASK_ABORT: 2590 default: 2591 /* Task in the target is not done. */ 2592 *response_ptr = SAS_TASK_UNDELIVERED; 2593 *status_ptr = SAM_STAT_TASK_ABORTED; 2594 2595 if (task->task_proto == SAS_PROTOCOL_SMP) { 2596 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2597 2598 *complete_to_host_ptr = isci_perform_normal_io_completion; 2599 } else { 2600 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2601 2602 *complete_to_host_ptr = isci_perform_error_io_completion; 2603 } 2604 break; 2605 } 2606 } 2607 2608 /** 2609 * isci_task_save_for_upper_layer_completion() - This function saves the 2610 * request for later completion to the upper layer driver. 2611 * @host: This parameter is a pointer to the host on which the the request 2612 * should be queued (either as an error or success). 2613 * @request: This parameter is the completed request. 2614 * @response: This parameter is the response code for the completed task. 2615 * @status: This parameter is the status code for the completed task. 2616 * 2617 * none. 2618 */ 2619 static void isci_task_save_for_upper_layer_completion( 2620 struct isci_host *host, 2621 struct isci_request *request, 2622 enum service_response response, 2623 enum exec_status status, 2624 enum isci_completion_selection task_notification_selection) 2625 { 2626 struct sas_task *task = isci_request_access_task(request); 2627 2628 task_notification_selection 2629 = isci_task_set_completion_status(task, response, status, 2630 task_notification_selection); 2631 2632 /* Tasks aborted specifically by a call to the lldd_abort_task 2633 * function should not be completed to the host in the regular path. 2634 */ 2635 switch (task_notification_selection) { 2636 2637 case isci_perform_normal_io_completion: 2638 /* Normal notification (task_done) */ 2639 2640 /* Add to the completed list. */ 2641 list_add(&request->completed_node, 2642 &host->requests_to_complete); 2643 2644 /* Take the request off the device's pending request list. */ 2645 list_del_init(&request->dev_node); 2646 break; 2647 2648 case isci_perform_aborted_io_completion: 2649 /* No notification to libsas because this request is 2650 * already in the abort path. 2651 */ 2652 /* Wake up whatever process was waiting for this 2653 * request to complete. 2654 */ 2655 WARN_ON(request->io_request_completion == NULL); 2656 2657 if (request->io_request_completion != NULL) { 2658 2659 /* Signal whoever is waiting that this 2660 * request is complete. 2661 */ 2662 complete(request->io_request_completion); 2663 } 2664 break; 2665 2666 case isci_perform_error_io_completion: 2667 /* Use sas_task_abort */ 2668 /* Add to the aborted list. */ 2669 list_add(&request->completed_node, 2670 &host->requests_to_errorback); 2671 break; 2672 2673 default: 2674 /* Add to the error to libsas list. */ 2675 list_add(&request->completed_node, 2676 &host->requests_to_errorback); 2677 break; 2678 } 2679 dev_dbg(&host->pdev->dev, 2680 "%s: %d - task = %p, response=%d (%d), status=%d (%d)\n", 2681 __func__, task_notification_selection, task, 2682 (task) ? task->task_status.resp : 0, response, 2683 (task) ? task->task_status.stat : 0, status); 2684 } 2685 2686 static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) 2687 { 2688 struct task_status_struct *ts = &task->task_status; 2689 struct ata_task_resp *resp = (void *)&ts->buf[0]; 2690 2691 resp->frame_len = sizeof(*fis); 2692 memcpy(resp->ending_fis, fis, sizeof(*fis)); 2693 ts->buf_valid_size = sizeof(*resp); 2694 2695 /* If the device fault bit is set in the status register, then 2696 * set the sense data and return. 2697 */ 2698 if (fis->status & ATA_DF) 2699 ts->stat = SAS_PROTO_RESPONSE; 2700 else if (fis->status & ATA_ERR) 2701 ts->stat = SAM_STAT_CHECK_CONDITION; 2702 else 2703 ts->stat = SAM_STAT_GOOD; 2704 2705 ts->resp = SAS_TASK_COMPLETE; 2706 } 2707 2708 static void isci_request_io_request_complete(struct isci_host *ihost, 2709 struct isci_request *request, 2710 enum sci_io_status completion_status) 2711 { 2712 struct sas_task *task = isci_request_access_task(request); 2713 struct ssp_response_iu *resp_iu; 2714 unsigned long task_flags; 2715 struct isci_remote_device *idev = request->target_device; 2716 enum service_response response = SAS_TASK_UNDELIVERED; 2717 enum exec_status status = SAS_ABORTED_TASK; 2718 enum isci_request_status request_status; 2719 enum isci_completion_selection complete_to_host 2720 = isci_perform_normal_io_completion; 2721 2722 dev_dbg(&ihost->pdev->dev, 2723 "%s: request = %p, task = %p,\n" 2724 "task->data_dir = %d completion_status = 0x%x\n", 2725 __func__, 2726 request, 2727 task, 2728 task->data_dir, 2729 completion_status); 2730 2731 spin_lock(&request->state_lock); 2732 request_status = request->status; 2733 2734 /* Decode the request status. Note that if the request has been 2735 * aborted by a task management function, we don't care 2736 * what the status is. 2737 */ 2738 switch (request_status) { 2739 2740 case aborted: 2741 /* "aborted" indicates that the request was aborted by a task 2742 * management function, since once a task management request is 2743 * perfomed by the device, the request only completes because 2744 * of the subsequent driver terminate. 2745 * 2746 * Aborted also means an external thread is explicitly managing 2747 * this request, so that we do not complete it up the stack. 2748 * 2749 * The target is still there (since the TMF was successful). 2750 */ 2751 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2752 response = SAS_TASK_COMPLETE; 2753 2754 /* See if the device has been/is being stopped. Note 2755 * that we ignore the quiesce state, since we are 2756 * concerned about the actual device state. 2757 */ 2758 if (!idev) 2759 status = SAS_DEVICE_UNKNOWN; 2760 else 2761 status = SAS_ABORTED_TASK; 2762 2763 complete_to_host = isci_perform_aborted_io_completion; 2764 /* This was an aborted request. */ 2765 2766 spin_unlock(&request->state_lock); 2767 break; 2768 2769 case aborting: 2770 /* aborting means that the task management function tried and 2771 * failed to abort the request. We need to note the request 2772 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the 2773 * target as down. 2774 * 2775 * Aborting also means an external thread is explicitly managing 2776 * this request, so that we do not complete it up the stack. 2777 */ 2778 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2779 response = SAS_TASK_UNDELIVERED; 2780 2781 if (!idev) 2782 /* The device has been /is being stopped. Note that 2783 * we ignore the quiesce state, since we are 2784 * concerned about the actual device state. 2785 */ 2786 status = SAS_DEVICE_UNKNOWN; 2787 else 2788 status = SAS_PHY_DOWN; 2789 2790 complete_to_host = isci_perform_aborted_io_completion; 2791 2792 /* This was an aborted request. */ 2793 2794 spin_unlock(&request->state_lock); 2795 break; 2796 2797 case terminating: 2798 2799 /* This was an terminated request. This happens when 2800 * the I/O is being terminated because of an action on 2801 * the device (reset, tear down, etc.), and the I/O needs 2802 * to be completed up the stack. 2803 */ 2804 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2805 response = SAS_TASK_UNDELIVERED; 2806 2807 /* See if the device has been/is being stopped. Note 2808 * that we ignore the quiesce state, since we are 2809 * concerned about the actual device state. 2810 */ 2811 if (!idev) 2812 status = SAS_DEVICE_UNKNOWN; 2813 else 2814 status = SAS_ABORTED_TASK; 2815 2816 complete_to_host = isci_perform_aborted_io_completion; 2817 2818 /* This was a terminated request. */ 2819 2820 spin_unlock(&request->state_lock); 2821 break; 2822 2823 case dead: 2824 /* This was a terminated request that timed-out during the 2825 * termination process. There is no task to complete to 2826 * libsas. 2827 */ 2828 complete_to_host = isci_perform_normal_io_completion; 2829 spin_unlock(&request->state_lock); 2830 break; 2831 2832 default: 2833 2834 /* The request is done from an SCU HW perspective. */ 2835 request->status = completed; 2836 2837 spin_unlock(&request->state_lock); 2838 2839 /* This is an active request being completed from the core. */ 2840 switch (completion_status) { 2841 2842 case SCI_IO_FAILURE_RESPONSE_VALID: 2843 dev_dbg(&ihost->pdev->dev, 2844 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", 2845 __func__, 2846 request, 2847 task); 2848 2849 if (sas_protocol_ata(task->task_proto)) { 2850 isci_process_stp_response(task, &request->stp.rsp); 2851 } else if (SAS_PROTOCOL_SSP == task->task_proto) { 2852 2853 /* crack the iu response buffer. */ 2854 resp_iu = &request->ssp.rsp; 2855 isci_request_process_response_iu(task, resp_iu, 2856 &ihost->pdev->dev); 2857 2858 } else if (SAS_PROTOCOL_SMP == task->task_proto) { 2859 2860 dev_err(&ihost->pdev->dev, 2861 "%s: SCI_IO_FAILURE_RESPONSE_VALID: " 2862 "SAS_PROTOCOL_SMP protocol\n", 2863 __func__); 2864 2865 } else 2866 dev_err(&ihost->pdev->dev, 2867 "%s: unknown protocol\n", __func__); 2868 2869 /* use the task status set in the task struct by the 2870 * isci_request_process_response_iu call. 2871 */ 2872 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2873 response = task->task_status.resp; 2874 status = task->task_status.stat; 2875 break; 2876 2877 case SCI_IO_SUCCESS: 2878 case SCI_IO_SUCCESS_IO_DONE_EARLY: 2879 2880 response = SAS_TASK_COMPLETE; 2881 status = SAM_STAT_GOOD; 2882 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2883 2884 if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { 2885 2886 /* This was an SSP / STP / SATA transfer. 2887 * There is a possibility that less data than 2888 * the maximum was transferred. 2889 */ 2890 u32 transferred_length = sci_req_tx_bytes(request); 2891 2892 task->task_status.residual 2893 = task->total_xfer_len - transferred_length; 2894 2895 /* If there were residual bytes, call this an 2896 * underrun. 2897 */ 2898 if (task->task_status.residual != 0) 2899 status = SAS_DATA_UNDERRUN; 2900 2901 dev_dbg(&ihost->pdev->dev, 2902 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", 2903 __func__, 2904 status); 2905 2906 } else 2907 dev_dbg(&ihost->pdev->dev, 2908 "%s: SCI_IO_SUCCESS\n", 2909 __func__); 2910 2911 break; 2912 2913 case SCI_IO_FAILURE_TERMINATED: 2914 dev_dbg(&ihost->pdev->dev, 2915 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", 2916 __func__, 2917 request, 2918 task); 2919 2920 /* The request was terminated explicitly. No handling 2921 * is needed in the SCSI error handler path. 2922 */ 2923 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2924 response = SAS_TASK_UNDELIVERED; 2925 2926 /* See if the device has been/is being stopped. Note 2927 * that we ignore the quiesce state, since we are 2928 * concerned about the actual device state. 2929 */ 2930 if (!idev) 2931 status = SAS_DEVICE_UNKNOWN; 2932 else 2933 status = SAS_ABORTED_TASK; 2934 2935 complete_to_host = isci_perform_normal_io_completion; 2936 break; 2937 2938 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: 2939 2940 isci_request_handle_controller_specific_errors( 2941 idev, request, task, &response, &status, 2942 &complete_to_host); 2943 2944 break; 2945 2946 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: 2947 /* This is a special case, in that the I/O completion 2948 * is telling us that the device needs a reset. 2949 * In order for the device reset condition to be 2950 * noticed, the I/O has to be handled in the error 2951 * handler. Set the reset flag and cause the 2952 * SCSI error thread to be scheduled. 2953 */ 2954 spin_lock_irqsave(&task->task_state_lock, task_flags); 2955 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 2956 spin_unlock_irqrestore(&task->task_state_lock, task_flags); 2957 2958 /* Fail the I/O. */ 2959 response = SAS_TASK_UNDELIVERED; 2960 status = SAM_STAT_TASK_ABORTED; 2961 2962 complete_to_host = isci_perform_error_io_completion; 2963 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2964 break; 2965 2966 case SCI_FAILURE_RETRY_REQUIRED: 2967 2968 /* Fail the I/O so it can be retried. */ 2969 response = SAS_TASK_UNDELIVERED; 2970 if (!idev) 2971 status = SAS_DEVICE_UNKNOWN; 2972 else 2973 status = SAS_ABORTED_TASK; 2974 2975 complete_to_host = isci_perform_normal_io_completion; 2976 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2977 break; 2978 2979 2980 default: 2981 /* Catch any otherwise unhandled error codes here. */ 2982 dev_dbg(&ihost->pdev->dev, 2983 "%s: invalid completion code: 0x%x - " 2984 "isci_request = %p\n", 2985 __func__, completion_status, request); 2986 2987 response = SAS_TASK_UNDELIVERED; 2988 2989 /* See if the device has been/is being stopped. Note 2990 * that we ignore the quiesce state, since we are 2991 * concerned about the actual device state. 2992 */ 2993 if (!idev) 2994 status = SAS_DEVICE_UNKNOWN; 2995 else 2996 status = SAS_ABORTED_TASK; 2997 2998 if (SAS_PROTOCOL_SMP == task->task_proto) { 2999 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 3000 complete_to_host = isci_perform_normal_io_completion; 3001 } else { 3002 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 3003 complete_to_host = isci_perform_error_io_completion; 3004 } 3005 break; 3006 } 3007 break; 3008 } 3009 3010 switch (task->task_proto) { 3011 case SAS_PROTOCOL_SSP: 3012 if (task->data_dir == DMA_NONE) 3013 break; 3014 if (task->num_scatter == 0) 3015 /* 0 indicates a single dma address */ 3016 dma_unmap_single(&ihost->pdev->dev, 3017 request->zero_scatter_daddr, 3018 task->total_xfer_len, task->data_dir); 3019 else /* unmap the sgl dma addresses */ 3020 dma_unmap_sg(&ihost->pdev->dev, task->scatter, 3021 request->num_sg_entries, task->data_dir); 3022 break; 3023 case SAS_PROTOCOL_SMP: { 3024 struct scatterlist *sg = &task->smp_task.smp_req; 3025 struct smp_req *smp_req; 3026 void *kaddr; 3027 3028 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); 3029 3030 /* need to swab it back in case the command buffer is re-used */ 3031 kaddr = kmap_atomic(sg_page(sg)); 3032 smp_req = kaddr + sg->offset; 3033 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 3034 kunmap_atomic(kaddr); 3035 break; 3036 } 3037 default: 3038 break; 3039 } 3040 3041 /* Put the completed request on the correct list */ 3042 isci_task_save_for_upper_layer_completion(ihost, request, response, 3043 status, complete_to_host 3044 ); 3045 3046 /* complete the io request to the core. */ 3047 sci_controller_complete_io(ihost, request->target_device, request); 3048 3049 /* set terminated handle so it cannot be completed or 3050 * terminated again, and to cause any calls into abort 3051 * task to recognize the already completed case. 3052 */ 3053 set_bit(IREQ_TERMINATED, &request->flags); 3054 } 3055 3056 static void sci_request_started_state_enter(struct sci_base_state_machine *sm) 3057 { 3058 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3059 struct domain_device *dev = ireq->target_device->domain_dev; 3060 enum sci_base_request_states state; 3061 struct sas_task *task; 3062 3063 /* XXX as hch said always creating an internal sas_task for tmf 3064 * requests would simplify the driver 3065 */ 3066 task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq); 3067 3068 /* all unaccelerated request types (non ssp or ncq) handled with 3069 * substates 3070 */ 3071 if (!task && dev->dev_type == SAS_END_DEV) { 3072 state = SCI_REQ_TASK_WAIT_TC_COMP; 3073 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 3074 state = SCI_REQ_SMP_WAIT_RESP; 3075 } else if (task && sas_protocol_ata(task->task_proto) && 3076 !task->ata_task.use_ncq) { 3077 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && 3078 task->ata_task.fis.command == ATA_CMD_PACKET) { 3079 state = SCI_REQ_ATAPI_WAIT_H2D; 3080 } else if (task->data_dir == DMA_NONE) { 3081 state = SCI_REQ_STP_NON_DATA_WAIT_H2D; 3082 } else if (task->ata_task.dma_xfer) { 3083 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; 3084 } else /* PIO */ { 3085 state = SCI_REQ_STP_PIO_WAIT_H2D; 3086 } 3087 } else { 3088 /* SSP or NCQ are fully accelerated, no substates */ 3089 return; 3090 } 3091 sci_change_state(sm, state); 3092 } 3093 3094 static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) 3095 { 3096 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3097 struct isci_host *ihost = ireq->owning_controller; 3098 3099 /* Tell the SCI_USER that the IO request is complete */ 3100 if (!test_bit(IREQ_TMF, &ireq->flags)) 3101 isci_request_io_request_complete(ihost, ireq, 3102 ireq->sci_status); 3103 else 3104 isci_task_request_complete(ihost, ireq, ireq->sci_status); 3105 } 3106 3107 static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm) 3108 { 3109 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3110 3111 /* Setting the abort bit in the Task Context is required by the silicon. */ 3112 ireq->tc->abort = 1; 3113 } 3114 3115 static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) 3116 { 3117 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3118 3119 ireq->target_device->working_request = ireq; 3120 } 3121 3122 static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) 3123 { 3124 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3125 3126 ireq->target_device->working_request = ireq; 3127 } 3128 3129 static const struct sci_base_state sci_request_state_table[] = { 3130 [SCI_REQ_INIT] = { }, 3131 [SCI_REQ_CONSTRUCTED] = { }, 3132 [SCI_REQ_STARTED] = { 3133 .enter_state = sci_request_started_state_enter, 3134 }, 3135 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { 3136 .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter, 3137 }, 3138 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, 3139 [SCI_REQ_STP_PIO_WAIT_H2D] = { 3140 .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter, 3141 }, 3142 [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, 3143 [SCI_REQ_STP_PIO_DATA_IN] = { }, 3144 [SCI_REQ_STP_PIO_DATA_OUT] = { }, 3145 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, 3146 [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, 3147 [SCI_REQ_TASK_WAIT_TC_COMP] = { }, 3148 [SCI_REQ_TASK_WAIT_TC_RESP] = { }, 3149 [SCI_REQ_SMP_WAIT_RESP] = { }, 3150 [SCI_REQ_SMP_WAIT_TC_COMP] = { }, 3151 [SCI_REQ_ATAPI_WAIT_H2D] = { }, 3152 [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { }, 3153 [SCI_REQ_ATAPI_WAIT_D2H] = { }, 3154 [SCI_REQ_ATAPI_WAIT_TC_COMP] = { }, 3155 [SCI_REQ_COMPLETED] = { 3156 .enter_state = sci_request_completed_state_enter, 3157 }, 3158 [SCI_REQ_ABORTING] = { 3159 .enter_state = sci_request_aborting_state_enter, 3160 }, 3161 [SCI_REQ_FINAL] = { }, 3162 }; 3163 3164 static void 3165 sci_general_request_construct(struct isci_host *ihost, 3166 struct isci_remote_device *idev, 3167 struct isci_request *ireq) 3168 { 3169 sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); 3170 3171 ireq->target_device = idev; 3172 ireq->protocol = SCIC_NO_PROTOCOL; 3173 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; 3174 3175 ireq->sci_status = SCI_SUCCESS; 3176 ireq->scu_status = 0; 3177 ireq->post_context = 0xFFFFFFFF; 3178 } 3179 3180 static enum sci_status 3181 sci_io_request_construct(struct isci_host *ihost, 3182 struct isci_remote_device *idev, 3183 struct isci_request *ireq) 3184 { 3185 struct domain_device *dev = idev->domain_dev; 3186 enum sci_status status = SCI_SUCCESS; 3187 3188 /* Build the common part of the request */ 3189 sci_general_request_construct(ihost, idev, ireq); 3190 3191 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 3192 return SCI_FAILURE_INVALID_REMOTE_DEVICE; 3193 3194 if (dev->dev_type == SAS_END_DEV) 3195 /* pass */; 3196 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) 3197 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 3198 else if (dev_is_expander(dev)) 3199 /* pass */; 3200 else 3201 return SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3202 3203 memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); 3204 3205 return status; 3206 } 3207 3208 enum sci_status sci_task_request_construct(struct isci_host *ihost, 3209 struct isci_remote_device *idev, 3210 u16 io_tag, struct isci_request *ireq) 3211 { 3212 struct domain_device *dev = idev->domain_dev; 3213 enum sci_status status = SCI_SUCCESS; 3214 3215 /* Build the common part of the request */ 3216 sci_general_request_construct(ihost, idev, ireq); 3217 3218 if (dev->dev_type == SAS_END_DEV || 3219 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { 3220 set_bit(IREQ_TMF, &ireq->flags); 3221 memset(ireq->tc, 0, sizeof(struct scu_task_context)); 3222 } else 3223 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3224 3225 return status; 3226 } 3227 3228 static enum sci_status isci_request_ssp_request_construct( 3229 struct isci_request *request) 3230 { 3231 enum sci_status status; 3232 3233 dev_dbg(&request->isci_host->pdev->dev, 3234 "%s: request = %p\n", 3235 __func__, 3236 request); 3237 status = sci_io_request_construct_basic_ssp(request); 3238 return status; 3239 } 3240 3241 static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq) 3242 { 3243 struct sas_task *task = isci_request_access_task(ireq); 3244 struct host_to_dev_fis *fis = &ireq->stp.cmd; 3245 struct ata_queued_cmd *qc = task->uldd_task; 3246 enum sci_status status; 3247 3248 dev_dbg(&ireq->isci_host->pdev->dev, 3249 "%s: ireq = %p\n", 3250 __func__, 3251 ireq); 3252 3253 memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); 3254 if (!task->ata_task.device_control_reg_update) 3255 fis->flags |= 0x80; 3256 fis->flags &= 0xF0; 3257 3258 status = sci_io_request_construct_basic_sata(ireq); 3259 3260 if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE || 3261 qc->tf.command == ATA_CMD_FPDMA_READ)) { 3262 fis->sector_count = qc->tag << 3; 3263 ireq->tc->type.stp.ncq_tag = qc->tag; 3264 } 3265 3266 return status; 3267 } 3268 3269 static enum sci_status 3270 sci_io_request_construct_smp(struct device *dev, 3271 struct isci_request *ireq, 3272 struct sas_task *task) 3273 { 3274 struct scatterlist *sg = &task->smp_task.smp_req; 3275 struct isci_remote_device *idev; 3276 struct scu_task_context *task_context; 3277 struct isci_port *iport; 3278 struct smp_req *smp_req; 3279 void *kaddr; 3280 u8 req_len; 3281 u32 cmd; 3282 3283 kaddr = kmap_atomic(sg_page(sg)); 3284 smp_req = kaddr + sg->offset; 3285 /* 3286 * Look at the SMP requests' header fields; for certain SAS 1.x SMP 3287 * functions under SAS 2.0, a zero request length really indicates 3288 * a non-zero default length. 3289 */ 3290 if (smp_req->req_len == 0) { 3291 switch (smp_req->func) { 3292 case SMP_DISCOVER: 3293 case SMP_REPORT_PHY_ERR_LOG: 3294 case SMP_REPORT_PHY_SATA: 3295 case SMP_REPORT_ROUTE_INFO: 3296 smp_req->req_len = 2; 3297 break; 3298 case SMP_CONF_ROUTE_INFO: 3299 case SMP_PHY_CONTROL: 3300 case SMP_PHY_TEST_FUNCTION: 3301 smp_req->req_len = 9; 3302 break; 3303 /* Default - zero is a valid default for 2.0. */ 3304 } 3305 } 3306 req_len = smp_req->req_len; 3307 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 3308 cmd = *(u32 *) smp_req; 3309 kunmap_atomic(kaddr); 3310 3311 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) 3312 return SCI_FAILURE; 3313 3314 ireq->protocol = SCIC_SMP_PROTOCOL; 3315 3316 /* byte swap the smp request. */ 3317 3318 task_context = ireq->tc; 3319 3320 idev = ireq->target_device; 3321 iport = idev->owning_port; 3322 3323 /* 3324 * Fill in the TC with the its required data 3325 * 00h 3326 */ 3327 task_context->priority = 0; 3328 task_context->initiator_request = 1; 3329 task_context->connection_rate = idev->connection_rate; 3330 task_context->protocol_engine_index = ISCI_PEG; 3331 task_context->logical_port_index = iport->physical_port_index; 3332 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; 3333 task_context->abort = 0; 3334 task_context->valid = SCU_TASK_CONTEXT_VALID; 3335 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 3336 3337 /* 04h */ 3338 task_context->remote_node_index = idev->rnc.remote_node_index; 3339 task_context->command_code = 0; 3340 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST; 3341 3342 /* 08h */ 3343 task_context->link_layer_control = 0; 3344 task_context->do_not_dma_ssp_good_response = 1; 3345 task_context->strict_ordering = 0; 3346 task_context->control_frame = 1; 3347 task_context->timeout_enable = 0; 3348 task_context->block_guard_enable = 0; 3349 3350 /* 0ch */ 3351 task_context->address_modifier = 0; 3352 3353 /* 10h */ 3354 task_context->ssp_command_iu_length = req_len; 3355 3356 /* 14h */ 3357 task_context->transfer_length_bytes = 0; 3358 3359 /* 3360 * 18h ~ 30h, protocol specific 3361 * since commandIU has been build by framework at this point, we just 3362 * copy the frist DWord from command IU to this location. */ 3363 memcpy(&task_context->type.smp, &cmd, sizeof(u32)); 3364 3365 /* 3366 * 40h 3367 * "For SMP you could program it to zero. We would prefer that way 3368 * so that done code will be consistent." - Venki 3369 */ 3370 task_context->task_phase = 0; 3371 3372 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 3373 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 3374 (iport->physical_port_index << 3375 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 3376 ISCI_TAG_TCI(ireq->io_tag)); 3377 /* 3378 * Copy the physical address for the command buffer to the SCU Task 3379 * Context command buffer should not contain command header. 3380 */ 3381 task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg)); 3382 task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32)); 3383 3384 /* SMP response comes as UF, so no need to set response IU address. */ 3385 task_context->response_iu_upper = 0; 3386 task_context->response_iu_lower = 0; 3387 3388 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 3389 3390 return SCI_SUCCESS; 3391 } 3392 3393 /* 3394 * isci_smp_request_build() - This function builds the smp request. 3395 * @ireq: This parameter points to the isci_request allocated in the 3396 * request construct function. 3397 * 3398 * SCI_SUCCESS on successfull completion, or specific failure code. 3399 */ 3400 static enum sci_status isci_smp_request_build(struct isci_request *ireq) 3401 { 3402 struct sas_task *task = isci_request_access_task(ireq); 3403 struct device *dev = &ireq->isci_host->pdev->dev; 3404 enum sci_status status = SCI_FAILURE; 3405 3406 status = sci_io_request_construct_smp(dev, ireq, task); 3407 if (status != SCI_SUCCESS) 3408 dev_dbg(&ireq->isci_host->pdev->dev, 3409 "%s: failed with status = %d\n", 3410 __func__, 3411 status); 3412 3413 return status; 3414 } 3415 3416 /** 3417 * isci_io_request_build() - This function builds the io request object. 3418 * @ihost: This parameter specifies the ISCI host object 3419 * @request: This parameter points to the isci_request object allocated in the 3420 * request construct function. 3421 * @sci_device: This parameter is the handle for the sci core's remote device 3422 * object that is the destination for this request. 3423 * 3424 * SCI_SUCCESS on successfull completion, or specific failure code. 3425 */ 3426 static enum sci_status isci_io_request_build(struct isci_host *ihost, 3427 struct isci_request *request, 3428 struct isci_remote_device *idev) 3429 { 3430 enum sci_status status = SCI_SUCCESS; 3431 struct sas_task *task = isci_request_access_task(request); 3432 3433 dev_dbg(&ihost->pdev->dev, 3434 "%s: idev = 0x%p; request = %p, " 3435 "num_scatter = %d\n", 3436 __func__, 3437 idev, 3438 request, 3439 task->num_scatter); 3440 3441 /* map the sgl addresses, if present. 3442 * libata does the mapping for sata devices 3443 * before we get the request. 3444 */ 3445 if (task->num_scatter && 3446 !sas_protocol_ata(task->task_proto) && 3447 !(SAS_PROTOCOL_SMP & task->task_proto)) { 3448 3449 request->num_sg_entries = dma_map_sg( 3450 &ihost->pdev->dev, 3451 task->scatter, 3452 task->num_scatter, 3453 task->data_dir 3454 ); 3455 3456 if (request->num_sg_entries == 0) 3457 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 3458 } 3459 3460 status = sci_io_request_construct(ihost, idev, request); 3461 3462 if (status != SCI_SUCCESS) { 3463 dev_dbg(&ihost->pdev->dev, 3464 "%s: failed request construct\n", 3465 __func__); 3466 return SCI_FAILURE; 3467 } 3468 3469 switch (task->task_proto) { 3470 case SAS_PROTOCOL_SMP: 3471 status = isci_smp_request_build(request); 3472 break; 3473 case SAS_PROTOCOL_SSP: 3474 status = isci_request_ssp_request_construct(request); 3475 break; 3476 case SAS_PROTOCOL_SATA: 3477 case SAS_PROTOCOL_STP: 3478 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 3479 status = isci_request_stp_request_construct(request); 3480 break; 3481 default: 3482 dev_dbg(&ihost->pdev->dev, 3483 "%s: unknown protocol\n", __func__); 3484 return SCI_FAILURE; 3485 } 3486 3487 return SCI_SUCCESS; 3488 } 3489 3490 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag) 3491 { 3492 struct isci_request *ireq; 3493 3494 ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; 3495 ireq->io_tag = tag; 3496 ireq->io_request_completion = NULL; 3497 ireq->flags = 0; 3498 ireq->num_sg_entries = 0; 3499 INIT_LIST_HEAD(&ireq->completed_node); 3500 INIT_LIST_HEAD(&ireq->dev_node); 3501 isci_request_change_state(ireq, allocated); 3502 3503 return ireq; 3504 } 3505 3506 static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, 3507 struct sas_task *task, 3508 u16 tag) 3509 { 3510 struct isci_request *ireq; 3511 3512 ireq = isci_request_from_tag(ihost, tag); 3513 ireq->ttype_ptr.io_task_ptr = task; 3514 clear_bit(IREQ_TMF, &ireq->flags); 3515 task->lldd_task = ireq; 3516 3517 return ireq; 3518 } 3519 3520 struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, 3521 struct isci_tmf *isci_tmf, 3522 u16 tag) 3523 { 3524 struct isci_request *ireq; 3525 3526 ireq = isci_request_from_tag(ihost, tag); 3527 ireq->ttype_ptr.tmf_task_ptr = isci_tmf; 3528 set_bit(IREQ_TMF, &ireq->flags); 3529 3530 return ireq; 3531 } 3532 3533 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, 3534 struct sas_task *task, u16 tag) 3535 { 3536 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3537 struct isci_request *ireq; 3538 unsigned long flags; 3539 int ret = 0; 3540 3541 /* do common allocation and init of request object. */ 3542 ireq = isci_io_request_from_tag(ihost, task, tag); 3543 3544 status = isci_io_request_build(ihost, ireq, idev); 3545 if (status != SCI_SUCCESS) { 3546 dev_dbg(&ihost->pdev->dev, 3547 "%s: request_construct failed - status = 0x%x\n", 3548 __func__, 3549 status); 3550 return status; 3551 } 3552 3553 spin_lock_irqsave(&ihost->scic_lock, flags); 3554 3555 if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) { 3556 3557 if (isci_task_is_ncq_recovery(task)) { 3558 3559 /* The device is in an NCQ recovery state. Issue the 3560 * request on the task side. Note that it will 3561 * complete on the I/O request side because the 3562 * request was built that way (ie. 3563 * ireq->is_task_management_request is false). 3564 */ 3565 status = sci_controller_start_task(ihost, 3566 idev, 3567 ireq); 3568 } else { 3569 status = SCI_FAILURE; 3570 } 3571 } else { 3572 /* send the request, let the core assign the IO TAG. */ 3573 status = sci_controller_start_io(ihost, idev, 3574 ireq); 3575 } 3576 3577 if (status != SCI_SUCCESS && 3578 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 3579 dev_dbg(&ihost->pdev->dev, 3580 "%s: failed request start (0x%x)\n", 3581 __func__, status); 3582 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3583 return status; 3584 } 3585 3586 /* Either I/O started OK, or the core has signaled that 3587 * the device needs a target reset. 3588 * 3589 * In either case, hold onto the I/O for later. 3590 * 3591 * Update it's status and add it to the list in the 3592 * remote device object. 3593 */ 3594 list_add(&ireq->dev_node, &idev->reqs_in_process); 3595 3596 if (status == SCI_SUCCESS) { 3597 isci_request_change_state(ireq, started); 3598 } else { 3599 /* The request did not really start in the 3600 * hardware, so clear the request handle 3601 * here so no terminations will be done. 3602 */ 3603 set_bit(IREQ_TERMINATED, &ireq->flags); 3604 isci_request_change_state(ireq, completed); 3605 } 3606 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3607 3608 if (status == 3609 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 3610 /* Signal libsas that we need the SCSI error 3611 * handler thread to work on this I/O and that 3612 * we want a device reset. 3613 */ 3614 spin_lock_irqsave(&task->task_state_lock, flags); 3615 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 3616 spin_unlock_irqrestore(&task->task_state_lock, flags); 3617 3618 /* Cause this task to be scheduled in the SCSI error 3619 * handler thread. 3620 */ 3621 sas_task_abort(task); 3622 3623 /* Change the status, since we are holding 3624 * the I/O until it is managed by the SCSI 3625 * error handler. 3626 */ 3627 status = SCI_SUCCESS; 3628 } 3629 3630 return ret; 3631 } 3632