/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <scsi/scsi_cmnd.h>
#include "isci.h"
#include "task.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

#undef C
#define C(a) (#a)
const char *req_state_name(enum sci_base_request_states state)
{
	static const char * const strings[] = REQUEST_STATES;

	return strings[state];
}
#undef C

static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
							int idx)
{
	if (idx == 0)
		return &ireq->tc->sgl_pair_ab;
	else if (idx == 1)
		return &ireq->tc->sgl_pair_cd;
	else if (idx < 0)
		return NULL;
	else
		return &ireq->sg_table[idx - 2];
}

static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
					  struct isci_request *ireq, u32 idx)
{
	u32 offset;

	if (idx == 0) {
		offset = (void *) &ireq->tc->sgl_pair_ab -
			(void *) &ihost->task_context_table[0];
		return ihost->tc_dma + offset;
	} else if (idx == 1) {
		offset = (void *) &ireq->tc->sgl_pair_cd -
			(void *) &ihost->task_context_table[0];
		return ihost->tc_dma + offset;
	}

	return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
}

static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
{
	e->length = sg_dma_len(sg);
	e->address_upper = upper_32_bits(sg_dma_address(sg));
	e->address_lower = lower_32_bits(sg_dma_address(sg));
	e->address_modifier = 0;
}

static void sci_request_build_sgl(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->isci_host;
	struct sas_task *task = isci_request_access_task(ireq);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = to_sgl_element_pair(ireq, sg_idx);
			init_sgl_element(&scu_sg->A, sg);
			sg = sg_next(sg);
			if (sg) {
				init_sgl_element(&scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				memset(&scu_sg->B, 0, sizeof(scu_sg->B));

			if (prev_sg) {
				dma_addr = to_sgl_element_pair_dma(ihost,
								   ireq,
								   sg_idx);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = to_sgl_element_pair(ireq, sg_idx);

		dma_addr = dma_map_single(&ihost->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		ireq->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}
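
/*
 * Illustrative sketch (not driver code): each scu_sgl_element_pair holds
 * two SGL elements (A/B) plus a link to the next pair.  The first two
 * pairs live inside the task context itself (sgl_pair_ab/sgl_pair_cd);
 * pair index >= 2 spills into ireq->sg_table.  For a 5-entry scatterlist
 * the walk above therefore produces:
 *
 *	pair 0 (tc->sgl_pair_ab): A = sg[0], B = sg[1]
 *	pair 1 (tc->sgl_pair_cd): A = sg[2], B = sg[3]
 *	pair 2 (sg_table[0]):     A = sg[4], B = zeroed
 *
 * with pair N's next_pair_{upper,lower} pointing at the DMA address of
 * pair N+1 and the final pair's link cleared to 0/0.
 */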

static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
{
	struct ssp_cmd_iu *cmd_iu;
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &ireq->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
		       (task->ssp_task.cmd->cmd_len+3) / sizeof(u32));
}

static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
{
	struct ssp_task_iu *task_iu;
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &ireq->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(test_bit(IREQ_TMF, &ireq->flags)) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * This method will fill in the SCU Task Context for any type of SSP request.
 * @sci_req:
 * @task_context:
 *
 */
static void scu_ssp_request_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = ireq->io_tag; */
	task_context->task_phase = 0x01;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}

static u8 scu_bg_blk_size(struct scsi_device *sdp)
{
	switch (sdp->sector_size) {
	case 512:
		return 0;
	case 1024:
		return 1;
	case 4096:
		return 3;
	default:
		return 0xff;
	}
}

static u32 scu_dif_bytes(u32 len, u32 sector_size)
{
	return (len >> ilog2(sector_size)) * 8;
}
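
/*
 * Worked example (illustrative only): with 512-byte sectors a 4096-byte
 * transfer covers 4096 >> ilog2(512) = 8 sectors, so scu_dif_bytes()
 * reports 8 * 8 = 64 bytes of appended DIF (8 bytes of protection
 * information per sector), and the DIF insert/strip paths below grow
 * transfer_length_bytes accordingly.
 */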

static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
{
	struct scu_task_context *tc = ireq->tc;
	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
	u8 blk_sz = scu_bg_blk_size(scmd->device);

	tc->block_guard_enable = 1;
	tc->blk_prot_en = 1;
	tc->blk_sz = blk_sz;
	/* DIF write insert */
	tc->blk_prot_func = 0x2;

	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
						   scmd->device->sector_size);

	/* always init to 0, used by hw */
	tc->interm_crc_val = 0;

	tc->init_crc_seed = 0;
	tc->app_tag_verify = 0;
	tc->app_tag_gen = 0;
	tc->ref_tag_seed_verify = 0;

	/* always init to same as bg_blk_sz */
	tc->UD_bytes_immed_val = scmd->device->sector_size;

	tc->reserved_DC_0 = 0;

	/* always init to 8 */
	tc->DIF_bytes_immed_val = 8;

	tc->reserved_DC_1 = 0;
	tc->bgc_blk_sz = scmd->device->sector_size;
	tc->reserved_E0_0 = 0;
	tc->app_tag_gen_mask = 0;

	/** setup block guard control **/
	tc->bgctl = 0;

	/* DIF write insert */
	tc->bgctl_f.op = 0x2;

	tc->app_tag_verify_mask = 0;

	/* must init to 0 for hw */
	tc->blk_guard_err = 0;

	tc->reserved_E8_0 = 0;

	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
		tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
	else if (type & SCSI_PROT_DIF_TYPE3)
		tc->ref_tag_seed_gen = 0;
}

static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
{
	struct scu_task_context *tc = ireq->tc;
	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
	u8 blk_sz = scu_bg_blk_size(scmd->device);

	tc->block_guard_enable = 1;
	tc->blk_prot_en = 1;
	tc->blk_sz = blk_sz;
	/* DIF read strip */
	tc->blk_prot_func = 0x1;

	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
						   scmd->device->sector_size);

	/* always init to 0, used by hw */
	tc->interm_crc_val = 0;

	tc->init_crc_seed = 0;
	tc->app_tag_verify = 0;
	tc->app_tag_gen = 0;

	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
		tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
	else if (type & SCSI_PROT_DIF_TYPE3)
		tc->ref_tag_seed_verify = 0;

	/* always init to same as bg_blk_sz */
	tc->UD_bytes_immed_val = scmd->device->sector_size;

	tc->reserved_DC_0 = 0;

	/* always init to 8 */
	tc->DIF_bytes_immed_val = 8;

	tc->reserved_DC_1 = 0;
	tc->bgc_blk_sz = scmd->device->sector_size;
	tc->reserved_E0_0 = 0;
	tc->app_tag_gen_mask = 0;

	/** setup block guard control **/
	tc->bgctl = 0;

	/* DIF read strip */
	tc->bgctl_f.crc_verify = 1;
	tc->bgctl_f.op = 0x1;
	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
		tc->bgctl_f.ref_tag_chk = 1;
		tc->bgctl_f.app_f_detect = 1;
	} else if (type & SCSI_PROT_DIF_TYPE3)
		tc->bgctl_f.app_ref_f_detect = 1;

	tc->app_tag_verify_mask = 0;

	/* must init to 0 for hw */
	tc->blk_guard_err = 0;

	tc->reserved_E8_0 = 0;
	tc->ref_tag_seed_gen = 0;
}
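
/*
 * Usage note (descriptive): the two helpers above are mirror images of the
 * same task-context programming.  scu_ssp_ireq_dif_insert() arms the hardware
 * to generate and append 8 bytes of protection information per sector on
 * writes (blk_prot_func/bgctl op 0x2), while scu_ssp_ireq_dif_strip() arms it
 * to verify and remove that information on reads (op 0x1, with CRC and, for
 * DIF types 1/2, reference-tag checking seeded from the command's LBA).
 */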

/**
 * This method will fill in the SCU Task Context for a SSP IO request.
 * @sci_req:
 *
 */
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
						      enum dma_data_direction dir,
						      u32 len)
{
	struct scu_task_context *task_context = ireq->tc;
	struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
	struct scsi_cmnd *scmd = sas_task->uldd_task;
	u8 prot_type = scsi_get_prot_type(scmd);
	u8 prot_op = scsi_get_prot_op(scmd);

	scu_ssp_request_construct_task_context(ireq, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		sci_request_build_sgl(ireq);

	if (prot_type != SCSI_PROT_DIF_TYPE0) {
		if (prot_op == SCSI_PROT_READ_STRIP)
			scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
		else if (prot_op == SCSI_PROT_WRITE_INSERT)
			scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
	}
}

/**
 * This method will fill in the SCU Task Context for a SSP Task request.
 *    The following important settings are utilized:
 *      -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *         request is issued ahead of other tasks destined for the same
 *         Remote Node.
 *      -# task_type == SCU_TASK_TYPE_IOREAD.  This simply indicates that a
 *         normal request type (i.e. non-raw frame) is being utilized to
 *         perform task management.
 *      -# control_frame == 1.  This ensures that the proper endianness is
 *         set so that the bytes are transmitted in the right order for a
 *         task frame.
 * @sci_req: This parameter specifies the task request object being
 *    constructed.
 *
 */
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_ssp_request_construct_task_context(ireq, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * This method will fill in the SCU Task Context for any type of SATA
 *    request.  This is called from the various SATA constructors.
 * @sci_req: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * The general io request construction is complete.  The buffer assignment
 * for the command buffer is complete.  none Revisit task context
 * construction to determine what is common for SSP/SMP/STP task context
 * structures.
 */
static void scu_sata_request_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context.  We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq,
					       ((char *) &ireq->stp.cmd) +
					       sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}

static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_sata_request_construct_task_context(ireq, task_context);

	task_context->control_frame = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
						     bool copy_rx_frame)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;

	scu_stp_raw_request_construct_task_context(ireq);

	stp_req->status = 0;
	stp_req->sgl.offset = 0;
	stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		sci_request_build_sgl(ireq);
		stp_req->sgl.index = 0;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		stp_req->sgl.index = -1;
	}

	return SCI_SUCCESS;
}

/**
 *
 * @sci_req: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to
 *    be an UDMA request or a NCQ request.  - A value of 0 indicates UDMA.
 *    - A value of 1 indicates NCQ.
 *
 * This method will perform request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).  This method
 * returns an indication as to whether the construction was successful.
 */
static void sci_stp_optimized_request_construct(struct isci_request *ireq,
						u8 optimized_task_type,
						u32 len,
						enum dma_data_direction dir)
{
	struct scu_task_context *task_context = ireq->tc;

	/* Build the STP task context structure */
	scu_sata_request_construct_task_context(ireq, task_context);

	/* Copy over the SGL elements */
	sci_request_build_sgl(ireq);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task
		 * type values are consistent with the difference between
		 * FPDMA READ and FPDMA WRITE values.  Add the supplied task
		 * type parameter to this difference to set the task type
		 * properly for this DATA OUT (WRITE) case. */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type. */
		task_context->task_type = optimized_task_type;
	}
}
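
/*
 * Worked example (illustrative; assumes the SCU task-type enum spacing
 * described in sci_stp_optimized_request_construct() above): for a UDMA
 * write the caller passes optimized_task_type == SCU_TASK_TYPE_DMA_IN, and
 * adding (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN) yields
 * SCU_TASK_TYPE_DMA_OUT.  An NCQ request passes SCU_TASK_TYPE_FPDMAQ_READ
 * and is shifted by the same delta to the matching FPDMA write type.
 */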

static void sci_atapi_construct(struct isci_request *ireq)
{
	struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
	struct sas_task *task;

	/* To simplify the implementation we take advantage of the
	 * silicon's partial acceleration of atapi protocol (dma data
	 * transfers), so we promote all commands to dma protocol.  This
	 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
	 */
	h2d_fis->features |= ATAPI_PKT_DMA;

	scu_stp_raw_request_construct_task_context(ireq);

	task = isci_request_access_task(ireq);
	if (task->data_dir == DMA_NONE)
		task->total_xfer_len = 0;

	/* clear the response so we can detect arrival of an
	 * unsolicited h2d fis
	 */
	ireq->stp.rsp.fis_type = 0;
}

static enum sci_status
sci_io_request_construct_sata(struct isci_request *ireq,
			      u32 len,
			      enum dma_data_direction dir,
			      bool copy)
{
	enum sci_status status = SCI_SUCCESS;
	struct sas_task *task = isci_request_access_task(ireq);
	struct domain_device *dev = ireq->target_device->domain_dev;

	/* check for management protocols */
	if (test_bit(IREQ_TMF, &ireq->flags)) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		dev_err(&ireq->owning_controller->pdev->dev,
			"%s: Request 0x%p received un-handled SAT "
			"management protocol 0x%x.\n",
			__func__, ireq, tmf->tmf_code);

		return SCI_FAILURE;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(&ireq->owning_controller->pdev->dev,
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;

	}

	/* ATAPI */
	if (dev->sata_dev.class == ATA_DEV_ATAPI &&
	    task->ata_task.fis.command == ATA_CMD_PACKET) {
		sci_atapi_construct(ireq);
		return SCI_SUCCESS;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(ireq);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		sci_stp_optimized_request_construct(ireq,
						    SCU_TASK_TYPE_FPDMAQ_READ,
						    len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		sci_stp_optimized_request_construct(ireq,
						    SCU_TASK_TYPE_DMA_IN,
						    len, dir);
		return SCI_SUCCESS;
	} else /* PIO */
		return sci_stp_pio_request_construct(ireq, copy);

	return status;
}
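
/*
 * Note (summary, not normative): the checks above establish a strict
 * precedence for SATA construction -- TMF requests are rejected (no SAT
 * management protocol support), then ATAPI PACKET commands, non-data
 * commands, NCQ, and DMA each get their own construction path, with PIO
 * as the fallback for everything else.
 */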

static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SAS_PROTOCOL_SSP;

	scu_ssp_io_request_construct_task_context(ireq,
						  task->data_dir,
						  task->total_xfer_len);

	sci_io_request_build_ssp_command_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status sci_task_request_construct_ssp(
	struct isci_request *ireq)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(ireq);

	/* Fill in the SSP Task IU */
	sci_task_request_build_ssp_task_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
{
	enum sci_status status;
	bool copy = false;
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SAS_PROTOCOL_STP;

	copy = (task->data_dir == DMA_NONE) ? false : true;

	status = sci_io_request_construct_sata(ireq,
					       task->total_xfer_len,
					       task->data_dir,
					       copy);

	if (status == SCI_SUCCESS)
		sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return status;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @ireq: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->owning_controller;
	u32 ret_val = 0;

	if (readl(&ihost->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = ihost->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct sci_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
	}

	return ret_val;
}
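
/*
 * Worked example (illustrative; assumes sizeof(struct scu_task_context) is
 * 256 bytes and type.ssp.data_offset sits 0x2c bytes into the TC, per the
 * comment above): the read for tag index 2 resolves to
 *
 *	scu_reg_base + 0x200000 + 0x2c + (256 * 2) = scu_reg_base + 0x20022c
 *
 * i.e. the data_offset word of the third task context image in SCU SRAM,
 * matching the BAR1 + 20002Ch + (256 * TCi) formula.
 */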

enum sci_status sci_request_start(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct scu_task_context *tc = ireq->tc;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (state != SCI_REQ_CONSTRUCTED) {
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC IO Request requested to start while in wrong "
			 "state %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	tc->task_index = ISCI_TAG_TCI(ireq->io_tag);

	switch (tc->protocol_type) {
	case SCU_TASK_CONTEXT_PROTOCOL_SMP:
	case SCU_TASK_CONTEXT_PROTOCOL_SSP:
		/* SSP/SMP Frame */
		tc->type.ssp.tag = ireq->io_tag;
		tc->type.ssp.target_port_transfer_tag = 0xFFFF;
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_STP:
		/* STP/SATA Frame
		 * tc->type.stp.ncq_tag = ireq->ncq_tag;
		 */
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_NONE:
		/* @todo When do we set no protocol type? */
		break;

	default:
		/* This should never happen since we build the IO
		 * requests */
		break;
	}

	/* Add to the post_context the io tag value */
	ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);

	/* Everything is good, go ahead and change state */
	sci_change_state(&ireq->sm, SCI_REQ_STARTED);

	return SCI_SUCCESS;
}

enum sci_status
sci_io_request_terminate(struct isci_request *ireq)
{
	enum sci_base_request_states state;

	state = ireq->sm.current_state_id;

	switch (state) {
	case SCI_REQ_CONSTRUCTED:
		/* Set to make sure no HW terminate posting is done: */
		set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_STARTED:
	case SCI_REQ_TASK_WAIT_TC_COMP:
	case SCI_REQ_SMP_WAIT_RESP:
	case SCI_REQ_SMP_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_D2H:
	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
	case SCI_REQ_STP_NON_DATA_WAIT_D2H:
	case SCI_REQ_STP_PIO_WAIT_H2D:
	case SCI_REQ_STP_PIO_WAIT_FRAME:
	case SCI_REQ_STP_PIO_DATA_IN:
	case SCI_REQ_STP_PIO_DATA_OUT:
	case SCI_REQ_ATAPI_WAIT_H2D:
	case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
	case SCI_REQ_ATAPI_WAIT_D2H:
	case SCI_REQ_ATAPI_WAIT_TC_COMP:
		/* Fall through and change state to ABORTING... */
	case SCI_REQ_TASK_WAIT_TC_RESP:
		/* The task frame was already confirmed to have been
		 * sent by the SCU HW.  Since the state machine is
		 * now only waiting for the task response itself,
		 * abort the request and complete it immediately
		 * and don't wait for the task response.
		 */
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		/* Fall through - and handle like ABORTING... */
	case SCI_REQ_ABORTING:
		if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
			set_bit(IREQ_PENDING_ABORT, &ireq->flags);
		else
			clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
		/* If the request is only waiting on the remote device
		 * suspension, return SUCCESS so the caller will wait too.
		 */
		return SCI_SUCCESS;
	case SCI_REQ_COMPLETED:
	default:
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n", __func__, ireq->sm.current_state_id);
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}
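
/*
 * Termination summary (descriptive, not new behavior): a request that has
 * not been posted to hardware (SCI_REQ_CONSTRUCTED) is completed in place
 * with SCU_TASK_DONE_TASK_ABORT / SCI_FAILURE_IO_TERMINATED; any in-flight
 * state funnels into SCI_REQ_ABORTING, where IREQ_PENDING_ABORT tracks
 * whether the remote device must first be safely suspended before the
 * abort can proceed.
 */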

enum sci_status sci_request_complete(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
		      "isci: request completion from wrong state (%s)\n",
		      req_state_name(state)))
		return SCI_FAILURE_INVALID_STATE;

	if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
		sci_controller_release_frame(ihost,
					     ireq->saved_rx_frame_index);

	/* XXX can we just stop the machine and remove the 'final' state? */
	sci_change_state(&ireq->sm, SCI_REQ_FINAL);
	return SCI_SUCCESS;
}

enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
					     u32 event_code)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;

	if (state != SCI_REQ_STP_PIO_DATA_IN) {
		dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
			 __func__, event_code, req_state_name(state));

		return SCI_FAILURE_INVALID_STATE;
	}

	switch (scu_get_event_specifier(event_code)) {
	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
		/* We are waiting for data and the SCU has R_ERR the data frame.
		 * Go back to waiting for the D2H Register FIS
		 */
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		return SCI_SUCCESS;
	default:
		dev_err(&ihost->pdev->dev,
			"%s: pio request unexpected event %#x\n",
			__func__, event_code);

		/* TODO Should we fail the PIO request when we get an
		 * unexpected event?
		 */
		return SCI_FAILURE;
	}
}

/*
 * This function copies response data for requests returning response data
 *    instead of sense data.
 * @sci_req: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void sci_io_request_copy_response(struct isci_request *ireq)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &ireq->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}

static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
			       u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
	 * to determine SDMA status
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response stats to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &ireq->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
		} else {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &ireq->ssp.rsp;
		datapres = resp_iu->datapres;

		if (datapres == 1 || datapres == 2) {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		} else {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS;
		}
		break;
	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (ireq->protocol == SAS_PROTOCOL_STP) {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		} else {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
	return SCI_SUCCESS;
}

static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
				u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* Unless we get some strange error, wait for the task abort
		 * to complete.
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/* Currently, the decision is to simply allow the task request
		 * to timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses
		 * if we decide to send the task IU again.
		 */
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n", __func__, ireq,
			 completion_code);

		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
1183 */ 1184 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1185 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1186 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1187 break; 1188 } 1189 1190 return SCI_SUCCESS; 1191 } 1192 1193 static enum sci_status 1194 smp_request_await_response_tc_event(struct isci_request *ireq, 1195 u32 completion_code) 1196 { 1197 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1198 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1199 /* In the AWAIT RESPONSE state, any TC completion is 1200 * unexpected. but if the TC has success status, we 1201 * complete the IO anyway. 1202 */ 1203 ireq->scu_status = SCU_TASK_DONE_GOOD; 1204 ireq->sci_status = SCI_SUCCESS; 1205 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1206 break; 1207 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 1208 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): 1209 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): 1210 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): 1211 /* These status has been seen in a specific LSI 1212 * expander, which sometimes is not able to send smp 1213 * response within 2 ms. This causes our hardware break 1214 * the connection and set TC completion with one of 1215 * these SMP_XXX_XX_ERR status. For these type of error, 1216 * we ask ihost user to retry the request. 1217 */ 1218 ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR; 1219 ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED; 1220 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1221 break; 1222 default: 1223 /* All other completion status cause the IO to be complete. If a NAK 1224 * was received, then it is up to the user to retry the request 1225 */ 1226 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1227 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1228 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1229 break; 1230 } 1231 1232 return SCI_SUCCESS; 1233 } 1234 1235 static enum sci_status 1236 smp_request_await_tc_event(struct isci_request *ireq, 1237 u32 completion_code) 1238 { 1239 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1240 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1241 ireq->scu_status = SCU_TASK_DONE_GOOD; 1242 ireq->sci_status = SCI_SUCCESS; 1243 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1244 break; 1245 default: 1246 /* All other completion status cause the IO to be 1247 * complete. If a NAK was received, then it is up to 1248 * the user to retry the request. 
1249 */ 1250 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1251 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1252 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1253 break; 1254 } 1255 1256 return SCI_SUCCESS; 1257 } 1258 1259 static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) 1260 { 1261 struct scu_sgl_element *sgl; 1262 struct scu_sgl_element_pair *sgl_pair; 1263 struct isci_request *ireq = to_ireq(stp_req); 1264 struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl; 1265 1266 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); 1267 if (!sgl_pair) 1268 sgl = NULL; 1269 else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) { 1270 if (sgl_pair->B.address_lower == 0 && 1271 sgl_pair->B.address_upper == 0) { 1272 sgl = NULL; 1273 } else { 1274 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B; 1275 sgl = &sgl_pair->B; 1276 } 1277 } else { 1278 if (sgl_pair->next_pair_lower == 0 && 1279 sgl_pair->next_pair_upper == 0) { 1280 sgl = NULL; 1281 } else { 1282 pio_sgl->index++; 1283 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A; 1284 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); 1285 sgl = &sgl_pair->A; 1286 } 1287 } 1288 1289 return sgl; 1290 } 1291 1292 static enum sci_status 1293 stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, 1294 u32 completion_code) 1295 { 1296 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1297 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1298 ireq->scu_status = SCU_TASK_DONE_GOOD; 1299 ireq->sci_status = SCI_SUCCESS; 1300 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); 1301 break; 1302 1303 default: 1304 /* All other completion status cause the IO to be 1305 * complete. If a NAK was received, then it is up to 1306 * the user to retry the request. 1307 */ 1308 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1309 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1310 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1311 break; 1312 } 1313 1314 return SCI_SUCCESS; 1315 } 1316 1317 #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ 1318 1319 /* transmit DATA_FIS from (current sgl + offset) for input 1320 * parameter length. current sgl and offset is alreay stored in the IO request 1321 */ 1322 static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( 1323 struct isci_request *ireq, 1324 u32 length) 1325 { 1326 struct isci_stp_request *stp_req = &ireq->stp.req; 1327 struct scu_task_context *task_context = ireq->tc; 1328 struct scu_sgl_element_pair *sgl_pair; 1329 struct scu_sgl_element *current_sgl; 1330 1331 /* Recycle the TC and reconstruct it for sending out DATA FIS containing 1332 * for the data from current_sgl+offset for the input length 1333 */ 1334 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); 1335 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) 1336 current_sgl = &sgl_pair->A; 1337 else 1338 current_sgl = &sgl_pair->B; 1339 1340 /* update the TC */ 1341 task_context->command_iu_upper = current_sgl->address_upper; 1342 task_context->command_iu_lower = current_sgl->address_lower; 1343 task_context->transfer_length_bytes = length; 1344 task_context->type.stp.fis_type = FIS_DATA; 1345 1346 /* send the new TC out. 

static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
					u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */

/* transmit DATA_FIS from (current sgl + offset) for input
 * parameter length.  The current sgl and offset are already stored in the
 * IO request
 */
static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
	struct isci_request *ireq,
	u32 length)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_task_context *task_context = ireq->tc;
	struct scu_sgl_element_pair *sgl_pair;
	struct scu_sgl_element *current_sgl;

	/* Recycle the TC and reconstruct it for sending out a DATA FIS
	 * containing the data from current_sgl+offset for the input length
	 */
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
		current_sgl = &sgl_pair->A;
	else
		current_sgl = &sgl_pair->B;

	/* update the TC */
	task_context->command_iu_upper = current_sgl->address_upper;
	task_context->command_iu_lower = current_sgl->address_lower;
	task_context->transfer_length_bytes = length;
	task_context->type.stp.fis_type = FIS_DATA;

	/* send the new TC out. */
	return sci_controller_continue_io(ireq);
}

static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_sgl_element_pair *sgl_pair;
	enum sci_status status = SCI_SUCCESS;
	struct scu_sgl_element *sgl;
	u32 offset;
	u32 len = 0;

	offset = stp_req->sgl.offset;
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
		return SCI_FAILURE;

	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
		sgl = &sgl_pair->A;
		len = sgl_pair->A.length - offset;
	} else {
		sgl = &sgl_pair->B;
		len = sgl_pair->B.length - offset;
	}

	if (stp_req->pio_len == 0)
		return SCI_SUCCESS;

	if (stp_req->pio_len >= len) {
		status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
		if (status != SCI_SUCCESS)
			return status;
		stp_req->pio_len -= len;

		/* update the current sgl, offset and save for future */
		sgl = pio_sgl_next(stp_req);
		offset = 0;
	} else if (stp_req->pio_len < len) {
		sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);

		/* Sgl offset will be adjusted and saved for future */
		offset += stp_req->pio_len;
		sgl->address_lower += stp_req->pio_len;
		stp_req->pio_len = 0;
	}

	stp_req->sgl.offset = offset;

	return status;
}

/**
 *
 * @stp_request: The request that is used for the SGL processing.
 * @data_buffer: The buffer of data to be copied.
 * @length: The length of the data transfer.
 *
 * Copy the data from the buffer for the length specified to the IO request
 * SGL specified data region.  enum sci_status
 */
static enum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
					     u8 *data_buf, u32 len)
{
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	ireq = to_ireq(stp_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);

			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}

/**
 *
 * @sci_req: The PIO DATA IN request that is to receive the data.
 * @data_buffer: The buffer to copy from.
 *
 * Copy the data buffer to the io request data region.
 *    enum sci_status
 */
static enum sci_status sci_stp_request_pio_data_in_copy_data(
	struct isci_stp_request *stp_req,
	u8 *data_buffer)
{
	enum sci_status status;

	/*
	 * If there is less than 1K remaining in the transfer request
	 * copy just the data for the transfer */
	if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, stp_req->pio_len);

		if (status == SCI_SUCCESS)
			stp_req->pio_len = 0;
	} else {
		/* We are transferring the whole frame so copy */
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);

		if (status == SCI_SUCCESS)
			stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
	}

	return status;
}

static enum sci_status
stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
					      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static enum sci_status
pio_data_out_tx_done_tc_event(struct isci_request *ireq,
			      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;
	bool all_frames_transferred = false;
	struct isci_stp_request *stp_req = &ireq->stp.req;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* Transmit data */
		if (stp_req->pio_len != 0) {
			status = sci_stp_request_pio_data_out_transmit_data(ireq);
			if (status == SCI_SUCCESS) {
				if (stp_req->pio_len == 0)
					all_frames_transferred = true;
			}
		} else if (stp_req->pio_len == 0) {
			/*
			 * this will happen if all the data is written the
			 * first time after the pio setup fis is received
			 */
			all_frames_transferred = true;
		}

		/* all data transferred. */
		if (all_frames_transferred) {
			/*
			 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
			 * and wait for a PIO_SETUP fis or a D2H Reg fis. */
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		}
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
								  u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	struct dev_to_host_fis *frame_header;
	enum sci_status status;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);

	if ((status == SCI_SUCCESS) &&
	    (frame_header->fis_type == FIS_REGD2H)) {
		sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
							 frame_index,
							 (void **)&frame_buffer);

		sci_controller_copy_sata_response(&ireq->stp.rsp,
						  frame_header,
						  frame_buffer);
	}

	sci_controller_release_frame(ihost, frame_index);

	return status;
}

static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
					       u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);

	if (status != SCI_SUCCESS)
		return status;

	if (frame_header->fis_type != FIS_REGD2H) {
		dev_err(&ireq->isci_host->pdev->dev,
			"%s ERROR: invalid fis type 0x%X\n",
			__func__, frame_header->fis_type);
		return SCI_FAILURE;
	}

	sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
						 frame_index,
						 (void **)&frame_buffer);

	sci_controller_copy_sata_response(&ireq->stp.rsp,
					  (u32 *)frame_header,
					  frame_buffer);

	/* Frame has been decoded; return it to the controller */
	sci_controller_release_frame(ihost, frame_index);

	return status;
}

static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
						   u32 frame_index)
{
	struct sas_task *task = isci_request_access_task(ireq);
	enum sci_status status;

	status = process_unsolicited_fis(ireq, frame_index);

	if (status == SCI_SUCCESS) {
		if (ireq->stp.rsp.status & ATA_ERR)
			status = SCI_FAILURE_IO_RESPONSE_VALID;
	} else {
		status = SCI_FAILURE_IO_RESPONSE_VALID;
	}

	if (status != SCI_SUCCESS) {
		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = status;
	} else {
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
	}

	/* the d2h ufi is the end of non-data commands */
	if (task->data_dir == DMA_NONE)
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

	return status;
}

static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
{
	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
	void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
	struct scu_task_context *task_context = ireq->tc;

	/* fill in the SCU Task Context for a DATA fis containing CDB in Raw
	 * Frame type.  The TC for the previous Packet fis was already there;
	 * we only need to change the H2D fis content.
1658 */ 1659 memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis)); 1660 memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN); 1661 memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context)); 1662 task_context->type.stp.fis_type = FIS_DATA; 1663 task_context->transfer_length_bytes = dev->cdb_len; 1664 } 1665 1666 static void scu_atapi_construct_task_context(struct isci_request *ireq) 1667 { 1668 struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); 1669 struct sas_task *task = isci_request_access_task(ireq); 1670 struct scu_task_context *task_context = ireq->tc; 1671 int cdb_len = dev->cdb_len; 1672 1673 /* reference: SSTL 1.13.4.2 1674 * task_type, sata_direction 1675 */ 1676 if (task->data_dir == DMA_TO_DEVICE) { 1677 task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT; 1678 task_context->sata_direction = 0; 1679 } else { 1680 /* todo: for NO_DATA command, we need to send out raw frame. */ 1681 task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN; 1682 task_context->sata_direction = 1; 1683 } 1684 1685 memset(&task_context->type.stp, 0, sizeof(task_context->type.stp)); 1686 task_context->type.stp.fis_type = FIS_DATA; 1687 1688 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 1689 memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len); 1690 task_context->ssp_command_iu_length = cdb_len / sizeof(u32); 1691 1692 /* task phase is set to TX_CMD */ 1693 task_context->task_phase = 0x1; 1694 1695 /* retry counter */ 1696 task_context->stp_retry_count = 0; 1697 1698 /* data transfer size. */ 1699 task_context->transfer_length_bytes = task->total_xfer_len; 1700 1701 /* setup sgl */ 1702 sci_request_build_sgl(ireq); 1703 } 1704 1705 enum sci_status 1706 sci_io_request_frame_handler(struct isci_request *ireq, 1707 u32 frame_index) 1708 { 1709 struct isci_host *ihost = ireq->owning_controller; 1710 struct isci_stp_request *stp_req = &ireq->stp.req; 1711 enum sci_base_request_states state; 1712 enum sci_status status; 1713 ssize_t word_cnt; 1714 1715 state = ireq->sm.current_state_id; 1716 switch (state) { 1717 case SCI_REQ_STARTED: { 1718 struct ssp_frame_hdr ssp_hdr; 1719 void *frame_header; 1720 1721 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1722 frame_index, 1723 &frame_header); 1724 1725 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); 1726 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); 1727 1728 if (ssp_hdr.frame_type == SSP_RESPONSE) { 1729 struct ssp_response_iu *resp_iu; 1730 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1731 1732 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1733 frame_index, 1734 (void **)&resp_iu); 1735 1736 sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt); 1737 1738 resp_iu = &ireq->ssp.rsp; 1739 1740 if (resp_iu->datapres == 0x01 || 1741 resp_iu->datapres == 0x02) { 1742 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1743 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1744 } else { 1745 ireq->scu_status = SCU_TASK_DONE_GOOD; 1746 ireq->sci_status = SCI_SUCCESS; 1747 } 1748 } else { 1749 /* not a response frame, why did it get forwarded? 
			 */
			dev_err(&ihost->pdev->dev,
				"%s: SCIC IO Request 0x%p received unexpected "
				"frame %d type 0x%02x\n", __func__, ireq,
				frame_index, ssp_hdr.frame_type);
		}

		/*
		 * In any case we are done with this frame buffer; return it
		 * to the controller.
		 */
		sci_controller_release_frame(ihost, frame_index);

		return SCI_SUCCESS;
	}

	case SCI_REQ_TASK_WAIT_TC_RESP:
		sci_io_request_copy_response(ireq);
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		sci_controller_release_frame(ihost, frame_index);
		return SCI_SUCCESS;

	case SCI_REQ_SMP_WAIT_RESP: {
		struct sas_task *task = isci_request_access_task(ireq);
		struct scatterlist *sg = &task->smp_task.smp_resp;
		void *frame_header, *kaddr;
		u8 *rsp;

		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							 frame_index,
							 &frame_header);
		kaddr = kmap_atomic(sg_page(sg));
		rsp = kaddr + sg->offset;
		sci_swab32_cpy(rsp, frame_header, 1);

		if (rsp[0] == SMP_RESPONSE) {
			void *smp_resp;

			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 &smp_resp);

			word_cnt = (sg->length / 4) - 1;
			if (word_cnt > 0)
				word_cnt = min_t(unsigned int, word_cnt,
						 SCU_UNSOLICITED_FRAME_BUFFER_SIZE / 4);
			sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);

			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS;
			sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
		} else {
			/*
			 * This was not a response frame, so why did it get
			 * forwarded?
			 */
			dev_err(&ihost->pdev->dev,
				"%s: SCIC SMP Request 0x%p received unexpected "
				"frame %d type 0x%02x\n",
				__func__,
				ireq,
				frame_index,
				rsp[0]);

			ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		}
		kunmap_atomic(kaddr);

		sci_controller_release_frame(ihost, frame_index);

		return SCI_SUCCESS;
	}

	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
		return sci_stp_request_udma_general_frame_handler(ireq,
								  frame_index);

	case SCI_REQ_STP_UDMA_WAIT_D2H:
		/* Use the general frame handler to copy the response data */
		status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);

		if (status != SCI_SUCCESS)
			return status;

		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;

	case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
		struct dev_to_host_fis *frame_header;
		u32 *frame_buffer;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&frame_header);

		if (status != SCI_SUCCESS) {
			dev_err(&ihost->pdev->dev,
				"%s: SCIC IO Request 0x%p could not get frame "
				"header for frame index %d, status %x\n",
				__func__,
				stp_req,
				frame_index,
				status);

			return status;
		}

		switch (frame_header->fis_type) {
		case FIS_REGD2H:
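			/*
			 * The response-valid status set below means the
			 * completion path hands the captured register values
			 * to libata for decode; see
			 * isci_process_stp_response() further down.
			 */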
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);

			sci_controller_copy_sata_response(&ireq->stp.rsp,
							  frame_header,
							  frame_buffer);

			/* The command has completed with error */
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
			break;

		default:
			dev_warn(&ihost->pdev->dev,
				 "%s: IO Request:0x%p Frame Id:%d protocol "
				 "violation occurred\n", __func__, stp_req,
				 frame_index);

			ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
			ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
			break;
		}

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

		/* Frame has been decoded, return it to the controller */
		sci_controller_release_frame(ihost, frame_index);

		return status;
	}

	case SCI_REQ_STP_PIO_WAIT_FRAME: {
		struct sas_task *task = isci_request_access_task(ireq);
		struct dev_to_host_fis *frame_header;
		u32 *frame_buffer;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&frame_header);

		if (status != SCI_SUCCESS) {
			dev_err(&ihost->pdev->dev,
				"%s: SCIC IO Request 0x%p could not get frame "
				"header for frame index %d, status %x\n",
				__func__, stp_req, frame_index, status);
			return status;
		}

		switch (frame_header->fis_type) {
		case FIS_PIO_SETUP:
			/* Get the PIO Setup data from the frame buffer */
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);

			/* Get the data from the PIO Setup. The SCU hardware
			 * returns the first word in the frame_header, and the
			 * rest of the data is in the frame buffer, so we need
			 * to back up one dword.
			 */
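			/*
			 * With that one-dword shift, frame_buffer[n] holds PIO
			 * Setup FIS dword n + 1. A sketch of the two fields
			 * extracted below, per the SATA PIO Setup FIS layout:
			 *
			 *   frame_buffer[2] = FIS dword 3: E_Status in bits
			 *	31:24, the ending status of this PIO burst;
			 *   frame_buffer[3] = FIS dword 4: transfer count in
			 *	bits 15:0, the byte count of this burst.
			 */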
			/* transfer_count: first 16 bits in the 4th dword */
			stp_req->pio_len = frame_buffer[3] & 0xffff;

			/* status: 4th byte in the 3rd dword */
			stp_req->status = (frame_buffer[2] >> 24) & 0xff;

			sci_controller_copy_sata_response(&ireq->stp.rsp,
							  frame_header,
							  frame_buffer);

			ireq->stp.rsp.status = stp_req->status;

			/* The next state is dependent on whether the request
			 * was PIO Data-in or Data-out.
			 */
			if (task->data_dir == DMA_FROM_DEVICE) {
				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
			} else if (task->data_dir == DMA_TO_DEVICE) {
				/* Transmit data */
				status = sci_stp_request_pio_data_out_transmit_data(ireq);
				if (status != SCI_SUCCESS)
					break;
				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
			}
			break;

		case FIS_SETDEVBITS:
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
			break;

		case FIS_REGD2H:
			if (frame_header->status & ATA_BUSY) {
				/*
				 * Now why is the drive sending a D2H Register
				 * FIS when it is still busy? Do nothing since
				 * we are still in the right state.
				 */
				dev_dbg(&ihost->pdev->dev,
					"%s: SCIC PIO Request 0x%p received "
					"D2H Register FIS with BSY status "
					"0x%x\n",
					__func__,
					stp_req,
					frame_header->status);
				break;
			}

			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);

			sci_controller_copy_sata_response(&ireq->stp.rsp,
							  frame_header,
							  frame_buffer);

			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
			break;

		default:
			/* FIXME: what do we do here? */
			break;
		}

		/* Frame is decoded, return it to the controller */
		sci_controller_release_frame(ihost, frame_index);

		return status;
	}

	case SCI_REQ_STP_PIO_DATA_IN: {
		struct dev_to_host_fis *frame_header;
		struct sata_fis_data *frame_buffer;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&frame_header);

		if (status != SCI_SUCCESS) {
			dev_err(&ihost->pdev->dev,
				"%s: SCIC IO Request 0x%p could not get frame "
				"header for frame index %d, status %x\n",
				__func__,
				stp_req,
				frame_index,
				status);
			return status;
		}

		if (frame_header->fis_type != FIS_DATA) {
			dev_err(&ihost->pdev->dev,
				"%s: SCIC PIO Request 0x%p received frame %d "
				"with fis type 0x%02x when expecting a data "
				"fis.\n",
				__func__,
				stp_req,
				frame_index,
				frame_header->fis_type);

			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

			/* Frame is decoded, return it to the controller */
			sci_controller_release_frame(ihost, frame_index);
			return status;
		}

		if (stp_req->sgl.index < 0) {
			ireq->saved_rx_frame_index = frame_index;
			stp_req->pio_len = 0;
		} else {
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);

			status = sci_stp_request_pio_data_in_copy_data(stp_req,
								       (u8 *)frame_buffer);

			/* Frame is decoded, return it to the controller */
			sci_controller_release_frame(ihost, frame_index);
		}

		/* Check for the end of the transfer: are there more bytes
		 * remaining for this data transfer?
		 */
		if (status != SCI_SUCCESS || stp_req->pio_len != 0)
			return status;

		if ((stp_req->status & ATA_BUSY) == 0) {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		} else {
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		}
		return status;
	}

	case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
		struct sas_task *task = isci_request_access_task(ireq);

		sci_controller_release_frame(ihost, frame_index);
		ireq->target_device->working_request = ireq;
		if (task->data_dir == DMA_NONE) {
			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
			scu_atapi_reconstruct_raw_frame_task_context(ireq);
		} else {
			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
			scu_atapi_construct_task_context(ireq);
		}

		sci_controller_continue_io(ireq);
		return SCI_SUCCESS;
	}
	case SCI_REQ_ATAPI_WAIT_D2H:
		return atapi_d2h_reg_frame_handler(ireq, frame_index);
	case SCI_REQ_ABORTING:
		/*
		 * TODO: Is it even possible to get an unsolicited frame in the
		 * aborting state?
		 */
		sci_controller_release_frame(ihost, frame_index);
		return SCI_SUCCESS;

	default:
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC IO Request given unexpected frame %x while "
			 "in state %d\n",
			 __func__,
			 frame_index,
			 state);

		sci_controller_release_frame(ihost, frame_index);
		return SCI_FAILURE_INVALID_STATE;
	}
}

static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
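	/*
	 * A short sketch of the decode, assuming the macro definitions in
	 * scu_completion_codes.h: SCU_GET_COMPLETION_TL_STATUS() masks the
	 * transport-layer status field out of the raw completion dword, and
	 * SCU_MAKE_COMPLETION_STATUS(x) shifts a code into that same field,
	 * i.e. (x << SCU_COMPLETION_TL_STATUS_SHIFT), as the case labels in
	 * atapi_data_tc_completion_handler() below spell out.
	 */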
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
		/* We must check the response buffer to see if the D2H
		 * Register FIS was received before we got the TC
		 * completion.
		 */
		if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
			sci_remote_device_suspend(ireq->target_device,
						  SCI_SW_SUSPEND_NORMAL);

			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		} else {
			/* If we have an error completion status for the
			 * TC, then we can expect a D2H register FIS from
			 * the device, so we must change state to wait
			 * for it.
			 */
			sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
		}
		break;

	/* TODO Check to see if any of these completion status need to
	 * wait for the device to host register fis.
	 */
	/* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
	 * - this comes only for B0
	 */
	default:
		/* All other completion statuses cause the IO to be complete. */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
					    enum sci_base_request_states next)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, next);
		break;
	default:
		/* All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
							u32 completion_code)
{
	struct isci_remote_device *idev = ireq->target_device;
	struct dev_to_host_fis *d2h = &ireq->stp.rsp;
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
		u16 len = sci_req_tx_bytes(ireq);

		/* likely non-error data underrun; work around the missing
		 * d2h frame from the controller
		 */
		if (d2h->fis_type != FIS_REGD2H) {
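			/*
			 * A sketch of what this synthesized FIS encodes,
			 * reading the values against the usual ATA/ATAPI
			 * conventions: status 0x50 is DRDY | DSC with BSY and
			 * ERR clear; flags (1 << 6) sets the FIS interrupt
			 * bit; device 0xa0 is the legacy device byte;
			 * sector_count 0x3 is the ATAPI interrupt reason
			 * I/O | C/D, i.e. command complete; and the byte
			 * count fields carry the bytes actually transferred.
			 */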
			d2h->fis_type = FIS_REGD2H;
			d2h->flags = (1 << 6);
			d2h->status = 0x50;
			d2h->error = 0;
			d2h->lbal = 0;
			d2h->byte_count_low = len & 0xff;
			d2h->byte_count_high = len >> 8;
			d2h->device = 0xa0;
			d2h->lbal_exp = 0;
			d2h->lbam_exp = 0;
			d2h->lbah_exp = 0;
			d2h->_r_a = 0;
			d2h->sector_count = 0x3;
			d2h->sector_count_exp = 0;
			d2h->_r_b = 0;
			d2h->_r_c = 0;
			d2h->_r_d = 0;
		}

		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
		status = ireq->sci_status;

		/* the hw will have suspended the rnc, so complete the
		 * request upon pending resume
		 */
		sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
		break;
	}
	case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
		/* In this case, there is no UF coming after;
		 * complete the IO now.
		 */
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		if (d2h->fis_type == FIS_REGD2H) {
			/* UF received change the device state to ATAPI_ERROR */
			status = ireq->sci_status;
			sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
		} else {
			/* If receiving any non-success TC status, no UF
			 * received yet, then a UF for the status fis
			 * is coming after (XXX: suspect this is
			 * actually a protocol error or a bug like the
			 * DONE_UNEXP_FIS case)
			 */
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;

			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
		}
		break;
	}

	return status;
}

static int sci_request_smp_completion_status_is_tx_suspend(
	unsigned int completion_status)
{
	switch (completion_status) {
	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
		return 1;
	}
	return 0;
}

static int sci_request_smp_completion_status_is_tx_rx_suspend(
	unsigned int completion_status)
{
	return 0; /* There are no Tx/Rx SMP suspend conditions.
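		   * The helper exists to keep the protocol decode in
		   * sci_request_handle_suspending_completions() uniform
		   * across the SMP, SSP and STP/SATA variants below.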
*/ 2279 } 2280 2281 static int sci_request_ssp_completion_status_is_tx_suspend( 2282 unsigned int completion_status) 2283 { 2284 switch (completion_status) { 2285 case SCU_TASK_DONE_TX_RAW_CMD_ERR: 2286 case SCU_TASK_DONE_LF_ERR: 2287 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: 2288 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 2289 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 2290 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 2291 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 2292 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 2293 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 2294 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 2295 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 2296 return 1; 2297 } 2298 return 0; 2299 } 2300 2301 static int sci_request_ssp_completion_status_is_tx_rx_suspend( 2302 unsigned int completion_status) 2303 { 2304 return 0; /* There are no Tx/Rx SSP suspend conditions. */ 2305 } 2306 2307 static int sci_request_stpsata_completion_status_is_tx_suspend( 2308 unsigned int completion_status) 2309 { 2310 switch (completion_status) { 2311 case SCU_TASK_DONE_TX_RAW_CMD_ERR: 2312 case SCU_TASK_DONE_LL_R_ERR: 2313 case SCU_TASK_DONE_LL_PERR: 2314 case SCU_TASK_DONE_REG_ERR: 2315 case SCU_TASK_DONE_SDB_ERR: 2316 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: 2317 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 2318 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 2319 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 2320 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 2321 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 2322 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 2323 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 2324 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 2325 return 1; 2326 } 2327 return 0; 2328 } 2329 2330 2331 static int sci_request_stpsata_completion_status_is_tx_rx_suspend( 2332 unsigned int completion_status) 2333 { 2334 switch (completion_status) { 2335 case SCU_TASK_DONE_LF_ERR: 2336 case SCU_TASK_DONE_LL_SY_TERM: 2337 case SCU_TASK_DONE_LL_LF_TERM: 2338 case SCU_TASK_DONE_BREAK_RCVD: 2339 case SCU_TASK_DONE_INV_FIS_LEN: 2340 case SCU_TASK_DONE_UNEXP_FIS: 2341 case SCU_TASK_DONE_UNEXP_SDBFIS: 2342 case SCU_TASK_DONE_MAX_PLD_ERR: 2343 return 1; 2344 } 2345 return 0; 2346 } 2347 2348 static void sci_request_handle_suspending_completions( 2349 struct isci_request *ireq, 2350 u32 completion_code) 2351 { 2352 int is_tx = 0; 2353 int is_tx_rx = 0; 2354 2355 switch (ireq->protocol) { 2356 case SAS_PROTOCOL_SMP: 2357 is_tx = sci_request_smp_completion_status_is_tx_suspend( 2358 completion_code); 2359 is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend( 2360 completion_code); 2361 break; 2362 case SAS_PROTOCOL_SSP: 2363 is_tx = sci_request_ssp_completion_status_is_tx_suspend( 2364 completion_code); 2365 is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend( 2366 completion_code); 2367 break; 2368 case SAS_PROTOCOL_STP: 2369 is_tx = sci_request_stpsata_completion_status_is_tx_suspend( 2370 completion_code); 2371 is_tx_rx = 2372 sci_request_stpsata_completion_status_is_tx_rx_suspend( 2373 completion_code); 2374 break; 2375 default: 2376 dev_warn(&ireq->isci_host->pdev->dev, 2377 "%s: request %p has no valid protocol\n", 2378 __func__, ireq); 2379 break; 2380 } 2381 if (is_tx || is_tx_rx) { 2382 BUG_ON(is_tx && is_tx_rx); 2383 2384 sci_remote_node_context_suspend( 2385 &ireq->target_device->rnc, 2386 SCI_HW_SUSPEND, 2387 (is_tx_rx) ? 
SCU_EVENT_TL_RNC_SUSPEND_TX_RX 2388 : SCU_EVENT_TL_RNC_SUSPEND_TX); 2389 } 2390 } 2391 2392 enum sci_status 2393 sci_io_request_tc_completion(struct isci_request *ireq, 2394 u32 completion_code) 2395 { 2396 enum sci_base_request_states state; 2397 struct isci_host *ihost = ireq->owning_controller; 2398 2399 state = ireq->sm.current_state_id; 2400 2401 /* Decode those completions that signal upcoming suspension events. */ 2402 sci_request_handle_suspending_completions( 2403 ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code)); 2404 2405 switch (state) { 2406 case SCI_REQ_STARTED: 2407 return request_started_state_tc_event(ireq, completion_code); 2408 2409 case SCI_REQ_TASK_WAIT_TC_COMP: 2410 return ssp_task_request_await_tc_event(ireq, 2411 completion_code); 2412 2413 case SCI_REQ_SMP_WAIT_RESP: 2414 return smp_request_await_response_tc_event(ireq, 2415 completion_code); 2416 2417 case SCI_REQ_SMP_WAIT_TC_COMP: 2418 return smp_request_await_tc_event(ireq, completion_code); 2419 2420 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 2421 return stp_request_udma_await_tc_event(ireq, 2422 completion_code); 2423 2424 case SCI_REQ_STP_NON_DATA_WAIT_H2D: 2425 return stp_request_non_data_await_h2d_tc_event(ireq, 2426 completion_code); 2427 2428 case SCI_REQ_STP_PIO_WAIT_H2D: 2429 return stp_request_pio_await_h2d_completion_tc_event(ireq, 2430 completion_code); 2431 2432 case SCI_REQ_STP_PIO_DATA_OUT: 2433 return pio_data_out_tx_done_tc_event(ireq, completion_code); 2434 2435 case SCI_REQ_ABORTING: 2436 return request_aborting_state_tc_event(ireq, 2437 completion_code); 2438 2439 case SCI_REQ_ATAPI_WAIT_H2D: 2440 return atapi_raw_completion(ireq, completion_code, 2441 SCI_REQ_ATAPI_WAIT_PIO_SETUP); 2442 2443 case SCI_REQ_ATAPI_WAIT_TC_COMP: 2444 return atapi_raw_completion(ireq, completion_code, 2445 SCI_REQ_ATAPI_WAIT_D2H); 2446 2447 case SCI_REQ_ATAPI_WAIT_D2H: 2448 return atapi_data_tc_completion_handler(ireq, completion_code); 2449 2450 default: 2451 dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n", 2452 __func__, completion_code, req_state_name(state)); 2453 return SCI_FAILURE_INVALID_STATE; 2454 } 2455 } 2456 2457 /** 2458 * isci_request_process_response_iu() - This function sets the status and 2459 * response iu, in the task struct, from the request object for the upper 2460 * layer driver. 2461 * @sas_task: This parameter is the task struct from the upper layer driver. 2462 * @resp_iu: This parameter points to the response iu of the completed request. 2463 * @dev: This parameter specifies the linux device struct. 2464 * 2465 * none. 2466 */ 2467 static void isci_request_process_response_iu( 2468 struct sas_task *task, 2469 struct ssp_response_iu *resp_iu, 2470 struct device *dev) 2471 { 2472 dev_dbg(dev, 2473 "%s: resp_iu = %p " 2474 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " 2475 "resp_iu->response_data_len = %x, " 2476 "resp_iu->sense_data_len = %x\nresponse data: ", 2477 __func__, 2478 resp_iu, 2479 resp_iu->status, 2480 resp_iu->datapres, 2481 resp_iu->response_data_len, 2482 resp_iu->sense_data_len); 2483 2484 task->task_status.stat = resp_iu->status; 2485 2486 /* libsas updates the task status fields based on the response iu. */ 2487 sas_ssp_task_response(dev, task, resp_iu); 2488 } 2489 2490 /** 2491 * isci_request_set_open_reject_status() - This function prepares the I/O 2492 * completion for OPEN_REJECT conditions. 2493 * @request: This parameter is the completed isci_request object. 2494 * @response_ptr: This parameter specifies the service response for the I/O. 
2495 * @status_ptr: This parameter specifies the exec status for the I/O. 2496 * @open_rej_reason: This parameter specifies the encoded reason for the 2497 * abandon-class reject. 2498 * 2499 * none. 2500 */ 2501 static void isci_request_set_open_reject_status( 2502 struct isci_request *request, 2503 struct sas_task *task, 2504 enum service_response *response_ptr, 2505 enum exec_status *status_ptr, 2506 enum sas_open_rej_reason open_rej_reason) 2507 { 2508 /* Task in the target is done. */ 2509 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2510 *response_ptr = SAS_TASK_UNDELIVERED; 2511 *status_ptr = SAS_OPEN_REJECT; 2512 task->task_status.open_rej_reason = open_rej_reason; 2513 } 2514 2515 /** 2516 * isci_request_handle_controller_specific_errors() - This function decodes 2517 * controller-specific I/O completion error conditions. 2518 * @request: This parameter is the completed isci_request object. 2519 * @response_ptr: This parameter specifies the service response for the I/O. 2520 * @status_ptr: This parameter specifies the exec status for the I/O. 2521 * 2522 * none. 2523 */ 2524 static void isci_request_handle_controller_specific_errors( 2525 struct isci_remote_device *idev, 2526 struct isci_request *request, 2527 struct sas_task *task, 2528 enum service_response *response_ptr, 2529 enum exec_status *status_ptr) 2530 { 2531 unsigned int cstatus; 2532 2533 cstatus = request->scu_status; 2534 2535 dev_dbg(&request->isci_host->pdev->dev, 2536 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " 2537 "- controller status = 0x%x\n", 2538 __func__, request, cstatus); 2539 2540 /* Decode the controller-specific errors; most 2541 * important is to recognize those conditions in which 2542 * the target may still have a task outstanding that 2543 * must be aborted. 2544 * 2545 * Note that there are SCU completion codes being 2546 * named in the decode below for which SCIC has already 2547 * done work to handle them in a way other than as 2548 * a controller-specific completion code; these are left 2549 * in the decode below for completeness sake. 2550 */ 2551 switch (cstatus) { 2552 case SCU_TASK_DONE_DMASETUP_DIRERR: 2553 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ 2554 case SCU_TASK_DONE_XFERCNT_ERR: 2555 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ 2556 if (task->task_proto == SAS_PROTOCOL_SMP) { 2557 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ 2558 *response_ptr = SAS_TASK_COMPLETE; 2559 2560 /* See if the device has been/is being stopped. Note 2561 * that we ignore the quiesce state, since we are 2562 * concerned about the actual device state. 2563 */ 2564 if (!idev) 2565 *status_ptr = SAS_DEVICE_UNKNOWN; 2566 else 2567 *status_ptr = SAS_ABORTED_TASK; 2568 2569 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2570 } else { 2571 /* Task in the target is not done. */ 2572 *response_ptr = SAS_TASK_UNDELIVERED; 2573 2574 if (!idev) 2575 *status_ptr = SAS_DEVICE_UNKNOWN; 2576 else 2577 *status_ptr = SAM_STAT_TASK_ABORTED; 2578 2579 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2580 } 2581 2582 break; 2583 2584 case SCU_TASK_DONE_CRC_ERR: 2585 case SCU_TASK_DONE_NAK_CMD_ERR: 2586 case SCU_TASK_DONE_EXCESS_DATA: 2587 case SCU_TASK_DONE_UNEXP_FIS: 2588 /* Also SCU_TASK_DONE_UNEXP_RESP: */ 2589 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */ 2590 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ 2591 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? 
*/ 2592 /* These are conditions in which the target 2593 * has completed the task, so that no cleanup 2594 * is necessary. 2595 */ 2596 *response_ptr = SAS_TASK_COMPLETE; 2597 2598 /* See if the device has been/is being stopped. Note 2599 * that we ignore the quiesce state, since we are 2600 * concerned about the actual device state. 2601 */ 2602 if (!idev) 2603 *status_ptr = SAS_DEVICE_UNKNOWN; 2604 else 2605 *status_ptr = SAS_ABORTED_TASK; 2606 2607 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2608 break; 2609 2610 2611 /* Note that the only open reject completion codes seen here will be 2612 * abandon-class codes; all others are automatically retried in the SCU. 2613 */ 2614 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: 2615 2616 isci_request_set_open_reject_status( 2617 request, task, response_ptr, status_ptr, 2618 SAS_OREJ_WRONG_DEST); 2619 break; 2620 2621 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 2622 2623 /* Note - the return of AB0 will change when 2624 * libsas implements detection of zone violations. 2625 */ 2626 isci_request_set_open_reject_status( 2627 request, task, response_ptr, status_ptr, 2628 SAS_OREJ_RESV_AB0); 2629 break; 2630 2631 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 2632 2633 isci_request_set_open_reject_status( 2634 request, task, response_ptr, status_ptr, 2635 SAS_OREJ_RESV_AB1); 2636 break; 2637 2638 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 2639 2640 isci_request_set_open_reject_status( 2641 request, task, response_ptr, status_ptr, 2642 SAS_OREJ_RESV_AB2); 2643 break; 2644 2645 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 2646 2647 isci_request_set_open_reject_status( 2648 request, task, response_ptr, status_ptr, 2649 SAS_OREJ_RESV_AB3); 2650 break; 2651 2652 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 2653 2654 isci_request_set_open_reject_status( 2655 request, task, response_ptr, status_ptr, 2656 SAS_OREJ_BAD_DEST); 2657 break; 2658 2659 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 2660 2661 isci_request_set_open_reject_status( 2662 request, task, response_ptr, status_ptr, 2663 SAS_OREJ_STP_NORES); 2664 break; 2665 2666 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 2667 2668 isci_request_set_open_reject_status( 2669 request, task, response_ptr, status_ptr, 2670 SAS_OREJ_EPROTO); 2671 break; 2672 2673 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 2674 2675 isci_request_set_open_reject_status( 2676 request, task, response_ptr, status_ptr, 2677 SAS_OREJ_CONN_RATE); 2678 break; 2679 2680 case SCU_TASK_DONE_LL_R_ERR: 2681 /* Also SCU_TASK_DONE_ACK_NAK_TO: */ 2682 case SCU_TASK_DONE_LL_PERR: 2683 case SCU_TASK_DONE_LL_SY_TERM: 2684 /* Also SCU_TASK_DONE_NAK_ERR:*/ 2685 case SCU_TASK_DONE_LL_LF_TERM: 2686 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ 2687 case SCU_TASK_DONE_LL_ABORT_ERR: 2688 case SCU_TASK_DONE_SEQ_INV_TYPE: 2689 /* Also SCU_TASK_DONE_UNEXP_XR: */ 2690 case SCU_TASK_DONE_XR_IU_LEN_ERR: 2691 case SCU_TASK_DONE_INV_FIS_LEN: 2692 /* Also SCU_TASK_DONE_XR_WD_LEN: */ 2693 case SCU_TASK_DONE_SDMA_ERR: 2694 case SCU_TASK_DONE_OFFSET_ERR: 2695 case SCU_TASK_DONE_MAX_PLD_ERR: 2696 case SCU_TASK_DONE_LF_ERR: 2697 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */ 2698 case SCU_TASK_DONE_SMP_LL_RX_ERR: 2699 case SCU_TASK_DONE_UNEXP_DATA: 2700 case SCU_TASK_DONE_UNEXP_SDBFIS: 2701 case SCU_TASK_DONE_REG_ERR: 2702 case SCU_TASK_DONE_SDB_ERR: 2703 case SCU_TASK_DONE_TASK_ABORT: 2704 default: 2705 /* Task in the target is not done. 
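	 * A cleanup to the target may still be required, so
	 * IREQ_COMPLETE_IN_TARGET is cleared below for everything except
	 * SMP, where nothing lingers in the target.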
*/ 2706 *response_ptr = SAS_TASK_UNDELIVERED; 2707 *status_ptr = SAM_STAT_TASK_ABORTED; 2708 2709 if (task->task_proto == SAS_PROTOCOL_SMP) 2710 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2711 else 2712 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2713 break; 2714 } 2715 } 2716 2717 static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) 2718 { 2719 struct task_status_struct *ts = &task->task_status; 2720 struct ata_task_resp *resp = (void *)&ts->buf[0]; 2721 2722 resp->frame_len = sizeof(*fis); 2723 memcpy(resp->ending_fis, fis, sizeof(*fis)); 2724 ts->buf_valid_size = sizeof(*resp); 2725 2726 /* If an error is flagged let libata decode the fis */ 2727 if (ac_err_mask(fis->status)) 2728 ts->stat = SAS_PROTO_RESPONSE; 2729 else 2730 ts->stat = SAM_STAT_GOOD; 2731 2732 ts->resp = SAS_TASK_COMPLETE; 2733 } 2734 2735 static void isci_request_io_request_complete(struct isci_host *ihost, 2736 struct isci_request *request, 2737 enum sci_io_status completion_status) 2738 { 2739 struct sas_task *task = isci_request_access_task(request); 2740 struct ssp_response_iu *resp_iu; 2741 unsigned long task_flags; 2742 struct isci_remote_device *idev = request->target_device; 2743 enum service_response response = SAS_TASK_UNDELIVERED; 2744 enum exec_status status = SAS_ABORTED_TASK; 2745 2746 dev_dbg(&ihost->pdev->dev, 2747 "%s: request = %p, task = %p, " 2748 "task->data_dir = %d completion_status = 0x%x\n", 2749 __func__, request, task, task->data_dir, completion_status); 2750 2751 /* The request is done from an SCU HW perspective. */ 2752 2753 /* This is an active request being completed from the core. */ 2754 switch (completion_status) { 2755 2756 case SCI_IO_FAILURE_RESPONSE_VALID: 2757 dev_dbg(&ihost->pdev->dev, 2758 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", 2759 __func__, request, task); 2760 2761 if (sas_protocol_ata(task->task_proto)) { 2762 isci_process_stp_response(task, &request->stp.rsp); 2763 } else if (SAS_PROTOCOL_SSP == task->task_proto) { 2764 2765 /* crack the iu response buffer. */ 2766 resp_iu = &request->ssp.rsp; 2767 isci_request_process_response_iu(task, resp_iu, 2768 &ihost->pdev->dev); 2769 2770 } else if (SAS_PROTOCOL_SMP == task->task_proto) { 2771 2772 dev_err(&ihost->pdev->dev, 2773 "%s: SCI_IO_FAILURE_RESPONSE_VALID: " 2774 "SAS_PROTOCOL_SMP protocol\n", 2775 __func__); 2776 2777 } else 2778 dev_err(&ihost->pdev->dev, 2779 "%s: unknown protocol\n", __func__); 2780 2781 /* use the task status set in the task struct by the 2782 * isci_request_process_response_iu call. 2783 */ 2784 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2785 response = task->task_status.resp; 2786 status = task->task_status.stat; 2787 break; 2788 2789 case SCI_IO_SUCCESS: 2790 case SCI_IO_SUCCESS_IO_DONE_EARLY: 2791 2792 response = SAS_TASK_COMPLETE; 2793 status = SAM_STAT_GOOD; 2794 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2795 2796 if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { 2797 2798 /* This was an SSP / STP / SATA transfer. 2799 * There is a possibility that less data than 2800 * the maximum was transferred. 2801 */ 2802 u32 transferred_length = sci_req_tx_bytes(request); 2803 2804 task->task_status.residual 2805 = task->total_xfer_len - transferred_length; 2806 2807 /* If there were residual bytes, call this an 2808 * underrun. 
2809 */ 2810 if (task->task_status.residual != 0) 2811 status = SAS_DATA_UNDERRUN; 2812 2813 dev_dbg(&ihost->pdev->dev, 2814 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", 2815 __func__, status); 2816 2817 } else 2818 dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n", 2819 __func__); 2820 break; 2821 2822 case SCI_IO_FAILURE_TERMINATED: 2823 2824 dev_dbg(&ihost->pdev->dev, 2825 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", 2826 __func__, request, task); 2827 2828 /* The request was terminated explicitly. */ 2829 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2830 response = SAS_TASK_UNDELIVERED; 2831 2832 /* See if the device has been/is being stopped. Note 2833 * that we ignore the quiesce state, since we are 2834 * concerned about the actual device state. 2835 */ 2836 if (!idev) 2837 status = SAS_DEVICE_UNKNOWN; 2838 else 2839 status = SAS_ABORTED_TASK; 2840 break; 2841 2842 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: 2843 2844 isci_request_handle_controller_specific_errors(idev, request, 2845 task, &response, 2846 &status); 2847 break; 2848 2849 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: 2850 /* This is a special case, in that the I/O completion 2851 * is telling us that the device needs a reset. 2852 * In order for the device reset condition to be 2853 * noticed, the I/O has to be handled in the error 2854 * handler. Set the reset flag and cause the 2855 * SCSI error thread to be scheduled. 2856 */ 2857 spin_lock_irqsave(&task->task_state_lock, task_flags); 2858 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 2859 spin_unlock_irqrestore(&task->task_state_lock, task_flags); 2860 2861 /* Fail the I/O. */ 2862 response = SAS_TASK_UNDELIVERED; 2863 status = SAM_STAT_TASK_ABORTED; 2864 2865 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2866 break; 2867 2868 case SCI_FAILURE_RETRY_REQUIRED: 2869 2870 /* Fail the I/O so it can be retried. */ 2871 response = SAS_TASK_UNDELIVERED; 2872 if (!idev) 2873 status = SAS_DEVICE_UNKNOWN; 2874 else 2875 status = SAS_ABORTED_TASK; 2876 2877 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2878 break; 2879 2880 2881 default: 2882 /* Catch any otherwise unhandled error codes here. */ 2883 dev_dbg(&ihost->pdev->dev, 2884 "%s: invalid completion code: 0x%x - " 2885 "isci_request = %p\n", 2886 __func__, completion_status, request); 2887 2888 response = SAS_TASK_UNDELIVERED; 2889 2890 /* See if the device has been/is being stopped. Note 2891 * that we ignore the quiesce state, since we are 2892 * concerned about the actual device state. 
2893 */ 2894 if (!idev) 2895 status = SAS_DEVICE_UNKNOWN; 2896 else 2897 status = SAS_ABORTED_TASK; 2898 2899 if (SAS_PROTOCOL_SMP == task->task_proto) 2900 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2901 else 2902 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2903 break; 2904 } 2905 2906 switch (task->task_proto) { 2907 case SAS_PROTOCOL_SSP: 2908 if (task->data_dir == DMA_NONE) 2909 break; 2910 if (task->num_scatter == 0) 2911 /* 0 indicates a single dma address */ 2912 dma_unmap_single(&ihost->pdev->dev, 2913 request->zero_scatter_daddr, 2914 task->total_xfer_len, task->data_dir); 2915 else /* unmap the sgl dma addresses */ 2916 dma_unmap_sg(&ihost->pdev->dev, task->scatter, 2917 request->num_sg_entries, task->data_dir); 2918 break; 2919 case SAS_PROTOCOL_SMP: { 2920 struct scatterlist *sg = &task->smp_task.smp_req; 2921 struct smp_req *smp_req; 2922 void *kaddr; 2923 2924 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); 2925 2926 /* need to swab it back in case the command buffer is re-used */ 2927 kaddr = kmap_atomic(sg_page(sg)); 2928 smp_req = kaddr + sg->offset; 2929 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 2930 kunmap_atomic(kaddr); 2931 break; 2932 } 2933 default: 2934 break; 2935 } 2936 2937 spin_lock_irqsave(&task->task_state_lock, task_flags); 2938 2939 task->task_status.resp = response; 2940 task->task_status.stat = status; 2941 2942 if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) { 2943 /* Normal notification (task_done) */ 2944 task->task_state_flags |= SAS_TASK_STATE_DONE; 2945 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | 2946 SAS_TASK_STATE_PENDING); 2947 } 2948 spin_unlock_irqrestore(&task->task_state_lock, task_flags); 2949 2950 /* complete the io request to the core. */ 2951 sci_controller_complete_io(ihost, request->target_device, request); 2952 2953 /* set terminated handle so it cannot be completed or 2954 * terminated again, and to cause any calls into abort 2955 * task to recognize the already completed case. 2956 */ 2957 set_bit(IREQ_TERMINATED, &request->flags); 2958 2959 ireq_done(ihost, request, task); 2960 } 2961 2962 static void sci_request_started_state_enter(struct sci_base_state_machine *sm) 2963 { 2964 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2965 struct domain_device *dev = ireq->target_device->domain_dev; 2966 enum sci_base_request_states state; 2967 struct sas_task *task; 2968 2969 /* XXX as hch said always creating an internal sas_task for tmf 2970 * requests would simplify the driver 2971 */ 2972 task = (test_bit(IREQ_TMF, &ireq->flags)) ? 
NULL : isci_request_access_task(ireq); 2973 2974 /* all unaccelerated request types (non ssp or ncq) handled with 2975 * substates 2976 */ 2977 if (!task && dev->dev_type == SAS_END_DEVICE) { 2978 state = SCI_REQ_TASK_WAIT_TC_COMP; 2979 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 2980 state = SCI_REQ_SMP_WAIT_RESP; 2981 } else if (task && sas_protocol_ata(task->task_proto) && 2982 !task->ata_task.use_ncq) { 2983 if (dev->sata_dev.class == ATA_DEV_ATAPI && 2984 task->ata_task.fis.command == ATA_CMD_PACKET) { 2985 state = SCI_REQ_ATAPI_WAIT_H2D; 2986 } else if (task->data_dir == DMA_NONE) { 2987 state = SCI_REQ_STP_NON_DATA_WAIT_H2D; 2988 } else if (task->ata_task.dma_xfer) { 2989 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; 2990 } else /* PIO */ { 2991 state = SCI_REQ_STP_PIO_WAIT_H2D; 2992 } 2993 } else { 2994 /* SSP or NCQ are fully accelerated, no substates */ 2995 return; 2996 } 2997 sci_change_state(sm, state); 2998 } 2999 3000 static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) 3001 { 3002 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3003 struct isci_host *ihost = ireq->owning_controller; 3004 3005 /* Tell the SCI_USER that the IO request is complete */ 3006 if (!test_bit(IREQ_TMF, &ireq->flags)) 3007 isci_request_io_request_complete(ihost, ireq, 3008 ireq->sci_status); 3009 else 3010 isci_task_request_complete(ihost, ireq, ireq->sci_status); 3011 } 3012 3013 static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm) 3014 { 3015 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3016 3017 /* Setting the abort bit in the Task Context is required by the silicon. */ 3018 ireq->tc->abort = 1; 3019 } 3020 3021 static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) 3022 { 3023 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3024 3025 ireq->target_device->working_request = ireq; 3026 } 3027 3028 static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) 3029 { 3030 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3031 3032 ireq->target_device->working_request = ireq; 3033 } 3034 3035 static const struct sci_base_state sci_request_state_table[] = { 3036 [SCI_REQ_INIT] = { }, 3037 [SCI_REQ_CONSTRUCTED] = { }, 3038 [SCI_REQ_STARTED] = { 3039 .enter_state = sci_request_started_state_enter, 3040 }, 3041 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { 3042 .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter, 3043 }, 3044 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, 3045 [SCI_REQ_STP_PIO_WAIT_H2D] = { 3046 .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter, 3047 }, 3048 [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, 3049 [SCI_REQ_STP_PIO_DATA_IN] = { }, 3050 [SCI_REQ_STP_PIO_DATA_OUT] = { }, 3051 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, 3052 [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, 3053 [SCI_REQ_TASK_WAIT_TC_COMP] = { }, 3054 [SCI_REQ_TASK_WAIT_TC_RESP] = { }, 3055 [SCI_REQ_SMP_WAIT_RESP] = { }, 3056 [SCI_REQ_SMP_WAIT_TC_COMP] = { }, 3057 [SCI_REQ_ATAPI_WAIT_H2D] = { }, 3058 [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { }, 3059 [SCI_REQ_ATAPI_WAIT_D2H] = { }, 3060 [SCI_REQ_ATAPI_WAIT_TC_COMP] = { }, 3061 [SCI_REQ_COMPLETED] = { 3062 .enter_state = sci_request_completed_state_enter, 3063 }, 3064 [SCI_REQ_ABORTING] = { 3065 .enter_state = sci_request_aborting_state_enter, 3066 }, 3067 [SCI_REQ_FINAL] = { }, 3068 }; 3069 3070 static void 3071 sci_general_request_construct(struct isci_host 
*ihost, 3072 struct isci_remote_device *idev, 3073 struct isci_request *ireq) 3074 { 3075 sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); 3076 3077 ireq->target_device = idev; 3078 ireq->protocol = SAS_PROTOCOL_NONE; 3079 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; 3080 3081 ireq->sci_status = SCI_SUCCESS; 3082 ireq->scu_status = 0; 3083 ireq->post_context = 0xFFFFFFFF; 3084 } 3085 3086 static enum sci_status 3087 sci_io_request_construct(struct isci_host *ihost, 3088 struct isci_remote_device *idev, 3089 struct isci_request *ireq) 3090 { 3091 struct domain_device *dev = idev->domain_dev; 3092 enum sci_status status = SCI_SUCCESS; 3093 3094 /* Build the common part of the request */ 3095 sci_general_request_construct(ihost, idev, ireq); 3096 3097 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 3098 return SCI_FAILURE_INVALID_REMOTE_DEVICE; 3099 3100 if (dev->dev_type == SAS_END_DEVICE) 3101 /* pass */; 3102 else if (dev_is_sata(dev)) 3103 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 3104 else if (dev_is_expander(dev)) 3105 /* pass */; 3106 else 3107 return SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3108 3109 memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); 3110 3111 return status; 3112 } 3113 3114 enum sci_status sci_task_request_construct(struct isci_host *ihost, 3115 struct isci_remote_device *idev, 3116 u16 io_tag, struct isci_request *ireq) 3117 { 3118 struct domain_device *dev = idev->domain_dev; 3119 enum sci_status status = SCI_SUCCESS; 3120 3121 /* Build the common part of the request */ 3122 sci_general_request_construct(ihost, idev, ireq); 3123 3124 if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) { 3125 set_bit(IREQ_TMF, &ireq->flags); 3126 memset(ireq->tc, 0, sizeof(struct scu_task_context)); 3127 3128 /* Set the protocol indicator. 
 */
		if (dev_is_sata(dev))
			ireq->protocol = SAS_PROTOCOL_STP;
		else
			ireq->protocol = SAS_PROTOCOL_SSP;
	} else
		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	return status;
}

static enum sci_status isci_request_ssp_request_construct(
	struct isci_request *request)
{
	enum sci_status status;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p\n",
		__func__,
		request);
	status = sci_io_request_construct_basic_ssp(request);
	return status;
}

static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct host_to_dev_fis *fis = &ireq->stp.cmd;
	struct ata_queued_cmd *qc = task->uldd_task;
	enum sci_status status;

	dev_dbg(&ireq->isci_host->pdev->dev,
		"%s: ireq = %p\n",
		__func__,
		ireq);

	memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
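	/*
	 * A layout note, per the SATA H2D Register FIS definition of the
	 * flags byte: bit 7 is the C bit, set so the device treats this as a
	 * command register update rather than a device control update, and
	 * bits 3:0 are the PM port field, which the 0xF0 mask below clears.
	 */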
	if (!task->ata_task.device_control_reg_update)
		fis->flags |= 0x80;
	fis->flags &= 0xF0;

	status = sci_io_request_construct_basic_sata(ireq);

	if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		   qc->tf.command == ATA_CMD_FPDMA_READ ||
		   qc->tf.command == ATA_CMD_FPDMA_RECV ||
		   qc->tf.command == ATA_CMD_FPDMA_SEND ||
		   qc->tf.command == ATA_CMD_NCQ_NON_DATA)) {
		fis->sector_count = qc->tag << 3;
		ireq->tc->type.stp.ncq_tag = qc->tag;
	}

	return status;
}

static enum sci_status
sci_io_request_construct_smp(struct device *dev,
			     struct isci_request *ireq,
			     struct sas_task *task)
{
	struct scatterlist *sg = &task->smp_task.smp_req;
	struct isci_remote_device *idev;
	struct scu_task_context *task_context;
	struct isci_port *iport;
	struct smp_req *smp_req;
	void *kaddr;
	u8 req_len;
	u32 cmd;

	kaddr = kmap_atomic(sg_page(sg));
	smp_req = kaddr + sg->offset;
	/*
	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
	 * functions under SAS 2.0, a zero request length really indicates
	 * a non-zero default length.
	 */
	if (smp_req->req_len == 0) {
		switch (smp_req->func) {
		case SMP_DISCOVER:
		case SMP_REPORT_PHY_ERR_LOG:
		case SMP_REPORT_PHY_SATA:
		case SMP_REPORT_ROUTE_INFO:
			smp_req->req_len = 2;
			break;
		case SMP_CONF_ROUTE_INFO:
		case SMP_PHY_CONTROL:
		case SMP_PHY_TEST_FUNCTION:
			smp_req->req_len = 9;
			break;
		/* Default - zero is a valid default for 2.0. */
		}
	}
	req_len = smp_req->req_len;
	/* byte swap the smp request. */
	sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
	cmd = *(u32 *) smp_req;
	kunmap_atomic(kaddr);

	if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
		return SCI_FAILURE;

	ireq->protocol = SAS_PROTOCOL_SMP;

	task_context = ireq->tc;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/*
	 * Fill in the TC with its required data
	 * 00h
	 */
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
	task_context->abort = 0;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	/* 04h */
	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;
	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;

	/* 08h */
	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 1;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	/* 0ch */
	task_context->address_modifier = 0;

	/* 10h */
	task_context->ssp_command_iu_length = req_len;

	/* 14h */
	task_context->transfer_length_bytes = 0;

	/*
	 * 18h ~ 30h, protocol specific
	 * since the command IU has been built by the framework at this point,
	 * we just copy the first dword from the command IU to this location.
	 */
	memcpy(&task_context->type.smp, &cmd, sizeof(u32));

	/*
	 * 40h
	 * "For SMP you could program it to zero. We would prefer that way
	 * so that done code will be consistent." - Venki
	 */
	task_context->task_phase = 0;
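	/*
	 * The post context packs everything the silicon needs to start the
	 * task into one dword: the post-TC request type, the protocol engine
	 * group, the logical port and the task context index taken from the
	 * I/O tag.
	 */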
	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address of the command buffer to the SCU Task
	 * Context; the command buffer should not contain the command header.
	 */
	task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
	task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));

	/* SMP response comes as UF, so no need to set response IU address. */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

/*
 * isci_smp_request_build() - This function builds the smp request.
 * @ireq: This parameter points to the isci_request allocated in the
 *    request construct function.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
 */
static enum sci_status isci_smp_request_build(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct device *dev = &ireq->isci_host->pdev->dev;
	enum sci_status status = SCI_FAILURE;

	status = sci_io_request_construct_smp(dev, ireq, task);
	if (status != SCI_SUCCESS)
		dev_dbg(&ireq->isci_host->pdev->dev,
			"%s: failed with status = %d\n",
			__func__,
			status);

	return status;
}

/**
 * isci_io_request_build() - This function builds the io request object.
 * @ihost: This parameter specifies the ISCI host object
 * @request: This parameter points to the isci_request object allocated in the
 *    request construct function.
 * @idev: This parameter is the handle for the sci core's remote device
 *    object that is the destination for this request.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
 */
static enum sci_status isci_io_request_build(struct isci_host *ihost,
					     struct isci_request *request,
					     struct isci_remote_device *idev)
{
	enum sci_status status = SCI_SUCCESS;
	struct sas_task *task = isci_request_access_task(request);

	dev_dbg(&ihost->pdev->dev,
		"%s: idev = 0x%p; request = %p, "
		"num_scatter = %d\n",
		__func__,
		idev,
		request,
		task->num_scatter);

	/* map the sgl addresses, if present.
	 * libata does the mapping for sata devices
	 * before we get the request.
	 */
	if (task->num_scatter &&
	    !sas_protocol_ata(task->task_proto) &&
	    !(SAS_PROTOCOL_SMP & task->task_proto)) {

		request->num_sg_entries = dma_map_sg(
			&ihost->pdev->dev,
			task->scatter,
			task->num_scatter,
			task->data_dir
			);

		if (request->num_sg_entries == 0)
			return SCI_FAILURE_INSUFFICIENT_RESOURCES;
	}

	status = sci_io_request_construct(ihost, idev, request);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: failed request construct\n",
			__func__);
		return SCI_FAILURE;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		status = isci_smp_request_build(request);
		break;
	case SAS_PROTOCOL_SSP:
		status = isci_request_ssp_request_construct(request);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		status = isci_request_stp_request_construct(request);
		break;
	default:
		dev_dbg(&ihost->pdev->dev,
			"%s: unknown protocol\n", __func__);
		return SCI_FAILURE;
	}

	return SCI_SUCCESS;
}

static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
{
	struct isci_request *ireq;

	ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
	ireq->io_tag = tag;
	ireq->io_request_completion = NULL;
	ireq->flags = 0;
	ireq->num_sg_entries = 0;

	return ireq;
}

static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
						     struct sas_task *task,
						     u16 tag)
{
	struct isci_request *ireq;

	ireq = isci_request_from_tag(ihost, tag);
	ireq->ttype_ptr.io_task_ptr = task;
	clear_bit(IREQ_TMF, &ireq->flags);
	task->lldd_task = ireq;

	return ireq;
}

struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
					       struct isci_tmf *isci_tmf,
					       u16 tag)
{
	struct isci_request *ireq;

	ireq = isci_request_from_tag(ihost, tag);
	ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
	set_bit(IREQ_TMF, &ireq->flags);

	return ireq;
}

int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
			 struct sas_task *task, u16 tag)
{
	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
	struct isci_request *ireq;
	unsigned long flags;
	int ret = 0;

	/* do common allocation and init of request object.
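	 * isci_io_request_from_tag() pairs the sas_task with the tag's
	 * preallocated isci_request and clears IREQ_TMF, so the request
	 * completes down the I/O path rather than the task-management path.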
*/ 3453 ireq = isci_io_request_from_tag(ihost, task, tag); 3454 3455 status = isci_io_request_build(ihost, ireq, idev); 3456 if (status != SCI_SUCCESS) { 3457 dev_dbg(&ihost->pdev->dev, 3458 "%s: request_construct failed - status = 0x%x\n", 3459 __func__, 3460 status); 3461 return status; 3462 } 3463 3464 spin_lock_irqsave(&ihost->scic_lock, flags); 3465 3466 if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) { 3467 3468 if (isci_task_is_ncq_recovery(task)) { 3469 3470 /* The device is in an NCQ recovery state. Issue the 3471 * request on the task side. Note that it will 3472 * complete on the I/O request side because the 3473 * request was built that way (ie. 3474 * ireq->is_task_management_request is false). 3475 */ 3476 status = sci_controller_start_task(ihost, 3477 idev, 3478 ireq); 3479 } else { 3480 status = SCI_FAILURE; 3481 } 3482 } else { 3483 /* send the request, let the core assign the IO TAG. */ 3484 status = sci_controller_start_io(ihost, idev, 3485 ireq); 3486 } 3487 3488 if (status != SCI_SUCCESS && 3489 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 3490 dev_dbg(&ihost->pdev->dev, 3491 "%s: failed request start (0x%x)\n", 3492 __func__, status); 3493 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3494 return status; 3495 } 3496 /* Either I/O started OK, or the core has signaled that 3497 * the device needs a target reset. 3498 */ 3499 if (status != SCI_SUCCESS) { 3500 /* The request did not really start in the 3501 * hardware, so clear the request handle 3502 * here so no terminations will be done. 3503 */ 3504 set_bit(IREQ_TERMINATED, &ireq->flags); 3505 } 3506 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3507 3508 if (status == 3509 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 3510 /* Signal libsas that we need the SCSI error 3511 * handler thread to work on this I/O and that 3512 * we want a device reset. 3513 */ 3514 spin_lock_irqsave(&task->task_state_lock, flags); 3515 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 3516 spin_unlock_irqrestore(&task->task_state_lock, flags); 3517 3518 /* Cause this task to be scheduled in the SCSI error 3519 * handler thread. 3520 */ 3521 sas_task_abort(task); 3522 3523 /* Change the status, since we are holding 3524 * the I/O until it is managed by the SCSI 3525 * error handler. 3526 */ 3527 status = SCI_SUCCESS; 3528 } 3529 3530 return ret; 3531 } 3532
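
/*
 * For orientation, a sketch of the submission flow implemented above (not an
 * additional entry point): isci_request_execute() takes a pre-allocated tag
 * and does
 *
 *	ireq = isci_io_request_from_tag(ihost, task, tag);
 *	status = isci_io_request_build(ihost, ireq, idev);
 *	status = sci_controller_start_io(ihost, idev, ireq);
 *
 * after which TC completions arrive via sci_io_request_tc_completion() and
 * unsolicited frames via sci_io_request_frame_handler(); both drive ireq->sm
 * until entry into SCI_REQ_COMPLETED invokes
 * isci_request_io_request_complete() (or the task variant for TMFs).
 */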