/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <scsi/scsi_cmnd.h>
#include "isci.h"
#include "task.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

#undef C
#define C(a) (#a)
const char *req_state_name(enum sci_base_request_states state)
{
	static const char * const strings[] = REQUEST_STATES;

	return strings[state];
}
#undef C

static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
							int idx)
{
	if (idx == 0)
		return &ireq->tc->sgl_pair_ab;
	else if (idx == 1)
		return &ireq->tc->sgl_pair_cd;
	else if (idx < 0)
		return NULL;
	else
		return &ireq->sg_table[idx - 2];
}

static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
					  struct isci_request *ireq, u32 idx)
{
	u32 offset;

	if (idx == 0) {
		offset = (void *) &ireq->tc->sgl_pair_ab -
			(void *) &ihost->task_context_table[0];
		return ihost->tc_dma + offset;
	} else if (idx == 1) {
		offset = (void *) &ireq->tc->sgl_pair_cd -
			(void *) &ihost->task_context_table[0];
		return ihost->tc_dma + offset;
	}

	return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
}

static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
{
	e->length = sg_dma_len(sg);
	e->address_upper = upper_32_bits(sg_dma_address(sg));
	e->address_lower = lower_32_bits(sg_dma_address(sg));
	e->address_modifier = 0;
}

static void sci_request_build_sgl(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->isci_host;
	struct sas_task *task = isci_request_access_task(ireq);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = to_sgl_element_pair(ireq, sg_idx);
			init_sgl_element(&scu_sg->A, sg);
			sg = sg_next(sg);
			if (sg) {
				init_sgl_element(&scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				memset(&scu_sg->B, 0, sizeof(scu_sg->B));

			if (prev_sg) {
				dma_addr = to_sgl_element_pair_dma(ihost,
								   ireq,
								   sg_idx);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = to_sgl_element_pair(ireq, sg_idx);

		dma_addr = dma_map_single(&ihost->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		ireq->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}

static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
{
	struct ssp_cmd_iu *cmd_iu;
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &ireq->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
		       (task->ssp_task.cmd->cmd_len+3) / sizeof(u32));
}

static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
{
	struct ssp_task_iu *task_iu;
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &ireq->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(test_bit(IREQ_TMF, &ireq->flags)) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/*
 * This method will fill in the SCU Task Context for any type of SSP request.
 */
static void scu_ssp_request_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = ireq->io_tag; */
	task_context->task_phase = 0x01;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}

static u8 scu_bg_blk_size(struct scsi_device *sdp)
{
	switch (sdp->sector_size) {
	case 512:
		return 0;
	case 1024:
		return 1;
	case 4096:
		return 3;
	default:
		return 0xff;
	}
}

static u32 scu_dif_bytes(u32 len, u32 sector_size)
{
	return (len >> ilog2(sector_size)) * 8;
}

static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
{
	struct scu_task_context *tc = ireq->tc;
	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
	u8 blk_sz = scu_bg_blk_size(scmd->device);

	tc->block_guard_enable = 1;
	tc->blk_prot_en = 1;
	tc->blk_sz = blk_sz;
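	/*
	 * Illustrative arithmetic for the DIF overhead added below (an
	 * example only, assuming 512-byte logical blocks): a 4096-byte
	 * write spans 4096 >> 9 = 8 sectors, and scu_dif_bytes() charges
	 * 8 bytes of protection information per sector, so the transfer
	 * length grows by 8 * 8 = 64 bytes.
	 */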
	/* DIF write insert */
	tc->blk_prot_func = 0x2;

	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
						   scmd->device->sector_size);

	/* always init to 0, used by hw */
	tc->interm_crc_val = 0;

	tc->init_crc_seed = 0;
	tc->app_tag_verify = 0;
	tc->app_tag_gen = 0;
	tc->ref_tag_seed_verify = 0;

	/* always init to same as bg_blk_sz */
	tc->UD_bytes_immed_val = scmd->device->sector_size;

	tc->reserved_DC_0 = 0;

	/* always init to 8 */
	tc->DIF_bytes_immed_val = 8;

	tc->reserved_DC_1 = 0;
	tc->bgc_blk_sz = scmd->device->sector_size;
	tc->reserved_E0_0 = 0;
	tc->app_tag_gen_mask = 0;

	/* setup block guard control */
	tc->bgctl = 0;

	/* DIF write insert */
	tc->bgctl_f.op = 0x2;

	tc->app_tag_verify_mask = 0;

	/* must init to 0 for hw */
	tc->blk_guard_err = 0;

	tc->reserved_E8_0 = 0;

	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
		tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
	else if (type & SCSI_PROT_DIF_TYPE3)
		tc->ref_tag_seed_gen = 0;
}

static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
{
	struct scu_task_context *tc = ireq->tc;
	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
	u8 blk_sz = scu_bg_blk_size(scmd->device);

	tc->block_guard_enable = 1;
	tc->blk_prot_en = 1;
	tc->blk_sz = blk_sz;
	/* DIF read strip */
	tc->blk_prot_func = 0x1;

	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
						   scmd->device->sector_size);

	/* always init to 0, used by hw */
	tc->interm_crc_val = 0;

	tc->init_crc_seed = 0;
	tc->app_tag_verify = 0;
	tc->app_tag_gen = 0;

	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
		tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
	else if (type & SCSI_PROT_DIF_TYPE3)
		tc->ref_tag_seed_verify = 0;

	/* always init to same as bg_blk_sz */
	tc->UD_bytes_immed_val = scmd->device->sector_size;

	tc->reserved_DC_0 = 0;

	/* always init to 8 */
	tc->DIF_bytes_immed_val = 8;

	tc->reserved_DC_1 = 0;
	tc->bgc_blk_sz = scmd->device->sector_size;
	tc->reserved_E0_0 = 0;
	tc->app_tag_gen_mask = 0;

	/* setup block guard control */
	tc->bgctl = 0;

	/* DIF read strip */
	tc->bgctl_f.crc_verify = 1;
	tc->bgctl_f.op = 0x1;
	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
		tc->bgctl_f.ref_tag_chk = 1;
		tc->bgctl_f.app_f_detect = 1;
	} else if (type & SCSI_PROT_DIF_TYPE3)
		tc->bgctl_f.app_ref_f_detect = 1;

	tc->app_tag_verify_mask = 0;

	/* must init to 0 for hw */
	tc->blk_guard_err = 0;

	tc->reserved_E8_0 = 0;
	tc->ref_tag_seed_gen = 0;
}

/*
 * This method will fill in the SCU Task Context for an SSP IO request.
 */
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
						      enum dma_data_direction dir,
						      u32 len)
{
	struct scu_task_context *task_context = ireq->tc;
	struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
	struct scsi_cmnd *scmd = sas_task->uldd_task;
	u8 prot_type = scsi_get_prot_type(scmd);
	u8 prot_op = scsi_get_prot_op(scmd);

	scu_ssp_request_construct_task_context(ireq, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		sci_request_build_sgl(ireq);

	if (prot_type != SCSI_PROT_DIF_TYPE0) {
		if (prot_op == SCSI_PROT_READ_STRIP)
			scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
		else if (prot_op == SCSI_PROT_WRITE_INSERT)
			scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
	}
}

/**
 * scu_ssp_task_request_construct_task_context() - This method will fill in
 *    the SCU Task Context for an SSP Task request.  The following important
 *    settings are utilized:
 *    -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *       request is issued ahead of other tasks destined for the same
 *       Remote Node.
 *    -# task_type == SCU_TASK_TYPE_IOREAD.  This simply indicates that a
 *       normal request type (i.e. non-raw frame) is being utilized to
 *       perform task management.
 *    -# control_frame == 1.  This ensures that the proper endianness is
 *       set so that the bytes are transmitted in the right order for a
 *       task frame.
 * @ireq: This parameter specifies the task request object being constructed.
 */
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_ssp_request_construct_task_context(ireq, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * scu_sata_request_construct_task_context()
 * This method will fill in the SCU Task Context for any type of SATA
 *    request.  This is called from the various SATA constructors.
 * @ireq: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * The general io request construction is complete.  The buffer assignment
 * for the command buffer is complete.  Revisit task context construction to
 * determine what is common for SSP/SMP/STP task context structures.
 */
static void scu_sata_request_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context.  We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
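	 *
	 * (Illustrative note, inferred from this file rather than from a
	 * hardware reference: the H2D register FIS occupies five dwords;
	 * dword 0 travels in type.words[0] above, and the remaining four
	 * dwords -- the ssp_command_iu_length computed above -- are
	 * fetched by the hardware through command_iu_upper/lower.)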
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq,
					       ((char *) &ireq->stp.cmd) +
					       sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}

static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_sata_request_construct_task_context(ireq, task_context);

	task_context->control_frame = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
						     bool copy_rx_frame)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;

	scu_stp_raw_request_construct_task_context(ireq);

	stp_req->status = 0;
	stp_req->sgl.offset = 0;
	stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		sci_request_build_sgl(ireq);
		stp_req->sgl.index = 0;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		stp_req->sgl.index = -1;
	}

	return SCI_SUCCESS;
}

/*
 * sci_stp_optimized_request_construct()
 * @ireq: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to
 *    be a UDMA request or an NCQ request.  A value of 0 indicates UDMA; a
 *    value of 1 indicates NCQ.
 *
 * This method will perform request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).  This method
 * returns an indication as to whether the construction was successful.
 */
static void sci_stp_optimized_request_construct(struct isci_request *ireq,
						u8 optimized_task_type,
						u32 len,
						enum dma_data_direction dir)
{
	struct scu_task_context *task_context = ireq->tc;

	/* Build the STP task context structure */
	scu_sata_request_construct_task_context(ireq, task_context);

	/* Copy over the SGL elements */
	sci_request_build_sgl(ireq);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task
		 * type values is consistent with the difference between FPDMA
		 * READ and FPDMA WRITE values.  Add the supplied task type
		 * parameter to this difference to set the task type properly
		 * for this DATA OUT (WRITE) case.
		 */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type.
		 */
		task_context->task_type = optimized_task_type;
	}
}

static void sci_atapi_construct(struct isci_request *ireq)
{
	struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
	struct sas_task *task;

	/* To simplify the implementation we take advantage of the
	 * silicon's partial acceleration of atapi protocol (dma data
	 * transfers), so we promote all commands to dma protocol.
	 * This breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
	 */
	h2d_fis->features |= ATAPI_PKT_DMA;

	scu_stp_raw_request_construct_task_context(ireq);

	task = isci_request_access_task(ireq);
	if (task->data_dir == DMA_NONE)
		task->total_xfer_len = 0;

	/* clear the response so we can detect arrival of an
	 * unsolicited h2d fis
	 */
	ireq->stp.rsp.fis_type = 0;
}

static enum sci_status
sci_io_request_construct_sata(struct isci_request *ireq,
			      u32 len,
			      enum dma_data_direction dir,
			      bool copy)
{
	enum sci_status status = SCI_SUCCESS;
	struct sas_task *task = isci_request_access_task(ireq);
	struct domain_device *dev = ireq->target_device->domain_dev;

	/* check for management protocols */
	if (test_bit(IREQ_TMF, &ireq->flags)) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		dev_err(&ireq->owning_controller->pdev->dev,
			"%s: Request 0x%p received un-handled SAT "
			"management protocol 0x%x.\n",
			__func__, ireq, tmf->tmf_code);

		return SCI_FAILURE;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(&ireq->owning_controller->pdev->dev,
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* ATAPI */
	if (dev->sata_dev.class == ATA_DEV_ATAPI &&
	    task->ata_task.fis.command == ATA_CMD_PACKET) {
		sci_atapi_construct(ireq);
		return SCI_SUCCESS;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(ireq);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		sci_stp_optimized_request_construct(ireq,
						    SCU_TASK_TYPE_FPDMAQ_READ,
						    len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		sci_stp_optimized_request_construct(ireq,
						    SCU_TASK_TYPE_DMA_IN,
						    len, dir);
		return SCI_SUCCESS;
	} else /* PIO */
		return sci_stp_pio_request_construct(ireq, copy);

	return status;
}

static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SAS_PROTOCOL_SSP;

	scu_ssp_io_request_construct_task_context(ireq,
						  task->data_dir,
						  task->total_xfer_len);

	sci_io_request_build_ssp_command_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status sci_task_request_construct_ssp(
	struct isci_request *ireq)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(ireq);

	/* Fill in the SSP Task IU */
	sci_task_request_build_ssp_task_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
{
	enum sci_status status;
	bool copy = false;
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SAS_PROTOCOL_STP;

	copy = (task->data_dir == DMA_NONE) ? false : true;
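	/*
	 * Explanatory note (derived from the constructors above, not added
	 * behavior): for the PIO case, 'copy' becomes the copy_rx_frame
	 * argument of sci_stp_pio_request_construct(), i.e. whether
	 * received frame data must be staged through the request SGL.
	 * DMA_NONE requests carry no payload, so nothing needs copying.
	 */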

	status = sci_io_request_construct_sata(ireq,
					       task->total_xfer_len,
					       task->data_dir,
					       copy);

	if (status == SCI_SUCCESS)
		sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return status;
}

#define SCU_TASK_CONTEXT_SRAM 0x200000
/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @ireq: request that was terminated early
 */
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->owning_controller;
	u32 ret_val = 0;

	if (readl(&ihost->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = ihost->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct sci_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
	}

	return ret_val;
}

enum sci_status sci_request_start(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct scu_task_context *tc = ireq->tc;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (state != SCI_REQ_CONSTRUCTED) {
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC IO Request requested to start while in wrong "
			 "state %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	tc->task_index = ISCI_TAG_TCI(ireq->io_tag);

	switch (tc->protocol_type) {
	case SCU_TASK_CONTEXT_PROTOCOL_SMP:
	case SCU_TASK_CONTEXT_PROTOCOL_SSP:
		/* SSP/SMP Frame */
		tc->type.ssp.tag = ireq->io_tag;
		tc->type.ssp.target_port_transfer_tag = 0xFFFF;
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_STP:
		/* STP/SATA Frame
		 * tc->type.stp.ncq_tag = ireq->ncq_tag;
		 */
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_NONE:
		/* @todo When do we set no protocol type? */
		break;

	default:
		/* This should never happen since we build the IO
		 * requests */
		break;
	}

	/* Add to the post_context the io tag value */
	ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);

	/* Everything is good go ahead and change state */
	sci_change_state(&ireq->sm, SCI_REQ_STARTED);

	return SCI_SUCCESS;
}

enum sci_status
sci_io_request_terminate(struct isci_request *ireq)
{
	enum sci_base_request_states state;

	state = ireq->sm.current_state_id;

	switch (state) {
	case SCI_REQ_CONSTRUCTED:
		/* Set to make sure no HW terminate posting is done: */
		set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_STARTED:
	case SCI_REQ_TASK_WAIT_TC_COMP:
	case SCI_REQ_SMP_WAIT_RESP:
	case SCI_REQ_SMP_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_D2H:
	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
	case SCI_REQ_STP_NON_DATA_WAIT_D2H:
	case SCI_REQ_STP_PIO_WAIT_H2D:
	case SCI_REQ_STP_PIO_WAIT_FRAME:
	case SCI_REQ_STP_PIO_DATA_IN:
	case SCI_REQ_STP_PIO_DATA_OUT:
	case SCI_REQ_ATAPI_WAIT_H2D:
	case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
	case SCI_REQ_ATAPI_WAIT_D2H:
	case SCI_REQ_ATAPI_WAIT_TC_COMP:
		/* Fall through and change state to ABORTING... */
	case SCI_REQ_TASK_WAIT_TC_RESP:
		/* The task frame was already confirmed to have been
		 * sent by the SCU HW.  Since the state machine is
		 * now only waiting for the task response itself,
		 * abort the request and complete it immediately
		 * and don't wait for the task response.
		 */
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		fallthrough;	/* and handle like ABORTING */
	case SCI_REQ_ABORTING:
		if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
			set_bit(IREQ_PENDING_ABORT, &ireq->flags);
		else
			clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
		/* If the request is only waiting on the remote device
		 * suspension, return SUCCESS so the caller will wait too.
		 */
		return SCI_SUCCESS;
	case SCI_REQ_COMPLETED:
	default:
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n", __func__, ireq->sm.current_state_id);
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status sci_request_complete(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
		      "isci: request completion from wrong state (%s)\n",
		      req_state_name(state)))
		return SCI_FAILURE_INVALID_STATE;

	if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
		sci_controller_release_frame(ihost,
					     ireq->saved_rx_frame_index);

	/* XXX can we just stop the machine and remove the 'final' state? */
	sci_change_state(&ireq->sm, SCI_REQ_FINAL);
	return SCI_SUCCESS;
}

enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
					     u32 event_code)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;

	if (state != SCI_REQ_STP_PIO_DATA_IN) {
		dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
			 __func__, event_code, req_state_name(state));

		return SCI_FAILURE_INVALID_STATE;
	}

	switch (scu_get_event_specifier(event_code)) {
	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
		/* We are waiting for data and the SCU has R_ERR the data frame.
		 * Go back to waiting for the D2H Register FIS
		 */
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		return SCI_SUCCESS;
	default:
		dev_err(&ihost->pdev->dev,
			"%s: pio request unexpected event %#x\n",
			__func__, event_code);

		/* TODO Should we fail the PIO request when we get an
		 * unexpected event?
		 */
		return SCI_FAILURE;
	}
}

/*
 * This function copies response data for requests returning response data
 * instead of sense data.
 * @ireq: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void sci_io_request_copy_response(struct isci_request *ireq)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &ireq->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}

static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
			       u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code other than 0 is bad: decode 0x003C0000
	 * to determine SDMA status
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response stats to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &ireq->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
		} else {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &ireq->ssp.rsp;
		datapres = resp_iu->datapres;

		if (datapres == 1 || datapres == 2) {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		} else {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS;
		}
		break;
	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (ireq->protocol == SAS_PROTOCOL_STP) {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		} else {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
	return SCI_SUCCESS;
}

static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
				u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* Unless we get some strange error, wait for the task abort
		 * to complete.
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/* Currently, the decision is to simply allow the task request
		 * to timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses if
		 * we decide to send the task IU again.
		 */
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n", __func__, ireq,
			 completion_code);

		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_response_tc_event(struct isci_request *ireq,
				    u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* In the AWAIT RESPONSE state, any TC completion is
		 * unexpected, but if the TC has success status, we
		 * complete the IO anyway.
		 */
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/* These statuses have been seen in a specific LSI expander,
		 * which sometimes is not able to send an smp response within
		 * 2 ms.  This causes our hardware to break the connection
		 * and set the TC completion with one of these SMP_XXX_XX_ERR
		 * statuses.  For this type of error, we ask the ihost user
		 * to retry the request.
		 */
		ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
		ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be complete.  If a NAK
		 * was received, then it is up to the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_tc_event(struct isci_request *ireq,
			   u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
{
	struct scu_sgl_element *sgl;
	struct scu_sgl_element_pair *sgl_pair;
	struct isci_request *ireq = to_ireq(stp_req);
	struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;

	sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
	if (!sgl_pair)
		sgl = NULL;
	else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
		if (sgl_pair->B.address_lower == 0 &&
		    sgl_pair->B.address_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
			sgl = &sgl_pair->B;
		}
	} else {
		if (sgl_pair->next_pair_lower == 0 &&
		    sgl_pair->next_pair_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->index++;
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
			sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
			sgl = &sgl_pair->A;
		}
	}

	return sgl;
}

static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
					u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */

/* transmit DATA_FIS from (current sgl + offset) for input
 * parameter length.  The current sgl and offset are already stored in the
 * IO request.
 */
static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
	struct isci_request *ireq,
	u32 length)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_task_context *task_context = ireq->tc;
	struct scu_sgl_element_pair *sgl_pair;
	struct scu_sgl_element *current_sgl;

	/* Recycle the TC and reconstruct it for sending out a DATA FIS
	 * containing the data from current_sgl+offset for the input length
	 */
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
		current_sgl = &sgl_pair->A;
	else
		current_sgl = &sgl_pair->B;

	/* update the TC */
	task_context->command_iu_upper = current_sgl->address_upper;
	task_context->command_iu_lower = current_sgl->address_lower;
	task_context->transfer_length_bytes = length;
	task_context->type.stp.fis_type = FIS_DATA;

	/* send the new TC out. */
	return sci_controller_continue_io(ireq);
}

static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_sgl_element_pair *sgl_pair;
	enum sci_status status = SCI_SUCCESS;
	struct scu_sgl_element *sgl;
	u32 offset;
	u32 len = 0;

	offset = stp_req->sgl.offset;
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
		return SCI_FAILURE;

	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
		sgl = &sgl_pair->A;
		len = sgl_pair->A.length - offset;
	} else {
		sgl = &sgl_pair->B;
		len = sgl_pair->B.length - offset;
	}

	if (stp_req->pio_len == 0)
		return SCI_SUCCESS;

	if (stp_req->pio_len >= len) {
		status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
		if (status != SCI_SUCCESS)
			return status;
		stp_req->pio_len -= len;

		/* update the current sgl, offset and save for future */
		sgl = pio_sgl_next(stp_req);
		offset = 0;
	} else if (stp_req->pio_len < len) {
		sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);

		/* Sgl offset will be adjusted and saved for future */
		offset += stp_req->pio_len;
		sgl->address_lower += stp_req->pio_len;
		stp_req->pio_len = 0;
	}

	stp_req->sgl.offset = offset;

	return status;
}

/**
 * sci_stp_request_pio_data_in_copy_data_buffer()
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * Copy the data from the buffer for the length specified to the IO request
 * SGL specified data region.  Returns an enum sci_status value.
 */
static enum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
					     u8 *data_buf, u32 len)
{
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	ireq = to_ireq(stp_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);

			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}

/**
 * sci_stp_request_pio_data_in_copy_data()
 * @stp_req: The PIO DATA IN request that is to receive the data.
 * @data_buffer: The buffer to copy from.
 *
 * Copy the data buffer to the io request data region.
 * Returns an enum sci_status value.
 */
static enum sci_status sci_stp_request_pio_data_in_copy_data(
	struct isci_stp_request *stp_req,
	u8 *data_buffer)
{
	enum sci_status status;

	/*
	 * If there is less than 1K remaining in the transfer request
	 * copy just the data for the transfer */
	if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, stp_req->pio_len);

		if (status == SCI_SUCCESS)
			stp_req->pio_len = 0;
	} else {
		/* We are transferring the whole frame so copy */
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);

		if (status == SCI_SUCCESS)
			stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
	}

	return status;
}

static enum sci_status
stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
					      u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
pio_data_out_tx_done_tc_event(struct isci_request *ireq,
			      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;
	bool all_frames_transferred = false;
	struct isci_stp_request *stp_req = &ireq->stp.req;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* Transmit data */
		if (stp_req->pio_len != 0) {
			status = sci_stp_request_pio_data_out_transmit_data(ireq);
			if (status == SCI_SUCCESS) {
				if (stp_req->pio_len == 0)
					all_frames_transferred = true;
			}
		} else if (stp_req->pio_len == 0) {
			/*
			 * this will happen if all the data is written at the
			 * first time after the pio setup fis is received
			 */
			all_frames_transferred = true;
		}

		/* all data transferred. */
		if (all_frames_transferred) {
			/*
			 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
			 * and wait for a PIO_SETUP fis or a D2H Reg fis.
			 */
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		}
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
								  u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	struct dev_to_host_fis *frame_header;
	enum sci_status status;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);

	if ((status == SCI_SUCCESS) &&
	    (frame_header->fis_type == FIS_REGD2H)) {
		sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
							 frame_index,
							 (void **)&frame_buffer);

		sci_controller_copy_sata_response(&ireq->stp.rsp,
						  frame_header,
						  frame_buffer);
	}

	sci_controller_release_frame(ihost, frame_index);

	return status;
}

static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
					       u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);

	if (status != SCI_SUCCESS)
		return status;

	if (frame_header->fis_type != FIS_REGD2H) {
		dev_err(&ireq->isci_host->pdev->dev,
			"%s ERROR: invalid fis type 0x%X\n",
			__func__, frame_header->fis_type);
		return SCI_FAILURE;
	}

	sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
						 frame_index,
						 (void **)&frame_buffer);

	sci_controller_copy_sata_response(&ireq->stp.rsp,
					  (u32 *)frame_header,
					  frame_buffer);

	/* Frame has been decoded return it to the controller */
	sci_controller_release_frame(ihost, frame_index);

	return status;
}

static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
						   u32 frame_index)
{
	struct sas_task *task = isci_request_access_task(ireq);
	enum sci_status status;

	status = process_unsolicited_fis(ireq, frame_index);

	if (status == SCI_SUCCESS) {
		if (ireq->stp.rsp.status & ATA_ERR)
			status = SCI_FAILURE_IO_RESPONSE_VALID;
	} else {
		status = SCI_FAILURE_IO_RESPONSE_VALID;
	}

	if (status != SCI_SUCCESS) {
		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = status;
	} else {
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
	}

	/* the d2h ufi is the end of non-data commands */
	if (task->data_dir == DMA_NONE)
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

	return status;
}

static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
{
	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
	void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
	struct scu_task_context *task_context = ireq->tc;

	/* fill in the SCU Task Context for a DATA fis containing a CDB in
	 * Raw Frame type.  The TC for the previous Packet fis was already
	 * there; we only need to change the H2D fis content.
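	 *
	 * (Cross-reference, for the curious: the CDB is copied at offset 4
	 * into the FIS area because the first dword of the command travels
	 * in the body of the TC itself; see the 4-byte offset applied in
	 * scu_sata_request_construct_task_context().)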
	 */
	memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
	memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
	memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
	task_context->type.stp.fis_type = FIS_DATA;
	task_context->transfer_length_bytes = dev->cdb_len;
}

static void scu_atapi_construct_task_context(struct isci_request *ireq)
{
	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
	struct sas_task *task = isci_request_access_task(ireq);
	struct scu_task_context *task_context = ireq->tc;
	int cdb_len = dev->cdb_len;

	/* reference: SSTL 1.13.4.2
	 * task_type, sata_direction
	 */
	if (task->data_dir == DMA_TO_DEVICE) {
		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
		task_context->sata_direction = 0;
	} else {
		/* todo: for NO_DATA command, we need to send out raw frame. */
		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
		task_context->sata_direction = 1;
	}

	memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
	task_context->type.stp.fis_type = FIS_DATA;

	memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
	memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
	task_context->ssp_command_iu_length = cdb_len / sizeof(u32);

	/* task phase is set to TX_CMD */
	task_context->task_phase = 0x1;

	/* retry counter */
	task_context->stp_retry_count = 0;

	/* data transfer size. */
	task_context->transfer_length_bytes = task->total_xfer_len;

	/* setup sgl */
	sci_request_build_sgl(ireq);
}

enum sci_status
sci_io_request_frame_handler(struct isci_request *ireq,
			     u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	struct isci_stp_request *stp_req = &ireq->stp.req;
	enum sci_base_request_states state;
	enum sci_status status;
	ssize_t word_cnt;

	state = ireq->sm.current_state_id;
	switch (state) {
	case SCI_REQ_STARTED: {
		struct ssp_frame_hdr ssp_hdr;
		void *frame_header;

		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							 frame_index,
							 &frame_header);

		word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
		sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);

		if (ssp_hdr.frame_type == SSP_RESPONSE) {
			struct ssp_response_iu *resp_iu;
			ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&resp_iu);

			sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);

			resp_iu = &ireq->ssp.rsp;

			if (resp_iu->datapres == 0x01 ||
			    resp_iu->datapres == 0x02) {
				ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
				ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
			} else {
				ireq->scu_status = SCU_TASK_DONE_GOOD;
				ireq->sci_status = SCI_SUCCESS;
			}
		} else {
			/* not a response frame, why did it get forwarded? */
*/ 1743 dev_err(&ihost->pdev->dev, 1744 "%s: SCIC IO Request 0x%p received unexpected " 1745 "frame %d type 0x%02x\n", __func__, ireq, 1746 frame_index, ssp_hdr.frame_type); 1747 } 1748 1749 /* 1750 * In any case we are done with this frame buffer; return it to 1751 * the controller 1752 */ 1753 sci_controller_release_frame(ihost, frame_index); 1754 1755 return SCI_SUCCESS; 1756 } 1757 1758 case SCI_REQ_TASK_WAIT_TC_RESP: 1759 sci_io_request_copy_response(ireq); 1760 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1761 sci_controller_release_frame(ihost, frame_index); 1762 return SCI_SUCCESS; 1763 1764 case SCI_REQ_SMP_WAIT_RESP: { 1765 struct sas_task *task = isci_request_access_task(ireq); 1766 struct scatterlist *sg = &task->smp_task.smp_resp; 1767 void *frame_header, *kaddr; 1768 u8 *rsp; 1769 1770 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1771 frame_index, 1772 &frame_header); 1773 kaddr = kmap_atomic(sg_page(sg)); 1774 rsp = kaddr + sg->offset; 1775 sci_swab32_cpy(rsp, frame_header, 1); 1776 1777 if (rsp[0] == SMP_RESPONSE) { 1778 void *smp_resp; 1779 1780 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1781 frame_index, 1782 &smp_resp); 1783 1784 word_cnt = (sg->length/4)-1; 1785 if (word_cnt > 0) 1786 word_cnt = min_t(unsigned int, word_cnt, 1787 SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4); 1788 sci_swab32_cpy(rsp + 4, smp_resp, word_cnt); 1789 1790 ireq->scu_status = SCU_TASK_DONE_GOOD; 1791 ireq->sci_status = SCI_SUCCESS; 1792 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); 1793 } else { 1794 /* 1795 * This was not a response frame; why did it get 1796 * forwarded? 1797 */ 1798 dev_err(&ihost->pdev->dev, 1799 "%s: SCIC SMP Request 0x%p received unexpected " 1800 "frame %d type 0x%02x\n", 1801 __func__, 1802 ireq, 1803 frame_index, 1804 rsp[0]); 1805 1806 ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; 1807 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1808 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1809 } 1810 kunmap_atomic(kaddr); 1811 1812 sci_controller_release_frame(ihost, frame_index); 1813 1814 return SCI_SUCCESS; 1815 } 1816 1817 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 1818 return sci_stp_request_udma_general_frame_handler(ireq, 1819 frame_index); 1820 1821 case SCI_REQ_STP_UDMA_WAIT_D2H: 1822 /* Use the general frame handler to copy the response data */ 1823 status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); 1824 1825 if (status != SCI_SUCCESS) 1826 return status; 1827 1828 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1829 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1830 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1831 return SCI_SUCCESS; 1832 1833 case SCI_REQ_STP_NON_DATA_WAIT_D2H: { 1834 struct dev_to_host_fis *frame_header; 1835 u32 *frame_buffer; 1836 1837 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1838 frame_index, 1839 (void **)&frame_header); 1840 1841 if (status != SCI_SUCCESS) { 1842 dev_err(&ihost->pdev->dev, 1843 "%s: SCIC IO Request 0x%p could not get frame " 1844 "header for frame index %d, status %x\n", 1845 __func__, 1846 stp_req, 1847 frame_index, 1848 status); 1849 1850 return status; 1851 } 1852 1853 switch (frame_header->fis_type) { 1854 case FIS_REGD2H: 1855 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1856 frame_index, 1857 (void **)&frame_buffer); 1858 1859 sci_controller_copy_sata_response(&ireq->stp.rsp, 1860 frame_header, 1861 frame_buffer); 1862 1863 /* The command has completed with error */ 1864 ireq->scu_status =
SCU_TASK_DONE_CHECK_RESPONSE; 1865 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1866 break; 1867 1868 default: 1869 dev_warn(&ihost->pdev->dev, 1870 "%s: IO Request:0x%p Frame Id:%d protocol " 1871 "violation occurred\n", __func__, stp_req, 1872 frame_index); 1873 1874 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; 1875 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; 1876 break; 1877 } 1878 1879 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1880 1881 /* Frame has been decoded; return it to the controller */ 1882 sci_controller_release_frame(ihost, frame_index); 1883 1884 return status; 1885 } 1886 1887 case SCI_REQ_STP_PIO_WAIT_FRAME: { 1888 struct sas_task *task = isci_request_access_task(ireq); 1889 struct dev_to_host_fis *frame_header; 1890 u32 *frame_buffer; 1891 1892 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1893 frame_index, 1894 (void **)&frame_header); 1895 1896 if (status != SCI_SUCCESS) { 1897 dev_err(&ihost->pdev->dev, 1898 "%s: SCIC IO Request 0x%p could not get frame " 1899 "header for frame index %d, status %x\n", 1900 __func__, stp_req, frame_index, status); 1901 return status; 1902 } 1903 1904 switch (frame_header->fis_type) { 1905 case FIS_PIO_SETUP: 1906 /* Get from the frame buffer the PIO Setup Data */ 1907 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1908 frame_index, 1909 (void **)&frame_buffer); 1910 1911 /* Get the data from the PIO Setup. The SCU hardware 1912 * returns the first word in the frame_header, and the rest 1913 * of the data is in the frame buffer, so we need to 1914 * back up one dword. 1915 */ 1916 1917 /* transfer_count: first 16 bits in the 4th dword */ 1918 stp_req->pio_len = frame_buffer[3] & 0xffff; 1919 1920 /* status: 4th byte in the 3rd dword */ 1921 stp_req->status = (frame_buffer[2] >> 24) & 0xff; 1922 1923 sci_controller_copy_sata_response(&ireq->stp.rsp, 1924 frame_header, 1925 frame_buffer); 1926 1927 ireq->stp.rsp.status = stp_req->status; 1928 1929 /* The next state is dependent on whether the 1930 * request was PIO Data-in or Data-out 1931 */ 1932 if (task->data_dir == DMA_FROM_DEVICE) { 1933 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); 1934 } else if (task->data_dir == DMA_TO_DEVICE) { 1935 /* Transmit data */ 1936 status = sci_stp_request_pio_data_out_transmit_data(ireq); 1937 if (status != SCI_SUCCESS) 1938 break; 1939 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); 1940 } 1941 break; 1942 1943 case FIS_SETDEVBITS: 1944 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1945 break; 1946 1947 case FIS_REGD2H: 1948 if (frame_header->status & ATA_BUSY) { 1949 /* 1950 * Now why is the drive sending a D2H Register 1951 * FIS when it is still busy? Do nothing since 1952 * we are still in the right state. 1953 */ 1954 dev_dbg(&ihost->pdev->dev, 1955 "%s: SCIC PIO Request 0x%p received " 1956 "D2H Register FIS with BSY status " 1957 "0x%x\n", 1958 __func__, 1959 stp_req, 1960 frame_header->status); 1961 break; 1962 } 1963 1964 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1965 frame_index, 1966 (void **)&frame_buffer); 1967 1968 sci_controller_copy_sata_response(&ireq->stp.rsp, 1969 frame_header, 1970 frame_buffer); 1971 1972 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1973 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1974 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1975 break; 1976 1977 default: 1978 /* FIXME: what do we do here?
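For now the unrecognized FIS is simply ignored; the frame is still released below so the unsolicited frame pool does not leak.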
*/ 1979 break; 1980 } 1981 1982 /* Frame is decoded; return it to the controller */ 1983 sci_controller_release_frame(ihost, frame_index); 1984 1985 return status; 1986 } 1987 1988 case SCI_REQ_STP_PIO_DATA_IN: { 1989 struct dev_to_host_fis *frame_header; 1990 struct sata_fis_data *frame_buffer; 1991 1992 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1993 frame_index, 1994 (void **)&frame_header); 1995 1996 if (status != SCI_SUCCESS) { 1997 dev_err(&ihost->pdev->dev, 1998 "%s: SCIC IO Request 0x%p could not get frame " 1999 "header for frame index %d, status %x\n", 2000 __func__, 2001 stp_req, 2002 frame_index, 2003 status); 2004 return status; 2005 } 2006 2007 if (frame_header->fis_type != FIS_DATA) { 2008 dev_err(&ihost->pdev->dev, 2009 "%s: SCIC PIO Request 0x%p received frame %d " 2010 "with fis type 0x%02x when expecting a data " 2011 "fis.\n", 2012 __func__, 2013 stp_req, 2014 frame_index, 2015 frame_header->fis_type); 2016 2017 ireq->scu_status = SCU_TASK_DONE_GOOD; 2018 ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; 2019 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2020 2021 /* Frame is decoded; return it to the controller */ 2022 sci_controller_release_frame(ihost, frame_index); 2023 return status; 2024 } 2025 2026 if (stp_req->sgl.index < 0) { 2027 ireq->saved_rx_frame_index = frame_index; 2028 stp_req->pio_len = 0; 2029 } else { 2030 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 2031 frame_index, 2032 (void **)&frame_buffer); 2033 2034 status = sci_stp_request_pio_data_in_copy_data(stp_req, 2035 (u8 *)frame_buffer); 2036 2037 /* Frame is decoded; return it to the controller */ 2038 sci_controller_release_frame(ihost, frame_index); 2039 } 2040 2041 /* Check for the end of the transfer; are there more 2042 * bytes remaining for this data transfer? 2043 */ 2044 if (status != SCI_SUCCESS || stp_req->pio_len != 0) 2045 return status; 2046 2047 if ((stp_req->status & ATA_BUSY) == 0) { 2048 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 2049 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2050 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2051 } else { 2052 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 2053 } 2054 return status; 2055 } 2056 2057 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: { 2058 struct sas_task *task = isci_request_access_task(ireq); 2059 2060 sci_controller_release_frame(ihost, frame_index); 2061 ireq->target_device->working_request = ireq; 2062 if (task->data_dir == DMA_NONE) { 2063 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP); 2064 scu_atapi_reconstruct_raw_frame_task_context(ireq); 2065 } else { 2066 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); 2067 scu_atapi_construct_task_context(ireq); 2068 } 2069 2070 sci_controller_continue_io(ireq); 2071 return SCI_SUCCESS; 2072 } 2073 case SCI_REQ_ATAPI_WAIT_D2H: 2074 return atapi_d2h_reg_frame_handler(ireq, frame_index); 2075 case SCI_REQ_ABORTING: 2076 /* 2077 * TODO: Is it even possible to get an unsolicited frame in the 2078 * aborting state?
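* Releasing the frame below is the safe default either way; holding on to it would leak an unsolicited frame buffer.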
2079 */ 2080 sci_controller_release_frame(ihost, frame_index); 2081 return SCI_SUCCESS; 2082 2083 default: 2084 dev_warn(&ihost->pdev->dev, 2085 "%s: SCIC IO Request given unexpected frame %x while " 2086 "in state %d\n", 2087 __func__, 2088 frame_index, 2089 state); 2090 2091 sci_controller_release_frame(ihost, frame_index); 2092 return SCI_FAILURE_INVALID_STATE; 2093 } 2094 } 2095 2096 static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq, 2097 u32 completion_code) 2098 { 2099 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2100 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 2101 ireq->scu_status = SCU_TASK_DONE_GOOD; 2102 ireq->sci_status = SCI_SUCCESS; 2103 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2104 break; 2105 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): 2106 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 2107 /* We must check the response buffer to see if the D2H 2108 * Register FIS was received before we got the TC 2109 * completion. 2110 */ 2111 if (ireq->stp.rsp.fis_type == FIS_REGD2H) { 2112 sci_remote_device_suspend(ireq->target_device, 2113 SCI_SW_SUSPEND_NORMAL); 2114 2115 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 2116 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2117 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2118 } else { 2119 /* If we have an error completion status for the 2120 * TC, then we can expect a D2H register FIS from 2121 * the device, so we must change state to wait 2122 * for it. 2123 */ 2124 sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H); 2125 } 2126 break; 2127 2128 /* TODO Check to see if any of these completion statuses need to 2129 * wait for the device to host register fis. 2130 */ 2131 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR 2132 * - this comes only for B0 2133 */ 2134 default: 2135 /* All other completion statuses cause the IO to be complete. */ 2136 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 2137 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 2138 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2139 break; 2140 } 2141 2142 return SCI_SUCCESS; 2143 } 2144 2145 static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code, 2146 enum sci_base_request_states next) 2147 { 2148 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2149 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 2150 ireq->scu_status = SCU_TASK_DONE_GOOD; 2151 ireq->sci_status = SCI_SUCCESS; 2152 sci_change_state(&ireq->sm, next); 2153 break; 2154 default: 2155 /* All other completion statuses cause the IO to be complete. 2156 * If a NAK was received, then it is up to the user to retry 2157 * the request. 2158
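* The raw SCU status is normalized below so the completion path reports a controller-specific error.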
2158 */ 2159 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 2160 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 2161 2162 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2163 break; 2164 } 2165 2166 return SCI_SUCCESS; 2167 } 2168 2169 static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq, 2170 u32 completion_code) 2171 { 2172 struct isci_remote_device *idev = ireq->target_device; 2173 struct dev_to_host_fis *d2h = &ireq->stp.rsp; 2174 enum sci_status status = SCI_SUCCESS; 2175 2176 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2177 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): 2178 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2179 break; 2180 2181 case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): { 2182 u16 len = sci_req_tx_bytes(ireq); 2183 2184 /* likely non-error data underrrun, workaround missing 2185 * d2h frame from the controller 2186 */ 2187 if (d2h->fis_type != FIS_REGD2H) { 2188 d2h->fis_type = FIS_REGD2H; 2189 d2h->flags = (1 << 6); 2190 d2h->status = 0x50; 2191 d2h->error = 0; 2192 d2h->lbal = 0; 2193 d2h->byte_count_low = len & 0xff; 2194 d2h->byte_count_high = len >> 8; 2195 d2h->device = 0xa0; 2196 d2h->lbal_exp = 0; 2197 d2h->lbam_exp = 0; 2198 d2h->lbah_exp = 0; 2199 d2h->_r_a = 0; 2200 d2h->sector_count = 0x3; 2201 d2h->sector_count_exp = 0; 2202 d2h->_r_b = 0; 2203 d2h->_r_c = 0; 2204 d2h->_r_d = 0; 2205 } 2206 2207 ireq->scu_status = SCU_TASK_DONE_GOOD; 2208 ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; 2209 status = ireq->sci_status; 2210 2211 /* the hw will have suspended the rnc, so complete the 2212 * request upon pending resume 2213 */ 2214 sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); 2215 break; 2216 } 2217 case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT): 2218 /* In this case, there is no UF coming after. 2219 * compelte the IO now. 2220 */ 2221 ireq->scu_status = SCU_TASK_DONE_GOOD; 2222 ireq->sci_status = SCI_SUCCESS; 2223 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2224 break; 2225 2226 default: 2227 if (d2h->fis_type == FIS_REGD2H) { 2228 /* UF received change the device state to ATAPI_ERROR */ 2229 status = ireq->sci_status; 2230 sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); 2231 } else { 2232 /* If receiving any non-success TC status, no UF 2233 * received yet, then an UF for the status fis 2234 * is coming after (XXX: suspect this is 2235 * actually a protocol error or a bug like the 2236 * DONE_UNEXP_FIS case) 2237 */ 2238 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 2239 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2240 2241 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); 2242 } 2243 break; 2244 } 2245 2246 return status; 2247 } 2248 2249 static int sci_request_smp_completion_status_is_tx_suspend( 2250 unsigned int completion_status) 2251 { 2252 switch (completion_status) { 2253 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: 2254 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 2255 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 2256 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 2257 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 2258 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 2259 return 1; 2260 } 2261 return 0; 2262 } 2263 2264 static int sci_request_smp_completion_status_is_tx_rx_suspend( 2265 unsigned int completion_status) 2266 { 2267 return 0; /* There are no Tx/Rx SMP suspend conditions. 
*/ 2268 } 2269 2270 static int sci_request_ssp_completion_status_is_tx_suspend( 2271 unsigned int completion_status) 2272 { 2273 switch (completion_status) { 2274 case SCU_TASK_DONE_TX_RAW_CMD_ERR: 2275 case SCU_TASK_DONE_LF_ERR: 2276 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: 2277 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 2278 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 2279 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 2280 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 2281 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 2282 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 2283 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 2284 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 2285 return 1; 2286 } 2287 return 0; 2288 } 2289 2290 static int sci_request_ssp_completion_status_is_tx_rx_suspend( 2291 unsigned int completion_status) 2292 { 2293 return 0; /* There are no Tx/Rx SSP suspend conditions. */ 2294 } 2295 2296 static int sci_request_stpsata_completion_status_is_tx_suspend( 2297 unsigned int completion_status) 2298 { 2299 switch (completion_status) { 2300 case SCU_TASK_DONE_TX_RAW_CMD_ERR: 2301 case SCU_TASK_DONE_LL_R_ERR: 2302 case SCU_TASK_DONE_LL_PERR: 2303 case SCU_TASK_DONE_REG_ERR: 2304 case SCU_TASK_DONE_SDB_ERR: 2305 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: 2306 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 2307 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 2308 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 2309 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 2310 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 2311 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 2312 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 2313 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 2314 return 1; 2315 } 2316 return 0; 2317 } 2318 2319 2320 static int sci_request_stpsata_completion_status_is_tx_rx_suspend( 2321 unsigned int completion_status) 2322 { 2323 switch (completion_status) { 2324 case SCU_TASK_DONE_LF_ERR: 2325 case SCU_TASK_DONE_LL_SY_TERM: 2326 case SCU_TASK_DONE_LL_LF_TERM: 2327 case SCU_TASK_DONE_BREAK_RCVD: 2328 case SCU_TASK_DONE_INV_FIS_LEN: 2329 case SCU_TASK_DONE_UNEXP_FIS: 2330 case SCU_TASK_DONE_UNEXP_SDBFIS: 2331 case SCU_TASK_DONE_MAX_PLD_ERR: 2332 return 1; 2333 } 2334 return 0; 2335 } 2336 2337 static void sci_request_handle_suspending_completions( 2338 struct isci_request *ireq, 2339 u32 completion_code) 2340 { 2341 int is_tx = 0; 2342 int is_tx_rx = 0; 2343 2344 switch (ireq->protocol) { 2345 case SAS_PROTOCOL_SMP: 2346 is_tx = sci_request_smp_completion_status_is_tx_suspend( 2347 completion_code); 2348 is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend( 2349 completion_code); 2350 break; 2351 case SAS_PROTOCOL_SSP: 2352 is_tx = sci_request_ssp_completion_status_is_tx_suspend( 2353 completion_code); 2354 is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend( 2355 completion_code); 2356 break; 2357 case SAS_PROTOCOL_STP: 2358 is_tx = sci_request_stpsata_completion_status_is_tx_suspend( 2359 completion_code); 2360 is_tx_rx = 2361 sci_request_stpsata_completion_status_is_tx_rx_suspend( 2362 completion_code); 2363 break; 2364 default: 2365 dev_warn(&ireq->isci_host->pdev->dev, 2366 "%s: request %p has no valid protocol\n", 2367 __func__, ireq); 2368 break; 2369 } 2370 if (is_tx || is_tx_rx) { 2371 BUG_ON(is_tx && is_tx_rx); 2372 2373 sci_remote_node_context_suspend( 2374 &ireq->target_device->rnc, 2375 SCI_HW_SUSPEND, 2376 (is_tx_rx) ? 
SCU_EVENT_TL_RNC_SUSPEND_TX_RX 2377 : SCU_EVENT_TL_RNC_SUSPEND_TX); 2378 } 2379 } 2380 2381 enum sci_status 2382 sci_io_request_tc_completion(struct isci_request *ireq, 2383 u32 completion_code) 2384 { 2385 enum sci_base_request_states state; 2386 struct isci_host *ihost = ireq->owning_controller; 2387 2388 state = ireq->sm.current_state_id; 2389 2390 /* Decode those completions that signal upcoming suspension events. */ 2391 sci_request_handle_suspending_completions( 2392 ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code)); 2393 2394 switch (state) { 2395 case SCI_REQ_STARTED: 2396 return request_started_state_tc_event(ireq, completion_code); 2397 2398 case SCI_REQ_TASK_WAIT_TC_COMP: 2399 return ssp_task_request_await_tc_event(ireq, 2400 completion_code); 2401 2402 case SCI_REQ_SMP_WAIT_RESP: 2403 return smp_request_await_response_tc_event(ireq, 2404 completion_code); 2405 2406 case SCI_REQ_SMP_WAIT_TC_COMP: 2407 return smp_request_await_tc_event(ireq, completion_code); 2408 2409 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 2410 return stp_request_udma_await_tc_event(ireq, 2411 completion_code); 2412 2413 case SCI_REQ_STP_NON_DATA_WAIT_H2D: 2414 return stp_request_non_data_await_h2d_tc_event(ireq, 2415 completion_code); 2416 2417 case SCI_REQ_STP_PIO_WAIT_H2D: 2418 return stp_request_pio_await_h2d_completion_tc_event(ireq, 2419 completion_code); 2420 2421 case SCI_REQ_STP_PIO_DATA_OUT: 2422 return pio_data_out_tx_done_tc_event(ireq, completion_code); 2423 2424 case SCI_REQ_ABORTING: 2425 return request_aborting_state_tc_event(ireq, 2426 completion_code); 2427 2428 case SCI_REQ_ATAPI_WAIT_H2D: 2429 return atapi_raw_completion(ireq, completion_code, 2430 SCI_REQ_ATAPI_WAIT_PIO_SETUP); 2431 2432 case SCI_REQ_ATAPI_WAIT_TC_COMP: 2433 return atapi_raw_completion(ireq, completion_code, 2434 SCI_REQ_ATAPI_WAIT_D2H); 2435 2436 case SCI_REQ_ATAPI_WAIT_D2H: 2437 return atapi_data_tc_completion_handler(ireq, completion_code); 2438 2439 default: 2440 dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n", 2441 __func__, completion_code, req_state_name(state)); 2442 return SCI_FAILURE_INVALID_STATE; 2443 } 2444 } 2445 2446 /** 2447 * isci_request_process_response_iu() - This function sets the status and 2448 * response iu, in the task struct, from the request object for the upper 2449 * layer driver. 2450 * @task: This parameter is the task struct from the upper layer driver. 2451 * @resp_iu: This parameter points to the response iu of the completed request. 2452 * @dev: This parameter specifies the linux device struct. 2453 * 2454 * none. 2455 */ 2456 static void isci_request_process_response_iu( 2457 struct sas_task *task, 2458 struct ssp_response_iu *resp_iu, 2459 struct device *dev) 2460 { 2461 dev_dbg(dev, 2462 "%s: resp_iu = %p " 2463 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " 2464 "resp_iu->response_data_len = %x, " 2465 "resp_iu->sense_data_len = %x\nresponse data: ", 2466 __func__, 2467 resp_iu, 2468 resp_iu->status, 2469 resp_iu->datapres, 2470 resp_iu->response_data_len, 2471 resp_iu->sense_data_len); 2472 2473 task->task_status.stat = resp_iu->status; 2474 2475 /* libsas updates the task status fields based on the response iu. */ 2476 sas_ssp_task_response(dev, task, resp_iu); 2477 } 2478 2479 /** 2480 * isci_request_set_open_reject_status() - This function prepares the I/O 2481 * completion for OPEN_REJECT conditions. 2482 * @request: This parameter is the completed isci_request object. 2483 * @task: This parameter is the task struct from the upper layer driver. 
2484 * @response_ptr: This parameter specifies the service response for the I/O. 2485 * @status_ptr: This parameter specifies the exec status for the I/O. 2486 * @open_rej_reason: This parameter specifies the encoded reason for the 2487 * abandon-class reject. 2488 * 2489 * none. 2490 */ 2491 static void isci_request_set_open_reject_status( 2492 struct isci_request *request, 2493 struct sas_task *task, 2494 enum service_response *response_ptr, 2495 enum exec_status *status_ptr, 2496 enum sas_open_rej_reason open_rej_reason) 2497 { 2498 /* Task in the target is done. */ 2499 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2500 *response_ptr = SAS_TASK_UNDELIVERED; 2501 *status_ptr = SAS_OPEN_REJECT; 2502 task->task_status.open_rej_reason = open_rej_reason; 2503 } 2504 2505 /** 2506 * isci_request_handle_controller_specific_errors() - This function decodes 2507 * controller-specific I/O completion error conditions. 2508 * @idev: Remote device 2509 * @request: This parameter is the completed isci_request object. 2510 * @task: This parameter is the task struct from the upper layer driver. 2511 * @response_ptr: This parameter specifies the service response for the I/O. 2512 * @status_ptr: This parameter specifies the exec status for the I/O. 2513 * 2514 * none. 2515 */ 2516 static void isci_request_handle_controller_specific_errors( 2517 struct isci_remote_device *idev, 2518 struct isci_request *request, 2519 struct sas_task *task, 2520 enum service_response *response_ptr, 2521 enum exec_status *status_ptr) 2522 { 2523 unsigned int cstatus; 2524 2525 cstatus = request->scu_status; 2526 2527 dev_dbg(&request->isci_host->pdev->dev, 2528 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " 2529 "- controller status = 0x%x\n", 2530 __func__, request, cstatus); 2531 2532 /* Decode the controller-specific errors; most 2533 * important is to recognize those conditions in which 2534 * the target may still have a task outstanding that 2535 * must be aborted. 2536 * 2537 * Note that there are SCU completion codes being 2538 * named in the decode below for which SCIC has already 2539 * done work to handle them in a way other than as 2540 * a controller-specific completion code; these are left 2541 * in the decode below for completeness sake. 2542 */ 2543 switch (cstatus) { 2544 case SCU_TASK_DONE_DMASETUP_DIRERR: 2545 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ 2546 case SCU_TASK_DONE_XFERCNT_ERR: 2547 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ 2548 if (task->task_proto == SAS_PROTOCOL_SMP) { 2549 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ 2550 *response_ptr = SAS_TASK_COMPLETE; 2551 2552 /* See if the device has been/is being stopped. Note 2553 * that we ignore the quiesce state, since we are 2554 * concerned about the actual device state. 2555 */ 2556 if (!idev) 2557 *status_ptr = SAS_DEVICE_UNKNOWN; 2558 else 2559 *status_ptr = SAS_ABORTED_TASK; 2560 2561 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2562 } else { 2563 /* Task in the target is not done. */ 2564 *response_ptr = SAS_TASK_UNDELIVERED; 2565 2566 if (!idev) 2567 *status_ptr = SAS_DEVICE_UNKNOWN; 2568 else 2569 *status_ptr = SAS_SAM_STAT_TASK_ABORTED; 2570 2571 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2572 } 2573 2574 break; 2575 2576 case SCU_TASK_DONE_CRC_ERR: 2577 case SCU_TASK_DONE_NAK_CMD_ERR: 2578 case SCU_TASK_DONE_EXCESS_DATA: 2579 case SCU_TASK_DONE_UNEXP_FIS: 2580 /* Also SCU_TASK_DONE_UNEXP_RESP: */ 2581 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? 
*/ 2582 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ 2583 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */ 2584 /* These are conditions in which the target 2585 * has completed the task, so that no cleanup 2586 * is necessary. 2587 */ 2588 *response_ptr = SAS_TASK_COMPLETE; 2589 2590 /* See if the device has been/is being stopped. Note 2591 * that we ignore the quiesce state, since we are 2592 * concerned about the actual device state. 2593 */ 2594 if (!idev) 2595 *status_ptr = SAS_DEVICE_UNKNOWN; 2596 else 2597 *status_ptr = SAS_ABORTED_TASK; 2598 2599 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2600 break; 2601 2602 2603 /* Note that the only open reject completion codes seen here will be 2604 * abandon-class codes; all others are automatically retried in the SCU. 2605 */ 2606 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: 2607 2608 isci_request_set_open_reject_status( 2609 request, task, response_ptr, status_ptr, 2610 SAS_OREJ_WRONG_DEST); 2611 break; 2612 2613 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 2614 2615 /* Note - the return of AB0 will change when 2616 * libsas implements detection of zone violations. 2617 */ 2618 isci_request_set_open_reject_status( 2619 request, task, response_ptr, status_ptr, 2620 SAS_OREJ_RESV_AB0); 2621 break; 2622 2623 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 2624 2625 isci_request_set_open_reject_status( 2626 request, task, response_ptr, status_ptr, 2627 SAS_OREJ_RESV_AB1); 2628 break; 2629 2630 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 2631 2632 isci_request_set_open_reject_status( 2633 request, task, response_ptr, status_ptr, 2634 SAS_OREJ_RESV_AB2); 2635 break; 2636 2637 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 2638 2639 isci_request_set_open_reject_status( 2640 request, task, response_ptr, status_ptr, 2641 SAS_OREJ_RESV_AB3); 2642 break; 2643 2644 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 2645 2646 isci_request_set_open_reject_status( 2647 request, task, response_ptr, status_ptr, 2648 SAS_OREJ_BAD_DEST); 2649 break; 2650 2651 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 2652 2653 isci_request_set_open_reject_status( 2654 request, task, response_ptr, status_ptr, 2655 SAS_OREJ_STP_NORES); 2656 break; 2657 2658 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 2659 2660 isci_request_set_open_reject_status( 2661 request, task, response_ptr, status_ptr, 2662 SAS_OREJ_EPROTO); 2663 break; 2664 2665 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 2666 2667 isci_request_set_open_reject_status( 2668 request, task, response_ptr, status_ptr, 2669 SAS_OREJ_CONN_RATE); 2670 break; 2671 2672 case SCU_TASK_DONE_LL_R_ERR: 2673 /* Also SCU_TASK_DONE_ACK_NAK_TO: */ 2674 case SCU_TASK_DONE_LL_PERR: 2675 case SCU_TASK_DONE_LL_SY_TERM: 2676 /* Also SCU_TASK_DONE_NAK_ERR:*/ 2677 case SCU_TASK_DONE_LL_LF_TERM: 2678 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ 2679 case SCU_TASK_DONE_LL_ABORT_ERR: 2680 case SCU_TASK_DONE_SEQ_INV_TYPE: 2681 /* Also SCU_TASK_DONE_UNEXP_XR: */ 2682 case SCU_TASK_DONE_XR_IU_LEN_ERR: 2683 case SCU_TASK_DONE_INV_FIS_LEN: 2684 /* Also SCU_TASK_DONE_XR_WD_LEN: */ 2685 case SCU_TASK_DONE_SDMA_ERR: 2686 case SCU_TASK_DONE_OFFSET_ERR: 2687 case SCU_TASK_DONE_MAX_PLD_ERR: 2688 case SCU_TASK_DONE_LF_ERR: 2689 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? 
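For now these codes fall through to the default handling below, which reports the task as undelivered and aborted rather than forcing a reset.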
*/ 2690 case SCU_TASK_DONE_SMP_LL_RX_ERR: 2691 case SCU_TASK_DONE_UNEXP_DATA: 2692 case SCU_TASK_DONE_UNEXP_SDBFIS: 2693 case SCU_TASK_DONE_REG_ERR: 2694 case SCU_TASK_DONE_SDB_ERR: 2695 case SCU_TASK_DONE_TASK_ABORT: 2696 default: 2697 /* Task in the target is not done. */ 2698 *response_ptr = SAS_TASK_UNDELIVERED; 2699 *status_ptr = SAS_SAM_STAT_TASK_ABORTED; 2700 2701 if (task->task_proto == SAS_PROTOCOL_SMP) 2702 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2703 else 2704 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2705 break; 2706 } 2707 } 2708 2709 static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) 2710 { 2711 struct task_status_struct *ts = &task->task_status; 2712 struct ata_task_resp *resp = (void *)&ts->buf[0]; 2713 2714 resp->frame_len = sizeof(*fis); 2715 memcpy(resp->ending_fis, fis, sizeof(*fis)); 2716 ts->buf_valid_size = sizeof(*resp); 2717 2718 /* If an error is flagged let libata decode the fis */ 2719 if (ac_err_mask(fis->status)) 2720 ts->stat = SAS_PROTO_RESPONSE; 2721 else 2722 ts->stat = SAS_SAM_STAT_GOOD; 2723 2724 ts->resp = SAS_TASK_COMPLETE; 2725 } 2726 2727 static void isci_request_io_request_complete(struct isci_host *ihost, 2728 struct isci_request *request, 2729 enum sci_io_status completion_status) 2730 { 2731 struct sas_task *task = isci_request_access_task(request); 2732 struct ssp_response_iu *resp_iu; 2733 unsigned long task_flags; 2734 struct isci_remote_device *idev = request->target_device; 2735 enum service_response response = SAS_TASK_UNDELIVERED; 2736 enum exec_status status = SAS_ABORTED_TASK; 2737 2738 dev_dbg(&ihost->pdev->dev, 2739 "%s: request = %p, task = %p, " 2740 "task->data_dir = %d completion_status = 0x%x\n", 2741 __func__, request, task, task->data_dir, completion_status); 2742 2743 /* The request is done from an SCU HW perspective. */ 2744 2745 /* This is an active request being completed from the core. */ 2746 switch (completion_status) { 2747 2748 case SCI_IO_FAILURE_RESPONSE_VALID: 2749 dev_dbg(&ihost->pdev->dev, 2750 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", 2751 __func__, request, task); 2752 2753 if (sas_protocol_ata(task->task_proto)) { 2754 isci_process_stp_response(task, &request->stp.rsp); 2755 } else if (SAS_PROTOCOL_SSP == task->task_proto) { 2756 2757 /* crack the iu response buffer. */ 2758 resp_iu = &request->ssp.rsp; 2759 isci_request_process_response_iu(task, resp_iu, 2760 &ihost->pdev->dev); 2761 2762 } else if (SAS_PROTOCOL_SMP == task->task_proto) { 2763 2764 dev_err(&ihost->pdev->dev, 2765 "%s: SCI_IO_FAILURE_RESPONSE_VALID: " 2766 "SAS_PROTOCOL_SMP protocol\n", 2767 __func__); 2768 2769 } else 2770 dev_err(&ihost->pdev->dev, 2771 "%s: unknown protocol\n", __func__); 2772 2773 /* use the task status set in the task struct by the 2774 * isci_request_process_response_iu call. 2775 */ 2776 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2777 response = task->task_status.resp; 2778 status = task->task_status.stat; 2779 break; 2780 2781 case SCI_IO_SUCCESS: 2782 case SCI_IO_SUCCESS_IO_DONE_EARLY: 2783 2784 response = SAS_TASK_COMPLETE; 2785 status = SAS_SAM_STAT_GOOD; 2786 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2787 2788 if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { 2789 2790 /* This was an SSP / STP / SATA transfer. 2791 * There is a possibility that less data than 2792 * the maximum was transferred. 
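* The residual is computed below from the byte count the SCU reports as actually transmitted, and a non-zero residual is reported as an underrun.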
2793 */ 2794 u32 transferred_length = sci_req_tx_bytes(request); 2795 2796 task->task_status.residual 2797 = task->total_xfer_len - transferred_length; 2798 2799 /* If there were residual bytes, call this an 2800 * underrun. 2801 */ 2802 if (task->task_status.residual != 0) 2803 status = SAS_DATA_UNDERRUN; 2804 2805 dev_dbg(&ihost->pdev->dev, 2806 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", 2807 __func__, status); 2808 2809 } else 2810 dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n", 2811 __func__); 2812 break; 2813 2814 case SCI_IO_FAILURE_TERMINATED: 2815 2816 dev_dbg(&ihost->pdev->dev, 2817 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", 2818 __func__, request, task); 2819 2820 /* The request was terminated explicitly. */ 2821 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2822 response = SAS_TASK_UNDELIVERED; 2823 2824 /* See if the device has been/is being stopped. Note 2825 * that we ignore the quiesce state, since we are 2826 * concerned about the actual device state. 2827 */ 2828 if (!idev) 2829 status = SAS_DEVICE_UNKNOWN; 2830 else 2831 status = SAS_ABORTED_TASK; 2832 break; 2833 2834 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: 2835 2836 isci_request_handle_controller_specific_errors(idev, request, 2837 task, &response, 2838 &status); 2839 break; 2840 2841 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: 2842 /* This is a special case, in that the I/O completion 2843 * is telling us that the device needs a reset. 2844 * In order for the device reset condition to be 2845 * noticed, the I/O has to be handled in the error 2846 * handler. Set the reset flag and cause the 2847 * SCSI error thread to be scheduled. 2848 */ 2849 spin_lock_irqsave(&task->task_state_lock, task_flags); 2850 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 2851 spin_unlock_irqrestore(&task->task_state_lock, task_flags); 2852 2853 /* Fail the I/O. */ 2854 response = SAS_TASK_UNDELIVERED; 2855 status = SAS_SAM_STAT_TASK_ABORTED; 2856 2857 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2858 break; 2859 2860 case SCI_FAILURE_RETRY_REQUIRED: 2861 2862 /* Fail the I/O so it can be retried. */ 2863 response = SAS_TASK_UNDELIVERED; 2864 if (!idev) 2865 status = SAS_DEVICE_UNKNOWN; 2866 else 2867 status = SAS_ABORTED_TASK; 2868 2869 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2870 break; 2871 2872 2873 default: 2874 /* Catch any otherwise unhandled error codes here. */ 2875 dev_dbg(&ihost->pdev->dev, 2876 "%s: invalid completion code: 0x%x - " 2877 "isci_request = %p\n", 2878 __func__, completion_status, request); 2879 2880 response = SAS_TASK_UNDELIVERED; 2881 2882 /* See if the device has been/is being stopped. Note 2883 * that we ignore the quiesce state, since we are 2884 * concerned about the actual device state. 
2885 */ 2886 if (!idev) 2887 status = SAS_DEVICE_UNKNOWN; 2888 else 2889 status = SAS_ABORTED_TASK; 2890 2891 if (SAS_PROTOCOL_SMP == task->task_proto) 2892 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2893 else 2894 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2895 break; 2896 } 2897 2898 switch (task->task_proto) { 2899 case SAS_PROTOCOL_SSP: 2900 if (task->data_dir == DMA_NONE) 2901 break; 2902 if (task->num_scatter == 0) 2903 /* 0 indicates a single dma address */ 2904 dma_unmap_single(&ihost->pdev->dev, 2905 request->zero_scatter_daddr, 2906 task->total_xfer_len, task->data_dir); 2907 else /* unmap the sgl dma addresses */ 2908 dma_unmap_sg(&ihost->pdev->dev, task->scatter, 2909 request->num_sg_entries, task->data_dir); 2910 break; 2911 case SAS_PROTOCOL_SMP: { 2912 struct scatterlist *sg = &task->smp_task.smp_req; 2913 struct smp_req *smp_req; 2914 void *kaddr; 2915 2916 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); 2917 2918 /* need to swab it back in case the command buffer is re-used */ 2919 kaddr = kmap_atomic(sg_page(sg)); 2920 smp_req = kaddr + sg->offset; 2921 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 2922 kunmap_atomic(kaddr); 2923 break; 2924 } 2925 default: 2926 break; 2927 } 2928 2929 spin_lock_irqsave(&task->task_state_lock, task_flags); 2930 2931 task->task_status.resp = response; 2932 task->task_status.stat = status; 2933 2934 if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) { 2935 /* Normal notification (task_done) */ 2936 task->task_state_flags |= SAS_TASK_STATE_DONE; 2937 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | 2938 SAS_TASK_STATE_PENDING); 2939 } 2940 spin_unlock_irqrestore(&task->task_state_lock, task_flags); 2941 2942 /* complete the io request to the core. */ 2943 sci_controller_complete_io(ihost, request->target_device, request); 2944 2945 /* set terminated handle so it cannot be completed or 2946 * terminated again, and to cause any calls into abort 2947 * task to recognize the already completed case. 2948 */ 2949 set_bit(IREQ_TERMINATED, &request->flags); 2950 2951 ireq_done(ihost, request, task); 2952 } 2953 2954 static void sci_request_started_state_enter(struct sci_base_state_machine *sm) 2955 { 2956 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2957 struct domain_device *dev = ireq->target_device->domain_dev; 2958 enum sci_base_request_states state; 2959 struct sas_task *task; 2960 2961 /* XXX as hch said always creating an internal sas_task for tmf 2962 * requests would simplify the driver 2963 */ 2964 task = (test_bit(IREQ_TMF, &ireq->flags)) ? 
NULL : isci_request_access_task(ireq); 2965 2966 /* all unaccelerated request types (non ssp or ncq) handled with 2967 * substates 2968 */ 2969 if (!task && dev->dev_type == SAS_END_DEVICE) { 2970 state = SCI_REQ_TASK_WAIT_TC_COMP; 2971 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 2972 state = SCI_REQ_SMP_WAIT_RESP; 2973 } else if (task && sas_protocol_ata(task->task_proto) && 2974 !task->ata_task.use_ncq) { 2975 if (dev->sata_dev.class == ATA_DEV_ATAPI && 2976 task->ata_task.fis.command == ATA_CMD_PACKET) { 2977 state = SCI_REQ_ATAPI_WAIT_H2D; 2978 } else if (task->data_dir == DMA_NONE) { 2979 state = SCI_REQ_STP_NON_DATA_WAIT_H2D; 2980 } else if (task->ata_task.dma_xfer) { 2981 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; 2982 } else /* PIO */ { 2983 state = SCI_REQ_STP_PIO_WAIT_H2D; 2984 } 2985 } else { 2986 /* SSP or NCQ are fully accelerated, no substates */ 2987 return; 2988 } 2989 sci_change_state(sm, state); 2990 } 2991 2992 static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) 2993 { 2994 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 2995 struct isci_host *ihost = ireq->owning_controller; 2996 2997 /* Tell the SCI_USER that the IO request is complete */ 2998 if (!test_bit(IREQ_TMF, &ireq->flags)) 2999 isci_request_io_request_complete(ihost, ireq, 3000 ireq->sci_status); 3001 else 3002 isci_task_request_complete(ihost, ireq, ireq->sci_status); 3003 } 3004 3005 static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm) 3006 { 3007 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3008 3009 /* Setting the abort bit in the Task Context is required by the silicon. */ 3010 ireq->tc->abort = 1; 3011 } 3012 3013 static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) 3014 { 3015 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3016 3017 ireq->target_device->working_request = ireq; 3018 } 3019 3020 static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) 3021 { 3022 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3023 3024 ireq->target_device->working_request = ireq; 3025 } 3026 3027 static const struct sci_base_state sci_request_state_table[] = { 3028 [SCI_REQ_INIT] = { }, 3029 [SCI_REQ_CONSTRUCTED] = { }, 3030 [SCI_REQ_STARTED] = { 3031 .enter_state = sci_request_started_state_enter, 3032 }, 3033 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { 3034 .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter, 3035 }, 3036 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, 3037 [SCI_REQ_STP_PIO_WAIT_H2D] = { 3038 .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter, 3039 }, 3040 [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, 3041 [SCI_REQ_STP_PIO_DATA_IN] = { }, 3042 [SCI_REQ_STP_PIO_DATA_OUT] = { }, 3043 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, 3044 [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, 3045 [SCI_REQ_TASK_WAIT_TC_COMP] = { }, 3046 [SCI_REQ_TASK_WAIT_TC_RESP] = { }, 3047 [SCI_REQ_SMP_WAIT_RESP] = { }, 3048 [SCI_REQ_SMP_WAIT_TC_COMP] = { }, 3049 [SCI_REQ_ATAPI_WAIT_H2D] = { }, 3050 [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { }, 3051 [SCI_REQ_ATAPI_WAIT_D2H] = { }, 3052 [SCI_REQ_ATAPI_WAIT_TC_COMP] = { }, 3053 [SCI_REQ_COMPLETED] = { 3054 .enter_state = sci_request_completed_state_enter, 3055 }, 3056 [SCI_REQ_ABORTING] = { 3057 .enter_state = sci_request_aborting_state_enter, 3058 }, 3059 [SCI_REQ_FINAL] = { }, 3060 }; 3061 3062 static void 3063 sci_general_request_construct(struct isci_host 
*ihost, 3064 struct isci_remote_device *idev, 3065 struct isci_request *ireq) 3066 { 3067 sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); 3068 3069 ireq->target_device = idev; 3070 ireq->protocol = SAS_PROTOCOL_NONE; 3071 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; 3072 3073 ireq->sci_status = SCI_SUCCESS; 3074 ireq->scu_status = 0; 3075 ireq->post_context = 0xFFFFFFFF; 3076 } 3077 3078 static enum sci_status 3079 sci_io_request_construct(struct isci_host *ihost, 3080 struct isci_remote_device *idev, 3081 struct isci_request *ireq) 3082 { 3083 struct domain_device *dev = idev->domain_dev; 3084 enum sci_status status = SCI_SUCCESS; 3085 3086 /* Build the common part of the request */ 3087 sci_general_request_construct(ihost, idev, ireq); 3088 3089 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 3090 return SCI_FAILURE_INVALID_REMOTE_DEVICE; 3091 3092 if (dev->dev_type == SAS_END_DEVICE) 3093 /* pass */; 3094 else if (dev_is_sata(dev)) 3095 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 3096 else if (dev_is_expander(dev->dev_type)) 3097 /* pass */; 3098 else 3099 return SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3100 3101 memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); 3102 3103 return status; 3104 } 3105 3106 enum sci_status sci_task_request_construct(struct isci_host *ihost, 3107 struct isci_remote_device *idev, 3108 u16 io_tag, struct isci_request *ireq) 3109 { 3110 struct domain_device *dev = idev->domain_dev; 3111 enum sci_status status = SCI_SUCCESS; 3112 3113 /* Build the common part of the request */ 3114 sci_general_request_construct(ihost, idev, ireq); 3115 3116 if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) { 3117 set_bit(IREQ_TMF, &ireq->flags); 3118 memset(ireq->tc, 0, sizeof(struct scu_task_context)); 3119 3120 /* Set the protocol indicator. 
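Only SSP and STP/SATA targets support task management requests here, so the choice below is between those two protocols.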
*/ 3121 if (dev_is_sata(dev)) 3122 ireq->protocol = SAS_PROTOCOL_STP; 3123 else 3124 ireq->protocol = SAS_PROTOCOL_SSP; 3125 } else 3126 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3127 3128 return status; 3129 } 3130 3131 static enum sci_status isci_request_ssp_request_construct( 3132 struct isci_request *request) 3133 { 3134 enum sci_status status; 3135 3136 dev_dbg(&request->isci_host->pdev->dev, 3137 "%s: request = %p\n", 3138 __func__, 3139 request); 3140 status = sci_io_request_construct_basic_ssp(request); 3141 return status; 3142 } 3143 3144 static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq) 3145 { 3146 struct sas_task *task = isci_request_access_task(ireq); 3147 struct host_to_dev_fis *fis = &ireq->stp.cmd; 3148 struct ata_queued_cmd *qc = task->uldd_task; 3149 enum sci_status status; 3150 3151 dev_dbg(&ireq->isci_host->pdev->dev, 3152 "%s: ireq = %p\n", 3153 __func__, 3154 ireq); 3155 3156 memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); 3157 if (!task->ata_task.device_control_reg_update) 3158 fis->flags |= 0x80; 3159 fis->flags &= 0xF0; 3160 3161 status = sci_io_request_construct_basic_sata(ireq); 3162 3163 if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE || 3164 qc->tf.command == ATA_CMD_FPDMA_READ || 3165 qc->tf.command == ATA_CMD_FPDMA_RECV || 3166 qc->tf.command == ATA_CMD_FPDMA_SEND || 3167 qc->tf.command == ATA_CMD_NCQ_NON_DATA)) { 3168 fis->sector_count = qc->tag << 3; 3169 ireq->tc->type.stp.ncq_tag = qc->tag; 3170 } 3171 3172 return status; 3173 } 3174 3175 static enum sci_status 3176 sci_io_request_construct_smp(struct device *dev, 3177 struct isci_request *ireq, 3178 struct sas_task *task) 3179 { 3180 struct scatterlist *sg = &task->smp_task.smp_req; 3181 struct isci_remote_device *idev; 3182 struct scu_task_context *task_context; 3183 struct isci_port *iport; 3184 struct smp_req *smp_req; 3185 void *kaddr; 3186 u8 req_len; 3187 u32 cmd; 3188 3189 kaddr = kmap_atomic(sg_page(sg)); 3190 smp_req = kaddr + sg->offset; 3191 /* 3192 * Look at the SMP request's header fields; for certain SAS 1.x SMP 3193 * functions under SAS 2.0, a zero request length really indicates 3194 * a non-zero default length. 3195 */ 3196 if (smp_req->req_len == 0) { 3197 switch (smp_req->func) { 3198 case SMP_DISCOVER: 3199 case SMP_REPORT_PHY_ERR_LOG: 3200 case SMP_REPORT_PHY_SATA: 3201 case SMP_REPORT_ROUTE_INFO: 3202 smp_req->req_len = 2; 3203 break; 3204 case SMP_CONF_ROUTE_INFO: 3205 case SMP_PHY_CONTROL: 3206 case SMP_PHY_TEST_FUNCTION: 3207 smp_req->req_len = 9; 3208 break; 3209 /* Default - zero is a valid default for 2.0. */ 3210 } 3211 } 3212 req_len = smp_req->req_len; 3213 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 3214 cmd = *(u32 *) smp_req; 3215 kunmap_atomic(kaddr); 3216 3217 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) 3218 return SCI_FAILURE; 3219 3220 ireq->protocol = SAS_PROTOCOL_SMP; 3221 3222 /* byte swap the smp request.
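(The swab itself was already done above via sci_swab32_cpy(), before the buffer was DMA-mapped; only the first dword, saved in 'cmd', is copied into the task context below.)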
*/ 3223 3224 task_context = ireq->tc; 3225 3226 idev = ireq->target_device; 3227 iport = idev->owning_port; 3228 3229 /* 3230 * Fill in the TC with its required data 3231 * 00h 3232 */ 3233 task_context->priority = 0; 3234 task_context->initiator_request = 1; 3235 task_context->connection_rate = idev->connection_rate; 3236 task_context->protocol_engine_index = ISCI_PEG; 3237 task_context->logical_port_index = iport->physical_port_index; 3238 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; 3239 task_context->abort = 0; 3240 task_context->valid = SCU_TASK_CONTEXT_VALID; 3241 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 3242 3243 /* 04h */ 3244 task_context->remote_node_index = idev->rnc.remote_node_index; 3245 task_context->command_code = 0; 3246 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST; 3247 3248 /* 08h */ 3249 task_context->link_layer_control = 0; 3250 task_context->do_not_dma_ssp_good_response = 1; 3251 task_context->strict_ordering = 0; 3252 task_context->control_frame = 1; 3253 task_context->timeout_enable = 0; 3254 task_context->block_guard_enable = 0; 3255 3256 /* 0ch */ 3257 task_context->address_modifier = 0; 3258 3259 /* 10h */ 3260 task_context->ssp_command_iu_length = req_len; 3261 3262 /* 14h */ 3263 task_context->transfer_length_bytes = 0; 3264 3265 /* 3266 * 18h ~ 30h, protocol specific 3267 * since the command IU has been built by the framework at this point, we 3268 * just copy the first DWord from the command IU to this location. */ 3269 memcpy(&task_context->type.smp, &cmd, sizeof(u32)); 3270 3271 /* 3272 * 40h 3273 * "For SMP you could program it to zero. We would prefer that way 3274 * so that done code will be consistent." - Venki 3275 */ 3276 task_context->task_phase = 0; 3277 3278 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 3279 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 3280 (iport->physical_port_index << 3281 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 3282 ISCI_TAG_TCI(ireq->io_tag)); 3283 /* 3284 * Copy the physical address of the command buffer to the SCU Task 3285 * Context; the command buffer should not contain the command header. 3286 */ 3287 task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg)); 3288 task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32)); 3289 3290 /* SMP response comes as UF, so no need to set response IU address. */ 3291 task_context->response_iu_upper = 0; 3292 task_context->response_iu_lower = 0; 3293 3294 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 3295 3296 return SCI_SUCCESS; 3297 } 3298 3299 /* 3300 * isci_smp_request_build() - This function builds the smp request. 3301 * @ireq: This parameter points to the isci_request allocated in the 3302 * request construct function. 3303 * 3304 * SCI_SUCCESS on successful completion, or specific failure code. 3305 */ 3306 static enum sci_status isci_smp_request_build(struct isci_request *ireq) 3307 { 3308 struct sas_task *task = isci_request_access_task(ireq); 3309 struct device *dev = &ireq->isci_host->pdev->dev; 3310 enum sci_status status = SCI_FAILURE; 3311 3312 status = sci_io_request_construct_smp(dev, ireq, task); 3313 if (status != SCI_SUCCESS) 3314 dev_dbg(&ireq->isci_host->pdev->dev, 3315 "%s: failed with status = %d\n", 3316 __func__, 3317 status); 3318 3319 return status; 3320 } 3321 3322 /** 3323 * isci_io_request_build() - This function builds the io request object.
3324 * @ihost: This parameter specifies the ISCI host object 3325 * @request: This parameter points to the isci_request object allocated in the 3326 * request construct function. 3327 * @idev: This parameter is the handle for the sci core's remote device 3328 * object that is the destination for this request. 3329 * 3330 * SCI_SUCCESS on successful completion, or specific failure code. 3331 */ 3332 static enum sci_status isci_io_request_build(struct isci_host *ihost, 3333 struct isci_request *request, 3334 struct isci_remote_device *idev) 3335 { 3336 enum sci_status status = SCI_SUCCESS; 3337 struct sas_task *task = isci_request_access_task(request); 3338 3339 dev_dbg(&ihost->pdev->dev, 3340 "%s: idev = 0x%p; request = %p, " 3341 "num_scatter = %d\n", 3342 __func__, 3343 idev, 3344 request, 3345 task->num_scatter); 3346 3347 /* map the sgl addresses, if present. 3348 * libata does the mapping for sata devices 3349 * before we get the request. 3350 */ 3351 if (task->num_scatter && 3352 !sas_protocol_ata(task->task_proto) && 3353 !(SAS_PROTOCOL_SMP & task->task_proto)) { 3354 3355 request->num_sg_entries = dma_map_sg( 3356 &ihost->pdev->dev, 3357 task->scatter, 3358 task->num_scatter, 3359 task->data_dir 3360 ); 3361 3362 if (request->num_sg_entries == 0) 3363 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 3364 } 3365 3366 status = sci_io_request_construct(ihost, idev, request); 3367 3368 if (status != SCI_SUCCESS) { 3369 dev_dbg(&ihost->pdev->dev, 3370 "%s: failed request construct\n", 3371 __func__); 3372 return SCI_FAILURE; 3373 } 3374 3375 switch (task->task_proto) { 3376 case SAS_PROTOCOL_SMP: 3377 status = isci_smp_request_build(request); 3378 break; 3379 case SAS_PROTOCOL_SSP: 3380 status = isci_request_ssp_request_construct(request); 3381 break; 3382 case SAS_PROTOCOL_SATA: 3383 case SAS_PROTOCOL_STP: 3384 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 3385 status = isci_request_stp_request_construct(request); 3386 break; 3387 default: 3388 dev_dbg(&ihost->pdev->dev, 3389 "%s: unknown protocol\n", __func__); 3390 return SCI_FAILURE; 3391 } 3392 3393 return SCI_SUCCESS; 3394 } 3395 3396 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag) 3397 { 3398 struct isci_request *ireq; 3399 3400 ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; 3401 ireq->io_tag = tag; 3402 ireq->io_request_completion = NULL; 3403 ireq->flags = 0; 3404 ireq->num_sg_entries = 0; 3405 3406 return ireq; 3407 } 3408 3409 static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, 3410 struct sas_task *task, 3411 u16 tag) 3412 { 3413 struct isci_request *ireq; 3414 3415 ireq = isci_request_from_tag(ihost, tag); 3416 ireq->ttype_ptr.io_task_ptr = task; 3417 clear_bit(IREQ_TMF, &ireq->flags); 3418 task->lldd_task = ireq; 3419 3420 return ireq; 3421 } 3422 3423 struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, 3424 struct isci_tmf *isci_tmf, 3425 u16 tag) 3426 { 3427 struct isci_request *ireq; 3428 3429 ireq = isci_request_from_tag(ihost, tag); 3430 ireq->ttype_ptr.tmf_task_ptr = isci_tmf; 3431 set_bit(IREQ_TMF, &ireq->flags); 3432 3433 return ireq; 3434 } 3435 3436 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, 3437 struct sas_task *task, u16 tag) 3438 { 3439 enum sci_status status; 3440 struct isci_request *ireq; 3441 unsigned long flags; 3442 int ret = 0; 3443 3444 /* do common allocation and init of request object.
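The request comes from the host's tag-indexed pool, so this step cannot fail.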
*/ 3445 ireq = isci_io_request_from_tag(ihost, task, tag); 3446 3447 status = isci_io_request_build(ihost, ireq, idev); 3448 if (status != SCI_SUCCESS) { 3449 dev_dbg(&ihost->pdev->dev, 3450 "%s: request_construct failed - status = 0x%x\n", 3451 __func__, 3452 status); 3453 return status; 3454 } 3455 3456 spin_lock_irqsave(&ihost->scic_lock, flags); 3457 3458 if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) { 3459 3460 if (isci_task_is_ncq_recovery(task)) { 3461 3462 /* The device is in an NCQ recovery state. Issue the 3463 * request on the task side. Note that it will 3464 * complete on the I/O request side because the 3465 * request was built that way (i.e. 3466 * ireq->is_task_management_request is false). 3467 */ 3468 status = sci_controller_start_task(ihost, 3469 idev, 3470 ireq); 3471 } else { 3472 status = SCI_FAILURE; 3473 } 3474 } else { 3475 /* send the request, let the core assign the IO TAG. */ 3476 status = sci_controller_start_io(ihost, idev, 3477 ireq); 3478 } 3479 3480 if (status != SCI_SUCCESS && 3481 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 3482 dev_dbg(&ihost->pdev->dev, 3483 "%s: failed request start (0x%x)\n", 3484 __func__, status); 3485 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3486 return status; 3487 } 3488 /* Either I/O started OK, or the core has signaled that 3489 * the device needs a target reset. 3490 */ 3491 if (status != SCI_SUCCESS) { 3492 /* The request did not really start in the 3493 * hardware, so clear the request handle 3494 * here so no terminations will be done. 3495 */ 3496 set_bit(IREQ_TERMINATED, &ireq->flags); 3497 } 3498 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3499 3500 if (status == 3501 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 3502 /* Signal libsas that we need the SCSI error 3503 * handler thread to work on this I/O and that 3504 * we want a device reset. 3505 */ 3506 spin_lock_irqsave(&task->task_state_lock, flags); 3507 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 3508 spin_unlock_irqrestore(&task->task_state_lock, flags); 3509 3510 /* Cause this task to be scheduled in the SCSI error 3511 * handler thread. 3512 */ 3513 sas_task_abort(task); 3514 3515 /* Change the status, since we are holding 3516 * the I/O until it is managed by the SCSI 3517 * error handler. 3518 */ 3519 status = SCI_SUCCESS; 3520 } 3521 3522 return ret; 3523 } 3524