/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <asm/page.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_scsi.h"
#include "csio_init.h"

int csio_scsi_eqsize = 65536;
int csio_scsi_iqlen = 128;
int csio_scsi_ioreqs = 2048;
uint32_t csio_max_scan_tmo;
uint32_t csio_delta_scan_tmo = 5;
int csio_lun_qdepth = 32;

static int csio_ddp_descs = 128;

static int csio_do_abrt_cls(struct csio_hw *,
			    struct csio_ioreq *, bool);

static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);

/*
 * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
 * @ioreq: The I/O request
 * @sld: Level information
 *
 * Should be called with lock held.
 *
 */
static bool
csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
{
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);

	switch (sld->level) {
	case CSIO_LEV_LUN:
		if (scmnd == NULL)
			return false;

		return ((ioreq->lnode == sld->lnode) &&
			(ioreq->rnode == sld->rnode) &&
			((uint64_t)scmnd->device->lun == sld->oslun));

	case CSIO_LEV_RNODE:
		return ((ioreq->lnode == sld->lnode) &&
			(ioreq->rnode == sld->rnode));
	case CSIO_LEV_LNODE:
		return (ioreq->lnode == sld->lnode);
	case CSIO_LEV_ALL:
		return true;
	default:
		return false;
	}
}

/*
 * csio_scsi_gather_active_ios - Gather active I/Os based on level
 * @scm: SCSI module
 * @sld: Level information
 * @dest: The queue where these I/Os have to be gathered.
 *
 * Should be called with lock held.
 */
static void
csio_scsi_gather_active_ios(struct csio_scsim *scm,
			    struct csio_scsi_level_data *sld,
			    struct list_head *dest)
{
	struct list_head *tmp, *next;

	if (list_empty(&scm->active_q))
		return;

	/* Just splice the entire active_q into dest */
	if (sld->level == CSIO_LEV_ALL) {
		list_splice_tail_init(&scm->active_q, dest);
		return;
	}

	list_for_each_safe(tmp, next, &scm->active_q) {
		if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {
			list_del_init(tmp);
			list_add_tail(tmp, dest);
		}
	}
}

static inline bool
csio_scsi_itnexus_loss_error(uint16_t error)
{
	switch (error) {
	case FW_ERR_LINK_DOWN:
	case FW_RDEV_NOT_READY:
	case FW_ERR_RDEV_LOST:
	case FW_ERR_RDEV_LOGO:
	case FW_ERR_RDEV_IMPL_LOGO:
		return 1;
	}
	return 0;
}

static inline void
csio_scsi_tag(struct scsi_cmnd *scmnd, uint8_t *tag, uint8_t hq,
	      uint8_t oq, uint8_t sq)
{
	char stag[2];

	if (scsi_populate_tag_msg(scmnd, stag)) {
		switch (stag[0]) {
		case HEAD_OF_QUEUE_TAG:
			*tag = hq;
			break;
		case ORDERED_QUEUE_TAG:
			*tag = oq;
			break;
		default:
			*tag = sq;
			break;
		}
	} else
		*tag = 0;
}

/*
 * csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 *
 * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.
 */
static inline void
csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
{
	struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	/* Check for Task Management */
	if (likely(scmnd->SCp.Message == 0)) {
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = 0;
		fcp_cmnd->fc_cmdref = 0;
		fcp_cmnd->fc_pri_ta = 0;

		memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
		csio_scsi_tag(scmnd, &fcp_cmnd->fc_pri_ta,
			      FCP_PTA_HEADQ, FCP_PTA_ORDERED, FCP_PTA_SIMPLE);
		fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));

		if (req->nsge)
			if (req->datadir == DMA_TO_DEVICE)
				fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
			else
				fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
		else
			fcp_cmnd->fc_flags = 0;
	} else {
		memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
	}
}

/*
 * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry).
 *
 * Wrapper for populating fw_scsi_cmd_wr.
 */
static inline void
csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_CMD_WR) |
				     FW_SCSI_CMD_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
				       FW_WR_LEN16(
					       DIV_ROUND_UP(size, 16)));

	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	wr->r3 = 0;
	memset(&wr->r5, 0, 8);

	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r6 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r4_lo[0] = 0;
	wr->u.fcoe.r4_lo[1] = 0;

	/* Frame a FCP command */
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
					 sizeof(struct fw_scsi_cmd_wr)));
}

#define CSIO_SCSI_CMD_WR_SZ(_imm)					\
	(sizeof(struct fw_scsi_cmd_wr) +		/* WR size */	\
	 ALIGN((_imm), 16))				/* Immed data */

#define CSIO_SCSI_CMD_WR_SZ_16(_imm)					\
	(ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))

/*
 * csio_scsi_cmd - Create a SCSI CMD WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with the SCSI CMD WR.
 *
 */
static inline void
csio_scsi_cmd(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (unlikely(req->drv_status != 0))
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_cmd_wr(req, wrp.addr1, size);
	} else {
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);

		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the WR pair.
		 */
		csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}

/*
 * csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL
 * @hw: HW module
 * @req: IO request
 * @sgl: ULP TX SGL pointer.
 *
 */
static inline void
csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
			   struct ulptx_sgl *sgl)
{
	struct ulptx_sge_pair *sge_pair = NULL;
	struct scatterlist *sgel;
	uint32_t i = 0;
	uint32_t xfer_len;
	struct list_head *tmp;
	struct csio_dma_buf *dma_buf;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_MORE |
			      ULPTX_NSGE(req->nsge));
	/* Now add the data SGLs */
	if (likely(!req->dcopy)) {
		scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
				sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
				continue;
			}
			if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[1] = cpu_to_be32(
							sg_dma_len(sgel));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[0] = cpu_to_be32(
							sg_dma_len(sgel));
			}
		}
	} else {
		/* Program sg elements with driver's DDP buffer */
		xfer_len = scsi_bufflen(scmnd);
		list_for_each(tmp, &req->gen_list) {
			dma_buf = (struct csio_dma_buf *)tmp;
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(dma_buf->paddr);
				sgl->len0 = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
			} else if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[1] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[0] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
			}
			xfer_len -= min(xfer_len, dma_buf->len);
			i++;
		}
	}
}

/*
 * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
 *
 * Wrapper for populating fw_scsi_read_wr.
 */
static inline void
csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_READ_WR) |
				     FW_SCSI_READ_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
				       FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					 sizeof(struct fw_scsi_read_wr)));

	/* Move WR pointer past command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
			sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/*
 * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
 *
 * Wrapper for populating fw_scsi_write_wr.
 */
static inline void
csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_WRITE_WR) |
				     FW_SCSI_WRITE_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
				       FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					 sizeof(struct fw_scsi_write_wr)));

	/* Move WR pointer past command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
			sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm)				\
do {									\
	(sz) = sizeof(struct fw_scsi_##oper##_wr) +	/* WR size */	\
	       ALIGN((imm), 16) +			/* Immed data */\
	       sizeof(struct ulptx_sgl);		/* ulptx_sgl */	\
									\
	if (unlikely((req)->nsge > 1))					\
		(sz) += (sizeof(struct ulptx_sge_pair) *		\
				(ALIGN(((req)->nsge - 1), 2) / 2));	\
							/* Data SGE */	\
} while (0)

/*
 * csio_scsi_read - Create a SCSI READ WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with the
 * SCSI READ WR.
 *
 */
static inline void
csio_scsi_read(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_read_wr(req, wrp.addr1, size);
		} else {
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
			/*
			 * Make a temporary copy of the WR and write back
			 * the copy into the WR pair.
			 */
			csio_scsi_init_read_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}

/*
 * csio_scsi_write - Create a SCSI WRITE WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with the
 * SCSI WRITE WR.
 *
 */
static inline void
csio_scsi_write(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_write_wr(req, wrp.addr1, size);
		} else {
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
			/*
			 * Make a temporary copy of the WR and write back
			 * the copy into the WR pair.
			 */
			csio_scsi_init_write_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}

/*
 * csio_setup_ddp - Setup DDP buffers for Read request.
 * @req: IO req structure.
 *
 * Checks whether the SGLs/data buffers are virtually contiguous, as required
 * for DDP. If they are contiguous, the driver posts the SGLs in the WR;
 * otherwise it posts internal DDP buffers for the request.
 */
static inline void
csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
{
#ifdef __CSIO_DEBUG__
	struct csio_hw *hw = req->lnode->hwp;
#endif
	struct scatterlist *sgel = NULL;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
	uint64_t sg_addr = 0;
	uint32_t ddp_pagesz = 4096;
	uint32_t buf_off;
	struct csio_dma_buf *dma_buf = NULL;
	uint32_t alloc_len = 0;
	uint32_t xfer_len = 0;
	uint32_t sg_len = 0;
	uint32_t i;

	scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
		sg_addr = sg_dma_address(sgel);
		sg_len = sg_dma_len(sgel);

		buf_off = sg_addr & (ddp_pagesz - 1);

		/* Except 1st buffer, all buffer addrs have to be page-aligned */
		if (i != 0 && buf_off) {
			csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
				 sg_addr, sg_len);
			goto unaligned;
		}

		/* Except last buffer, all buffers must end on a page boundary */
		if ((i != (req->nsge - 1)) &&
		    ((buf_off + sg_len) & (ddp_pagesz - 1))) {
			csio_dbg(hw,
				 "SGL addr not ending on page boundary"
				 "(%llx:%d)\n", sg_addr, sg_len);
			goto unaligned;
		}
	}

	/* SGLs are virtually contiguous. HW will DDP to SGLs */
	req->dcopy = 0;
	csio_scsi_read(req);

	return;

unaligned:
	CSIO_INC_STATS(scsim, n_unaligned);
	/*
	 * For unaligned SGLs, the driver allocates internal DDP buffers.
	 * Once the command completes, data from the DDP buffers is copied
	 * to the SGLs.
	 */
	req->dcopy = 1;

	/* Use gen_list to store the DDP buffers */
	INIT_LIST_HEAD(&req->gen_list);
	xfer_len = scsi_bufflen(scmnd);

	i = 0;
	/* Allocate ddp buffers for this request */
	while (alloc_len < xfer_len) {
		dma_buf = csio_get_scsi_ddp(scsim);
		if (dma_buf == NULL || i > scsim->max_sge) {
			req->drv_status = -EBUSY;
			break;
		}
		alloc_len += dma_buf->len;
		/* Added to IO req */
		list_add_tail(&dma_buf->list, &req->gen_list);
		i++;
	}

	if (!req->drv_status) {
		/* set number of ddp bufs used */
		req->nsge = i;
		csio_scsi_read(req);
		return;
	}

	/* release dma descs */
	if (i > 0)
		csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
}

/*
 * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR
 * @abort: abort OR close
 *
 * Wrapper for populating fw_scsi_abrt_cls_wr.
 */
static inline void
csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
			   bool abort)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_ABRT_CLS_WR));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
				       FW_WR_LEN16(
					       DIV_ROUND_UP(size, 16)));

	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	/* 0 for CHK_ALL_IO tells FW to look up t_cookie */
	wr->sub_opcode_to_chk_all_io =
				(FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
				 FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
	wr->r3[0] = 0;
	wr->r3[1] = 0;
	wr->r3[2] = 0;
	wr->r3[3] = 0;
	/* Since we re-use the same ioreq for abort as well */
	wr->t_cookie = (uintptr_t) req;
}

static inline void
csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (req->drv_status != 0)
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
	} else {
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the WR pair.
		 */
		csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}

/*****************************************************************************/
/* START: SCSI SM                                                            */
/*****************************************************************************/
static void
csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_START_IO:

		if (req->nsge) {
			if (req->datadir == DMA_TO_DEVICE) {
				req->dcopy = 0;
				csio_scsi_write(req);
			} else
				csio_setup_ddp(scsim, req);
		} else {
			csio_scsi_cmd(req);
		}

		if (likely(req->drv_status == 0)) {
			/* change state and enqueue on active_q */
			csio_set_state(&req->sm, csio_scsis_io_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			CSIO_INC_STATS(scsim, n_active);

			return;
		}
		break;

	case CSIO_SCSIE_START_TM:
		csio_scsi_cmd(req);
		if (req->drv_status == 0) {
			/*
			 * NOTE: We collect the affected I/Os prior to issuing
			 * LUN reset, and not after it. This is to prevent
			 * aborting I/Os that get issued after the LUN reset,
			 * but prior to LUN reset completion (in the event that
			 * the host stack has not blocked I/Os to a LUN that is
			 * being reset).
			 */
			csio_set_state(&req->sm, csio_scsis_tm_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			CSIO_INC_STATS(scsim, n_tm_active);
		}
		return;

	case CSIO_SCSIE_ABORT:
	case CSIO_SCSIE_CLOSE:
		/*
		 * NOTE:
		 * We could get here due to:
		 * - a window in the cleanup path of the SCSI module
		 *   (csio_scsi_abort_io()). Please see NOTE in this function.
		 * - a window in the time we tried to issue an abort/close
		 *   of a request to FW, and the FW completed the request
		 *   itself.
		 * Print a message for now, and return INVAL either way.
		 */
		req->drv_status = -EINVAL;
		csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);
	struct csio_rnode *rn;

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		/*
		 * In MSIX mode, with multiple queues, the SCSI completions
		 * could reach us sooner than the FW events sent to indicate
		 * I-T nexus loss (link down, remote device logo etc.). We
		 * don't want to be returning such I/Os to the upper layer
		 * immediately, since we wouldn't have reported the I-T nexus
		 * loss itself. This forces us to serialize such completions
		 * with the reporting of the I-T nexus loss. Therefore, we
		 * internally queue up such completions in the rnode.
		 * The reporting of I-T nexus loss to the upper layer is then
		 * followed by the returning of I/Os in this internal queue.
		 * Having another state along with another queue helps us take
		 * actions for events such as ABORT received while we are
		 * in this rnode queue.
		 */
		if (unlikely(req->wr_status != FW_SUCCESS)) {
			rn = req->rnode;
			/*
			 * FW says remote device is lost, but rnode
			 * doesn't reflect it.
			 */
			if (csio_scsi_itnexus_loss_error(req->wr_status) &&
			    csio_is_rnode_ready(rn)) {
				csio_set_state(&req->sm,
					       csio_scsis_shost_cmpl_await);
				list_add_tail(&req->sm.sm_list,
					      &rn->host_cmpl_q);
			}
		}

		break;

	case CSIO_SCSIE_ABORT:
		csio_scsi_abrt_cls(req, SCSI_ABORT);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;

	case CSIO_SCSIE_CLOSE:
		csio_scsi_abrt_cls(req, SCSI_CLOSE);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		CSIO_DEC_STATS(scm, n_tm_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);

		break;

	case CSIO_SCSIE_ABORT:
		csio_scsi_abrt_cls(req, SCSI_ABORT);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;


	case CSIO_SCSIE_CLOSE:
		csio_scsi_abrt_cls(req, SCSI_CLOSE);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_tm_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw,
			 "ioreq %p recvd cmpltd (wr_status:%d) "
			 "in aborting st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to explicitly tell the ABORTED event that
		 * the original I/O was returned to driver by FW.
		 * We don't really care if the I/O was returned with success by
		 * FW (because the ABORT and completion of the I/O crossed each
		 * other), or any other return value. Once we are in aborting
		 * state, the success or failure of the I/O is unimportant to
		 * us.
		 */
		req->drv_status = -ECANCELED;
		break;

	case CSIO_SCSIE_ABORT:
		CSIO_INC_STATS(scm, n_abrt_dups);
		break;

	case CSIO_SCSIE_ABORTED:

		csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
			 req, req->wr_status, req->drv_status);
		/*
		 * Check if original I/O WR completed before the Abort
		 * completion.
		 */
		if (req->drv_status != -ECANCELED) {
			csio_warn(hw,
				  "Abort completed before original I/O,"
				  " req:%p\n", req);
			CSIO_DB_ASSERT(0);
		}

		/*
		 * There are the following possible scenarios:
		 * 1. The abort completed successfully, FW returned FW_SUCCESS.
		 * 2. The completion of an I/O and the receipt of
		 *    abort for that I/O by the FW crossed each other.
		 *    The FW returned FW_EINVAL. The original I/O would have
		 *    returned with FW_SUCCESS or any other SCSI error.
		 * 3. The FW couldn't send the abort out on the wire, as there
		 *    was an I-T nexus loss (link down, remote device logged
		 *    out etc.). FW sent back an appropriate IT nexus loss
		 *    status for the abort.
		 * 4. FW sent an abort, but abort timed out (remote device
		 *    didn't respond). FW replied back with
		 *    FW_SCSI_ABORT_TIMEDOUT.
		 * 5. FW couldn't genuinely abort the request for some reason,
		 *    and sent us an error.
		 *
		 * The first 3 scenarios are treated as successful abort
		 * operations by the host, while the last 2 are failed attempts
		 * to abort. Manipulate the return value of the request
		 * appropriately, so that host can convey these results
		 * back to the upper layer.
		 */
		if ((req->wr_status == FW_SUCCESS) ||
		    (req->wr_status == FW_EINVAL) ||
		    csio_scsi_itnexus_loss_error(req->wr_status))
			req->wr_status = FW_SCSI_ABORT_REQUESTED;

		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_CLOSE:
		/*
		 * We can receive this event from the module
		 * cleanup paths, if the FW forgot to reply to the ABORT WR
		 * and left this ioreq in this state. For now, just ignore
		 * the event. The CLOSE event is sent to this state, as
		 * the LINK may have already gone down.
		 */
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw,
			 "ioreq %p recvd cmpltd (wr_status:%d) "
			 "in closing st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to explicitly tell the CLOSED event that
		 * the original I/O was returned to driver by FW.
		 * We don't really care if the I/O was returned with success by
		 * FW (because the CLOSE and completion of the I/O crossed each
		 * other), or any other return value. Once we are in closing
		 * state, the success or failure of the I/O is unimportant to
		 * us.
		 */
		req->drv_status = -ECANCELED;
		break;

	case CSIO_SCSIE_CLOSED:
		/*
		 * Check if original I/O WR completed before the Close
		 * completion.
		 */
		if (req->drv_status != -ECANCELED) {
			csio_fatal(hw,
				   "Close completed before original I/O,"
				   " req:%p\n", req);
			CSIO_DB_ASSERT(0);
		}

		/*
		 * Either close succeeded, or we issued close to FW at the
		 * same time FW completed it to us. Either way, the I/O
		 * is closed.
		 */
		CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
			       (req->wr_status == FW_EINVAL));
		req->wr_status = FW_SCSI_CLOSE_REQUESTED;

		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_CLOSE:
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	switch (evt) {
	case CSIO_SCSIE_ABORT:
	case CSIO_SCSIE_CLOSE:
		/*
		 * Just succeed the abort request, and hope that
		 * the remote device unregister path will cleanup
		 * this I/O to the upper layer within a sane
		 * amount of time.
		 */
		/*
		 * A close can come in during a LINK DOWN. The FW would have
		 * returned us the I/O back, but not the remote device lost
		 * FW event. In this interval, if the I/O times out at the
		 * upper layer, a close can come in. Take the same action as
		 * abort: return success, and hope that the remote device
		 * unregister path will cleanup this I/O. If the FW still
		 * doesn't send the msg, the close times out, and the upper
		 * layer resorts to the next level of error recovery.
		 */
		req->drv_status = 0;
		break;
	case CSIO_SCSIE_DRVCLEANUP:
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;
	default:
		csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
			 evt, req);
		CSIO_DB_ASSERT(0);
	}
}

/*
 * csio_scsi_cmpl_handler - WR completion handler for SCSI.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @priv: Private object
 * @scsiwr: Pointer to SCSI WR.
 *
 * This is the WR completion handler called per completion from the
 * ISR. It is called with lock held. It walks past the RSS and CPL message
 * header where the actual WR is present.
 * It then gets the status, WR handle (ioreq pointer) and the len of
 * the WR, based on WR opcode. Only on a non-good status is the entire
 * WR copied into the WR cache (ioreq->fw_wr).
 * The ioreq corresponding to the WR is returned to the caller.
 * NOTE: The SCSI queue doesn't allocate a freelist today, hence
 * no freelist buffer is expected.
 */
struct csio_ioreq *
csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
		       struct csio_fl_dma_buf *flb, void *priv,
		       uint8_t **scsiwr)
{
	struct csio_ioreq *ioreq = NULL;
	struct cpl_fw6_msg *cpl;
	uint8_t *tempwr;
	uint8_t	status;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	/* skip RSS header */
	cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));

	if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
		csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
			  cpl->opcode);
		CSIO_INC_STATS(scm, n_inval_cplop);
		return NULL;
	}

	tempwr = (uint8_t *)(cpl->data);
	status = csio_wr_status(tempwr);
	*scsiwr = tempwr;

	if (likely((*tempwr == FW_SCSI_READ_WR) ||
		   (*tempwr == FW_SCSI_WRITE_WR) ||
		   (*tempwr == FW_SCSI_CMD_WR))) {
		ioreq = (struct csio_ioreq *)((uintptr_t)
				(((struct fw_scsi_read_wr *)tempwr)->cookie));
		CSIO_DB_ASSERT(virt_addr_valid(ioreq));

		ioreq->wr_status = status;

		return ioreq;
	}

	if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
		ioreq = (struct csio_ioreq *)((uintptr_t)
			(((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
		CSIO_DB_ASSERT(virt_addr_valid(ioreq));

		ioreq->wr_status = status;
		return ioreq;
	}

	csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
	CSIO_INC_STATS(scm, n_inval_scsiop);
	return NULL;
}

/*
 * csio_scsi_cleanup_io_q - Cleanup the given queue.
 * @scm: SCSI module.
 * @q: Queue to be cleaned up.
 *
 * Called with lock held. Has to exit with lock held.
 */
void
csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
{
	struct csio_hw *hw = scm->hw;
	struct csio_ioreq *ioreq;
	struct list_head *tmp, *next;
	struct scsi_cmnd *scmnd;

	/* Call back the completion routines of the active_q */
	list_for_each_safe(tmp, next, q) {
		ioreq = (struct csio_ioreq *)tmp;
		csio_scsi_drvcleanup(ioreq);
		list_del_init(&ioreq->sm.sm_list);
		scmnd = csio_scsi_cmnd(ioreq);
		spin_unlock_irq(&hw->lock);

		/*
		 * Upper layers may have cleared this command, hence this
		 * check to avoid accessing stale references.
		 */
		if (scmnd != NULL)
			ioreq->io_cbfn(hw, ioreq);

		spin_lock_irq(&scm->freelist_lock);
		csio_put_scsi_ioreq(scm, ioreq);
		spin_unlock_irq(&scm->freelist_lock);

		spin_lock_irq(&hw->lock);
	}
}

#define CSIO_SCSI_ABORT_Q_POLL_MS	2000

static void
csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)
{
	struct csio_lnode *ln = ioreq->lnode;
	struct csio_hw *hw = ln->hwp;
	int ready = 0;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	int rv;

	if (csio_scsi_cmnd(ioreq) != scmnd) {
		CSIO_INC_STATS(scsim, n_abrt_race_comp);
		return;
	}

	ready = csio_is_lnode_ready(ln);

	rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
	if (rv != 0) {
		if (ready)
			CSIO_INC_STATS(scsim, n_abrt_busy_error);
		else
			CSIO_INC_STATS(scsim, n_cls_busy_error);
	}
}

/*
 * csio_scsi_abort_io_q - Abort all I/Os on given queue
 * @scm: SCSI module.
 * @q: Queue to abort.
 * @tmo: Timeout in ms
 *
 * Attempt to abort all I/Os on given queue, and wait for a max
 * of tmo milliseconds for them to complete. Returns success
 * if all I/Os are aborted. Else returns -ETIMEDOUT.
 * Should be entered with lock held. Exits with lock held.
 * NOTE:
 * Lock has to be held across the loop that aborts I/Os, since dropping the
 * lock in between can cause the list to be corrupted. As a result, the caller
 * of this function has to ensure that the number of I/Os to be aborted
 * is finite enough to not cause lock-held-for-too-long issues.
 */
static int
csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
{
	struct csio_hw *hw = scm->hw;
	struct list_head *tmp, *next;
	int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
	struct scsi_cmnd *scmnd;

	if (list_empty(q))
		return 0;

	csio_dbg(hw, "Aborting SCSI I/Os\n");

	/* Now abort/close I/Os in the queue passed */
	list_for_each_safe(tmp, next, q) {
		scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);
		csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);
	}

	/* Wait till all active I/Os are completed/aborted/closed */
	while (!list_empty(q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* all aborts completed */
	if (list_empty(q))
		return 0;

	return -ETIMEDOUT;
}

/*
 * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module.
 * @scm: SCSI module.
 * @abort: abort required.
 * Called with lock held, should exit with lock held.
 * Can sleep when waiting for I/Os to complete.
 */
int
csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
{
	struct csio_hw *hw = scm->hw;
	int rv = 0;
	int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);

	/* No I/Os pending */
	if (list_empty(&scm->active_q))
		return 0;

	/* Wait until all active I/Os are completed */
	while (!list_empty(&scm->active_q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* all I/Os completed */
	if (list_empty(&scm->active_q))
		return 0;

	/* Else abort */
	if (abort) {
		rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
		if (rv == 0)
			return rv;
		csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
	}

	csio_scsi_cleanup_io_q(scm, &scm->active_q);

	CSIO_DB_ASSERT(list_empty(&scm->active_q));

	return rv;
}

/*
 * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode.
 * @scm: SCSI module.
 * @lnode: lnode
 *
 * Called with lock held, should exit with lock held.
 * Can sleep (with dropped lock) when waiting for I/Os to complete.
 */
int
csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)
{
	struct csio_hw *hw = scm->hw;
	struct csio_scsi_level_data sld;
	int rv;
	int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);

	csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);

	sld.level = CSIO_LEV_LNODE;
	sld.lnode = ln;
	INIT_LIST_HEAD(&ln->cmpl_q);
	csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);

	/* No I/Os pending on this lnode */
	if (list_empty(&ln->cmpl_q))
		return 0;

	/* Wait until all active I/Os on this lnode are completed */
	while (!list_empty(&ln->cmpl_q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* all I/Os completed */
	if (list_empty(&ln->cmpl_q))
		return 0;

	csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);

	/* I/Os are pending, abort them */
	rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
	if (rv != 0) {
		csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
		csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
	}

	CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));

	return rv;
}

static ssize_t
csio_show_hw_state(struct device *dev,
		   struct device_attribute *attr, char *buf)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	if (csio_is_hw_ready(hw))
		return snprintf(buf, PAGE_SIZE, "ready\n");
	else
		return snprintf(buf, PAGE_SIZE, "not ready\n");
}

/* Device reset */
static ssize_t
csio_device_reset(struct device *dev,
		  struct device_attribute *attr, const char *buf, size_t count)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	if (*buf != '1')
		return -EINVAL;

	/* Delete NPIV lnodes */
	csio_lnodes_exit(hw, 1);

	/* Block upper IOs */
	csio_lnodes_block_request(hw);

	spin_lock_irq(&hw->lock);
	csio_hw_reset(hw);
	spin_unlock_irq(&hw->lock);

	/* Unblock upper IOs */
	csio_lnodes_unblock_request(hw);
	return count;
}

/* disable port */
static ssize_t
csio_disable_port(struct device *dev,
		  struct device_attribute *attr, const char *buf, size_t count)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	bool disable;

	if (*buf == '1' || *buf == '0')
		disable = (*buf == '1') ? true : false;
	else
		return -EINVAL;

	/* Block upper IOs */
	csio_lnodes_block_by_port(hw, ln->portid);

	spin_lock_irq(&hw->lock);
	csio_disable_lnodes(hw, ln->portid, disable);
	spin_unlock_irq(&hw->lock);

	/* Unblock upper IOs */
	csio_lnodes_unblock_by_port(hw, ln->portid);
	return count;
}

/* Show debug level */
static ssize_t
csio_show_dbg_level(struct device *dev,
		    struct device_attribute *attr, char *buf)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
}

/* Store debug level */
static ssize_t
csio_store_dbg_level(struct device *dev,
		     struct device_attribute *attr, const char *buf,
		     size_t count)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	uint32_t dbg_level = 0;

	if (!isdigit(buf[0]))
		return -EINVAL;

	/* Reject the write if the value does not parse as a number */
	if (sscanf(buf, "%i", &dbg_level) != 1)
		return -EINVAL;

	ln->params.log_level = dbg_level;
	hw->params.log_level = dbg_level;

	return count;
}

static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
static DEVICE_ATTR(device_reset, S_IRUGO | S_IWUSR, NULL, csio_device_reset);
static DEVICE_ATTR(disable_port, S_IRUGO | S_IWUSR, NULL, csio_disable_port);
static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
		   csio_store_dbg_level);

static struct device_attribute *csio_fcoe_lport_attrs[] = {
	&dev_attr_hw_state,
	&dev_attr_device_reset,
	&dev_attr_disable_port,
	&dev_attr_dbg_level,
	NULL,
};

static ssize_t
csio_show_num_reg_rnodes(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
}

static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);

static struct device_attribute *csio_fcoe_vport_attrs[] = {
	&dev_attr_num_reg_rnodes,
	&dev_attr_dbg_level,
	NULL,
};

static inline uint32_t
csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
{
	struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
	struct scatterlist *sg;
	uint32_t bytes_left;
	uint32_t bytes_copy;
	uint32_t buf_off = 0;
	uint32_t start_off = 0;
	uint32_t sg_off = 0;
	void *sg_addr;
	void *buf_addr;
	struct csio_dma_buf *dma_buf;

	bytes_left = scsi_bufflen(scmnd);
	sg = scsi_sglist(scmnd);
	dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);

	/* Copy data from driver buffer to SGs of SCSI CMD */
	while (bytes_left > 0 && sg && dma_buf) {
		if (buf_off >= dma_buf->len) {
			buf_off = 0;
			dma_buf = (struct csio_dma_buf *)
					csio_list_next(dma_buf);
			continue;
		}

		if (start_off >= sg->length) {
			start_off -= sg->length;
			sg = sg_next(sg);
			continue;
		}

		buf_addr = dma_buf->vaddr + buf_off;
		sg_off = sg->offset + start_off;
		bytes_copy = min((dma_buf->len - buf_off),
				 sg->length - start_off);
		bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
				 bytes_copy);

		sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
		if (!sg_addr) {
			csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
				 sg, req);
			break;
		}

		csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",
			 sg_addr, sg_off, buf_addr, bytes_copy);
		memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
		kunmap_atomic(sg_addr);

		start_off += bytes_copy;
		buf_off += bytes_copy;
		bytes_left -= bytes_copy;
	}

	if (bytes_left > 0)
		return DID_ERROR;
	else
		return DID_OK;
}

/*
 * csio_scsi_err_handler - SCSI error handler.
 * @hw: HW module.
 * @req: IO request.
 *
 */
static inline void
csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
	struct csio_scsim *scm = csio_hw_to_scsim(hw);
	struct fcp_resp_with_ext *fcp_resp;
	struct fcp_resp_rsp_info *rsp_info;
	struct csio_dma_buf *dma_buf;
	uint8_t flags, scsi_status = 0;
	uint32_t host_status = DID_OK;
	uint32_t rsp_len = 0, sns_len = 0;
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);


	switch (req->wr_status) {
	case FW_HOSTERROR:
		if (unlikely(!csio_is_hw_ready(hw)))
			return;

		host_status = DID_ERROR;
		CSIO_INC_STATS(scm, n_hosterror);

		break;
	case FW_SCSI_RSP_ERR:
		dma_buf = &req->dma_buf;
		fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
		rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
		flags = fcp_resp->resp.fr_flags;
		scsi_status = fcp_resp->resp.fr_status;

		if (flags & FCP_RSP_LEN_VAL) {
			rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
			if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||
			    (rsp_info->rsp_code != FCP_TMF_CMPL)) {
				host_status = DID_ERROR;
				goto out;
			}
		}

		if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
			sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
			if (sns_len > SCSI_SENSE_BUFFERSIZE)
				sns_len = SCSI_SENSE_BUFFERSIZE;

			memcpy(cmnd->sense_buffer,
			       &rsp_info->_fr_resvd[0] + rsp_len, sns_len);
			CSIO_INC_STATS(scm, n_autosense);
		}

		scsi_set_resid(cmnd, 0);

		/* Under run */
		if (flags & FCP_RESID_UNDER) {
			scsi_set_resid(cmnd,
				       be32_to_cpu(fcp_resp->ext.fr_resid));

			if (!(flags & FCP_SNS_LEN_VAL) &&
			    (scsi_status == SAM_STAT_GOOD) &&
			    ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
							< cmnd->underflow))
				host_status = DID_ERROR;
		} else if (flags & FCP_RESID_OVER)
			host_status = DID_ERROR;

		CSIO_INC_STATS(scm, n_rsperror);
		break;

	case FW_SCSI_OVER_FLOW_ERR:
		csio_warn(hw,
			  "Over-flow error,cmnd:0x%x expected len:0x%x"
			  " resid:0x%x\n", cmnd->cmnd[0],
			  scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;
		CSIO_INC_STATS(scm, n_ovflerror);
		break;

	case FW_SCSI_UNDER_FLOW_ERR:
		csio_warn(hw,
			  "Under-flow error,cmnd:0x%x expected"
			  " len:0x%x resid:0x%x lun:0x%x ssn:0x%x\n",
			  cmnd->cmnd[0], scsi_bufflen(cmnd),
			  scsi_get_resid(cmnd), cmnd->device->lun,
			  rn->flowid);
		host_status = DID_ERROR;
		CSIO_INC_STATS(scm, n_unflerror);
		break;

	case FW_SCSI_ABORT_REQUESTED:
	case FW_SCSI_ABORTED:
	case FW_SCSI_CLOSE_REQUESTED:
		csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
			 cmnd->cmnd[0],
			 (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
1674 "closed" : "aborted"); 1675 /* 1676 * csio_eh_abort_handler checks this value to 1677 * succeed or fail the abort request. 1678 */ 1679 host_status = DID_REQUEUE; 1680 if (req->wr_status == FW_SCSI_CLOSE_REQUESTED) 1681 CSIO_INC_STATS(scm, n_closed); 1682 else 1683 CSIO_INC_STATS(scm, n_aborted); 1684 break; 1685 1686 case FW_SCSI_ABORT_TIMEDOUT: 1687 /* FW timed out the abort itself */ 1688 csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n", 1689 req, cmnd, req->wr_status); 1690 host_status = DID_ERROR; 1691 CSIO_INC_STATS(scm, n_abrt_timedout); 1692 break; 1693 1694 case FW_RDEV_NOT_READY: 1695 /* 1696 * In firmware, a RDEV can get into this state 1697 * temporarily, before moving into dissapeared/lost 1698 * state. So, the driver should complete the request equivalent 1699 * to device-disappeared! 1700 */ 1701 CSIO_INC_STATS(scm, n_rdev_nr_error); 1702 host_status = DID_ERROR; 1703 break; 1704 1705 case FW_ERR_RDEV_LOST: 1706 CSIO_INC_STATS(scm, n_rdev_lost_error); 1707 host_status = DID_ERROR; 1708 break; 1709 1710 case FW_ERR_RDEV_LOGO: 1711 CSIO_INC_STATS(scm, n_rdev_logo_error); 1712 host_status = DID_ERROR; 1713 break; 1714 1715 case FW_ERR_RDEV_IMPL_LOGO: 1716 host_status = DID_ERROR; 1717 break; 1718 1719 case FW_ERR_LINK_DOWN: 1720 CSIO_INC_STATS(scm, n_link_down_error); 1721 host_status = DID_ERROR; 1722 break; 1723 1724 case FW_FCOE_NO_XCHG: 1725 CSIO_INC_STATS(scm, n_no_xchg_error); 1726 host_status = DID_ERROR; 1727 break; 1728 1729 default: 1730 csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n", 1731 req->wr_status, req, cmnd); 1732 CSIO_DB_ASSERT(0); 1733 1734 CSIO_INC_STATS(scm, n_unknown_error); 1735 host_status = DID_ERROR; 1736 break; 1737 } 1738 1739 out: 1740 if (req->nsge > 0) 1741 scsi_dma_unmap(cmnd); 1742 1743 cmnd->result = (((host_status) << 16) | scsi_status); 1744 cmnd->scsi_done(cmnd); 1745 1746 /* Wake up waiting threads */ 1747 csio_scsi_cmnd(req) = NULL; 1748 complete_all(&req->cmplobj); 1749 } 1750 1751 /* 1752 * csio_scsi_cbfn - SCSI callback function. 1753 * @hw: HW module. 1754 * @req: IO request. 1755 * 1756 */ 1757 static void 1758 csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req) 1759 { 1760 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); 1761 uint8_t scsi_status = SAM_STAT_GOOD; 1762 uint32_t host_status = DID_OK; 1763 1764 if (likely(req->wr_status == FW_SUCCESS)) { 1765 if (req->nsge > 0) { 1766 scsi_dma_unmap(cmnd); 1767 if (req->dcopy) 1768 host_status = csio_scsi_copy_to_sgl(hw, req); 1769 } 1770 1771 cmnd->result = (((host_status) << 16) | scsi_status); 1772 cmnd->scsi_done(cmnd); 1773 csio_scsi_cmnd(req) = NULL; 1774 CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success); 1775 } else { 1776 /* Error handling */ 1777 csio_scsi_err_handler(hw, req); 1778 } 1779 } 1780 1781 /** 1782 * csio_queuecommand - Entry point to kickstart an I/O request. 1783 * @host: The scsi_host pointer. 1784 * @cmnd: The I/O request from ML. 1785 * 1786 * This routine does the following: 1787 * - Checks for HW and Rnode module readiness. 1788 * - Gets a free ioreq structure (which is already initialized 1789 * to uninit during its allocation). 1790 * - Maps SG elements. 1791 * - Initializes ioreq members. 1792 * - Kicks off the SCSI state machine for this IO. 1793 * - Returns busy status on error. 
 */
static int
csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
{
	struct csio_lnode *ln = shost_priv(host);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
	struct csio_ioreq *ioreq = NULL;
	unsigned long flags;
	int nsge = 0;
	int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
	int retval;
	int cpu;
	struct csio_scsi_qset *sqset;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	if (!blk_rq_cpu_valid(cmnd->request))
		cpu = smp_processor_id();
	else
		cpu = cmnd->request->cpu;

	sqset = &hw->sqset[ln->portid][cpu];

	nr = fc_remote_port_chkready(rport);
	if (nr) {
		cmnd->result = nr;
		CSIO_INC_STATS(scsim, n_rn_nr_error);
		goto err_done;
	}

	if (unlikely(!csio_is_hw_ready(hw))) {
		cmnd->result = (DID_REQUEUE << 16);
		CSIO_INC_STATS(scsim, n_hw_nr_error);
		goto err_done;
	}

	/* Get req->nsge, if there are SG elements to be mapped */
	nsge = scsi_dma_map(cmnd);
	if (unlikely(nsge < 0)) {
		CSIO_INC_STATS(scsim, n_dmamap_error);
		goto err;
	}

	/* Do we support so many mappings? */
	if (unlikely(nsge > scsim->max_sge)) {
		csio_warn(hw,
			  "More SGEs than can be supported."
			  " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
		CSIO_INC_STATS(scsim, n_unsupp_sge_error);
		goto err_dma_unmap;
	}

	/* Get a free ioreq structure - SM is already set to uninit */
	ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
	if (!ioreq) {
		csio_err(hw, "Out of I/O request elements. Active #:%d\n",
			 scsim->stats.n_active);
		CSIO_INC_STATS(scsim, n_no_req_error);
		goto err_dma_unmap;
	}

	ioreq->nsge = nsge;
	ioreq->lnode = ln;
	ioreq->rnode = rn;
	ioreq->iq_idx = sqset->iq_idx;
	ioreq->eq_idx = sqset->eq_idx;
	ioreq->wr_status = 0;
	ioreq->drv_status = 0;
	csio_scsi_cmnd(ioreq) = (void *)cmnd;
	ioreq->tmo = 0;
	ioreq->datadir = cmnd->sc_data_direction;

	if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
		CSIO_INC_STATS(ln, n_output_requests);
		ln->stats.n_output_bytes += scsi_bufflen(cmnd);
	} else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
		CSIO_INC_STATS(ln, n_input_requests);
		ln->stats.n_input_bytes += scsi_bufflen(cmnd);
	} else
		CSIO_INC_STATS(ln, n_control_requests);

	/* Set cbfn */
	ioreq->io_cbfn = csio_scsi_cbfn;

	/* Needed during abort */
	cmnd->host_scribble = (unsigned char *)ioreq;
	cmnd->SCp.Message = 0;

	/* Kick off SCSI IO SM on the ioreq */
	spin_lock_irqsave(&hw->lock, flags);
	retval = csio_scsi_start_io(ioreq);
	spin_unlock_irqrestore(&hw->lock, flags);

	if (retval != 0) {
		csio_err(hw, "ioreq: %p couldn't be started, status:%d\n",
			 ioreq, retval);
		CSIO_INC_STATS(scsim, n_busy_error);
		goto err_put_req;
	}

	return 0;

err_put_req:
	csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
err_dma_unmap:
	if (nsge > 0)
		scsi_dma_unmap(cmnd);
err:
	return rv;

err_done:
	cmnd->scsi_done(cmnd);
	return 0;
}

static int
csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort)
{
	int rv;
	int cpu = smp_processor_id();
	struct csio_lnode *ln = ioreq->lnode;
	struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];

	ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
	/*
	 * Use current processor queue for posting the abort/close, but retain
	 * the ingress queue ID of the original I/O being aborted/closed - we
	 * need the abort/close completion to be received on the same queue
	 * as the original I/O.
	 */
	ioreq->eq_idx = sqset->eq_idx;

	if (abort == SCSI_ABORT)
		rv = csio_scsi_abort(ioreq);
	else
		rv = csio_scsi_close(ioreq);

	return rv;
}

static int
csio_eh_abort_handler(struct scsi_cmnd *cmnd)
{
	struct csio_ioreq *ioreq;
	struct csio_lnode *ln = shost_priv(cmnd->device->host);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	int ready = 0, ret;
	unsigned long tmo = 0;
	int rv;
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);

	ret = fc_block_scsi_eh(cmnd);
	if (ret)
		return ret;

	ioreq = (struct csio_ioreq *)cmnd->host_scribble;
	if (!ioreq)
		return SUCCESS;

	if (!rn)
		return FAILED;

	csio_dbg(hw,
		"Request to abort ioreq:%p cmd:%p cdb:%08llx"
		" ssni:0x%x lun:%d iq:0x%x\n",
		ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
		cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));

	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) {
		CSIO_INC_STATS(scsim, n_abrt_race_comp);
		return SUCCESS;
	}

	ready = csio_is_lnode_ready(ln);
	tmo = CSIO_SCSI_ABRT_TMO_MS;

	spin_lock_irq(&hw->lock);
	rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
	spin_unlock_irq(&hw->lock);

	if (rv != 0) {
		if (rv == -EINVAL) {
			/* Return success if the abort/close request was
			 * issued on an already-completed IO
			 */
			return SUCCESS;
		}
		if (ready)
			CSIO_INC_STATS(scsim, n_abrt_busy_error);
		else
			CSIO_INC_STATS(scsim, n_cls_busy_error);

		goto inval_scmnd;
	}

	/* Wait for completion */
	init_completion(&ioreq->cmplobj);
	wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));

	/* FW didn't respond to abort within our timeout */
	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {

		csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
		CSIO_INC_STATS(scsim, n_abrt_timedout);

inval_scmnd:
		if (ioreq->nsge > 0)
			scsi_dma_unmap(cmnd);

		spin_lock_irq(&hw->lock);
		csio_scsi_cmnd(ioreq) = NULL;
		spin_unlock_irq(&hw->lock);

		cmnd->result = (DID_ERROR << 16);
		cmnd->scsi_done(cmnd);

		return FAILED;
	}

	/* FW successfully aborted the request */
	if (host_byte(cmnd->result) == DID_REQUEUE) {
		csio_info(hw,
			"Aborted SCSI command to (%d:%d) serial#:0x%lx\n",
			cmnd->device->id, cmnd->device->lun,
			cmnd->serial_number);
		return SUCCESS;
	} else {
		csio_info(hw,
			"Failed to abort SCSI command, (%d:%d) serial#:0x%lx\n",
			cmnd->device->id, cmnd->device->lun,
			cmnd->serial_number);
		return FAILED;
	}
}

/*
 * csio_tm_cbfn - TM callback function.
 * @hw: HW module.
 * @req: IO request.
 *
 * Cache the result in 'cmnd', since ioreq will be freed soon
 * after we return from here, and the waiting thread shouldn't trust
 * the ioreq contents.
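 * The cached status (cmnd->SCp.Status) is later examined by
 * csio_eh_lun_reset_handler() once csio_scsi_cmnd(req) has been cleared.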
 */
static void
csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
	struct csio_dma_buf *dma_buf;
	uint8_t flags = 0;
	struct fcp_resp_with_ext *fcp_resp;
	struct fcp_resp_rsp_info *rsp_info;

	csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",
		req, req->wr_status);

	/* Cache FW return status */
	cmnd->SCp.Status = req->wr_status;

	/* Special handling based on FCP response */

	/*
	 * FW returns us this error if flags were set. FCP-4 says
	 * FCP_RSP_LEN_VAL in flags shall be set for TM completions.
	 * So if a target were to set this bit, we expect that the
	 * rsp_code is set to FCP_TMF_CMPL for a successful TM
	 * completion. Any other rsp_code means the TM operation failed.
	 * If a target were to just ignore setting flags, we treat
	 * the TM operation as success, and FW returns FW_SUCCESS.
	 */
	if (req->wr_status == FW_SCSI_RSP_ERR) {
		dma_buf = &req->dma_buf;
		fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
		rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);

		flags = fcp_resp->resp.fr_flags;

		/* Modify return status if flags indicate success */
		if (flags & FCP_RSP_LEN_VAL)
			if (rsp_info->rsp_code == FCP_TMF_CMPL)
				cmnd->SCp.Status = FW_SUCCESS;

		csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
	}

	/* Wake up the TM handler thread */
	csio_scsi_cmnd(req) = NULL;
}

static int
csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
{
	struct csio_lnode *ln = shost_priv(cmnd->device->host);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
	struct csio_ioreq *ioreq = NULL;
	struct csio_scsi_qset *sqset;
	unsigned long flags;
	int retval;
	int count, ret;
	LIST_HEAD(local_q);
	struct csio_scsi_level_data sld;

	if (!rn)
		goto fail;

	csio_dbg(hw, "Request to reset LUN:%d (ssni:0x%x tgtid:%d)\n",
		cmnd->device->lun, rn->flowid, rn->scsi_id);

	if (!csio_is_lnode_ready(ln)) {
		csio_err(hw,
			"LUN reset cannot be issued on non-ready"
			" local node vnpi:0x%x (LUN:%d)\n",
			ln->vnp_flowid, cmnd->device->lun);
		goto fail;
	}

	/* Lnode is ready, now wait on rport node readiness */
	ret = fc_block_scsi_eh(cmnd);
	if (ret)
		return ret;

	/*
	 * If we have blocked in the previous call, at this point either the
	 * remote node has come back online, or the device loss timer has
	 * fired and the remote node is destroyed. Allow the LUN reset only
	 * for the former case, since LUN reset is a TMF I/O on the wire, and
	 * we need a valid session to issue it.
	 */
	if (fc_remote_port_chkready(rn->rport)) {
		csio_err(hw,
			"LUN reset cannot be issued on non-ready"
			" remote node ssni:0x%x (LUN:%d)\n",
			rn->flowid, cmnd->device->lun);
		goto fail;
	}

	/* Get a free ioreq structure - SM is already set to uninit */
	ioreq = csio_get_scsi_ioreq_lock(hw, scsim);

	if (!ioreq) {
		csio_err(hw, "Out of IO request elements. Active #:%d\n",
			scsim->stats.n_active);
		goto fail;
	}

	sqset = &hw->sqset[ln->portid][smp_processor_id()];
	ioreq->nsge = 0;
	ioreq->lnode = ln;
	ioreq->rnode = rn;
	ioreq->iq_idx = sqset->iq_idx;
	ioreq->eq_idx = sqset->eq_idx;

	csio_scsi_cmnd(ioreq) = cmnd;
	cmnd->host_scribble = (unsigned char *)ioreq;
	cmnd->SCp.Status = 0;

	cmnd->SCp.Message = FCP_TMF_LUN_RESET;
	ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;

	/*
	 * FW times the LUN reset for ioreq->tmo, so we have to wait a little
	 * longer (10s for now) than that to allow FW to return the timed-out
	 * command.
	 */
	count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);

	/* Set cbfn */
	ioreq->io_cbfn = csio_tm_cbfn;

	/* Save off the ioreq info for later use */
	sld.level = CSIO_LEV_LUN;
	sld.lnode = ioreq->lnode;
	sld.rnode = ioreq->rnode;
	sld.oslun = (uint64_t)cmnd->device->lun;

	spin_lock_irqsave(&hw->lock, flags);
	/* Kick off TM SM on the ioreq */
	retval = csio_scsi_start_tm(ioreq);
	spin_unlock_irqrestore(&hw->lock, flags);

	if (retval != 0) {
		csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
			ioreq, retval);
		goto fail_ret_ioreq;
	}

	csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
		count * (CSIO_SCSI_TM_POLL_MS / 1000));
	/* Wait for completion */
	while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
			&& count--)
		msleep(CSIO_SCSI_TM_POLL_MS);

	/* LUN reset timed out */
	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
		csio_err(hw, "LUN reset (%d:%d) timed out\n",
			cmnd->device->id, cmnd->device->lun);

		spin_lock_irq(&hw->lock);
		csio_scsi_drvcleanup(ioreq);
		list_del_init(&ioreq->sm.sm_list);
		spin_unlock_irq(&hw->lock);

		goto fail_ret_ioreq;
	}

	/* LUN reset returned, check cached status */
	if (cmnd->SCp.Status != FW_SUCCESS) {
		csio_err(hw, "LUN reset failed (%d:%d), status: %d\n",
			cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
		goto fail;
	}

	/* LUN reset succeeded. Start aborting affected I/Os */
	/*
	 * Since the host guarantees that no more I/Os will be issued to this
	 * LUN until the LUN reset completes, we gather the pending I/Os
	 * after the LUN reset.
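	 * Any I/Os gathered here are aborted via csio_scsi_abort_io_q()
	 * below; if those aborts do not complete in time, the I/Os are
	 * returned to the active queue and the reset is failed.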
	 */
	spin_lock_irq(&hw->lock);
	csio_scsi_gather_active_ios(scsim, &sld, &local_q);

	retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
	spin_unlock_irq(&hw->lock);

	/* Aborts may have timed out */
	if (retval != 0) {
		csio_err(hw,
			"Attempt to abort I/Os during LUN reset of %d"
			" returned %d\n", cmnd->device->lun, retval);
		/* Return I/Os back to active_q */
		spin_lock_irq(&hw->lock);
		list_splice_tail_init(&local_q, &scsim->active_q);
		spin_unlock_irq(&hw->lock);
		goto fail;
	}

	CSIO_INC_STATS(rn, n_lun_rst);

	csio_info(hw, "LUN reset occurred (%d:%d)\n",
		cmnd->device->id, cmnd->device->lun);

	return SUCCESS;

fail_ret_ioreq:
	csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
fail:
	CSIO_INC_STATS(rn, n_lun_rst_fail);
	return FAILED;
}

static int
csio_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));

	return 0;
}

static int
csio_slave_configure(struct scsi_device *sdev)
{
	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, csio_lun_qdepth);
	else
		scsi_deactivate_tcq(sdev, csio_lun_qdepth);

	return 0;
}

static void
csio_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

static int
csio_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct csio_lnode *ln = shost_priv(shost);
	int rv = 1;

	spin_lock_irq(shost->host_lock);
	if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
		goto out;

	rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,
			csio_delta_scan_tmo * HZ);
out:
	spin_unlock_irq(shost->host_lock);

	return rv;
}

struct scsi_host_template csio_fcoe_shost_template = {
	.module = THIS_MODULE,
	.name = CSIO_DRV_DESC,
	.proc_name = KBUILD_MODNAME,
	.queuecommand = csio_queuecommand,
	.eh_abort_handler = csio_eh_abort_handler,
	.eh_device_reset_handler = csio_eh_lun_reset_handler,
	.slave_alloc = csio_slave_alloc,
	.slave_configure = csio_slave_configure,
	.slave_destroy = csio_slave_destroy,
	.scan_finished = csio_scan_finished,
	.this_id = -1,
	.sg_tablesize = CSIO_SCSI_MAX_SGE,
	.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = csio_fcoe_lport_attrs,
	.max_sectors = CSIO_MAX_SECTOR_SIZE,
};

struct scsi_host_template csio_fcoe_shost_vport_template = {
	.module = THIS_MODULE,
	.name = CSIO_DRV_DESC,
	.proc_name = KBUILD_MODNAME,
	.queuecommand = csio_queuecommand,
	.eh_abort_handler = csio_eh_abort_handler,
	.eh_device_reset_handler = csio_eh_lun_reset_handler,
	.slave_alloc = csio_slave_alloc,
	.slave_configure = csio_slave_configure,
	.slave_destroy = csio_slave_destroy,
	.scan_finished = csio_scan_finished,
	.this_id = -1,
	.sg_tablesize = CSIO_SCSI_MAX_SGE,
	.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = csio_fcoe_vport_attrs,
	.max_sectors = CSIO_MAX_SECTOR_SIZE,
};

/*
 * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
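 * (DDP: Direct Data Placement)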
 * @scm: SCSI Module
 * @hw: HW device.
 * @buf_size: buffer size
 * @num_buf: Number of buffers.
 *
 * This routine allocates the DMA buffers required for SCSI data transfer,
 * used when the SGL buffers of a SCSI Read request posted by the SCSI
 * midlayer are not virtually contiguous.
 */
static int
csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
			int buf_size, int num_buf)
{
	int n = 0;
	struct list_head *tmp;
	struct csio_dma_buf *ddp_desc = NULL;
	uint32_t unit_size = 0;

	if (!num_buf)
		return 0;

	if (!buf_size)
		return -EINVAL;

	INIT_LIST_HEAD(&scm->ddp_freelist);

	/* Align buf size to page size */
	buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
	/* Initialize dma descriptors */
	for (n = 0; n < num_buf; n++) {
		/* Set unit size to request size */
		unit_size = buf_size;
		ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
		if (!ddp_desc) {
			csio_err(hw,
				"Failed to allocate ddp descriptors,"
				" Num allocated = %d.\n",
				scm->stats.n_free_ddp);
			goto no_mem;
		}

		/* Allocate DMA buffers for DDP */
		ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size,
							&ddp_desc->paddr);
		if (!ddp_desc->vaddr) {
			csio_err(hw,
				"SCSI response DMA buffer (ddp) allocation"
				" failed!\n");
			kfree(ddp_desc);
			goto no_mem;
		}

		ddp_desc->len = unit_size;

		/* Add it to the scsi ddp freelist */
		list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
		CSIO_INC_STATS(scm, n_free_ddp);
	}

	return 0;
no_mem:
	/* release dma descs back to freelist and free dma memory */
	list_for_each(tmp, &scm->ddp_freelist) {
		ddp_desc = (struct csio_dma_buf *) tmp;
		tmp = csio_list_prev(tmp);
		pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
				ddp_desc->paddr);
		list_del_init(&ddp_desc->list);
		kfree(ddp_desc);
	}
	scm->stats.n_free_ddp = 0;

	return -ENOMEM;
}

/*
 * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs.
 * @scm: SCSI Module
 * @hw: HW device.
 *
 * This routine frees ddp buffers.
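 * Called during SCSI module teardown from csio_scsim_exit().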
 */
static void
csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
{
	struct list_head *tmp;
	struct csio_dma_buf *ddp_desc;

	/* release dma descs back to freelist and free dma memory */
	list_for_each(tmp, &scm->ddp_freelist) {
		ddp_desc = (struct csio_dma_buf *) tmp;
		tmp = csio_list_prev(tmp);
		pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
				ddp_desc->paddr);
		list_del_init(&ddp_desc->list);
		kfree(ddp_desc);
	}
	scm->stats.n_free_ddp = 0;
}

/**
 * csio_scsim_init - Initialize SCSI Module
 * @scm: SCSI Module
 * @hw: HW module
 *
 */
int
csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
{
	int i;
	struct csio_ioreq *ioreq;
	struct csio_dma_buf *dma_buf;

	INIT_LIST_HEAD(&scm->active_q);
	scm->hw = hw;

	scm->proto_cmd_len = sizeof(struct fcp_cmnd);
	scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
	scm->max_sge = CSIO_SCSI_MAX_SGE;

	spin_lock_init(&scm->freelist_lock);

	/* Pre-allocate ioreqs and initialize them */
	INIT_LIST_HEAD(&scm->ioreq_freelist);
	for (i = 0; i < csio_scsi_ioreqs; i++) {

		ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
		if (!ioreq) {
			csio_err(hw,
				"I/O request element allocation failed,"
				" Num allocated = %d.\n",
				scm->stats.n_free_ioreq);

			goto free_ioreq;
		}

		/* Allocate DMA buffers for the response payload */
		dma_buf = &ioreq->dma_buf;
		dma_buf->vaddr = pci_pool_alloc(hw->scsi_pci_pool, GFP_KERNEL,
						&dma_buf->paddr);
		if (!dma_buf->vaddr) {
			csio_err(hw,
				"SCSI response DMA buffer allocation"
				" failed!\n");
			kfree(ioreq);
			goto free_ioreq;
		}

		dma_buf->len = scm->proto_rsp_len;

		/* Set state to uninit */
		csio_init_state(&ioreq->sm, csio_scsis_uninit);
		INIT_LIST_HEAD(&ioreq->gen_list);
		init_completion(&ioreq->cmplobj);

		list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
		CSIO_INC_STATS(scm, n_free_ioreq);
	}

	if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
		goto free_ioreq;

	return 0;

free_ioreq:
	/*
	 * Free up existing allocations, since an error
	 * from here means we are returning for good
	 */
	while (!list_empty(&scm->ioreq_freelist)) {
		struct csio_sm *tmp;

		tmp = list_first_entry(&scm->ioreq_freelist,
				struct csio_sm, sm_list);
		list_del_init(&tmp->sm_list);
		ioreq = (struct csio_ioreq *)tmp;

		dma_buf = &ioreq->dma_buf;
		pci_pool_free(hw->scsi_pci_pool, dma_buf->vaddr,
				dma_buf->paddr);

		kfree(ioreq);
	}

	scm->stats.n_free_ioreq = 0;

	return -ENOMEM;
}

/**
 * csio_scsim_exit - Uninitialize SCSI Module
 * @scm: SCSI Module
 *
 */
void
csio_scsim_exit(struct csio_scsim *scm)
{
	struct csio_ioreq *ioreq;
	struct csio_dma_buf *dma_buf;

	while (!list_empty(&scm->ioreq_freelist)) {
		struct csio_sm *tmp;

		tmp = list_first_entry(&scm->ioreq_freelist,
				struct csio_sm, sm_list);
		list_del_init(&tmp->sm_list);
		ioreq = (struct csio_ioreq *)tmp;

		dma_buf = &ioreq->dma_buf;
		pci_pool_free(scm->hw->scsi_pci_pool, dma_buf->vaddr,
				dma_buf->paddr);

		kfree(ioreq);
	}

	scm->stats.n_free_ioreq = 0;

	csio_scsi_free_ddp_bufs(scm, scm->hw);
}