/* bnx2fc_io.c: QLogic Linux FCoE offload driver.
 * IO manager and SCSI IO processing.
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

#define RESERVE_FREE_LIST_INDEX num_possible_cpus()

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq);

void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
			  unsigned int timer_msec)
{
	struct bnx2fc_interface *interface = io_req->port->priv;

	if (queue_delayed_work(interface->timer_work_queue,
			       &io_req->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		kref_get(&io_req->refcount);
}

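/*
 * Timeout handler for the delayed work armed by bnx2fc_cmd_timer_set().
 * Runs from the interface timer workqueue and takes tgt_lock to serialize
 * against the completion paths; every exit path drops (or hands off) the
 * reference that was taken when the timer was set.
 */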
static void bnx2fc_cmd_timeout(struct work_struct *work)
{
	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
						 timeout_work.work);
	u8 cmd_type = io_req->cmd_type;
	struct bnx2fc_rport *tgt = io_req->tgt;
	int rc;

	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d,"
		      "req_flags = %lx\n", cmd_type, io_req->req_flags);

	spin_lock_bh(&tgt->tgt_lock);
	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
		/*
		 * ideally we should hold the io_req until RRQ completes,
		 * and release io_req from timeout hold.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		bnx2fc_send_rrq(io_req);
		return;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
		goto done;
	}

	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags)) {
			/* Handle eh_abort timeout */
			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
			complete(&io_req->tm_done);
		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
				    &io_req->req_flags)) {
			/* Handle internally generated ABTS timeout */
			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
				      io_req->refcount.refcount.counter);
			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					       &io_req->req_flags))) {
				/*
				 * Cleanup and return original command to
				 * mid-layer.
				 */
				bnx2fc_initiate_cleanup(io_req);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				return;
			}
		} else {
			/* Handle IO timeout */
			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
					     &io_req->req_flags)) {
				BNX2FC_IO_DBG(io_req, "IO completed before "
					      "timer expiry\n");
				goto done;
			}

			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &io_req->req_flags)) {
				rc = bnx2fc_initiate_abts(io_req);
				if (rc == SUCCESS)
					goto done;

				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				return;
			} else {
				BNX2FC_IO_DBG(io_req, "IO already in "
					      "ABTS processing\n");
			}
		}
		break;
	case BNX2FC_ELS:

		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");

			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					      &io_req->req_flags)) {
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				return;
			}
		} else {
			/*
			 * Handle ELS timeout.
			 * tgt_lock is used to sync compl path and timeout
			 * path. If els compl path is processing this IO, we
			 * have nothing to do here, just release the timer hold
			 */
			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
					     &io_req->req_flags))
				goto done;

			/* Indicate the cb_func that this ELS is timed out */
			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);

			if ((io_req->cb_func) && (io_req->cb_arg)) {
				io_req->cb_func(io_req->cb_arg);
				io_req->cb_arg = NULL;
			}
		}
		break;
	default:
		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
		       cmd_type);
		break;
	}

done:
	/* release the cmd that was held when timer was set */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
}

static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
{
	/* Called with host lock held */
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/*
	 * active_cmd_queue may have other command types as well,
	 * and during flush operation, we want to error back only
	 * scsi commands.
	 */
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		return;

	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
	if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
		/* Do not call scsi done for this IO */
		return;
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;
	if (!sc_cmd) {
		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
		       "IO(0x%x) already cleaned up\n",
		       io_req->xid);
		return;
	}
	sc_cmd->result = err_code << 16;

	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
		      sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
		      sc_cmd->allowed);
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
}

struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
{
	struct bnx2fc_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct bnx2fc_cmd *io_req;
	size_t len;
	u32 mem_size;
	u16 xid;
	int i;
	int num_ios, num_pri_ios;
	size_t bd_tbl_sz;
	int arr_sz = num_possible_cpus() + 1;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = hba->max_xid;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \
			and max_xid 0x%x\n", min_xid, max_xid);
		return NULL;
	}
	BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;
	len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
	len += sizeof(struct bnx2fc_cmd_mgr);

	cmgr = kzalloc(len, GFP_KERNEL);
	if (!cmgr) {
		printk(KERN_ERR PFX "failed to alloc cmgr\n");
		return NULL;
	}

	cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
				  arr_sz, GFP_KERNEL);
	if (!cmgr->free_list) {
		printk(KERN_ERR PFX "failed to alloc free_list\n");
		goto mem_err;
	}

	cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
				       arr_sz, GFP_KERNEL);
	if (!cmgr->free_list_lock) {
		printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
		kfree(cmgr->free_list);
		cmgr->free_list = NULL;
		goto mem_err;
	}

	cmgr->hba = hba;
	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);

	for (i = 0; i < arr_sz; i++) {
		INIT_LIST_HEAD(&cmgr->free_list[i]);
		spin_lock_init(&cmgr->free_list_lock[i]);
	}

	/*
	 * Pre-allocated pool of bnx2fc_cmds.
	 * Last entry in the free list array is the free list
	 * of slow path requests.
	 */
	xid = BNX2FC_MIN_XID;
	num_pri_ios = num_ios - hba->elstm_xids;
	for (i = 0; i < num_ios; i++) {
		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);

		if (!io_req) {
			printk(KERN_ERR PFX "failed to alloc io_req\n");
			goto mem_err;
		}

		INIT_LIST_HEAD(&io_req->link);
		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);

		io_req->xid = xid++;
		if (i < num_pri_ios)
			list_add_tail(&io_req->link,
				      &cmgr->free_list[io_req->xid %
						       num_possible_cpus()]);
		else
			list_add_tail(&io_req->link,
				      &cmgr->free_list[num_possible_cpus()]);
		io_req++;
	}

	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
	mem_size = num_ios * sizeof(struct io_bdt *);
	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
		goto mem_err;
	}

	mem_size = sizeof(struct io_bdt);
	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			printk(KERN_ERR PFX "failed to alloc "
			       "io_bdt_pool[%d]\n", i);
			goto mem_err;
		}
	}

	/* Allocate and map fcoe_bdt_ctx structures */
	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						      bd_tbl_sz,
						      &bdt_info->bd_tbl_dma,
						      GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			printk(KERN_ERR PFX "failed to alloc "
			       "bdt_tbl[%d]\n", i);
			goto mem_err;
		}
	}

	return cmgr;

mem_err:
	bnx2fc_cmd_mgr_free(cmgr);
	return NULL;
}

void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct bnx2fc_hba *hba = cmgr->hba;
	size_t bd_tbl_sz;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = hba->max_xid;
	int num_ios;
	int i;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
					  bdt_info->bd_tbl,
					  bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	kfree(cmgr->free_list_lock);

	/* Destroy cmd pool */
	if (!cmgr->free_list)
		goto free_cmgr;

	for (i = 0; i < num_possible_cpus() + 1; i++) {
		struct bnx2fc_cmd *tmp, *io_req;

		list_for_each_entry_safe(io_req, tmp,
					 &cmgr->free_list[i], link) {
			list_del(&io_req->link);
			kfree(io_req);
		}
	}
	kfree(cmgr->free_list);
free_cmgr:
	/* Free command manager itself */
	kfree(cmgr);
}

struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	int index = RESERVE_FREE_LIST_INDEX;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;

	max_sqes = tgt->max_sqes;
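	/*
	 * ELS and task management commands use a smaller SQE budget than
	 * SCSI commands; the limit selected here is checked against the
	 * target's active I/O and free SQE counts below.
	 */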
	switch (type) {
	case BNX2FC_TASK_MGMT_CMD:
		max_sqes = BNX2FC_TM_MAX_SQES;
		break;
	case BNX2FC_ELS:
		max_sqes = BNX2FC_ELS_MAX_SQES;
		break;
	default:
		break;
	}

	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
			       "ios(%d):sqes(%d)\n",
			       tgt->num_active_ios.counter, tgt->max_sqes);
		if (list_empty(&(cmd_mgr->free_list[index])))
			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;
	io_req->cmd_type = type;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}

struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;
	int index = get_cpu();

	max_sqes = BNX2FC_SCSI_MAX_SQES;
	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&cmd_mgr->free_list[index])) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		put_cpu();
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
	put_cpu();

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}

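/*
 * kref release callback for a bnx2fc_cmd: frees the middle-path resources
 * of non-SCSI commands, removes the command from whatever queue it is on
 * and returns it to the free list selected by xid % num_possible_cpus()
 * for SCSI commands, or to the reserve list otherwise.
 */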
void bnx2fc_cmd_release(struct kref *ref)
{
	struct bnx2fc_cmd *io_req = container_of(ref,
						 struct bnx2fc_cmd, refcount);
	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	int index;

	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		index = io_req->xid % num_possible_cpus();
	else
		index = RESERVE_FREE_LIST_INDEX;


	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		bnx2fc_free_mp_resc(io_req);
	cmd_mgr->cmds[io_req->xid] = NULL;
	/* Delete IO from retire queue */
	list_del_init(&io_req->link);
	/* Add it to the free list */
	list_add(&io_req->link,
		 &cmd_mgr->free_list[index]);
	atomic_dec(&io_req->tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

}

static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	size_t sz = sizeof(struct fcoe_bd_ctx);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_req_bd,
				  mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_resp_bd,
				  mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  mp_req->req_buf,
				  mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  mp_req->resp_buf,
				  mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req;
	struct fcoe_bd_ctx *mp_req_bd;
	struct fcoe_bd_ctx *mp_resp_bd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	dma_addr_t addr;
	size_t sz;

	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));

	if (io_req->cmd_type != BNX2FC_ELS) {
		mp_req->req_len = sizeof(struct fcp_cmnd);
		io_req->data_xfer_len = mp_req->req_len;
	} else
		mp_req->req_len = io_req->data_xfer_len;

	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
					     &mp_req->req_buf_dma,
					     GFP_ATOMIC);
	if (!mp_req->req_buf) {
		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
					      &mp_req->resp_buf_dma,
					      GFP_ATOMIC);
	if (!mp_req->resp_buf) {
		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
	memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_bd_ctx);
	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
					       &mp_req->mp_req_bd_dma,
					       GFP_ATOMIC);
	if (!mp_req->mp_req_bd) {
		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						&mp_req->mp_resp_bd_dma,
						GFP_ATOMIC);
	if (!mp_req->mp_resp_bd) {
		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_req_bd->buf_len = CNIC_PAGE_SIZE;
	mp_req_bd->flags = 0;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
	mp_resp_bd->flags = 0;

	return SUCCESS;
}

static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_lport *lport;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;
	struct fcoe_port *port;
	struct bnx2fc_interface *interface;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_mp_req *tm_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_frame_header *fc_hdr;
	struct fcp_cmnd *fcp_cmnd;
	int task_idx, index;
	int rc = SUCCESS;
	u16 xid;
	u32 sid, did;
	unsigned long start = jiffies;

	lport = shost_priv(host);
	rport = starget_to_rport(scsi_target(sc_cmd->device));
	port = lport_priv(lport);
	interface = port->priv;

	if (rport == NULL) {
		printk(KERN_ERR PFX "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}
	rp = rport->dd_data;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "device_reset: link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}
	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
		rc = FAILED;
		goto tmf_err;
	}
retry_tmf:
	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
	if (!io_req) {
		if (time_after(jiffies, start + HZ)) {
			printk(KERN_ERR PFX "tmf: Failed TMF");
			rc = FAILED;
			goto tmf_err;
		}
		msleep(20);
		goto retry_tmf;
	}
	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->port = port;
	io_req->tgt = tgt;

	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);

	rc = bnx2fc_init_mp_req(io_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		goto tmf_err;
	}

	/* Set TM flags */
	io_req->io_req_flags = 0;
	tm_req->tm_flags = tm_flags;

	/* Fill FCP_CMND */
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
	memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
	fcp_cmnd->fc_dl = 0;

	/* Fill FC header */
	fc_hdr = &(tm_req->req_fc_hdr);
	sid = tgt->sid;
	did = rport->port_id;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
			 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = io_req->xid;

	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

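	/*
	 * Task context entries are laid out as one array per page under
	 * hba->task_ctx[]; an xid maps to page xid / BNX2FC_TASKS_PER_PAGE,
	 * slot xid % BNX2FC_TASKS_PER_PAGE, as computed above.
	 */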
	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(io_req, task);

	sc_cmd->SCp.ptr = (char *)io_req;

	/* Obtain free SQ entry */
	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_tm_queue */
	io_req->on_tmf_queue = 1;
	list_add_tail(&io_req->link, &tgt->active_tm_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

	rc = wait_for_completion_timeout(&io_req->tm_done,
					 BNX2FC_TM_TIMEOUT * HZ);
	spin_lock_bh(&tgt->tgt_lock);

	io_req->wait_for_comp = 0;
	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
		if (io_req->on_tmf_queue) {
			list_del_init(&io_req->link);
			io_req->on_tmf_queue = 0;
		}
		io_req->wait_for_comp = 1;
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = wait_for_completion_timeout(&io_req->tm_done,
						 BNX2FC_FW_TIMEOUT);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_comp = 0;
		if (!rc)
			kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	spin_unlock_bh(&tgt->tgt_lock);

	if (!rc) {
		BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
		rc = SUCCESS;
	}
tmf_err:
	return rc;
}

int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport *rport = tgt->rport;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *abts_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fc_frame_header *fc_hdr;
	struct bnx2fc_mp_req *abts_req;
	int task_idx, index;
	u32 sid, did;
	u16 xid;
	int rc = SUCCESS;
	u32 r_a_tov = rdata->r_a_tov;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
		rc = FAILED;
		goto abts_err;
	}

	if (rport == NULL) {
		printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
		rc = FAILED;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
		rc = FAILED;
		goto abts_err;
	}

	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
	if (!abts_io_req) {
		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
		rc = FAILED;
		goto abts_err;
	}

	/* Initialize rest of io_req fields */
	abts_io_req->sc_cmd = NULL;
	abts_io_req->port = port;
	abts_io_req->tgt = tgt;
	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */

	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));

	/* Fill FC header */
	fc_hdr = &(abts_req->req_fc_hdr);

	/* Obtain oxid and rxid for the original exchange to be aborted */
	fc_hdr->fh_ox_id = htons(io_req->xid);
	fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);

	sid = tgt->sid;
	did = rport->port_id;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
			 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	xid = abts_io_req->xid;
	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(abts_io_req, task);

	/*
	 * ABTS task is a temporary task that will be cleaned up
	 * irrespective of ABTS response. We need to start the timer
	 * for the original exchange, as the CQE is posted for the original
	 * IO request.
	 *
	 * Timer for ABTS is started only when it is originated by a
	 * TM request. For the ABTS issued as part of ULP timeout,
	 * scsi-ml maintains the timers.
	 */

	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

abts_err:
	return rc;
}

int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
				enum fc_rctl r_ctl)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *seq_clnp_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	int task_idx, index;
	u16 xid;
	int rc = 0;

	BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
		      orig_io_req->xid);
	kref_get(&orig_io_req->refcount);

	port = orig_io_req->port;
	interface = port->priv;
	lport = port->lport;

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
		rc = -ENOMEM;
		goto cleanup_err;
	}

	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
	if (!seq_clnp_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -ENOMEM;
		kfree(cb_arg);
		goto cleanup_err;
	}
	/* Initialize rest of io_req fields */
	seq_clnp_req->sc_cmd = NULL;
	seq_clnp_req->port = port;
	seq_clnp_req->tgt = tgt;
	seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = seq_clnp_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	cb_arg->aborted_io_req = orig_io_req;
	cb_arg->io_req = seq_clnp_req;
	cb_arg->r_ctl = r_ctl;
	cb_arg->offset = offset;
	seq_clnp_req->cb_arg = cb_arg;

	printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
	bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
cleanup_err:
	return rc;
}

int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *cleanup_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int task_idx, index;
	u16 xid, orig_xid;
	int rc = 0;

	/* ASSUMPTION: called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
	if (!cleanup_io_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -1;
		goto cleanup_err;
	}

	/* Initialize rest of io_req fields */
	cleanup_io_req->sc_cmd = NULL;
	cleanup_io_req->port = port;
	cleanup_io_req->tgt = tgt;
	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = cleanup_io_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	orig_xid = io_req->xid;

	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);

	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

cleanup_err:
	return rc;
}

/**
 * bnx2fc_eh_target_reset: Reset a target
 *
 * @sc_cmd: SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 * and wait for the response
 */
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}

/**
 * bnx2fc_eh_device_reset - Reset a single LUN
 *
 * @sc_cmd: SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 * and wait for the response
 */
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}

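/*
 * Issue a firmware cleanup for a command whose ABTS has already been
 * handled. tgt_lock is dropped while waiting for the cleanup completion
 * and re-acquired before returning; the reference taken in eh_abort is
 * dropped here so the target can re-login after the flush.
 */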
int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	int rc = SUCCESS;

	io_req->wait_for_comp = 1;
	bnx2fc_initiate_cleanup(io_req);

	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	io_req->wait_for_comp = 0;
	/*
	 * release the reference taken in eh_abort to allow the
	 * target to re-login after flushing IOs
	 */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);

	spin_lock_bh(&tgt->tgt_lock);
	return rc;
}
/**
 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
 *			SCSI command
 *
 * @sc_cmd: SCSI_ML command pointer
 *
 * SCSI abort request handler
 */
int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_cmd *io_req;
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt;
	int rc = FAILED;


	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	lport = shost_priv(sc_cmd->device->host);
	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		printk(KERN_ERR PFX "eh_abort: link not ready\n");
		return rc;
	}

	tgt = (struct bnx2fc_rport *)&rp[1];

	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");

	spin_lock_bh(&tgt->tgt_lock);
	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
	if (!io_req) {
		/* Command might have just completed */
		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
		      io_req->refcount.refcount.counter);

	/* Hold IO request across abort processing */
	kref_get(&io_req->refcount);

	BUG_ON(tgt != io_req->tgt);

	/* Remove the io_req from the active_q. */
	/*
	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
	 * issue an ABTS on this particular IO req, as the
	 * io_req is no longer in the active_q.
	 */
	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "flush in progress\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	if (io_req->on_active_queue == 0) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "not on active_q\n", io_req->xid);
		/*
		 * This condition can happen only due to a FW bug,
		 * where we do not receive cleanup response from
		 * the FW. Handle this case gracefully by erroring
		 * back the IO request to SCSI-ml
		 */
		bnx2fc_scsi_done(io_req, DID_ABORT);

		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	/*
	 * Only eh_abort processing will remove the IO from
	 * active_cmd_q before processing the request. this is
	 * done to avoid race conditions between IOs aborted
	 * as part of task management completion and eh_abort
	 * processing
	 */
	list_del_init(&io_req->link);
	io_req->on_active_queue = 0;
	/* Move IO req to retire queue */
	list_add_tail(&io_req->link, &tgt->io_retire_queue);

	init_completion(&io_req->tm_done);

	if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "already in abts processing\n", io_req->xid);
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		rc = bnx2fc_abts_cleanup(io_req);
		/*
		 * This only occurs when a task abort was requested while
		 * ABTS is in progress. Setting the IO_CLEANUP flag will
		 * skip the RRQ process in the case when the fw generated
		 * SCSI_CMD cmpl was a result of the ABTS request rather
		 * than the CLEANUP request.
		 */
		set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
		goto out;
	}

	/* Cancel the current timer running on this io_req */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */
	set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
	io_req->wait_for_comp = 1;
	rc = bnx2fc_initiate_abts(io_req);
	if (rc == FAILED) {
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		wait_for_completion(&io_req->tm_done);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_comp = 0;
		goto done;
	}
	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_comp = 0;
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
		rc = SUCCESS;
	} else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
				      &io_req->req_flags))) {
		/* Let the scsi-ml try to recover this command */
		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
		       io_req->xid);
		rc = bnx2fc_abts_cleanup(io_req);
		goto out;
	} else {
		/*
		 * We come here even when there was a race condition
		 * between timeout and abts completion, and abts
		 * completion happens just in time.
		 */
		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
		rc = SUCCESS;
		bnx2fc_scsi_done(io_req, DID_ABORT);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
done:
	/* release the reference taken in eh_abort */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
out:
	spin_unlock_bh(&tgt->tgt_lock);
	return rc;
}

void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
				      struct fcoe_task_ctx_entry *task,
				      u8 rx_state)
{
	struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
	struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
	u32 offset = cb_arg->offset;
	enum fc_rctl r_ctl = cb_arg->r_ctl;
	int rc = 0;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;

	BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x"
		      "cmd_type = %d\n",
		      seq_clnp_req->xid, seq_clnp_req->cmd_type);

	if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
		printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
		       seq_clnp_req->xid);
		goto free_cb_arg;
	}

	spin_unlock_bh(&tgt->tgt_lock);
	rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
	spin_lock_bh(&tgt->tgt_lock);

	if (rc)
		printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
		       " IO will abort\n");
	seq_clnp_req->cb_arg = NULL;
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
free_cb_arg:
	kfree(cb_arg);
	return;
}

void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
				  struct fcoe_task_ctx_entry *task,
				  u8 num_rq)
{
	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->refcount.refcount.counter, io_req->cmd_type);
	bnx2fc_scsi_done(io_req, DID_ERROR);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp)
		complete(&io_req->tm_done);
}

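/*
 * Handle the ABTS completion CQE; called with tgt_lock held. On a BA_ACC
 * the exchange is retired and an RRQ is scheduled after R_A_TOV through the
 * command timer; on BA_RJT (or an unrecognized response) only the retire
 * timer is armed. If eh_abort is waiting on this command it is woken up
 * instead of completing the I/O here.
 */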
void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
			       struct fcoe_task_ctx_entry *task,
			       u8 num_rq)
{
	u32 r_ctl;
	u32 r_a_tov = FC_DEF_R_A_TOV;
	u8 issue_rrq = 0;
	struct bnx2fc_rport *tgt = io_req->tgt;

	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x"
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->xid,
		      io_req->refcount.refcount.counter, io_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			     &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
			      " this io\n");
		return;
	}

	/* Do not issue RRQ as this IO is already cleaned up */
	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
			     &io_req->req_flags))
		goto io_compl;

	/*
	 * For ABTS issued due to SCSI eh_abort_handler, timeout
	 * values are maintained by scsi-ml itself. Cancel timeout
	 * in case ABTS issued as part of task management function
	 * or due to FW error.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->tm_done);
	} else {
		/*
		 * We end up here when ABTS is issued as
		 * in asynchronous context, i.e., as part
		 * of task management completion, or
		 * when FW error is received or when the
		 * ABTS is issued when the IO is timed
		 * out.
		 */

		if (io_req->on_active_queue) {
			list_del_init(&io_req->link);
			io_req->on_active_queue = 0;
			/* Move IO req to retire queue */
			list_add_tail(&io_req->link, &tgt->io_retire_queue);
		}
		bnx2fc_scsi_done(io_req, DID_ERROR);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
}

static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	u64 tm_lun = sc_cmd->device->lun;
	u64 lun;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT the IO
	 * that matches with the LUN that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
		lun = cmd->sc_cmd->device->lun;
		if (lun == tm_lun) {
			/* Initiate ABTS on this cmd */
			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &cmd->req_flags)) {
				/* cancel the IO timeout */
				if (cancel_delayed_work(&io_req->timeout_work))
					kref_put(&io_req->refcount,
						 bnx2fc_cmd_release);
							/* timer hold */
				rc = bnx2fc_initiate_abts(cmd);
				/* abts shouldn't fail in this context */
				WARN_ON(rc != SUCCESS);
			} else
				printk(KERN_ERR PFX "lun_rst: abts already in"
				       " progress for this IO 0x%x\n",
				       cmd->xid);
		}
	}
}

static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT all the IOs
	 * on the target that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
		/* Initiate ABTS */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &cmd->req_flags)) {
			/* cancel the IO timeout */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */
			rc = bnx2fc_initiate_abts(cmd);
			/* abts shouldn't fail in this context */
			WARN_ON(rc != SUCCESS);

		} else
			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
			       " for this IO 0x%x\n", cmd->xid);
	}
}

void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
			     struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *tm_req;
	struct fc_frame_header *fc_hdr;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	u64 *hdr;
	u64 *temp_hdr;
	void *rsp_buf;

	/* Called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");

	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
	else {
		/* TM has already timed out and we got
		 * delayed completion. Ignore completion
		 * processing.
		 */
		return;
	}

	tm_req = &(io_req->mp_req);
	fc_hdr = &(tm_req->resp_fc_hdr);
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	tm_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	rsp_buf = tm_req->resp_buf;

	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
		bnx2fc_parse_fcp_rsp(io_req,
				     (struct fcoe_fcp_rsp_payload *)
				     rsp_buf, num_rq);
		if (io_req->fcp_rsp_code == 0) {
			/* TM successful */
			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
				bnx2fc_lun_reset_cmpl(io_req);
			else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
				bnx2fc_tgt_reset_cmpl(io_req);
		}
	} else {
		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
		       fc_hdr->fh_r_ctl);
	}
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
		return;
	}
	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;

	default:
		BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
			      io_req->fcp_status);
		break;
	}

	sc_cmd = io_req->sc_cmd;
	io_req->sc_cmd = NULL;

	/* check if the io_req exists in tgt's tmf_q */
	if (io_req->on_tmf_queue) {

		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
	} else {

		printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
		return;
	}

	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);

	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp) {
		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
		complete(&io_req->tm_done);
	}
}

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index)
{
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len >= BNX2FC_BD_SPLIT_SZ)
			frag_size = BNX2FC_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
		bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
		bd[bd_index + sg_frags].buf_len = (u16)frag_size;
		bd[bd_index + sg_frags].flags = 0;

		addr += (u64) frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;

}

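/*
 * bnx2fc_map_sg() DMA-maps the command's scatterlist and turns it into the
 * firmware BD table: each SG element normally becomes one BD, while elements
 * larger than BNX2FC_MAX_BD_LEN are broken into BNX2FC_BD_SPLIT_SZ-sized
 * fragments by bnx2fc_split_bd() above.
 */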
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr;
	int i;

	/*
	 * Use dma_map_sg directly to ensure we're using the correct
	 * dev struct off of pcidev.
	 */
	sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
			      scsi_sg_count(sc), sc->sc_data_direction);
	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = sg_dma_address(sg);
		if (sg_len > BNX2FC_MAX_BD_LEN) {
			sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
						   bd_count);
		} else {

			sg_frags = 1;
			bd[bd_count].buf_addr_lo = addr & 0xffffffff;
			bd[bd_count].buf_addr_hi = addr >> 32;
			bd[bd_count].buf_len = (u16)sg_len;
			bd[bd_count].flags = 0;
		}
		bd_count += sg_frags;
		byte_count += sg_len;
	}
	if (byte_count != scsi_bufflen(sc))
		printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
		       "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
		       io_req->xid);
	return bd_count;
}

static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = bnx2fc_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
		bd[0].buf_len = bd[0].flags = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}

static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	/*
	 * Use dma_unmap_sg directly to ensure we're using the correct
	 * dev struct off of pcidev.
	 */
	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc),
			     scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}

void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
			   struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));

	int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);

	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	fcp_cmnd->fc_cmdref = 0;
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
}

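/*
 * Pull the residual count, SCSI status and, when the flags indicate they are
 * present, FCP_RSP_INFO and sense data out of the RQ entries. A 4- or 8-byte
 * FCP_RSP_INFO carries the rsp_code consumed by the task-management
 * completion path; sense data is truncated to SCSI_SENSE_BUFFERSIZE.
 */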
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
	u32 rq_buff_len = 0;
	int i;
	unsigned char *rq_data;
	unsigned char *dummy;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = 0;
	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
		io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
				  fcp_rsp->scsi_status_code;

	/* Fetch fcp_rsp_info and fcp_sns_info if available */
	if (num_rq) {

		/*
		 * We do not anticipate num_rq >1, as the linux defined
		 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO
		 * 256 bytes of single rq buffer is good enough to hold this.
		 */

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
			fcp_rsp_len = rq_buff_len
					= fcp_rsp->fcp_rsp_len;
		}

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
			fcp_sns_len = fcp_rsp->fcp_sns_len;
			rq_buff_len += fcp_rsp->fcp_sns_len;
		}

		io_req->fcp_rsp_len = fcp_rsp_len;
		io_req->fcp_sns_len = fcp_sns_len;

		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
			/* Invalid sense length. */
			printk(KERN_ERR PFX "invalid sns length %d\n",
			       rq_buff_len);
			/* reset rq_buff_len */
			rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
		}

		rq_data = bnx2fc_get_next_rqe(tgt, 1);

		if (num_rq > 1) {
			/* We do not need extra sense data */
			for (i = 1; i < num_rq; i++)
				dummy = bnx2fc_get_next_rqe(tgt, 1);
		}

		/* fetch fcp_rsp_code */
		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
			/* Only for task management function */
			io_req->fcp_rsp_code = rq_data[3];
			printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
			       io_req->fcp_rsp_code);
		}

		/* fetch sense data */
		rq_data += fcp_rsp_len;

		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
			printk(KERN_ERR PFX "Truncating sense buffer\n");
			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
		}

		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);

		/* return RQ entries */
		for (i = 0; i < num_rq; i++)
			bnx2fc_return_rqe(tgt, 1);
	}
}

/**
 * bnx2fc_queuecommand - Queuecommand function of the scsi template
 *
 * @host: The Scsi_Host the command was issued to
 * @sc_cmd: struct scsi_cmnd to be executed
 *
 * This is the IO strategy routine, called by SCSI-ML
 **/
int bnx2fc_queuecommand(struct Scsi_Host *host,
			struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	int rc = 0;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}
	if (tgt->retry_delay_timestamp) {
		if (time_after(jiffies, tgt->retry_delay_timestamp)) {
			tgt->retry_delay_timestamp = 0;
		} else {
			/* If retry_delay timer is active, flow off the ML */
			rc = SCSI_MLQUEUE_TARGET_BUSY;
			goto exit_qcmd;
		}
	}

	spin_lock_bh(&tgt->tgt_lock);

	io_req = bnx2fc_cmd_alloc(tgt);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd_tgtlock;
	}
	io_req->sc_cmd = sc_cmd;

	if (bnx2fc_post_io_req(tgt, io_req)) {
		printk(KERN_ERR PFX "Unable to post io_req\n");
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd_tgtlock;
	}

exit_qcmd_tgtlock:
	spin_unlock_bh(&tgt->tgt_lock);
exit_qcmd:
	return rc;
}

void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
				   struct fcoe_task_ctx_entry *task,
				   u8 num_rq)
{
	struct fcoe_fcp_rsp_payload *fcp_rsp;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct scsi_cmnd *sc_cmd;
	struct Scsi_Host *host;


	/* scsi_cmd_cmpl is called with tgt lock held */

	if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		/* we will not receive ABTS response for this IO */
		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
			      "this scsi cmd\n");
	}

	/* Cancel the timeout_work, as we received IO completion */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	sc_cmd = io_req->sc_cmd;
	if (sc_cmd == NULL) {
		printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
		return;
	}

	/* Fetch fcp_rsp from task context and perform cmd completion */
	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);

	/* parse fcp_rsp and obtain sense data from RQ if available */
	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);

	host = sc_cmd->device->host;
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
		return;
	}

	if (io_req->on_active_queue) {
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		/* Move IO req to retire queue */
		list_add_tail(&io_req->link, &tgt->io_retire_queue);
	} else {
		/* This should not happen, but could have been pulled
		 * by bnx2fc_flush_active_ios(), or during a race
		 * between command abort and (late) completion.
		 */
		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
		if (io_req->wait_for_comp)
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags))
				complete(&io_req->tm_done);
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
				      " fcp_resid = 0x%x\n",
				      io_req->cdb_status, io_req->fcp_resid);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				/* Set the jiffies + retry_delay_timer * 100ms
				   for the rport/tgt */
				tgt->retry_delay_timestamp = jiffies +
					fcp_rsp->retry_delay_timer * HZ / 10;
			}

		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
		       io_req->fcp_status);
		break;
	}
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}

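/*
 * Post a SCSI command to the firmware; called from bnx2fc_queuecommand()
 * with tgt_lock held. Builds the BD table from the scatterlist, initializes
 * the task context, places an SQE on the send queue and rings the doorbell.
 * On any failure the command's reference is dropped and -EAGAIN is returned
 * so the caller reports SCSI_MLQUEUE_HOST_BUSY.
 */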
int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
		       struct bnx2fc_cmd *io_req)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_lport *lport = port->lport;
	struct fc_stats *stats;
	int task_idx, index;
	u16 xid;

	/* bnx2fc_post_io_req() is called with the tgt_lock held */

	/* Initialize rest of io_req fields */
	io_req->cmd_type = BNX2FC_SCSI_CMD;
	io_req->port = port;
	io_req->tgt = tgt;
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;

	stats = per_cpu_ptr(lport->stats, get_cpu());
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		io_req->io_req_flags = BNX2FC_READ;
		stats->InputRequests++;
		stats->InputBytes += io_req->data_xfer_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		io_req->io_req_flags = BNX2FC_WRITE;
		stats->OutputRequests++;
		stats->OutputBytes += io_req->data_xfer_len;
	} else {
		io_req->io_req_flags = 0;
		stats->ControlRequests++;
	}
	put_cpu();

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (bnx2fc_build_bd_list_from_sg(io_req)) {
		printk(KERN_ERR PFX "BD list creation failed\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_task(io_req, task);

	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "Session not ready...post_io\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	/* Time IO req */
	if (tgt->io_timeout)
		bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_cmd_queue */

	io_req->on_active_queue = 1;
	/* move io_req from pending_queue to active_queue */
	list_add_tail(&io_req->link, &tgt->active_cmd_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	return 0;
}