1 /* bnx2fc_io.c: QLogic Linux FCoE offload driver. 2 * IO manager and SCSI IO processing. 3 * 4 * Copyright (c) 2008-2013 Broadcom Corporation 5 * Copyright (c) 2014-2015 QLogic Corporation 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation. 10 * 11 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) 12 */ 13 14 #include "bnx2fc.h" 15 16 #define RESERVE_FREE_LIST_INDEX num_possible_cpus() 17 18 static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, 19 int bd_index); 20 static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); 21 static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req); 22 static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req); 23 static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req); 24 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, 25 struct fcoe_fcp_rsp_payload *fcp_rsp, 26 u8 num_rq); 27 28 void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, 29 unsigned int timer_msec) 30 { 31 struct bnx2fc_interface *interface = io_req->port->priv; 32 33 if (queue_delayed_work(interface->timer_work_queue, 34 &io_req->timeout_work, 35 msecs_to_jiffies(timer_msec))) 36 kref_get(&io_req->refcount); 37 } 38 39 static void bnx2fc_cmd_timeout(struct work_struct *work) 40 { 41 struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd, 42 timeout_work.work); 43 u8 cmd_type = io_req->cmd_type; 44 struct bnx2fc_rport *tgt = io_req->tgt; 45 int rc; 46 47 BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d," 48 "req_flags = %lx\n", cmd_type, io_req->req_flags); 49 50 spin_lock_bh(&tgt->tgt_lock); 51 if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) { 52 clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags); 53 /* 54 * Ideally we should hold the io_req until the RRQ completes, 55 * and release the io_req from the timeout hold. 56 */ 57 spin_unlock_bh(&tgt->tgt_lock); 58 bnx2fc_send_rrq(io_req); 59 return; 60 } 61 if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) { 62 BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n"); 63 goto done; 64 } 65 66 switch (cmd_type) { 67 case BNX2FC_SCSI_CMD: 68 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, 69 &io_req->req_flags)) { 70 /* Handle eh_abort timeout */ 71 BNX2FC_IO_DBG(io_req, "eh_abort timed out\n"); 72 complete(&io_req->tm_done); 73 } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, 74 &io_req->req_flags)) { 75 /* Handle internally generated ABTS timeout */ 76 BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n", 77 io_req->refcount.refcount.counter); 78 if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, 79 &io_req->req_flags))) { 80 /* 81 * Cleanup and return original command to 82 * mid-layer. 83 */ 84 bnx2fc_initiate_cleanup(io_req); 85 kref_put(&io_req->refcount, bnx2fc_cmd_release); 86 spin_unlock_bh(&tgt->tgt_lock); 87 88 return; 89 } 90 } else { 91 /* Handle IO timeout */ 92 BNX2FC_IO_DBG(io_req, "IO timed out. 
issue ABTS\n"); 93 if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, 94 &io_req->req_flags)) { 95 BNX2FC_IO_DBG(io_req, "IO completed before " 96 " timer expiry\n"); 97 goto done; 98 } 99 100 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, 101 &io_req->req_flags)) { 102 rc = bnx2fc_initiate_abts(io_req); 103 if (rc == SUCCESS) 104 goto done; 105 106 kref_put(&io_req->refcount, bnx2fc_cmd_release); 107 spin_unlock_bh(&tgt->tgt_lock); 108 109 return; 110 } else { 111 BNX2FC_IO_DBG(io_req, "IO already in " 112 "ABTS processing\n"); 113 } 114 } 115 break; 116 case BNX2FC_ELS: 117 118 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { 119 BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n"); 120 121 if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, 122 &io_req->req_flags)) { 123 kref_put(&io_req->refcount, bnx2fc_cmd_release); 124 spin_unlock_bh(&tgt->tgt_lock); 125 126 return; 127 } 128 } else { 129 /* 130 * Handle ELS timeout. 131 * tgt_lock is used to sync compl path and timeout 132 * path. If els compl path is processing this IO, we 133 * have nothing to do here, just release the timer hold 134 */ 135 BNX2FC_IO_DBG(io_req, "ELS timed out\n"); 136 if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE, 137 &io_req->req_flags)) 138 goto done; 139 140 /* Indicate the cb_func that this ELS is timed out */ 141 set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags); 142 143 if ((io_req->cb_func) && (io_req->cb_arg)) { 144 io_req->cb_func(io_req->cb_arg); 145 io_req->cb_arg = NULL; 146 } 147 } 148 break; 149 default: 150 printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n", 151 cmd_type); 152 break; 153 } 154 155 done: 156 /* release the cmd that was held when timer was set */ 157 kref_put(&io_req->refcount, bnx2fc_cmd_release); 158 spin_unlock_bh(&tgt->tgt_lock); 159 } 160 161 static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code) 162 { 163 /* Called with host lock held */ 164 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 165 166 /* 167 * active_cmd_queue may have other command types as well, 168 * and during flush operation, we want to error back only 169 * scsi commands. 170 */ 171 if (io_req->cmd_type != BNX2FC_SCSI_CMD) 172 return; 173 174 BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code); 175 if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) { 176 /* Do not call scsi done for this IO */ 177 return; 178 } 179 180 bnx2fc_unmap_sg_list(io_req); 181 io_req->sc_cmd = NULL; 182 183 /* Sanity checks before returning command to mid-layer */ 184 if (!sc_cmd) { 185 printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. 
" 186 "IO(0x%x) already cleaned up\n", 187 io_req->xid); 188 return; 189 } 190 if (!sc_cmd->device) { 191 pr_err(PFX "0x%x: sc_cmd->device is NULL.\n", io_req->xid); 192 return; 193 } 194 if (!sc_cmd->device->host) { 195 pr_err(PFX "0x%x: sc_cmd->device->host is NULL.\n", 196 io_req->xid); 197 return; 198 } 199 200 sc_cmd->result = err_code << 16; 201 202 BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n", 203 sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries, 204 sc_cmd->allowed); 205 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); 206 sc_cmd->SCp.ptr = NULL; 207 sc_cmd->scsi_done(sc_cmd); 208 } 209 210 struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) 211 { 212 struct bnx2fc_cmd_mgr *cmgr; 213 struct io_bdt *bdt_info; 214 struct bnx2fc_cmd *io_req; 215 size_t len; 216 u32 mem_size; 217 u16 xid; 218 int i; 219 int num_ios, num_pri_ios; 220 size_t bd_tbl_sz; 221 int arr_sz = num_possible_cpus() + 1; 222 u16 min_xid = BNX2FC_MIN_XID; 223 u16 max_xid = hba->max_xid; 224 225 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { 226 printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ 227 and max_xid 0x%x\n", min_xid, max_xid); 228 return NULL; 229 } 230 BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid); 231 232 num_ios = max_xid - min_xid + 1; 233 len = (num_ios * (sizeof(struct bnx2fc_cmd *))); 234 len += sizeof(struct bnx2fc_cmd_mgr); 235 236 cmgr = kzalloc(len, GFP_KERNEL); 237 if (!cmgr) { 238 printk(KERN_ERR PFX "failed to alloc cmgr\n"); 239 return NULL; 240 } 241 242 cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * 243 arr_sz, GFP_KERNEL); 244 if (!cmgr->free_list) { 245 printk(KERN_ERR PFX "failed to alloc free_list\n"); 246 goto mem_err; 247 } 248 249 cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * 250 arr_sz, GFP_KERNEL); 251 if (!cmgr->free_list_lock) { 252 printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); 253 kfree(cmgr->free_list); 254 cmgr->free_list = NULL; 255 goto mem_err; 256 } 257 258 cmgr->hba = hba; 259 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); 260 261 for (i = 0; i < arr_sz; i++) { 262 INIT_LIST_HEAD(&cmgr->free_list[i]); 263 spin_lock_init(&cmgr->free_list_lock[i]); 264 } 265 266 /* 267 * Pre-allocated pool of bnx2fc_cmds. 268 * Last entry in the free list array is the free list 269 * of slow path requests. 
270 */ 271 xid = BNX2FC_MIN_XID; 272 num_pri_ios = num_ios - hba->elstm_xids; 273 for (i = 0; i < num_ios; i++) { 274 io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); 275 276 if (!io_req) { 277 printk(KERN_ERR PFX "failed to alloc io_req\n"); 278 goto mem_err; 279 } 280 281 INIT_LIST_HEAD(&io_req->link); 282 INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); 283 284 io_req->xid = xid++; 285 if (i < num_pri_ios) 286 list_add_tail(&io_req->link, 287 &cmgr->free_list[io_req->xid % 288 num_possible_cpus()]); 289 else 290 list_add_tail(&io_req->link, 291 &cmgr->free_list[num_possible_cpus()]); 292 io_req++; 293 } 294 295 /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ 296 mem_size = num_ios * sizeof(struct io_bdt *); 297 cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL); 298 if (!cmgr->io_bdt_pool) { 299 printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); 300 goto mem_err; 301 } 302 303 mem_size = sizeof(struct io_bdt); 304 for (i = 0; i < num_ios; i++) { 305 cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL); 306 if (!cmgr->io_bdt_pool[i]) { 307 printk(KERN_ERR PFX "failed to alloc " 308 "io_bdt_pool[%d]\n", i); 309 goto mem_err; 310 } 311 } 312 313 /* Allocate an map fcoe_bdt_ctx structures */ 314 bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx); 315 for (i = 0; i < num_ios; i++) { 316 bdt_info = cmgr->io_bdt_pool[i]; 317 bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, 318 bd_tbl_sz, 319 &bdt_info->bd_tbl_dma, 320 GFP_KERNEL); 321 if (!bdt_info->bd_tbl) { 322 printk(KERN_ERR PFX "failed to alloc " 323 "bdt_tbl[%d]\n", i); 324 goto mem_err; 325 } 326 } 327 328 return cmgr; 329 330 mem_err: 331 bnx2fc_cmd_mgr_free(cmgr); 332 return NULL; 333 } 334 335 void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr) 336 { 337 struct io_bdt *bdt_info; 338 struct bnx2fc_hba *hba = cmgr->hba; 339 size_t bd_tbl_sz; 340 u16 min_xid = BNX2FC_MIN_XID; 341 u16 max_xid = hba->max_xid; 342 int num_ios; 343 int i; 344 345 num_ios = max_xid - min_xid + 1; 346 347 /* Free fcoe_bdt_ctx structures */ 348 if (!cmgr->io_bdt_pool) 349 goto free_cmd_pool; 350 351 bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx); 352 for (i = 0; i < num_ios; i++) { 353 bdt_info = cmgr->io_bdt_pool[i]; 354 if (bdt_info->bd_tbl) { 355 dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz, 356 bdt_info->bd_tbl, 357 bdt_info->bd_tbl_dma); 358 bdt_info->bd_tbl = NULL; 359 } 360 } 361 362 /* Destroy io_bdt pool */ 363 for (i = 0; i < num_ios; i++) { 364 kfree(cmgr->io_bdt_pool[i]); 365 cmgr->io_bdt_pool[i] = NULL; 366 } 367 368 kfree(cmgr->io_bdt_pool); 369 cmgr->io_bdt_pool = NULL; 370 371 free_cmd_pool: 372 kfree(cmgr->free_list_lock); 373 374 /* Destroy cmd pool */ 375 if (!cmgr->free_list) 376 goto free_cmgr; 377 378 for (i = 0; i < num_possible_cpus() + 1; i++) { 379 struct bnx2fc_cmd *tmp, *io_req; 380 381 list_for_each_entry_safe(io_req, tmp, 382 &cmgr->free_list[i], link) { 383 list_del(&io_req->link); 384 kfree(io_req); 385 } 386 } 387 kfree(cmgr->free_list); 388 free_cmgr: 389 /* Free command manager itself */ 390 kfree(cmgr); 391 } 392 393 struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) 394 { 395 struct fcoe_port *port = tgt->port; 396 struct bnx2fc_interface *interface = port->priv; 397 struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr; 398 struct bnx2fc_cmd *io_req; 399 struct list_head *listp; 400 struct io_bdt *bd_tbl; 401 int index = RESERVE_FREE_LIST_INDEX; 402 u32 free_sqes; 403 u32 max_sqes; 404 u16 xid; 405 406 max_sqes = tgt->max_sqes; 407 
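	/*
	 * ELS and task management commands are carved out of the reserve
	 * free list (free_list[num_possible_cpus()], i.e.
	 * RESERVE_FREE_LIST_INDEX) rather than the per-CPU lists used by
	 * bnx2fc_cmd_alloc(), and they are throttled with their own,
	 * smaller SQE budgets (BNX2FC_ELS_MAX_SQES / BNX2FC_TM_MAX_SQES)
	 * instead of the default tgt->max_sqes set above.
	 */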
switch (type) { 408 case BNX2FC_TASK_MGMT_CMD: 409 max_sqes = BNX2FC_TM_MAX_SQES; 410 break; 411 case BNX2FC_ELS: 412 max_sqes = BNX2FC_ELS_MAX_SQES; 413 break; 414 default: 415 break; 416 } 417 418 /* 419 * NOTE: Free list insertions and deletions are protected with 420 * cmgr lock 421 */ 422 spin_lock_bh(&cmd_mgr->free_list_lock[index]); 423 free_sqes = atomic_read(&tgt->free_sqes); 424 if ((list_empty(&(cmd_mgr->free_list[index]))) || 425 (tgt->num_active_ios.counter >= max_sqes) || 426 (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { 427 BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " 428 "ios(%d):sqes(%d)\n", 429 tgt->num_active_ios.counter, tgt->max_sqes); 430 if (list_empty(&(cmd_mgr->free_list[index]))) 431 printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); 432 spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 433 return NULL; 434 } 435 436 listp = (struct list_head *) 437 cmd_mgr->free_list[index].next; 438 list_del_init(listp); 439 io_req = (struct bnx2fc_cmd *) listp; 440 xid = io_req->xid; 441 cmd_mgr->cmds[xid] = io_req; 442 atomic_inc(&tgt->num_active_ios); 443 atomic_dec(&tgt->free_sqes); 444 spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 445 446 INIT_LIST_HEAD(&io_req->link); 447 448 io_req->port = port; 449 io_req->cmd_mgr = cmd_mgr; 450 io_req->req_flags = 0; 451 io_req->cmd_type = type; 452 453 /* Bind io_bdt for this io_req */ 454 /* Have a static link between io_req and io_bdt_pool */ 455 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; 456 bd_tbl->io_req = io_req; 457 458 /* Hold the io_req against deletion */ 459 kref_init(&io_req->refcount); 460 return io_req; 461 } 462 463 struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) 464 { 465 struct fcoe_port *port = tgt->port; 466 struct bnx2fc_interface *interface = port->priv; 467 struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr; 468 struct bnx2fc_cmd *io_req; 469 struct list_head *listp; 470 struct io_bdt *bd_tbl; 471 u32 free_sqes; 472 u32 max_sqes; 473 u16 xid; 474 int index = get_cpu(); 475 476 max_sqes = BNX2FC_SCSI_MAX_SQES; 477 /* 478 * NOTE: Free list insertions and deletions are protected with 479 * cmgr lock 480 */ 481 spin_lock_bh(&cmd_mgr->free_list_lock[index]); 482 free_sqes = atomic_read(&tgt->free_sqes); 483 if ((list_empty(&cmd_mgr->free_list[index])) || 484 (tgt->num_active_ios.counter >= max_sqes) || 485 (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { 486 spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 487 put_cpu(); 488 return NULL; 489 } 490 491 listp = (struct list_head *) 492 cmd_mgr->free_list[index].next; 493 list_del_init(listp); 494 io_req = (struct bnx2fc_cmd *) listp; 495 xid = io_req->xid; 496 cmd_mgr->cmds[xid] = io_req; 497 atomic_inc(&tgt->num_active_ios); 498 atomic_dec(&tgt->free_sqes); 499 spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 500 put_cpu(); 501 502 INIT_LIST_HEAD(&io_req->link); 503 504 io_req->port = port; 505 io_req->cmd_mgr = cmd_mgr; 506 io_req->req_flags = 0; 507 508 /* Bind io_bdt for this io_req */ 509 /* Have a static link between io_req and io_bdt_pool */ 510 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; 511 bd_tbl->io_req = io_req; 512 513 /* Hold the io_req against deletion */ 514 kref_init(&io_req->refcount); 515 return io_req; 516 } 517 518 void bnx2fc_cmd_release(struct kref *ref) 519 { 520 struct bnx2fc_cmd *io_req = container_of(ref, 521 struct bnx2fc_cmd, refcount); 522 struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; 523 int index; 524 525 if (io_req->cmd_type == BNX2FC_SCSI_CMD) 526 index = io_req->xid % 
num_possible_cpus(); 527 else 528 index = RESERVE_FREE_LIST_INDEX; 529 530 531 spin_lock_bh(&cmd_mgr->free_list_lock[index]); 532 if (io_req->cmd_type != BNX2FC_SCSI_CMD) 533 bnx2fc_free_mp_resc(io_req); 534 cmd_mgr->cmds[io_req->xid] = NULL; 535 /* Delete IO from retire queue */ 536 list_del_init(&io_req->link); 537 /* Add it to the free list */ 538 list_add(&io_req->link, 539 &cmd_mgr->free_list[index]); 540 atomic_dec(&io_req->tgt->num_active_ios); 541 spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 542 543 } 544 545 static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) 546 { 547 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); 548 struct bnx2fc_interface *interface = io_req->port->priv; 549 struct bnx2fc_hba *hba = interface->hba; 550 size_t sz = sizeof(struct fcoe_bd_ctx); 551 552 /* clear tm flags */ 553 mp_req->tm_flags = 0; 554 if (mp_req->mp_req_bd) { 555 dma_free_coherent(&hba->pcidev->dev, sz, 556 mp_req->mp_req_bd, 557 mp_req->mp_req_bd_dma); 558 mp_req->mp_req_bd = NULL; 559 } 560 if (mp_req->mp_resp_bd) { 561 dma_free_coherent(&hba->pcidev->dev, sz, 562 mp_req->mp_resp_bd, 563 mp_req->mp_resp_bd_dma); 564 mp_req->mp_resp_bd = NULL; 565 } 566 if (mp_req->req_buf) { 567 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, 568 mp_req->req_buf, 569 mp_req->req_buf_dma); 570 mp_req->req_buf = NULL; 571 } 572 if (mp_req->resp_buf) { 573 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, 574 mp_req->resp_buf, 575 mp_req->resp_buf_dma); 576 mp_req->resp_buf = NULL; 577 } 578 } 579 580 int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) 581 { 582 struct bnx2fc_mp_req *mp_req; 583 struct fcoe_bd_ctx *mp_req_bd; 584 struct fcoe_bd_ctx *mp_resp_bd; 585 struct bnx2fc_interface *interface = io_req->port->priv; 586 struct bnx2fc_hba *hba = interface->hba; 587 dma_addr_t addr; 588 size_t sz; 589 590 mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req); 591 memset(mp_req, 0, sizeof(struct bnx2fc_mp_req)); 592 593 if (io_req->cmd_type != BNX2FC_ELS) { 594 mp_req->req_len = sizeof(struct fcp_cmnd); 595 io_req->data_xfer_len = mp_req->req_len; 596 } else 597 mp_req->req_len = io_req->data_xfer_len; 598 599 mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, 600 &mp_req->req_buf_dma, 601 GFP_ATOMIC); 602 if (!mp_req->req_buf) { 603 printk(KERN_ERR PFX "unable to alloc MP req buffer\n"); 604 bnx2fc_free_mp_resc(io_req); 605 return FAILED; 606 } 607 608 mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, 609 &mp_req->resp_buf_dma, 610 GFP_ATOMIC); 611 if (!mp_req->resp_buf) { 612 printk(KERN_ERR PFX "unable to alloc TM resp buffer\n"); 613 bnx2fc_free_mp_resc(io_req); 614 return FAILED; 615 } 616 memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE); 617 memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE); 618 619 /* Allocate and map mp_req_bd and mp_resp_bd */ 620 sz = sizeof(struct fcoe_bd_ctx); 621 mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz, 622 &mp_req->mp_req_bd_dma, 623 GFP_ATOMIC); 624 if (!mp_req->mp_req_bd) { 625 printk(KERN_ERR PFX "unable to alloc MP req bd\n"); 626 bnx2fc_free_mp_resc(io_req); 627 return FAILED; 628 } 629 mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz, 630 &mp_req->mp_resp_bd_dma, 631 GFP_ATOMIC); 632 if (!mp_req->mp_resp_bd) { 633 printk(KERN_ERR PFX "unable to alloc MP resp bd\n"); 634 bnx2fc_free_mp_resc(io_req); 635 return FAILED; 636 } 637 /* Fill bd table */ 638 addr = mp_req->req_buf_dma; 639 mp_req_bd = mp_req->mp_req_bd; 640 mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff; 641 mp_req_bd->buf_addr_hi = 
(u32)((u64)addr >> 32); 642 mp_req_bd->buf_len = CNIC_PAGE_SIZE; 643 mp_req_bd->flags = 0; 644 645 /* 646 * MP buffer is either a task mgmt command or an ELS. 647 * So the assumption is that it consumes a single bd 648 * entry in the bd table 649 */ 650 mp_resp_bd = mp_req->mp_resp_bd; 651 addr = mp_req->resp_buf_dma; 652 mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; 653 mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); 654 mp_resp_bd->buf_len = CNIC_PAGE_SIZE; 655 mp_resp_bd->flags = 0; 656 657 return SUCCESS; 658 } 659 660 static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) 661 { 662 struct fc_lport *lport; 663 struct fc_rport *rport; 664 struct fc_rport_libfc_priv *rp; 665 struct fcoe_port *port; 666 struct bnx2fc_interface *interface; 667 struct bnx2fc_rport *tgt; 668 struct bnx2fc_cmd *io_req; 669 struct bnx2fc_mp_req *tm_req; 670 struct fcoe_task_ctx_entry *task; 671 struct fcoe_task_ctx_entry *task_page; 672 struct Scsi_Host *host = sc_cmd->device->host; 673 struct fc_frame_header *fc_hdr; 674 struct fcp_cmnd *fcp_cmnd; 675 int task_idx, index; 676 int rc = SUCCESS; 677 u16 xid; 678 u32 sid, did; 679 unsigned long start = jiffies; 680 681 lport = shost_priv(host); 682 rport = starget_to_rport(scsi_target(sc_cmd->device)); 683 port = lport_priv(lport); 684 interface = port->priv; 685 686 if (rport == NULL) { 687 printk(KERN_ERR PFX "device_reset: rport is NULL\n"); 688 rc = FAILED; 689 goto tmf_err; 690 } 691 rp = rport->dd_data; 692 693 rc = fc_block_scsi_eh(sc_cmd); 694 if (rc) 695 return rc; 696 697 if (lport->state != LPORT_ST_READY || !(lport->link_up)) { 698 printk(KERN_ERR PFX "device_reset: link is not ready\n"); 699 rc = FAILED; 700 goto tmf_err; 701 } 702 /* rport and tgt are allocated together, so tgt should be non-NULL */ 703 tgt = (struct bnx2fc_rport *)&rp[1]; 704 705 if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { 706 printk(KERN_ERR PFX "device_reset: tgt not offloaded\n"); 707 rc = FAILED; 708 goto tmf_err; 709 } 710 retry_tmf: 711 io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD); 712 if (!io_req) { 713 if (time_after(jiffies, start + HZ)) { 714 printk(KERN_ERR PFX "tmf: Failed TMF"); 715 rc = FAILED; 716 goto tmf_err; 717 } 718 msleep(20); 719 goto retry_tmf; 720 } 721 /* Initialize rest of io_req fields */ 722 io_req->sc_cmd = sc_cmd; 723 io_req->port = port; 724 io_req->tgt = tgt; 725 726 tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req); 727 728 rc = bnx2fc_init_mp_req(io_req); 729 if (rc == FAILED) { 730 printk(KERN_ERR PFX "Task mgmt MP request init failed\n"); 731 spin_lock_bh(&tgt->tgt_lock); 732 kref_put(&io_req->refcount, bnx2fc_cmd_release); 733 spin_unlock_bh(&tgt->tgt_lock); 734 goto tmf_err; 735 } 736 737 /* Set TM flags */ 738 io_req->io_req_flags = 0; 739 tm_req->tm_flags = tm_flags; 740 741 /* Fill FCP_CMND */ 742 bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf); 743 fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf; 744 memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len); 745 fcp_cmnd->fc_dl = 0; 746 747 /* Fill FC header */ 748 fc_hdr = &(tm_req->req_fc_hdr); 749 sid = tgt->sid; 750 did = rport->port_id; 751 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid, 752 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | 753 FC_FC_SEQ_INIT, 0); 754 /* Obtain exchange id */ 755 xid = io_req->xid; 756 757 BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid); 758 task_idx = xid/BNX2FC_TASKS_PER_PAGE; 759 index = xid % BNX2FC_TASKS_PER_PAGE; 760 761 /* Initialize task context for this IO request */ 762 
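	/*
	 * Task contexts live in per-page arrays: task_idx picks the page in
	 * hba->task_ctx[] (xid / BNX2FC_TASKS_PER_PAGE) and index picks the
	 * entry within that page (xid % BNX2FC_TASKS_PER_PAGE). Purely as an
	 * illustration, if BNX2FC_TASKS_PER_PAGE were 256, xid 0x205 would
	 * map to page 2, entry 5.
	 */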
task_page = (struct fcoe_task_ctx_entry *) 763 interface->hba->task_ctx[task_idx]; 764 task = &(task_page[index]); 765 bnx2fc_init_mp_task(io_req, task); 766 767 sc_cmd->SCp.ptr = (char *)io_req; 768 769 /* Obtain free SQ entry */ 770 spin_lock_bh(&tgt->tgt_lock); 771 bnx2fc_add_2_sq(tgt, xid); 772 773 /* Enqueue the io_req to active_tm_queue */ 774 io_req->on_tmf_queue = 1; 775 list_add_tail(&io_req->link, &tgt->active_tm_queue); 776 777 init_completion(&io_req->tm_done); 778 io_req->wait_for_comp = 1; 779 780 /* Ring doorbell */ 781 bnx2fc_ring_doorbell(tgt); 782 spin_unlock_bh(&tgt->tgt_lock); 783 784 rc = wait_for_completion_timeout(&io_req->tm_done, 785 interface->tm_timeout * HZ); 786 spin_lock_bh(&tgt->tgt_lock); 787 788 io_req->wait_for_comp = 0; 789 if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) { 790 set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags); 791 if (io_req->on_tmf_queue) { 792 list_del_init(&io_req->link); 793 io_req->on_tmf_queue = 0; 794 } 795 io_req->wait_for_comp = 1; 796 bnx2fc_initiate_cleanup(io_req); 797 spin_unlock_bh(&tgt->tgt_lock); 798 rc = wait_for_completion_timeout(&io_req->tm_done, 799 BNX2FC_FW_TIMEOUT); 800 spin_lock_bh(&tgt->tgt_lock); 801 io_req->wait_for_comp = 0; 802 if (!rc) 803 kref_put(&io_req->refcount, bnx2fc_cmd_release); 804 } 805 806 spin_unlock_bh(&tgt->tgt_lock); 807 808 if (!rc) { 809 BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n"); 810 rc = FAILED; 811 } else { 812 BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n"); 813 rc = SUCCESS; 814 } 815 tmf_err: 816 return rc; 817 } 818 819 int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req) 820 { 821 struct fc_lport *lport; 822 struct bnx2fc_rport *tgt = io_req->tgt; 823 struct fc_rport *rport = tgt->rport; 824 struct fc_rport_priv *rdata = tgt->rdata; 825 struct bnx2fc_interface *interface; 826 struct fcoe_port *port; 827 struct bnx2fc_cmd *abts_io_req; 828 struct fcoe_task_ctx_entry *task; 829 struct fcoe_task_ctx_entry *task_page; 830 struct fc_frame_header *fc_hdr; 831 struct bnx2fc_mp_req *abts_req; 832 int task_idx, index; 833 u32 sid, did; 834 u16 xid; 835 int rc = SUCCESS; 836 u32 r_a_tov = rdata->r_a_tov; 837 838 /* called with tgt_lock held */ 839 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n"); 840 841 port = io_req->port; 842 interface = port->priv; 843 lport = port->lport; 844 845 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { 846 printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n"); 847 rc = FAILED; 848 goto abts_err; 849 } 850 851 if (rport == NULL) { 852 printk(KERN_ERR PFX "initiate_abts: rport is NULL\n"); 853 rc = FAILED; 854 goto abts_err; 855 } 856 857 if (lport->state != LPORT_ST_READY || !(lport->link_up)) { 858 printk(KERN_ERR PFX "initiate_abts: link is not ready\n"); 859 rc = FAILED; 860 goto abts_err; 861 } 862 863 abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS); 864 if (!abts_io_req) { 865 printk(KERN_ERR PFX "abts: couldnt allocate cmd\n"); 866 rc = FAILED; 867 goto abts_err; 868 } 869 870 /* Initialize rest of io_req fields */ 871 abts_io_req->sc_cmd = NULL; 872 abts_io_req->port = port; 873 abts_io_req->tgt = tgt; 874 abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */ 875 876 abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req); 877 memset(abts_req, 0, sizeof(struct bnx2fc_mp_req)); 878 879 /* Fill FC header */ 880 fc_hdr = &(abts_req->req_fc_hdr); 881 882 /* Obtain oxid and rxid for the original exchange to be aborted */ 883 fc_hdr->fh_ox_id = htons(io_req->xid); 884 fc_hdr->fh_rx_id = 
htons(io_req->task->rxwr_txrd.var_ctx.rx_id); 885 886 sid = tgt->sid; 887 did = rport->port_id; 888 889 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid, 890 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | 891 FC_FC_SEQ_INIT, 0); 892 893 xid = abts_io_req->xid; 894 BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n"); 895 task_idx = xid/BNX2FC_TASKS_PER_PAGE; 896 index = xid % BNX2FC_TASKS_PER_PAGE; 897 898 /* Initialize task context for this IO request */ 899 task_page = (struct fcoe_task_ctx_entry *) 900 interface->hba->task_ctx[task_idx]; 901 task = &(task_page[index]); 902 bnx2fc_init_mp_task(abts_io_req, task); 903 904 /* 905 * ABTS task is a temporary task that will be cleaned up 906 * irrespective of ABTS response. We need to start the timer 907 * for the original exchange, as the CQE is posted for the original 908 * IO request. 909 * 910 * Timer for ABTS is started only when it is originated by a 911 * TM request. For the ABTS issued as part of ULP timeout, 912 * scsi-ml maintains the timers. 913 */ 914 915 /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/ 916 bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov); 917 918 /* Obtain free SQ entry */ 919 bnx2fc_add_2_sq(tgt, xid); 920 921 /* Ring doorbell */ 922 bnx2fc_ring_doorbell(tgt); 923 924 abts_err: 925 return rc; 926 } 927 928 int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset, 929 enum fc_rctl r_ctl) 930 { 931 struct fc_lport *lport; 932 struct bnx2fc_rport *tgt = orig_io_req->tgt; 933 struct bnx2fc_interface *interface; 934 struct fcoe_port *port; 935 struct bnx2fc_cmd *seq_clnp_req; 936 struct fcoe_task_ctx_entry *task; 937 struct fcoe_task_ctx_entry *task_page; 938 struct bnx2fc_els_cb_arg *cb_arg = NULL; 939 int task_idx, index; 940 u16 xid; 941 int rc = 0; 942 943 BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n", 944 orig_io_req->xid); 945 kref_get(&orig_io_req->refcount); 946 947 port = orig_io_req->port; 948 interface = port->priv; 949 lport = port->lport; 950 951 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); 952 if (!cb_arg) { 953 printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n"); 954 rc = -ENOMEM; 955 goto cleanup_err; 956 } 957 958 seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP); 959 if (!seq_clnp_req) { 960 printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n"); 961 rc = -ENOMEM; 962 kfree(cb_arg); 963 goto cleanup_err; 964 } 965 /* Initialize rest of io_req fields */ 966 seq_clnp_req->sc_cmd = NULL; 967 seq_clnp_req->port = port; 968 seq_clnp_req->tgt = tgt; 969 seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */ 970 971 xid = seq_clnp_req->xid; 972 973 task_idx = xid/BNX2FC_TASKS_PER_PAGE; 974 index = xid % BNX2FC_TASKS_PER_PAGE; 975 976 /* Initialize task context for this IO request */ 977 task_page = (struct fcoe_task_ctx_entry *) 978 interface->hba->task_ctx[task_idx]; 979 task = &(task_page[index]); 980 cb_arg->aborted_io_req = orig_io_req; 981 cb_arg->io_req = seq_clnp_req; 982 cb_arg->r_ctl = r_ctl; 983 cb_arg->offset = offset; 984 seq_clnp_req->cb_arg = cb_arg; 985 986 printk(KERN_ERR PFX "call init_seq_cleanup_task\n"); 987 bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset); 988 989 /* Obtain free SQ entry */ 990 bnx2fc_add_2_sq(tgt, xid); 991 992 /* Ring doorbell */ 993 bnx2fc_ring_doorbell(tgt); 994 cleanup_err: 995 return rc; 996 } 997 998 int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) 999 { 1000 struct fc_lport *lport; 1001 struct bnx2fc_rport *tgt = io_req->tgt; 1002 struct 
bnx2fc_interface *interface; 1003 struct fcoe_port *port; 1004 struct bnx2fc_cmd *cleanup_io_req; 1005 struct fcoe_task_ctx_entry *task; 1006 struct fcoe_task_ctx_entry *task_page; 1007 int task_idx, index; 1008 u16 xid, orig_xid; 1009 int rc = 0; 1010 1011 /* ASSUMPTION: called with tgt_lock held */ 1012 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n"); 1013 1014 port = io_req->port; 1015 interface = port->priv; 1016 lport = port->lport; 1017 1018 cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP); 1019 if (!cleanup_io_req) { 1020 printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n"); 1021 rc = -1; 1022 goto cleanup_err; 1023 } 1024 1025 /* Initialize rest of io_req fields */ 1026 cleanup_io_req->sc_cmd = NULL; 1027 cleanup_io_req->port = port; 1028 cleanup_io_req->tgt = tgt; 1029 cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */ 1030 1031 xid = cleanup_io_req->xid; 1032 1033 task_idx = xid/BNX2FC_TASKS_PER_PAGE; 1034 index = xid % BNX2FC_TASKS_PER_PAGE; 1035 1036 /* Initialize task context for this IO request */ 1037 task_page = (struct fcoe_task_ctx_entry *) 1038 interface->hba->task_ctx[task_idx]; 1039 task = &(task_page[index]); 1040 orig_xid = io_req->xid; 1041 1042 BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid); 1043 1044 bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid); 1045 1046 /* Obtain free SQ entry */ 1047 bnx2fc_add_2_sq(tgt, xid); 1048 1049 /* Ring doorbell */ 1050 bnx2fc_ring_doorbell(tgt); 1051 1052 cleanup_err: 1053 return rc; 1054 } 1055 1056 /** 1057 * bnx2fc_eh_target_reset: Reset a target 1058 * 1059 * @sc_cmd: SCSI command 1060 * 1061 * Set from SCSI host template to send task mgmt command to the target 1062 * and wait for the response 1063 */ 1064 int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd) 1065 { 1066 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET); 1067 } 1068 1069 /** 1070 * bnx2fc_eh_device_reset - Reset a single LUN 1071 * 1072 * @sc_cmd: SCSI command 1073 * 1074 * Set from SCSI host template to send task mgmt command to the target 1075 * and wait for the response 1076 */ 1077 int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd) 1078 { 1079 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); 1080 } 1081 1082 static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req) 1083 { 1084 struct bnx2fc_rport *tgt = io_req->tgt; 1085 int rc = SUCCESS; 1086 1087 io_req->wait_for_comp = 1; 1088 bnx2fc_initiate_cleanup(io_req); 1089 1090 spin_unlock_bh(&tgt->tgt_lock); 1091 1092 wait_for_completion(&io_req->tm_done); 1093 1094 io_req->wait_for_comp = 0; 1095 /* 1096 * release the reference taken in eh_abort to allow the 1097 * target to re-login after flushing IOs 1098 */ 1099 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1100 1101 spin_lock_bh(&tgt->tgt_lock); 1102 return rc; 1103 } 1104 /** 1105 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding 1106 * SCSI command 1107 * 1108 * @sc_cmd: SCSI_ML command pointer 1109 * 1110 * SCSI abort request handler 1111 */ 1112 int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) 1113 { 1114 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 1115 struct fc_rport_libfc_priv *rp = rport->dd_data; 1116 struct bnx2fc_cmd *io_req; 1117 struct fc_lport *lport; 1118 struct bnx2fc_rport *tgt; 1119 int rc; 1120 1121 rc = fc_block_scsi_eh(sc_cmd); 1122 if (rc) 1123 return rc; 1124 1125 lport = shost_priv(sc_cmd->device->host); 1126 if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { 1127 printk(KERN_ERR PFX "eh_abort: link not 
ready\n"); 1128 return FAILED; 1129 } 1130 1131 tgt = (struct bnx2fc_rport *)&rp[1]; 1132 1133 BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n"); 1134 1135 spin_lock_bh(&tgt->tgt_lock); 1136 io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr; 1137 if (!io_req) { 1138 /* Command might have just completed */ 1139 printk(KERN_ERR PFX "eh_abort: io_req is NULL\n"); 1140 spin_unlock_bh(&tgt->tgt_lock); 1141 return SUCCESS; 1142 } 1143 BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n", 1144 io_req->refcount.refcount.counter); 1145 1146 /* Hold IO request across abort processing */ 1147 kref_get(&io_req->refcount); 1148 1149 BUG_ON(tgt != io_req->tgt); 1150 1151 /* Remove the io_req from the active_q. */ 1152 /* 1153 * Task Mgmt functions (LUN RESET & TGT RESET) will not 1154 * issue an ABTS on this particular IO req, as the 1155 * io_req is no longer in the active_q. 1156 */ 1157 if (tgt->flush_in_prog) { 1158 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " 1159 "flush in progress\n", io_req->xid); 1160 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1161 spin_unlock_bh(&tgt->tgt_lock); 1162 return SUCCESS; 1163 } 1164 1165 if (io_req->on_active_queue == 0) { 1166 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " 1167 "not on active_q\n", io_req->xid); 1168 /* 1169 * This condition can happen only due to the FW bug, 1170 * where we do not receive cleanup response from 1171 * the FW. Handle this case gracefully by erroring 1172 * back the IO request to SCSI-ml 1173 */ 1174 bnx2fc_scsi_done(io_req, DID_ABORT); 1175 1176 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1177 spin_unlock_bh(&tgt->tgt_lock); 1178 return SUCCESS; 1179 } 1180 1181 /* 1182 * Only eh_abort processing will remove the IO from 1183 * active_cmd_q before processing the request. this is 1184 * done to avoid race conditions between IOs aborted 1185 * as part of task management completion and eh_abort 1186 * processing 1187 */ 1188 list_del_init(&io_req->link); 1189 io_req->on_active_queue = 0; 1190 /* Move IO req to retire queue */ 1191 list_add_tail(&io_req->link, &tgt->io_retire_queue); 1192 1193 init_completion(&io_req->tm_done); 1194 1195 if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { 1196 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " 1197 "already in abts processing\n", io_req->xid); 1198 if (cancel_delayed_work(&io_req->timeout_work)) 1199 kref_put(&io_req->refcount, 1200 bnx2fc_cmd_release); /* drop timer hold */ 1201 rc = bnx2fc_abts_cleanup(io_req); 1202 /* This only occurs when an task abort was requested while ABTS 1203 is in progress. 
Setting the IO_CLEANUP flag will skip the 1204 RRQ process in the case when the fw generated SCSI_CMD cmpl 1205 was a result from the ABTS request rather than the CLEANUP 1206 request */ 1207 set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); 1208 goto out; 1209 } 1210 1211 /* Cancel the current timer running on this io_req */ 1212 if (cancel_delayed_work(&io_req->timeout_work)) 1213 kref_put(&io_req->refcount, 1214 bnx2fc_cmd_release); /* drop timer hold */ 1215 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags); 1216 io_req->wait_for_comp = 1; 1217 rc = bnx2fc_initiate_abts(io_req); 1218 if (rc == FAILED) { 1219 bnx2fc_initiate_cleanup(io_req); 1220 spin_unlock_bh(&tgt->tgt_lock); 1221 wait_for_completion(&io_req->tm_done); 1222 spin_lock_bh(&tgt->tgt_lock); 1223 io_req->wait_for_comp = 0; 1224 goto done; 1225 } 1226 spin_unlock_bh(&tgt->tgt_lock); 1227 1228 wait_for_completion(&io_req->tm_done); 1229 1230 spin_lock_bh(&tgt->tgt_lock); 1231 io_req->wait_for_comp = 0; 1232 if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { 1233 BNX2FC_IO_DBG(io_req, "IO completed in a different context\n"); 1234 rc = SUCCESS; 1235 } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, 1236 &io_req->req_flags))) { 1237 /* Let the scsi-ml try to recover this command */ 1238 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", 1239 io_req->xid); 1240 rc = bnx2fc_abts_cleanup(io_req); 1241 goto out; 1242 } else { 1243 /* 1244 * We come here even when there was a race condition 1245 * between timeout and abts completion, and abts 1246 * completion happens just in time. 1247 */ 1248 BNX2FC_IO_DBG(io_req, "abort succeeded\n"); 1249 rc = SUCCESS; 1250 bnx2fc_scsi_done(io_req, DID_ABORT); 1251 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1252 } 1253 done: 1254 /* release the reference taken in eh_abort */ 1255 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1256 out: 1257 spin_unlock_bh(&tgt->tgt_lock); 1258 return rc; 1259 } 1260 1261 void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req, 1262 struct fcoe_task_ctx_entry *task, 1263 u8 rx_state) 1264 { 1265 struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg; 1266 struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req; 1267 u32 offset = cb_arg->offset; 1268 enum fc_rctl r_ctl = cb_arg->r_ctl; 1269 int rc = 0; 1270 struct bnx2fc_rport *tgt = orig_io_req->tgt; 1271 1272 BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x" 1273 "cmd_type = %d\n", 1274 seq_clnp_req->xid, seq_clnp_req->cmd_type); 1275 1276 if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) { 1277 printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n", 1278 seq_clnp_req->xid); 1279 goto free_cb_arg; 1280 } 1281 1282 spin_unlock_bh(&tgt->tgt_lock); 1283 rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl); 1284 spin_lock_bh(&tgt->tgt_lock); 1285 1286 if (rc) 1287 printk(KERN_ERR PFX "clnup_compl: Unable to send SRR" 1288 " IO will abort\n"); 1289 seq_clnp_req->cb_arg = NULL; 1290 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); 1291 free_cb_arg: 1292 kfree(cb_arg); 1293 return; 1294 } 1295 1296 void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, 1297 struct fcoe_task_ctx_entry *task, 1298 u8 num_rq) 1299 { 1300 BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl " 1301 "refcnt = %d, cmd_type = %d\n", 1302 io_req->refcount.refcount.counter, io_req->cmd_type); 1303 bnx2fc_scsi_done(io_req, DID_ERROR); 1304 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1305 if (io_req->wait_for_comp) 1306 complete(&io_req->tm_done); 1307 } 1308 1309 
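/*
 * bnx2fc_process_abts_compl - handle the completion CQE of an ABTS request.
 *
 * Rough outline of the flow below: if the timer context has already marked
 * the exchange done, or the IO was already cleaned up, only the completion
 * bookkeeping at the end runs. Otherwise, on a BA_ACC the request is flagged
 * for an RRQ and, in either case, the OX_ID is retired only after R_A_TOV by
 * re-arming the command timer:
 *
 *	set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);	(BA_ACC only)
 *	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
 *	bnx2fc_cmd_timer_set(io_req, r_a_tov);
 *
 * bnx2fc_cmd_timeout() then sends the RRQ and/or retires the exchange.
 */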
void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req, 1310 struct fcoe_task_ctx_entry *task, 1311 u8 num_rq) 1312 { 1313 u32 r_ctl; 1314 u32 r_a_tov = FC_DEF_R_A_TOV; 1315 u8 issue_rrq = 0; 1316 struct bnx2fc_rport *tgt = io_req->tgt; 1317 1318 BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x" 1319 "refcnt = %d, cmd_type = %d\n", 1320 io_req->xid, 1321 io_req->refcount.refcount.counter, io_req->cmd_type); 1322 1323 if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, 1324 &io_req->req_flags)) { 1325 BNX2FC_IO_DBG(io_req, "Timer context finished processing" 1326 " this io\n"); 1327 return; 1328 } 1329 1330 /* Do not issue RRQ as this IO is already cleaned up */ 1331 if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP, 1332 &io_req->req_flags)) 1333 goto io_compl; 1334 1335 /* 1336 * For an ABTS issued due to the SCSI eh_abort_handler, timeout 1337 * values are maintained by scsi-ml itself. Cancel the timeout 1338 * in case the ABTS was issued as part of a task management function 1339 * or due to a FW error. 1340 */ 1341 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) 1342 if (cancel_delayed_work(&io_req->timeout_work)) 1343 kref_put(&io_req->refcount, 1344 bnx2fc_cmd_release); /* drop timer hold */ 1345 1346 r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl; 1347 1348 switch (r_ctl) { 1349 case FC_RCTL_BA_ACC: 1350 /* 1351 * Don't release this cmd yet. It will be released 1352 * after we get the RRQ response 1353 */ 1354 BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n"); 1355 issue_rrq = 1; 1356 break; 1357 1358 case FC_RCTL_BA_RJT: 1359 BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n"); 1360 break; 1361 default: 1362 printk(KERN_ERR PFX "Unknown ABTS response\n"); 1363 break; 1364 } 1365 1366 if (issue_rrq) { 1367 BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n"); 1368 set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags); 1369 } 1370 set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags); 1371 bnx2fc_cmd_timer_set(io_req, r_a_tov); 1372 1373 io_compl: 1374 if (io_req->wait_for_comp) { 1375 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, 1376 &io_req->req_flags)) 1377 complete(&io_req->tm_done); 1378 } else { 1379 /* 1380 * We end up here when the ABTS is issued in 1381 * an asynchronous context, i.e., as part 1382 * of task management completion, or 1383 * when a FW error is received, or when the 1384 * ABTS is issued because the IO timed 1385 * out. 
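 * In that path the exchange is retired here: the request is taken off
 * the active queue, failed back to SCSI-ml with DID_ERROR, and a
 * reference on the request is dropped.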
1386 */ 1387 1388 if (io_req->on_active_queue) { 1389 list_del_init(&io_req->link); 1390 io_req->on_active_queue = 0; 1391 /* Move IO req to retire queue */ 1392 list_add_tail(&io_req->link, &tgt->io_retire_queue); 1393 } 1394 bnx2fc_scsi_done(io_req, DID_ERROR); 1395 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1396 } 1397 } 1398 1399 static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req) 1400 { 1401 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1402 struct bnx2fc_rport *tgt = io_req->tgt; 1403 struct bnx2fc_cmd *cmd, *tmp; 1404 u64 tm_lun = sc_cmd->device->lun; 1405 u64 lun; 1406 int rc = 0; 1407 1408 /* called with tgt_lock held */ 1409 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n"); 1410 /* 1411 * Walk thru the active_ios queue and ABORT the IO 1412 * that matches with the LUN that was reset 1413 */ 1414 list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) { 1415 BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n"); 1416 lun = cmd->sc_cmd->device->lun; 1417 if (lun == tm_lun) { 1418 /* Initiate ABTS on this cmd */ 1419 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, 1420 &cmd->req_flags)) { 1421 /* cancel the IO timeout */ 1422 if (cancel_delayed_work(&io_req->timeout_work)) 1423 kref_put(&io_req->refcount, 1424 bnx2fc_cmd_release); 1425 /* timer hold */ 1426 rc = bnx2fc_initiate_abts(cmd); 1427 /* abts shouldn't fail in this context */ 1428 WARN_ON(rc != SUCCESS); 1429 } else 1430 printk(KERN_ERR PFX "lun_rst: abts already in" 1431 " progress for this IO 0x%x\n", 1432 cmd->xid); 1433 } 1434 } 1435 } 1436 1437 static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req) 1438 { 1439 struct bnx2fc_rport *tgt = io_req->tgt; 1440 struct bnx2fc_cmd *cmd, *tmp; 1441 int rc = 0; 1442 1443 /* called with tgt_lock held */ 1444 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n"); 1445 /* 1446 * Walk thru the active_ios queue and ABORT the IO 1447 * that matches with the LUN that was reset 1448 */ 1449 list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) { 1450 BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n"); 1451 /* Initiate ABTS */ 1452 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, 1453 &cmd->req_flags)) { 1454 /* cancel the IO timeout */ 1455 if (cancel_delayed_work(&io_req->timeout_work)) 1456 kref_put(&io_req->refcount, 1457 bnx2fc_cmd_release); /* timer hold */ 1458 rc = bnx2fc_initiate_abts(cmd); 1459 /* abts shouldn't fail in this context */ 1460 WARN_ON(rc != SUCCESS); 1461 1462 } else 1463 printk(KERN_ERR PFX "tgt_rst: abts already in progress" 1464 " for this IO 0x%x\n", cmd->xid); 1465 } 1466 } 1467 1468 void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, 1469 struct fcoe_task_ctx_entry *task, u8 num_rq) 1470 { 1471 struct bnx2fc_mp_req *tm_req; 1472 struct fc_frame_header *fc_hdr; 1473 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1474 u64 *hdr; 1475 u64 *temp_hdr; 1476 void *rsp_buf; 1477 1478 /* Called with tgt_lock held */ 1479 BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n"); 1480 1481 if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags))) 1482 set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags); 1483 else { 1484 /* TM has already timed out and we got 1485 * delayed completion. Ignore completion 1486 * processing. 
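 * bnx2fc_initiate_tmf() has already timed this TMF out (it set
 * BNX2FC_FLAG_TM_TIMEOUT and kicked off a cleanup), so this delayed
 * completion is simply dropped.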
1487 */ 1488 return; 1489 } 1490 1491 tm_req = &(io_req->mp_req); 1492 fc_hdr = &(tm_req->resp_fc_hdr); 1493 hdr = (u64 *)fc_hdr; 1494 temp_hdr = (u64 *) 1495 &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; 1496 hdr[0] = cpu_to_be64(temp_hdr[0]); 1497 hdr[1] = cpu_to_be64(temp_hdr[1]); 1498 hdr[2] = cpu_to_be64(temp_hdr[2]); 1499 1500 tm_req->resp_len = 1501 task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; 1502 1503 rsp_buf = tm_req->resp_buf; 1504 1505 if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) { 1506 bnx2fc_parse_fcp_rsp(io_req, 1507 (struct fcoe_fcp_rsp_payload *) 1508 rsp_buf, num_rq); 1509 if (io_req->fcp_rsp_code == 0) { 1510 /* TM successful */ 1511 if (tm_req->tm_flags & FCP_TMF_LUN_RESET) 1512 bnx2fc_lun_reset_cmpl(io_req); 1513 else if (tm_req->tm_flags & FCP_TMF_TGT_RESET) 1514 bnx2fc_tgt_reset_cmpl(io_req); 1515 } 1516 } else { 1517 printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n", 1518 fc_hdr->fh_r_ctl); 1519 } 1520 if (!sc_cmd->SCp.ptr) { 1521 printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n"); 1522 return; 1523 } 1524 switch (io_req->fcp_status) { 1525 case FC_GOOD: 1526 if (io_req->cdb_status == 0) { 1527 /* Good IO completion */ 1528 sc_cmd->result = DID_OK << 16; 1529 } else { 1530 /* Transport status is good, SCSI status not good */ 1531 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; 1532 } 1533 if (io_req->fcp_resid) 1534 scsi_set_resid(sc_cmd, io_req->fcp_resid); 1535 break; 1536 1537 default: 1538 BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n", 1539 io_req->fcp_status); 1540 break; 1541 } 1542 1543 sc_cmd = io_req->sc_cmd; 1544 io_req->sc_cmd = NULL; 1545 1546 /* check if the io_req exists in tgt's tmf_q */ 1547 if (io_req->on_tmf_queue) { 1548 1549 list_del_init(&io_req->link); 1550 io_req->on_tmf_queue = 0; 1551 } else { 1552 1553 printk(KERN_ERR PFX "Command not on active_cmd_queue!\n"); 1554 return; 1555 } 1556 1557 sc_cmd->SCp.ptr = NULL; 1558 sc_cmd->scsi_done(sc_cmd); 1559 1560 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1561 if (io_req->wait_for_comp) { 1562 BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n"); 1563 complete(&io_req->tm_done); 1564 } 1565 } 1566 1567 static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, 1568 int bd_index) 1569 { 1570 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; 1571 int frag_size, sg_frags; 1572 1573 sg_frags = 0; 1574 while (sg_len) { 1575 if (sg_len >= BNX2FC_BD_SPLIT_SZ) 1576 frag_size = BNX2FC_BD_SPLIT_SZ; 1577 else 1578 frag_size = sg_len; 1579 bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff; 1580 bd[bd_index + sg_frags].buf_addr_hi = addr >> 32; 1581 bd[bd_index + sg_frags].buf_len = (u16)frag_size; 1582 bd[bd_index + sg_frags].flags = 0; 1583 1584 addr += (u64) frag_size; 1585 sg_frags++; 1586 sg_len -= frag_size; 1587 } 1588 return sg_frags; 1589 1590 } 1591 1592 static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req) 1593 { 1594 struct bnx2fc_interface *interface = io_req->port->priv; 1595 struct bnx2fc_hba *hba = interface->hba; 1596 struct scsi_cmnd *sc = io_req->sc_cmd; 1597 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; 1598 struct scatterlist *sg; 1599 int byte_count = 0; 1600 int sg_count = 0; 1601 int bd_count = 0; 1602 int sg_frags; 1603 unsigned int sg_len; 1604 u64 addr; 1605 int i; 1606 1607 /* 1608 * Use dma_map_sg directly to ensure we're using the correct 1609 * dev struct off of pcidev. 
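 * Each mapped segment becomes one BD, except that segments longer than
 * BNX2FC_MAX_BD_LEN are broken up by bnx2fc_split_bd() into
 * BNX2FC_BD_SPLIT_SZ-sized fragments, so a single scatterlist entry may
 * consume several BDs; byte_count is cross-checked against scsi_bufflen()
 * below. As an illustration only, with a 64 KB split size a 1 MB segment
 * would produce 16 BDs.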
1610 */ 1611 sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc), 1612 scsi_sg_count(sc), sc->sc_data_direction); 1613 scsi_for_each_sg(sc, sg, sg_count, i) { 1614 sg_len = sg_dma_len(sg); 1615 addr = sg_dma_address(sg); 1616 if (sg_len > BNX2FC_MAX_BD_LEN) { 1617 sg_frags = bnx2fc_split_bd(io_req, addr, sg_len, 1618 bd_count); 1619 } else { 1620 1621 sg_frags = 1; 1622 bd[bd_count].buf_addr_lo = addr & 0xffffffff; 1623 bd[bd_count].buf_addr_hi = addr >> 32; 1624 bd[bd_count].buf_len = (u16)sg_len; 1625 bd[bd_count].flags = 0; 1626 } 1627 bd_count += sg_frags; 1628 byte_count += sg_len; 1629 } 1630 if (byte_count != scsi_bufflen(sc)) 1631 printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, " 1632 "task_id = 0x%x\n", byte_count, scsi_bufflen(sc), 1633 io_req->xid); 1634 return bd_count; 1635 } 1636 1637 static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req) 1638 { 1639 struct scsi_cmnd *sc = io_req->sc_cmd; 1640 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; 1641 int bd_count; 1642 1643 if (scsi_sg_count(sc)) { 1644 bd_count = bnx2fc_map_sg(io_req); 1645 if (bd_count == 0) 1646 return -ENOMEM; 1647 } else { 1648 bd_count = 0; 1649 bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0; 1650 bd[0].buf_len = bd[0].flags = 0; 1651 } 1652 io_req->bd_tbl->bd_valid = bd_count; 1653 1654 return 0; 1655 } 1656 1657 static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req) 1658 { 1659 struct scsi_cmnd *sc = io_req->sc_cmd; 1660 struct bnx2fc_interface *interface = io_req->port->priv; 1661 struct bnx2fc_hba *hba = interface->hba; 1662 1663 /* 1664 * Use dma_unmap_sg directly to ensure we're using the correct 1665 * dev struct off of pcidev. 1666 */ 1667 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) { 1668 dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc), 1669 scsi_sg_count(sc), sc->sc_data_direction); 1670 io_req->bd_tbl->bd_valid = 0; 1671 } 1672 } 1673 1674 void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, 1675 struct fcp_cmnd *fcp_cmnd) 1676 { 1677 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1678 1679 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 1680 1681 int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun); 1682 1683 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); 1684 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); 1685 1686 fcp_cmnd->fc_cmdref = 0; 1687 fcp_cmnd->fc_pri_ta = 0; 1688 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; 1689 fcp_cmnd->fc_flags = io_req->io_req_flags; 1690 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; 1691 } 1692 1693 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, 1694 struct fcoe_fcp_rsp_payload *fcp_rsp, 1695 u8 num_rq) 1696 { 1697 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1698 struct bnx2fc_rport *tgt = io_req->tgt; 1699 u8 rsp_flags = fcp_rsp->fcp_flags.flags; 1700 u32 rq_buff_len = 0; 1701 int i; 1702 unsigned char *rq_data; 1703 unsigned char *dummy; 1704 int fcp_sns_len = 0; 1705 int fcp_rsp_len = 0; 1706 1707 io_req->fcp_status = FC_GOOD; 1708 io_req->fcp_resid = 0; 1709 if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER | 1710 FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER)) 1711 io_req->fcp_resid = fcp_rsp->fcp_resid; 1712 1713 io_req->scsi_comp_flags = rsp_flags; 1714 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status = 1715 fcp_rsp->scsi_status_code; 1716 1717 /* Fetch fcp_rsp_info and fcp_sns_info if available */ 1718 if (num_rq) { 1719 1720 /* 1721 * We do not anticipate num_rq >1, as the linux defined 1722 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO 1723 * 256 bytes of single rq buffer is good enough to hold 
this. 1724 */ 1725 1726 if (rsp_flags & 1727 FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) { 1728 fcp_rsp_len = rq_buff_len 1729 = fcp_rsp->fcp_rsp_len; 1730 } 1731 1732 if (rsp_flags & 1733 FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) { 1734 fcp_sns_len = fcp_rsp->fcp_sns_len; 1735 rq_buff_len += fcp_rsp->fcp_sns_len; 1736 } 1737 1738 io_req->fcp_rsp_len = fcp_rsp_len; 1739 io_req->fcp_sns_len = fcp_sns_len; 1740 1741 if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) { 1742 /* Invalid sense sense length. */ 1743 printk(KERN_ERR PFX "invalid sns length %d\n", 1744 rq_buff_len); 1745 /* reset rq_buff_len */ 1746 rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ; 1747 } 1748 1749 rq_data = bnx2fc_get_next_rqe(tgt, 1); 1750 1751 if (num_rq > 1) { 1752 /* We do not need extra sense data */ 1753 for (i = 1; i < num_rq; i++) 1754 dummy = bnx2fc_get_next_rqe(tgt, 1); 1755 } 1756 1757 /* fetch fcp_rsp_code */ 1758 if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { 1759 /* Only for task management function */ 1760 io_req->fcp_rsp_code = rq_data[3]; 1761 BNX2FC_IO_DBG(io_req, "fcp_rsp_code = %d\n", 1762 io_req->fcp_rsp_code); 1763 } 1764 1765 /* fetch sense data */ 1766 rq_data += fcp_rsp_len; 1767 1768 if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) { 1769 printk(KERN_ERR PFX "Truncating sense buffer\n"); 1770 fcp_sns_len = SCSI_SENSE_BUFFERSIZE; 1771 } 1772 1773 memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1774 if (fcp_sns_len) 1775 memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len); 1776 1777 /* return RQ entries */ 1778 for (i = 0; i < num_rq; i++) 1779 bnx2fc_return_rqe(tgt, 1); 1780 } 1781 } 1782 1783 /** 1784 * bnx2fc_queuecommand - Queuecommand function of the scsi template 1785 * 1786 * @host: The Scsi_Host the command was issued to 1787 * @sc_cmd: struct scsi_cmnd to be executed 1788 * 1789 * This is the IO strategy routine, called by SCSI-ML 1790 **/ 1791 int bnx2fc_queuecommand(struct Scsi_Host *host, 1792 struct scsi_cmnd *sc_cmd) 1793 { 1794 struct fc_lport *lport = shost_priv(host); 1795 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 1796 struct fc_rport_libfc_priv *rp = rport->dd_data; 1797 struct bnx2fc_rport *tgt; 1798 struct bnx2fc_cmd *io_req; 1799 int rc = 0; 1800 int rval; 1801 1802 rval = fc_remote_port_chkready(rport); 1803 if (rval) { 1804 sc_cmd->result = rval; 1805 sc_cmd->scsi_done(sc_cmd); 1806 return 0; 1807 } 1808 1809 if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { 1810 rc = SCSI_MLQUEUE_HOST_BUSY; 1811 goto exit_qcmd; 1812 } 1813 1814 /* rport and tgt are allocated together, so tgt should be non-NULL */ 1815 tgt = (struct bnx2fc_rport *)&rp[1]; 1816 1817 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { 1818 /* 1819 * Session is not offloaded yet. Let SCSI-ml retry 1820 * the command. 
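 * Returning SCSI_MLQUEUE_TARGET_BUSY makes the midlayer requeue the
 * command and retry it against this target later.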
1821 */ 1822 rc = SCSI_MLQUEUE_TARGET_BUSY; 1823 goto exit_qcmd; 1824 } 1825 if (tgt->retry_delay_timestamp) { 1826 if (time_after(jiffies, tgt->retry_delay_timestamp)) { 1827 tgt->retry_delay_timestamp = 0; 1828 } else { 1829 /* If retry_delay timer is active, flow off the ML */ 1830 rc = SCSI_MLQUEUE_TARGET_BUSY; 1831 goto exit_qcmd; 1832 } 1833 } 1834 1835 spin_lock_bh(&tgt->tgt_lock); 1836 1837 io_req = bnx2fc_cmd_alloc(tgt); 1838 if (!io_req) { 1839 rc = SCSI_MLQUEUE_HOST_BUSY; 1840 goto exit_qcmd_tgtlock; 1841 } 1842 io_req->sc_cmd = sc_cmd; 1843 1844 if (bnx2fc_post_io_req(tgt, io_req)) { 1845 printk(KERN_ERR PFX "Unable to post io_req\n"); 1846 rc = SCSI_MLQUEUE_HOST_BUSY; 1847 goto exit_qcmd_tgtlock; 1848 } 1849 1850 exit_qcmd_tgtlock: 1851 spin_unlock_bh(&tgt->tgt_lock); 1852 exit_qcmd: 1853 return rc; 1854 } 1855 1856 void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, 1857 struct fcoe_task_ctx_entry *task, 1858 u8 num_rq) 1859 { 1860 struct fcoe_fcp_rsp_payload *fcp_rsp; 1861 struct bnx2fc_rport *tgt = io_req->tgt; 1862 struct scsi_cmnd *sc_cmd; 1863 struct Scsi_Host *host; 1864 1865 1866 /* scsi_cmd_cmpl is called with tgt lock held */ 1867 1868 if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { 1869 /* we will not receive ABTS response for this IO */ 1870 BNX2FC_IO_DBG(io_req, "Timer context finished processing " 1871 "this scsi cmd\n"); 1872 } 1873 1874 /* Cancel the timeout_work, as we received IO completion */ 1875 if (cancel_delayed_work(&io_req->timeout_work)) 1876 kref_put(&io_req->refcount, 1877 bnx2fc_cmd_release); /* drop timer hold */ 1878 1879 sc_cmd = io_req->sc_cmd; 1880 if (sc_cmd == NULL) { 1881 printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n"); 1882 return; 1883 } 1884 1885 /* Fetch fcp_rsp from task context and perform cmd completion */ 1886 fcp_rsp = (struct fcoe_fcp_rsp_payload *) 1887 &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload); 1888 1889 /* parse fcp_rsp and obtain sense data from RQ if available */ 1890 bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq); 1891 1892 host = sc_cmd->device->host; 1893 if (!sc_cmd->SCp.ptr) { 1894 printk(KERN_ERR PFX "SCp.ptr is NULL\n"); 1895 return; 1896 } 1897 1898 if (io_req->on_active_queue) { 1899 list_del_init(&io_req->link); 1900 io_req->on_active_queue = 0; 1901 /* Move IO req to retire queue */ 1902 list_add_tail(&io_req->link, &tgt->io_retire_queue); 1903 } else { 1904 /* This should not happen, but could have been pulled 1905 * by bnx2fc_flush_active_ios(), or during a race 1906 * between command abort and (late) completion. 
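 * In that case the request has already been taken off the active queue;
 * if eh_abort is waiting on tm_done, it is woken below so the abort
 * path can finish.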
1907 */ 1908 BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n"); 1909 if (io_req->wait_for_comp) 1910 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, 1911 &io_req->req_flags)) 1912 complete(&io_req->tm_done); 1913 } 1914 1915 bnx2fc_unmap_sg_list(io_req); 1916 io_req->sc_cmd = NULL; 1917 1918 switch (io_req->fcp_status) { 1919 case FC_GOOD: 1920 if (io_req->cdb_status == 0) { 1921 /* Good IO completion */ 1922 sc_cmd->result = DID_OK << 16; 1923 } else { 1924 /* Transport status is good, SCSI status not good */ 1925 BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d" 1926 " fcp_resid = 0x%x\n", 1927 io_req->cdb_status, io_req->fcp_resid); 1928 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; 1929 1930 if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL || 1931 io_req->cdb_status == SAM_STAT_BUSY) { 1932 /* Set the jiffies + retry_delay_timer * 100ms 1933 for the rport/tgt */ 1934 tgt->retry_delay_timestamp = jiffies + 1935 fcp_rsp->retry_delay_timer * HZ / 10; 1936 } 1937 1938 } 1939 if (io_req->fcp_resid) 1940 scsi_set_resid(sc_cmd, io_req->fcp_resid); 1941 break; 1942 default: 1943 printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n", 1944 io_req->fcp_status); 1945 break; 1946 } 1947 sc_cmd->SCp.ptr = NULL; 1948 sc_cmd->scsi_done(sc_cmd); 1949 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1950 } 1951 1952 int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, 1953 struct bnx2fc_cmd *io_req) 1954 { 1955 struct fcoe_task_ctx_entry *task; 1956 struct fcoe_task_ctx_entry *task_page; 1957 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1958 struct fcoe_port *port = tgt->port; 1959 struct bnx2fc_interface *interface = port->priv; 1960 struct bnx2fc_hba *hba = interface->hba; 1961 struct fc_lport *lport = port->lport; 1962 struct fc_stats *stats; 1963 int task_idx, index; 1964 u16 xid; 1965 1966 /* bnx2fc_post_io_req() is called with the tgt_lock held */ 1967 1968 /* Initialize rest of io_req fields */ 1969 io_req->cmd_type = BNX2FC_SCSI_CMD; 1970 io_req->port = port; 1971 io_req->tgt = tgt; 1972 io_req->data_xfer_len = scsi_bufflen(sc_cmd); 1973 sc_cmd->SCp.ptr = (char *)io_req; 1974 1975 stats = per_cpu_ptr(lport->stats, get_cpu()); 1976 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { 1977 io_req->io_req_flags = BNX2FC_READ; 1978 stats->InputRequests++; 1979 stats->InputBytes += io_req->data_xfer_len; 1980 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { 1981 io_req->io_req_flags = BNX2FC_WRITE; 1982 stats->OutputRequests++; 1983 stats->OutputBytes += io_req->data_xfer_len; 1984 } else { 1985 io_req->io_req_flags = 0; 1986 stats->ControlRequests++; 1987 } 1988 put_cpu(); 1989 1990 xid = io_req->xid; 1991 1992 /* Build buffer descriptor list for firmware from sg list */ 1993 if (bnx2fc_build_bd_list_from_sg(io_req)) { 1994 printk(KERN_ERR PFX "BD list creation failed\n"); 1995 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1996 return -EAGAIN; 1997 } 1998 1999 task_idx = xid / BNX2FC_TASKS_PER_PAGE; 2000 index = xid % BNX2FC_TASKS_PER_PAGE; 2001 2002 /* Initialize task context for this IO request */ 2003 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 2004 task = &(task_page[index]); 2005 bnx2fc_init_task(io_req, task); 2006 2007 if (tgt->flush_in_prog) { 2008 printk(KERN_ERR PFX "Flush in progress..Host Busy\n"); 2009 kref_put(&io_req->refcount, bnx2fc_cmd_release); 2010 return -EAGAIN; 2011 } 2012 2013 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { 2014 printk(KERN_ERR PFX "Session not ready...post_io\n"); 2015 kref_put(&io_req->refcount, 
bnx2fc_cmd_release); 2016 return -EAGAIN; 2017 } 2018 2019 /* Time IO req */ 2020 if (tgt->io_timeout) 2021 bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT); 2022 /* Obtain free SQ entry */ 2023 bnx2fc_add_2_sq(tgt, xid); 2024 2025 /* Enqueue the io_req to active_cmd_queue */ 2026 2027 io_req->on_active_queue = 1; 2028 /* move io_req from pending_queue to active_queue */ 2029 list_add_tail(&io_req->link, &tgt->active_cmd_queue); 2030 2031 /* Ring doorbell */ 2032 bnx2fc_ring_doorbell(tgt); 2033 return 0; 2034 } 2035
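/*
 * IO submission fast path (bnx2fc_queuecommand -> bnx2fc_post_io_req),
 * as a rough outline only; error handling and the active_cmd_queue
 * bookkeeping above are omitted:
 *
 *	io_req = bnx2fc_cmd_alloc(tgt);			per-CPU free list
 *	bnx2fc_build_bd_list_from_sg(io_req);		map the SG list to BDs
 *	bnx2fc_init_task(io_req, task);			fill the task context
 *	bnx2fc_add_2_sq(tgt, xid);			post an SQ entry
 *	bnx2fc_ring_doorbell(tgt);			notify the firmware
 */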