/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * IO manager and SCSI IO processing.
 *
 * Copyright (c) 2008 - 2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

#define RESERVE_FREE_LIST_INDEX num_possible_cpus()

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
			      struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq);

void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
			  unsigned int timer_msec)
{
	struct bnx2fc_hba *hba = io_req->port->priv;

	if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		kref_get(&io_req->refcount);
}

static void bnx2fc_cmd_timeout(struct work_struct *work)
{
	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
						 timeout_work.work);
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u8 cmd_type = io_req->cmd_type;
	struct bnx2fc_rport *tgt = io_req->tgt;
	int logo_issued;
	int rc;

	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
		      "req_flags = %lx\n", cmd_type, io_req->req_flags);

	spin_lock_bh(&tgt->tgt_lock);
	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
		/*
		 * Ideally we should hold the io_req until RRQ completes,
		 * and release io_req from timeout hold.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		bnx2fc_send_rrq(io_req);
		return;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
		goto done;
	}

	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags)) {
			/* Handle eh_abort timeout */
			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
			complete(&io_req->tm_done);
		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
				    &io_req->req_flags)) {
			/* Handle internally generated ABTS timeout */
			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
				      io_req->refcount.refcount.counter);
			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					       &io_req->req_flags))) {

				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						"logo - tgt flags = 0x%lx\n",
						tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/* Handle IO timeout */
			BNX2FC_IO_DBG(io_req, "IO timed out. 
issue ABTS\n"); 107 if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, 108 &io_req->req_flags)) { 109 BNX2FC_IO_DBG(io_req, "IO completed before " 110 " timer expiry\n"); 111 goto done; 112 } 113 114 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, 115 &io_req->req_flags)) { 116 rc = bnx2fc_initiate_abts(io_req); 117 if (rc == SUCCESS) 118 goto done; 119 /* 120 * Explicitly logo the target if 121 * abts initiation fails 122 */ 123 lport = io_req->port->lport; 124 rdata = io_req->tgt->rdata; 125 logo_issued = test_and_set_bit( 126 BNX2FC_FLAG_EXPL_LOGO, 127 &tgt->flags); 128 kref_put(&io_req->refcount, bnx2fc_cmd_release); 129 spin_unlock_bh(&tgt->tgt_lock); 130 131 if (!logo_issued) { 132 BNX2FC_IO_DBG(io_req, "Explicit " 133 "logo - tgt flags = 0x%lx\n", 134 tgt->flags); 135 136 137 mutex_lock(&lport->disc.disc_mutex); 138 lport->tt.rport_logoff(rdata); 139 mutex_unlock(&lport->disc.disc_mutex); 140 } 141 return; 142 } else { 143 BNX2FC_IO_DBG(io_req, "IO already in " 144 "ABTS processing\n"); 145 } 146 } 147 break; 148 case BNX2FC_ELS: 149 150 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { 151 BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n"); 152 153 if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, 154 &io_req->req_flags)) { 155 lport = io_req->port->lport; 156 rdata = io_req->tgt->rdata; 157 logo_issued = test_and_set_bit( 158 BNX2FC_FLAG_EXPL_LOGO, 159 &tgt->flags); 160 kref_put(&io_req->refcount, bnx2fc_cmd_release); 161 spin_unlock_bh(&tgt->tgt_lock); 162 163 /* Explicitly logo the target */ 164 if (!logo_issued) { 165 BNX2FC_IO_DBG(io_req, "Explicitly logo" 166 "(els)\n"); 167 mutex_lock(&lport->disc.disc_mutex); 168 lport->tt.rport_logoff(rdata); 169 mutex_unlock(&lport->disc.disc_mutex); 170 } 171 return; 172 } 173 } else { 174 /* 175 * Handle ELS timeout. 176 * tgt_lock is used to sync compl path and timeout 177 * path. If els compl path is processing this IO, we 178 * have nothing to do here, just release the timer hold 179 */ 180 BNX2FC_IO_DBG(io_req, "ELS timed out\n"); 181 if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE, 182 &io_req->req_flags)) 183 goto done; 184 185 /* Indicate the cb_func that this ELS is timed out */ 186 set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags); 187 188 if ((io_req->cb_func) && (io_req->cb_arg)) { 189 io_req->cb_func(io_req->cb_arg); 190 io_req->cb_arg = NULL; 191 } 192 } 193 break; 194 default: 195 printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n", 196 cmd_type); 197 break; 198 } 199 200 done: 201 /* release the cmd that was held when timer was set */ 202 kref_put(&io_req->refcount, bnx2fc_cmd_release); 203 spin_unlock_bh(&tgt->tgt_lock); 204 } 205 206 static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code) 207 { 208 /* Called with host lock held */ 209 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 210 211 /* 212 * active_cmd_queue may have other command types as well, 213 * and during flush operation, we want to error back only 214 * scsi commands. 215 */ 216 if (io_req->cmd_type != BNX2FC_SCSI_CMD) 217 return; 218 219 BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code); 220 bnx2fc_unmap_sg_list(io_req); 221 io_req->sc_cmd = NULL; 222 if (!sc_cmd) { 223 printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. 
" 224 "IO(0x%x) already cleaned up\n", 225 io_req->xid); 226 return; 227 } 228 sc_cmd->result = err_code << 16; 229 230 BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n", 231 sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries, 232 sc_cmd->allowed); 233 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); 234 sc_cmd->SCp.ptr = NULL; 235 sc_cmd->scsi_done(sc_cmd); 236 } 237 238 struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, 239 u16 min_xid, u16 max_xid) 240 { 241 struct bnx2fc_cmd_mgr *cmgr; 242 struct io_bdt *bdt_info; 243 struct bnx2fc_cmd *io_req; 244 size_t len; 245 u32 mem_size; 246 u16 xid; 247 int i; 248 int num_ios, num_pri_ios; 249 size_t bd_tbl_sz; 250 int arr_sz = num_possible_cpus() + 1; 251 252 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { 253 printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ 254 and max_xid 0x%x\n", min_xid, max_xid); 255 return NULL; 256 } 257 BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid); 258 259 num_ios = max_xid - min_xid + 1; 260 len = (num_ios * (sizeof(struct bnx2fc_cmd *))); 261 len += sizeof(struct bnx2fc_cmd_mgr); 262 263 cmgr = kzalloc(len, GFP_KERNEL); 264 if (!cmgr) { 265 printk(KERN_ERR PFX "failed to alloc cmgr\n"); 266 return NULL; 267 } 268 269 cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * 270 arr_sz, GFP_KERNEL); 271 if (!cmgr->free_list) { 272 printk(KERN_ERR PFX "failed to alloc free_list\n"); 273 goto mem_err; 274 } 275 276 cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * 277 arr_sz, GFP_KERNEL); 278 if (!cmgr->free_list_lock) { 279 printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); 280 goto mem_err; 281 } 282 283 cmgr->hba = hba; 284 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); 285 286 for (i = 0; i < arr_sz; i++) { 287 INIT_LIST_HEAD(&cmgr->free_list[i]); 288 spin_lock_init(&cmgr->free_list_lock[i]); 289 } 290 291 /* 292 * Pre-allocated pool of bnx2fc_cmds. 293 * Last entry in the free list array is the free list 294 * of slow path requests. 
295 */ 296 xid = BNX2FC_MIN_XID; 297 num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS; 298 for (i = 0; i < num_ios; i++) { 299 io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); 300 301 if (!io_req) { 302 printk(KERN_ERR PFX "failed to alloc io_req\n"); 303 goto mem_err; 304 } 305 306 INIT_LIST_HEAD(&io_req->link); 307 INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); 308 309 io_req->xid = xid++; 310 if (i < num_pri_ios) 311 list_add_tail(&io_req->link, 312 &cmgr->free_list[io_req->xid % 313 num_possible_cpus()]); 314 else 315 list_add_tail(&io_req->link, 316 &cmgr->free_list[num_possible_cpus()]); 317 io_req++; 318 } 319 320 /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ 321 mem_size = num_ios * sizeof(struct io_bdt *); 322 cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL); 323 if (!cmgr->io_bdt_pool) { 324 printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); 325 goto mem_err; 326 } 327 328 mem_size = sizeof(struct io_bdt); 329 for (i = 0; i < num_ios; i++) { 330 cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL); 331 if (!cmgr->io_bdt_pool[i]) { 332 printk(KERN_ERR PFX "failed to alloc " 333 "io_bdt_pool[%d]\n", i); 334 goto mem_err; 335 } 336 } 337 338 /* Allocate an map fcoe_bdt_ctx structures */ 339 bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx); 340 for (i = 0; i < num_ios; i++) { 341 bdt_info = cmgr->io_bdt_pool[i]; 342 bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, 343 bd_tbl_sz, 344 &bdt_info->bd_tbl_dma, 345 GFP_KERNEL); 346 if (!bdt_info->bd_tbl) { 347 printk(KERN_ERR PFX "failed to alloc " 348 "bdt_tbl[%d]\n", i); 349 goto mem_err; 350 } 351 } 352 353 return cmgr; 354 355 mem_err: 356 bnx2fc_cmd_mgr_free(cmgr); 357 return NULL; 358 } 359 360 void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr) 361 { 362 struct io_bdt *bdt_info; 363 struct bnx2fc_hba *hba = cmgr->hba; 364 size_t bd_tbl_sz; 365 u16 min_xid = BNX2FC_MIN_XID; 366 u16 max_xid = BNX2FC_MAX_XID; 367 int num_ios; 368 int i; 369 370 num_ios = max_xid - min_xid + 1; 371 372 /* Free fcoe_bdt_ctx structures */ 373 if (!cmgr->io_bdt_pool) 374 goto free_cmd_pool; 375 376 bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx); 377 for (i = 0; i < num_ios; i++) { 378 bdt_info = cmgr->io_bdt_pool[i]; 379 if (bdt_info->bd_tbl) { 380 dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz, 381 bdt_info->bd_tbl, 382 bdt_info->bd_tbl_dma); 383 bdt_info->bd_tbl = NULL; 384 } 385 } 386 387 /* Destroy io_bdt pool */ 388 for (i = 0; i < num_ios; i++) { 389 kfree(cmgr->io_bdt_pool[i]); 390 cmgr->io_bdt_pool[i] = NULL; 391 } 392 393 kfree(cmgr->io_bdt_pool); 394 cmgr->io_bdt_pool = NULL; 395 396 free_cmd_pool: 397 kfree(cmgr->free_list_lock); 398 399 /* Destroy cmd pool */ 400 if (!cmgr->free_list) 401 goto free_cmgr; 402 403 for (i = 0; i < num_possible_cpus() + 1; i++) { 404 struct list_head *list; 405 struct list_head *tmp; 406 407 list_for_each_safe(list, tmp, &cmgr->free_list[i]) { 408 struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list; 409 list_del(&io_req->link); 410 kfree(io_req); 411 } 412 } 413 kfree(cmgr->free_list); 414 free_cmgr: 415 /* Free command manager itself */ 416 kfree(cmgr); 417 } 418 419 struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) 420 { 421 struct fcoe_port *port = tgt->port; 422 struct bnx2fc_hba *hba = port->priv; 423 struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; 424 struct bnx2fc_cmd *io_req; 425 struct list_head *listp; 426 struct io_bdt *bd_tbl; 427 int index = RESERVE_FREE_LIST_INDEX; 428 u32 free_sqes; 429 u32 max_sqes; 430 u16 xid; 
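	/*
	 * ELS and TM requests are carved out of the last BNX2FC_ELSTM_XIDS
	 * xids and are served from the reserved free list (the last entry
	 * of the free_list array, RESERVE_FREE_LIST_INDEX).  Regular SCSI
	 * commands are allocated from the per-CPU lists indexed by
	 * xid % num_possible_cpus() in bnx2fc_cmd_alloc() below.
	 */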
431 432 max_sqes = tgt->max_sqes; 433 switch (type) { 434 case BNX2FC_TASK_MGMT_CMD: 435 max_sqes = BNX2FC_TM_MAX_SQES; 436 break; 437 case BNX2FC_ELS: 438 max_sqes = BNX2FC_ELS_MAX_SQES; 439 break; 440 default: 441 break; 442 } 443 444 /* 445 * NOTE: Free list insertions and deletions are protected with 446 * cmgr lock 447 */ 448 spin_lock_bh(&cmd_mgr->free_list_lock[index]); 449 free_sqes = atomic_read(&tgt->free_sqes); 450 if ((list_empty(&(cmd_mgr->free_list[index]))) || 451 (tgt->num_active_ios.counter >= max_sqes) || 452 (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { 453 BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " 454 "ios(%d):sqes(%d)\n", 455 tgt->num_active_ios.counter, tgt->max_sqes); 456 if (list_empty(&(cmd_mgr->free_list[index]))) 457 printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); 458 spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 459 return NULL; 460 } 461 462 listp = (struct list_head *) 463 cmd_mgr->free_list[index].next; 464 list_del_init(listp); 465 io_req = (struct bnx2fc_cmd *) listp; 466 xid = io_req->xid; 467 cmd_mgr->cmds[xid] = io_req; 468 atomic_inc(&tgt->num_active_ios); 469 atomic_dec(&tgt->free_sqes); 470 spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 471 472 INIT_LIST_HEAD(&io_req->link); 473 474 io_req->port = port; 475 io_req->cmd_mgr = cmd_mgr; 476 io_req->req_flags = 0; 477 io_req->cmd_type = type; 478 479 /* Bind io_bdt for this io_req */ 480 /* Have a static link between io_req and io_bdt_pool */ 481 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; 482 bd_tbl->io_req = io_req; 483 484 /* Hold the io_req against deletion */ 485 kref_init(&io_req->refcount); 486 return io_req; 487 } 488 static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) 489 { 490 struct fcoe_port *port = tgt->port; 491 struct bnx2fc_hba *hba = port->priv; 492 struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; 493 struct bnx2fc_cmd *io_req; 494 struct list_head *listp; 495 struct io_bdt *bd_tbl; 496 u32 free_sqes; 497 u32 max_sqes; 498 u16 xid; 499 int index = get_cpu(); 500 501 max_sqes = BNX2FC_SCSI_MAX_SQES; 502 /* 503 * NOTE: Free list insertions and deletions are protected with 504 * cmgr lock 505 */ 506 spin_lock_bh(&cmd_mgr->free_list_lock[index]); 507 free_sqes = atomic_read(&tgt->free_sqes); 508 if ((list_empty(&cmd_mgr->free_list[index])) || 509 (tgt->num_active_ios.counter >= max_sqes) || 510 (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { 511 spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 512 put_cpu(); 513 return NULL; 514 } 515 516 listp = (struct list_head *) 517 cmd_mgr->free_list[index].next; 518 list_del_init(listp); 519 io_req = (struct bnx2fc_cmd *) listp; 520 xid = io_req->xid; 521 cmd_mgr->cmds[xid] = io_req; 522 atomic_inc(&tgt->num_active_ios); 523 atomic_dec(&tgt->free_sqes); 524 spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 525 put_cpu(); 526 527 INIT_LIST_HEAD(&io_req->link); 528 529 io_req->port = port; 530 io_req->cmd_mgr = cmd_mgr; 531 io_req->req_flags = 0; 532 533 /* Bind io_bdt for this io_req */ 534 /* Have a static link between io_req and io_bdt_pool */ 535 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; 536 bd_tbl->io_req = io_req; 537 538 /* Hold the io_req against deletion */ 539 kref_init(&io_req->refcount); 540 return io_req; 541 } 542 543 void bnx2fc_cmd_release(struct kref *ref) 544 { 545 struct bnx2fc_cmd *io_req = container_of(ref, 546 struct bnx2fc_cmd, refcount); 547 struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; 548 int index; 549 550 if (io_req->cmd_type == BNX2FC_SCSI_CMD) 551 index = io_req->xid 
% num_possible_cpus();
	else
		index = RESERVE_FREE_LIST_INDEX;


	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		bnx2fc_free_mp_resc(io_req);
	cmd_mgr->cmds[io_req->xid] = NULL;
	/* Delete IO from retire queue */
	list_del_init(&io_req->link);
	/* Add it to the free list */
	list_add(&io_req->link,
		 &cmd_mgr->free_list[index]);
	atomic_dec(&io_req->tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

}

static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_hba *hba = io_req->port->priv;
	size_t sz = sizeof(struct fcoe_bd_ctx);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_req_bd,
				  mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_resp_bd,
				  mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->req_buf,
				  mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->resp_buf,
				  mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req;
	struct fcoe_bd_ctx *mp_req_bd;
	struct fcoe_bd_ctx *mp_resp_bd;
	struct bnx2fc_hba *hba = io_req->port->priv;
	dma_addr_t addr;
	size_t sz;

	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));

	mp_req->req_len = sizeof(struct fcp_cmnd);
	io_req->data_xfer_len = mp_req->req_len;
	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					     &mp_req->req_buf_dma,
					     GFP_ATOMIC);
	if (!mp_req->req_buf) {
		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					      &mp_req->resp_buf_dma,
					      GFP_ATOMIC);
	if (!mp_req->resp_buf) {
		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	memset(mp_req->req_buf, 0, PAGE_SIZE);
	memset(mp_req->resp_buf, 0, PAGE_SIZE);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_bd_ctx);
	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
					       &mp_req->mp_req_bd_dma,
					       GFP_ATOMIC);
	if (!mp_req->mp_req_bd) {
		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						&mp_req->mp_resp_bd_dma,
						GFP_ATOMIC);
	if (!mp_req->mp_resp_bd) {
		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_req_bd->buf_len = PAGE_SIZE;
	mp_req_bd->flags = 0;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
666 * So the assumption is that it consumes a single bd 667 * entry in the bd table 668 */ 669 mp_resp_bd = mp_req->mp_resp_bd; 670 addr = mp_req->resp_buf_dma; 671 mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; 672 mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); 673 mp_resp_bd->buf_len = PAGE_SIZE; 674 mp_resp_bd->flags = 0; 675 676 return SUCCESS; 677 } 678 679 static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) 680 { 681 struct fc_lport *lport; 682 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 683 struct fc_rport_libfc_priv *rp = rport->dd_data; 684 struct fcoe_port *port; 685 struct bnx2fc_hba *hba; 686 struct bnx2fc_rport *tgt; 687 struct bnx2fc_cmd *io_req; 688 struct bnx2fc_mp_req *tm_req; 689 struct fcoe_task_ctx_entry *task; 690 struct fcoe_task_ctx_entry *task_page; 691 struct Scsi_Host *host = sc_cmd->device->host; 692 struct fc_frame_header *fc_hdr; 693 struct fcp_cmnd *fcp_cmnd; 694 int task_idx, index; 695 int rc = SUCCESS; 696 u16 xid; 697 u32 sid, did; 698 unsigned long start = jiffies; 699 700 lport = shost_priv(host); 701 port = lport_priv(lport); 702 hba = port->priv; 703 704 if (rport == NULL) { 705 printk(KERN_ALERT PFX "device_reset: rport is NULL\n"); 706 rc = FAILED; 707 goto tmf_err; 708 } 709 710 rc = fc_block_scsi_eh(sc_cmd); 711 if (rc) 712 return rc; 713 714 if (lport->state != LPORT_ST_READY || !(lport->link_up)) { 715 printk(KERN_ERR PFX "device_reset: link is not ready\n"); 716 rc = FAILED; 717 goto tmf_err; 718 } 719 /* rport and tgt are allocated together, so tgt should be non-NULL */ 720 tgt = (struct bnx2fc_rport *)&rp[1]; 721 722 if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { 723 printk(KERN_ERR PFX "device_reset: tgt not offloaded\n"); 724 rc = FAILED; 725 goto tmf_err; 726 } 727 retry_tmf: 728 io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD); 729 if (!io_req) { 730 if (time_after(jiffies, start + HZ)) { 731 printk(KERN_ERR PFX "tmf: Failed TMF"); 732 rc = FAILED; 733 goto tmf_err; 734 } 735 msleep(20); 736 goto retry_tmf; 737 } 738 /* Initialize rest of io_req fields */ 739 io_req->sc_cmd = sc_cmd; 740 io_req->port = port; 741 io_req->tgt = tgt; 742 743 tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req); 744 745 rc = bnx2fc_init_mp_req(io_req); 746 if (rc == FAILED) { 747 printk(KERN_ERR PFX "Task mgmt MP request init failed\n"); 748 kref_put(&io_req->refcount, bnx2fc_cmd_release); 749 goto tmf_err; 750 } 751 752 /* Set TM flags */ 753 io_req->io_req_flags = 0; 754 tm_req->tm_flags = tm_flags; 755 756 /* Fill FCP_CMND */ 757 bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf); 758 fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf; 759 memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len); 760 fcp_cmnd->fc_dl = 0; 761 762 /* Fill FC header */ 763 fc_hdr = &(tm_req->req_fc_hdr); 764 sid = tgt->sid; 765 did = rport->port_id; 766 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid, 767 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | 768 FC_FC_SEQ_INIT, 0); 769 /* Obtain exchange id */ 770 xid = io_req->xid; 771 772 BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid); 773 task_idx = xid/BNX2FC_TASKS_PER_PAGE; 774 index = xid % BNX2FC_TASKS_PER_PAGE; 775 776 /* Initialize task context for this IO request */ 777 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 778 task = &(task_page[index]); 779 bnx2fc_init_mp_task(io_req, task); 780 781 sc_cmd->SCp.ptr = (char *)io_req; 782 783 /* Obtain free SQ entry */ 784 spin_lock_bh(&tgt->tgt_lock); 785 bnx2fc_add_2_sq(tgt, 
xid); 786 787 /* Enqueue the io_req to active_tm_queue */ 788 io_req->on_tmf_queue = 1; 789 list_add_tail(&io_req->link, &tgt->active_tm_queue); 790 791 init_completion(&io_req->tm_done); 792 io_req->wait_for_comp = 1; 793 794 /* Ring doorbell */ 795 bnx2fc_ring_doorbell(tgt); 796 spin_unlock_bh(&tgt->tgt_lock); 797 798 rc = wait_for_completion_timeout(&io_req->tm_done, 799 BNX2FC_TM_TIMEOUT * HZ); 800 spin_lock_bh(&tgt->tgt_lock); 801 802 io_req->wait_for_comp = 0; 803 if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) 804 set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags); 805 806 spin_unlock_bh(&tgt->tgt_lock); 807 808 if (!rc) { 809 printk(KERN_ERR PFX "task mgmt command failed...\n"); 810 rc = FAILED; 811 } else { 812 printk(KERN_ERR PFX "task mgmt command success...\n"); 813 rc = SUCCESS; 814 } 815 tmf_err: 816 return rc; 817 } 818 819 int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req) 820 { 821 struct fc_lport *lport; 822 struct bnx2fc_rport *tgt = io_req->tgt; 823 struct fc_rport *rport = tgt->rport; 824 struct fc_rport_priv *rdata = tgt->rdata; 825 struct bnx2fc_hba *hba; 826 struct fcoe_port *port; 827 struct bnx2fc_cmd *abts_io_req; 828 struct fcoe_task_ctx_entry *task; 829 struct fcoe_task_ctx_entry *task_page; 830 struct fc_frame_header *fc_hdr; 831 struct bnx2fc_mp_req *abts_req; 832 int task_idx, index; 833 u32 sid, did; 834 u16 xid; 835 int rc = SUCCESS; 836 u32 r_a_tov = rdata->r_a_tov; 837 838 /* called with tgt_lock held */ 839 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n"); 840 841 port = io_req->port; 842 hba = port->priv; 843 lport = port->lport; 844 845 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { 846 printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n"); 847 rc = FAILED; 848 goto abts_err; 849 } 850 851 if (rport == NULL) { 852 printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n"); 853 rc = FAILED; 854 goto abts_err; 855 } 856 857 if (lport->state != LPORT_ST_READY || !(lport->link_up)) { 858 printk(KERN_ERR PFX "initiate_abts: link is not ready\n"); 859 rc = FAILED; 860 goto abts_err; 861 } 862 863 abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS); 864 if (!abts_io_req) { 865 printk(KERN_ERR PFX "abts: couldnt allocate cmd\n"); 866 rc = FAILED; 867 goto abts_err; 868 } 869 870 /* Initialize rest of io_req fields */ 871 abts_io_req->sc_cmd = NULL; 872 abts_io_req->port = port; 873 abts_io_req->tgt = tgt; 874 abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */ 875 876 abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req); 877 memset(abts_req, 0, sizeof(struct bnx2fc_mp_req)); 878 879 /* Fill FC header */ 880 fc_hdr = &(abts_req->req_fc_hdr); 881 882 /* Obtain oxid and rxid for the original exchange to be aborted */ 883 fc_hdr->fh_ox_id = htons(io_req->xid); 884 fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id); 885 886 sid = tgt->sid; 887 did = rport->port_id; 888 889 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid, 890 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | 891 FC_FC_SEQ_INIT, 0); 892 893 xid = abts_io_req->xid; 894 BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n"); 895 task_idx = xid/BNX2FC_TASKS_PER_PAGE; 896 index = xid % BNX2FC_TASKS_PER_PAGE; 897 898 /* Initialize task context for this IO request */ 899 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 900 task = &(task_page[index]); 901 bnx2fc_init_mp_task(abts_io_req, task); 902 903 /* 904 * ABTS task is a temporary task that will be cleaned up 905 * irrespective of ABTS response. 
We need to start the timer 906 * for the original exchange, as the CQE is posted for the original 907 * IO request. 908 * 909 * Timer for ABTS is started only when it is originated by a 910 * TM request. For the ABTS issued as part of ULP timeout, 911 * scsi-ml maintains the timers. 912 */ 913 914 /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/ 915 bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov); 916 917 /* Obtain free SQ entry */ 918 bnx2fc_add_2_sq(tgt, xid); 919 920 /* Ring doorbell */ 921 bnx2fc_ring_doorbell(tgt); 922 923 abts_err: 924 return rc; 925 } 926 927 int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) 928 { 929 struct fc_lport *lport; 930 struct bnx2fc_rport *tgt = io_req->tgt; 931 struct bnx2fc_hba *hba; 932 struct fcoe_port *port; 933 struct bnx2fc_cmd *cleanup_io_req; 934 struct fcoe_task_ctx_entry *task; 935 struct fcoe_task_ctx_entry *task_page; 936 int task_idx, index; 937 u16 xid, orig_xid; 938 int rc = 0; 939 940 /* ASSUMPTION: called with tgt_lock held */ 941 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n"); 942 943 port = io_req->port; 944 hba = port->priv; 945 lport = port->lport; 946 947 cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP); 948 if (!cleanup_io_req) { 949 printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n"); 950 rc = -1; 951 goto cleanup_err; 952 } 953 954 /* Initialize rest of io_req fields */ 955 cleanup_io_req->sc_cmd = NULL; 956 cleanup_io_req->port = port; 957 cleanup_io_req->tgt = tgt; 958 cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */ 959 960 xid = cleanup_io_req->xid; 961 962 task_idx = xid/BNX2FC_TASKS_PER_PAGE; 963 index = xid % BNX2FC_TASKS_PER_PAGE; 964 965 /* Initialize task context for this IO request */ 966 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 967 task = &(task_page[index]); 968 orig_xid = io_req->xid; 969 970 BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid); 971 972 bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid); 973 974 /* Obtain free SQ entry */ 975 bnx2fc_add_2_sq(tgt, xid); 976 977 /* Ring doorbell */ 978 bnx2fc_ring_doorbell(tgt); 979 980 cleanup_err: 981 return rc; 982 } 983 984 /** 985 * bnx2fc_eh_target_reset: Reset a target 986 * 987 * @sc_cmd: SCSI command 988 * 989 * Set from SCSI host template to send task mgmt command to the target 990 * and wait for the response 991 */ 992 int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd) 993 { 994 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET); 995 } 996 997 /** 998 * bnx2fc_eh_device_reset - Reset a single LUN 999 * 1000 * @sc_cmd: SCSI command 1001 * 1002 * Set from SCSI host template to send task mgmt command to the target 1003 * and wait for the response 1004 */ 1005 int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd) 1006 { 1007 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); 1008 } 1009 1010 /** 1011 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding 1012 * SCSI command 1013 * 1014 * @sc_cmd: SCSI_ML command pointer 1015 * 1016 * SCSI abort request handler 1017 */ 1018 int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) 1019 { 1020 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 1021 struct fc_rport_libfc_priv *rp = rport->dd_data; 1022 struct bnx2fc_cmd *io_req; 1023 struct fc_lport *lport; 1024 struct bnx2fc_rport *tgt; 1025 int rc = FAILED; 1026 1027 1028 rc = fc_block_scsi_eh(sc_cmd); 1029 if (rc) 1030 return rc; 1031 1032 lport = shost_priv(sc_cmd->device->host); 1033 if ((lport->state != LPORT_ST_READY) || 
!(lport->link_up)) { 1034 printk(KERN_ALERT PFX "eh_abort: link not ready\n"); 1035 return rc; 1036 } 1037 1038 tgt = (struct bnx2fc_rport *)&rp[1]; 1039 1040 BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n"); 1041 1042 spin_lock_bh(&tgt->tgt_lock); 1043 io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr; 1044 if (!io_req) { 1045 /* Command might have just completed */ 1046 printk(KERN_ERR PFX "eh_abort: io_req is NULL\n"); 1047 spin_unlock_bh(&tgt->tgt_lock); 1048 return SUCCESS; 1049 } 1050 BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n", 1051 io_req->refcount.refcount.counter); 1052 1053 /* Hold IO request across abort processing */ 1054 kref_get(&io_req->refcount); 1055 1056 BUG_ON(tgt != io_req->tgt); 1057 1058 /* Remove the io_req from the active_q. */ 1059 /* 1060 * Task Mgmt functions (LUN RESET & TGT RESET) will not 1061 * issue an ABTS on this particular IO req, as the 1062 * io_req is no longer in the active_q. 1063 */ 1064 if (tgt->flush_in_prog) { 1065 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " 1066 "flush in progress\n", io_req->xid); 1067 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1068 spin_unlock_bh(&tgt->tgt_lock); 1069 return SUCCESS; 1070 } 1071 1072 if (io_req->on_active_queue == 0) { 1073 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " 1074 "not on active_q\n", io_req->xid); 1075 /* 1076 * This condition can happen only due to the FW bug, 1077 * where we do not receive cleanup response from 1078 * the FW. Handle this case gracefully by erroring 1079 * back the IO request to SCSI-ml 1080 */ 1081 bnx2fc_scsi_done(io_req, DID_ABORT); 1082 1083 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1084 spin_unlock_bh(&tgt->tgt_lock); 1085 return SUCCESS; 1086 } 1087 1088 /* 1089 * Only eh_abort processing will remove the IO from 1090 * active_cmd_q before processing the request. this is 1091 * done to avoid race conditions between IOs aborted 1092 * as part of task management completion and eh_abort 1093 * processing 1094 */ 1095 list_del_init(&io_req->link); 1096 io_req->on_active_queue = 0; 1097 /* Move IO req to retire queue */ 1098 list_add_tail(&io_req->link, &tgt->io_retire_queue); 1099 1100 init_completion(&io_req->tm_done); 1101 io_req->wait_for_comp = 1; 1102 1103 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { 1104 /* Cancel the current timer running on this io_req */ 1105 if (cancel_delayed_work(&io_req->timeout_work)) 1106 kref_put(&io_req->refcount, 1107 bnx2fc_cmd_release); /* drop timer hold */ 1108 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags); 1109 rc = bnx2fc_initiate_abts(io_req); 1110 } else { 1111 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " 1112 "already in abts processing\n", io_req->xid); 1113 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1114 spin_unlock_bh(&tgt->tgt_lock); 1115 return SUCCESS; 1116 } 1117 if (rc == FAILED) { 1118 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1119 spin_unlock_bh(&tgt->tgt_lock); 1120 return rc; 1121 } 1122 spin_unlock_bh(&tgt->tgt_lock); 1123 1124 wait_for_completion(&io_req->tm_done); 1125 1126 spin_lock_bh(&tgt->tgt_lock); 1127 io_req->wait_for_comp = 0; 1128 if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, 1129 &io_req->req_flags))) { 1130 /* Let the scsi-ml try to recover this command */ 1131 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", 1132 io_req->xid); 1133 rc = FAILED; 1134 } else { 1135 /* 1136 * We come here even when there was a race condition 1137 * between timeout and abts completion, and abts 1138 * completion happens just in time. 
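		 * BNX2FC_FLAG_ABTS_DONE was already set by the ABTS
		 * completion path, so test_and_set_bit() returns nonzero
		 * and the abort is treated as a success.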
		 */
		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
		rc = SUCCESS;
		bnx2fc_scsi_done(io_req, DID_ABORT);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	/* release the reference taken in eh_abort */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
	return rc;
}

void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
				  struct fcoe_task_ctx_entry *task,
				  u8 num_rq)
{
	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->refcount.refcount.counter, io_req->cmd_type);
	bnx2fc_scsi_done(io_req, DID_ERROR);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}

void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
			       struct fcoe_task_ctx_entry *task,
			       u8 num_rq)
{
	u32 r_ctl;
	u32 r_a_tov = FC_DEF_R_A_TOV;
	u8 issue_rrq = 0;
	struct bnx2fc_rport *tgt = io_req->tgt;

	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->xid,
		      io_req->refcount.refcount.counter, io_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			     &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
			      " this io\n");
		return;
	}

	/* Do not issue RRQ as this IO is already cleaned up */
	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
			     &io_req->req_flags))
		goto io_compl;

	/*
	 * For ABTS issued due to SCSI eh_abort_handler, timeout
	 * values are maintained by scsi-ml itself. Cancel timeout
	 * in case ABTS issued as part of task management function
	 * or due to FW error.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get the RRQ response.
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->tm_done);
	} else {
		/*
		 * We end up here when ABTS is issued in an asynchronous
		 * context, i.e., as part of task management completion,
		 * when an FW error is received, or when the ABTS is
		 * issued because the IO timed out.
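		 * In all of these cases there is no eh_abort waiter
		 * (wait_for_comp is not set), so the request is errored
		 * back to SCSI-ml with DID_ERROR below.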
1240 */ 1241 1242 if (io_req->on_active_queue) { 1243 list_del_init(&io_req->link); 1244 io_req->on_active_queue = 0; 1245 /* Move IO req to retire queue */ 1246 list_add_tail(&io_req->link, &tgt->io_retire_queue); 1247 } 1248 bnx2fc_scsi_done(io_req, DID_ERROR); 1249 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1250 } 1251 } 1252 1253 static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req) 1254 { 1255 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1256 struct bnx2fc_rport *tgt = io_req->tgt; 1257 struct list_head *list; 1258 struct list_head *tmp; 1259 struct bnx2fc_cmd *cmd; 1260 int tm_lun = sc_cmd->device->lun; 1261 int rc = 0; 1262 int lun; 1263 1264 /* called with tgt_lock held */ 1265 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n"); 1266 /* 1267 * Walk thru the active_ios queue and ABORT the IO 1268 * that matches with the LUN that was reset 1269 */ 1270 list_for_each_safe(list, tmp, &tgt->active_cmd_queue) { 1271 BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n"); 1272 cmd = (struct bnx2fc_cmd *)list; 1273 lun = cmd->sc_cmd->device->lun; 1274 if (lun == tm_lun) { 1275 /* Initiate ABTS on this cmd */ 1276 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, 1277 &cmd->req_flags)) { 1278 /* cancel the IO timeout */ 1279 if (cancel_delayed_work(&io_req->timeout_work)) 1280 kref_put(&io_req->refcount, 1281 bnx2fc_cmd_release); 1282 /* timer hold */ 1283 rc = bnx2fc_initiate_abts(cmd); 1284 /* abts shouldn't fail in this context */ 1285 WARN_ON(rc != SUCCESS); 1286 } else 1287 printk(KERN_ERR PFX "lun_rst: abts already in" 1288 " progress for this IO 0x%x\n", 1289 cmd->xid); 1290 } 1291 } 1292 } 1293 1294 static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req) 1295 { 1296 struct bnx2fc_rport *tgt = io_req->tgt; 1297 struct list_head *list; 1298 struct list_head *tmp; 1299 struct bnx2fc_cmd *cmd; 1300 int rc = 0; 1301 1302 /* called with tgt_lock held */ 1303 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n"); 1304 /* 1305 * Walk thru the active_ios queue and ABORT the IO 1306 * that matches with the LUN that was reset 1307 */ 1308 list_for_each_safe(list, tmp, &tgt->active_cmd_queue) { 1309 BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n"); 1310 cmd = (struct bnx2fc_cmd *)list; 1311 /* Initiate ABTS */ 1312 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, 1313 &cmd->req_flags)) { 1314 /* cancel the IO timeout */ 1315 if (cancel_delayed_work(&io_req->timeout_work)) 1316 kref_put(&io_req->refcount, 1317 bnx2fc_cmd_release); /* timer hold */ 1318 rc = bnx2fc_initiate_abts(cmd); 1319 /* abts shouldn't fail in this context */ 1320 WARN_ON(rc != SUCCESS); 1321 1322 } else 1323 printk(KERN_ERR PFX "tgt_rst: abts already in progress" 1324 " for this IO 0x%x\n", cmd->xid); 1325 } 1326 } 1327 1328 void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, 1329 struct fcoe_task_ctx_entry *task, u8 num_rq) 1330 { 1331 struct bnx2fc_mp_req *tm_req; 1332 struct fc_frame_header *fc_hdr; 1333 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1334 u64 *hdr; 1335 u64 *temp_hdr; 1336 void *rsp_buf; 1337 1338 /* Called with tgt_lock held */ 1339 BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n"); 1340 1341 if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags))) 1342 set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags); 1343 else { 1344 /* TM has already timed out and we got 1345 * delayed completion. Ignore completion 1346 * processing. 
1347 */ 1348 return; 1349 } 1350 1351 tm_req = &(io_req->mp_req); 1352 fc_hdr = &(tm_req->resp_fc_hdr); 1353 hdr = (u64 *)fc_hdr; 1354 temp_hdr = (u64 *) 1355 &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; 1356 hdr[0] = cpu_to_be64(temp_hdr[0]); 1357 hdr[1] = cpu_to_be64(temp_hdr[1]); 1358 hdr[2] = cpu_to_be64(temp_hdr[2]); 1359 1360 tm_req->resp_len = 1361 task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; 1362 1363 rsp_buf = tm_req->resp_buf; 1364 1365 if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) { 1366 bnx2fc_parse_fcp_rsp(io_req, 1367 (struct fcoe_fcp_rsp_payload *) 1368 rsp_buf, num_rq); 1369 if (io_req->fcp_rsp_code == 0) { 1370 /* TM successful */ 1371 if (tm_req->tm_flags & FCP_TMF_LUN_RESET) 1372 bnx2fc_lun_reset_cmpl(io_req); 1373 else if (tm_req->tm_flags & FCP_TMF_TGT_RESET) 1374 bnx2fc_tgt_reset_cmpl(io_req); 1375 } 1376 } else { 1377 printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n", 1378 fc_hdr->fh_r_ctl); 1379 } 1380 if (!sc_cmd->SCp.ptr) { 1381 printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n"); 1382 return; 1383 } 1384 switch (io_req->fcp_status) { 1385 case FC_GOOD: 1386 if (io_req->cdb_status == 0) { 1387 /* Good IO completion */ 1388 sc_cmd->result = DID_OK << 16; 1389 } else { 1390 /* Transport status is good, SCSI status not good */ 1391 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; 1392 } 1393 if (io_req->fcp_resid) 1394 scsi_set_resid(sc_cmd, io_req->fcp_resid); 1395 break; 1396 1397 default: 1398 BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n", 1399 io_req->fcp_status); 1400 break; 1401 } 1402 1403 sc_cmd = io_req->sc_cmd; 1404 io_req->sc_cmd = NULL; 1405 1406 /* check if the io_req exists in tgt's tmf_q */ 1407 if (io_req->on_tmf_queue) { 1408 1409 list_del_init(&io_req->link); 1410 io_req->on_tmf_queue = 0; 1411 } else { 1412 1413 printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n"); 1414 return; 1415 } 1416 1417 sc_cmd->SCp.ptr = NULL; 1418 sc_cmd->scsi_done(sc_cmd); 1419 1420 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1421 if (io_req->wait_for_comp) { 1422 BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n"); 1423 complete(&io_req->tm_done); 1424 } 1425 } 1426 1427 static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, 1428 int bd_index) 1429 { 1430 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; 1431 int frag_size, sg_frags; 1432 1433 sg_frags = 0; 1434 while (sg_len) { 1435 if (sg_len >= BNX2FC_BD_SPLIT_SZ) 1436 frag_size = BNX2FC_BD_SPLIT_SZ; 1437 else 1438 frag_size = sg_len; 1439 bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff; 1440 bd[bd_index + sg_frags].buf_addr_hi = addr >> 32; 1441 bd[bd_index + sg_frags].buf_len = (u16)frag_size; 1442 bd[bd_index + sg_frags].flags = 0; 1443 1444 addr += (u64) frag_size; 1445 sg_frags++; 1446 sg_len -= frag_size; 1447 } 1448 return sg_frags; 1449 1450 } 1451 1452 static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req) 1453 { 1454 struct scsi_cmnd *sc = io_req->sc_cmd; 1455 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; 1456 struct scatterlist *sg; 1457 int byte_count = 0; 1458 int sg_count = 0; 1459 int bd_count = 0; 1460 int sg_frags; 1461 unsigned int sg_len; 1462 u64 addr; 1463 int i; 1464 1465 sg_count = scsi_dma_map(sc); 1466 scsi_for_each_sg(sc, sg, sg_count, i) { 1467 sg_len = sg_dma_len(sg); 1468 addr = sg_dma_address(sg); 1469 if (sg_len > BNX2FC_MAX_BD_LEN) { 1470 sg_frags = bnx2fc_split_bd(io_req, addr, sg_len, 1471 bd_count); 1472 } else { 1473 1474 sg_frags = 1; 1475 bd[bd_count].buf_addr_lo = addr & 0xffffffff; 1476 
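			/* upper 32 bits of the 64-bit DMA address go into buf_addr_hi */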
bd[bd_count].buf_addr_hi = addr >> 32; 1477 bd[bd_count].buf_len = (u16)sg_len; 1478 bd[bd_count].flags = 0; 1479 } 1480 bd_count += sg_frags; 1481 byte_count += sg_len; 1482 } 1483 if (byte_count != scsi_bufflen(sc)) 1484 printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, " 1485 "task_id = 0x%x\n", byte_count, scsi_bufflen(sc), 1486 io_req->xid); 1487 return bd_count; 1488 } 1489 1490 static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req) 1491 { 1492 struct scsi_cmnd *sc = io_req->sc_cmd; 1493 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; 1494 int bd_count; 1495 1496 if (scsi_sg_count(sc)) 1497 bd_count = bnx2fc_map_sg(io_req); 1498 else { 1499 bd_count = 0; 1500 bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0; 1501 bd[0].buf_len = bd[0].flags = 0; 1502 } 1503 io_req->bd_tbl->bd_valid = bd_count; 1504 } 1505 1506 static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req) 1507 { 1508 struct scsi_cmnd *sc = io_req->sc_cmd; 1509 1510 if (io_req->bd_tbl->bd_valid && sc) { 1511 scsi_dma_unmap(sc); 1512 io_req->bd_tbl->bd_valid = 0; 1513 } 1514 } 1515 1516 void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, 1517 struct fcp_cmnd *fcp_cmnd) 1518 { 1519 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1520 char tag[2]; 1521 1522 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 1523 1524 int_to_scsilun(sc_cmd->device->lun, 1525 (struct scsi_lun *) fcp_cmnd->fc_lun); 1526 1527 1528 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); 1529 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); 1530 1531 fcp_cmnd->fc_cmdref = 0; 1532 fcp_cmnd->fc_pri_ta = 0; 1533 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; 1534 fcp_cmnd->fc_flags = io_req->io_req_flags; 1535 1536 if (scsi_populate_tag_msg(sc_cmd, tag)) { 1537 switch (tag[0]) { 1538 case HEAD_OF_QUEUE_TAG: 1539 fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ; 1540 break; 1541 case ORDERED_QUEUE_TAG: 1542 fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED; 1543 break; 1544 default: 1545 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; 1546 break; 1547 } 1548 } else { 1549 fcp_cmnd->fc_pri_ta = 0; 1550 } 1551 } 1552 1553 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, 1554 struct fcoe_fcp_rsp_payload *fcp_rsp, 1555 u8 num_rq) 1556 { 1557 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1558 struct bnx2fc_rport *tgt = io_req->tgt; 1559 u8 rsp_flags = fcp_rsp->fcp_flags.flags; 1560 u32 rq_buff_len = 0; 1561 int i; 1562 unsigned char *rq_data; 1563 unsigned char *dummy; 1564 int fcp_sns_len = 0; 1565 int fcp_rsp_len = 0; 1566 1567 io_req->fcp_status = FC_GOOD; 1568 io_req->fcp_resid = fcp_rsp->fcp_resid; 1569 1570 io_req->scsi_comp_flags = rsp_flags; 1571 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status = 1572 fcp_rsp->scsi_status_code; 1573 1574 /* Fetch fcp_rsp_info and fcp_sns_info if available */ 1575 if (num_rq) { 1576 1577 /* 1578 * We do not anticipate num_rq >1, as the linux defined 1579 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO 1580 * 256 bytes of single rq buffer is good enough to hold this. 1581 */ 1582 1583 if (rsp_flags & 1584 FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) { 1585 fcp_rsp_len = rq_buff_len 1586 = fcp_rsp->fcp_rsp_len; 1587 } 1588 1589 if (rsp_flags & 1590 FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) { 1591 fcp_sns_len = fcp_rsp->fcp_sns_len; 1592 rq_buff_len += fcp_rsp->fcp_sns_len; 1593 } 1594 1595 io_req->fcp_rsp_len = fcp_rsp_len; 1596 io_req->fcp_sns_len = fcp_sns_len; 1597 1598 if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) { 1599 /* Invalid sense sense length. 
			 */
			printk(KERN_ALERT PFX "invalid sns length %d\n",
			       rq_buff_len);
			/* reset rq_buff_len */
			rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
		}

		rq_data = bnx2fc_get_next_rqe(tgt, 1);

		if (num_rq > 1) {
			/* We do not need extra sense data */
			for (i = 1; i < num_rq; i++)
				dummy = bnx2fc_get_next_rqe(tgt, 1);
		}

		/* fetch fcp_rsp_code */
		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
			/* Only for task management function */
			io_req->fcp_rsp_code = rq_data[3];
			printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
			       io_req->fcp_rsp_code);
		}

		/* fetch sense data */
		rq_data += fcp_rsp_len;

		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
			printk(KERN_ERR PFX "Truncating sense buffer\n");
			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
		}

		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);

		/* return RQ entries */
		for (i = 0; i < num_rq; i++)
			bnx2fc_return_rqe(tgt, 1);
	}
}

/**
 * bnx2fc_queuecommand - Queuecommand function of the scsi template
 *
 * @host:	The Scsi_Host the command was issued to
 * @sc_cmd:	struct scsi_cmnd to be executed
 *
 * This is the IO strategy routine, called by SCSI-ML
 **/
int bnx2fc_queuecommand(struct Scsi_Host *host,
			struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	int rc = 0;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		if (test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags)) {
			sc_cmd->result = DID_NO_CONNECT << 16;
			sc_cmd->scsi_done(sc_cmd);
			return 0;

		}
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
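		 * Returning SCSI_MLQUEUE_TARGET_BUSY makes the midlayer
		 * requeue the command instead of failing it.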
1684 */ 1685 rc = SCSI_MLQUEUE_TARGET_BUSY; 1686 goto exit_qcmd; 1687 } 1688 1689 io_req = bnx2fc_cmd_alloc(tgt); 1690 if (!io_req) { 1691 rc = SCSI_MLQUEUE_HOST_BUSY; 1692 goto exit_qcmd; 1693 } 1694 io_req->sc_cmd = sc_cmd; 1695 1696 if (bnx2fc_post_io_req(tgt, io_req)) { 1697 printk(KERN_ERR PFX "Unable to post io_req\n"); 1698 rc = SCSI_MLQUEUE_HOST_BUSY; 1699 goto exit_qcmd; 1700 } 1701 exit_qcmd: 1702 return rc; 1703 } 1704 1705 void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, 1706 struct fcoe_task_ctx_entry *task, 1707 u8 num_rq) 1708 { 1709 struct fcoe_fcp_rsp_payload *fcp_rsp; 1710 struct bnx2fc_rport *tgt = io_req->tgt; 1711 struct scsi_cmnd *sc_cmd; 1712 struct Scsi_Host *host; 1713 1714 1715 /* scsi_cmd_cmpl is called with tgt lock held */ 1716 1717 if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { 1718 /* we will not receive ABTS response for this IO */ 1719 BNX2FC_IO_DBG(io_req, "Timer context finished processing " 1720 "this scsi cmd\n"); 1721 } 1722 1723 /* Cancel the timeout_work, as we received IO completion */ 1724 if (cancel_delayed_work(&io_req->timeout_work)) 1725 kref_put(&io_req->refcount, 1726 bnx2fc_cmd_release); /* drop timer hold */ 1727 1728 sc_cmd = io_req->sc_cmd; 1729 if (sc_cmd == NULL) { 1730 printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n"); 1731 return; 1732 } 1733 1734 /* Fetch fcp_rsp from task context and perform cmd completion */ 1735 fcp_rsp = (struct fcoe_fcp_rsp_payload *) 1736 &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload); 1737 1738 /* parse fcp_rsp and obtain sense data from RQ if available */ 1739 bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq); 1740 1741 host = sc_cmd->device->host; 1742 if (!sc_cmd->SCp.ptr) { 1743 printk(KERN_ERR PFX "SCp.ptr is NULL\n"); 1744 return; 1745 } 1746 1747 if (io_req->on_active_queue) { 1748 list_del_init(&io_req->link); 1749 io_req->on_active_queue = 0; 1750 /* Move IO req to retire queue */ 1751 list_add_tail(&io_req->link, &tgt->io_retire_queue); 1752 } else { 1753 /* This should not happen, but could have been pulled 1754 * by bnx2fc_flush_active_ios(), or during a race 1755 * between command abort and (late) completion. 
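		 * Wake any eh_abort waiter via tm_done so the abort
		 * handler can finish.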
1756 */ 1757 BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n"); 1758 if (io_req->wait_for_comp) 1759 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, 1760 &io_req->req_flags)) 1761 complete(&io_req->tm_done); 1762 } 1763 1764 bnx2fc_unmap_sg_list(io_req); 1765 io_req->sc_cmd = NULL; 1766 1767 switch (io_req->fcp_status) { 1768 case FC_GOOD: 1769 if (io_req->cdb_status == 0) { 1770 /* Good IO completion */ 1771 sc_cmd->result = DID_OK << 16; 1772 } else { 1773 /* Transport status is good, SCSI status not good */ 1774 BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d" 1775 " fcp_resid = 0x%x\n", 1776 io_req->cdb_status, io_req->fcp_resid); 1777 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; 1778 } 1779 if (io_req->fcp_resid) 1780 scsi_set_resid(sc_cmd, io_req->fcp_resid); 1781 break; 1782 default: 1783 printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n", 1784 io_req->fcp_status); 1785 break; 1786 } 1787 sc_cmd->SCp.ptr = NULL; 1788 sc_cmd->scsi_done(sc_cmd); 1789 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1790 } 1791 1792 static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, 1793 struct bnx2fc_cmd *io_req) 1794 { 1795 struct fcoe_task_ctx_entry *task; 1796 struct fcoe_task_ctx_entry *task_page; 1797 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1798 struct fcoe_port *port = tgt->port; 1799 struct bnx2fc_hba *hba = port->priv; 1800 struct fc_lport *lport = port->lport; 1801 struct fcoe_dev_stats *stats; 1802 int task_idx, index; 1803 u16 xid; 1804 1805 /* Initialize rest of io_req fields */ 1806 io_req->cmd_type = BNX2FC_SCSI_CMD; 1807 io_req->port = port; 1808 io_req->tgt = tgt; 1809 io_req->data_xfer_len = scsi_bufflen(sc_cmd); 1810 sc_cmd->SCp.ptr = (char *)io_req; 1811 1812 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 1813 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { 1814 io_req->io_req_flags = BNX2FC_READ; 1815 stats->InputRequests++; 1816 stats->InputBytes += io_req->data_xfer_len; 1817 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { 1818 io_req->io_req_flags = BNX2FC_WRITE; 1819 stats->OutputRequests++; 1820 stats->OutputBytes += io_req->data_xfer_len; 1821 } else { 1822 io_req->io_req_flags = 0; 1823 stats->ControlRequests++; 1824 } 1825 put_cpu(); 1826 1827 xid = io_req->xid; 1828 1829 /* Build buffer descriptor list for firmware from sg list */ 1830 bnx2fc_build_bd_list_from_sg(io_req); 1831 1832 task_idx = xid / BNX2FC_TASKS_PER_PAGE; 1833 index = xid % BNX2FC_TASKS_PER_PAGE; 1834 1835 /* Initialize task context for this IO request */ 1836 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 1837 task = &(task_page[index]); 1838 bnx2fc_init_task(io_req, task); 1839 1840 spin_lock_bh(&tgt->tgt_lock); 1841 1842 if (tgt->flush_in_prog) { 1843 printk(KERN_ERR PFX "Flush in progress..Host Busy\n"); 1844 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1845 spin_unlock_bh(&tgt->tgt_lock); 1846 return -EAGAIN; 1847 } 1848 1849 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { 1850 printk(KERN_ERR PFX "Session not ready...post_io\n"); 1851 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1852 spin_unlock_bh(&tgt->tgt_lock); 1853 return -EAGAIN; 1854 } 1855 1856 /* Time IO req */ 1857 bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT); 1858 /* Obtain free SQ entry */ 1859 bnx2fc_add_2_sq(tgt, xid); 1860 1861 /* Enqueue the io_req to active_cmd_queue */ 1862 1863 io_req->on_active_queue = 1; 1864 /* move io_req from pending_queue to active_queue */ 1865 list_add_tail(&io_req->link, &tgt->active_cmd_queue); 1866 1867 /* Ring doorbell 
*/ 1868 bnx2fc_ring_doorbell(tgt); 1869 spin_unlock_bh(&tgt->tgt_lock); 1870 return 0; 1871 } 1872
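
/*
 * For reference, the normal fast-path flow implemented in this file:
 *
 *   bnx2fc_queuecommand()
 *      -> bnx2fc_cmd_alloc()            per-CPU free list, binds the io_bdt
 *      -> bnx2fc_post_io_req()
 *            bnx2fc_build_bd_list_from_sg()
 *            bnx2fc_init_task()
 *            bnx2fc_cmd_timer_set(BNX2FC_IO_TIMEOUT)
 *            bnx2fc_add_2_sq() + bnx2fc_ring_doorbell()
 *
 *   completion (tgt_lock held):
 *      bnx2fc_process_scsi_cmd_compl()
 *            bnx2fc_parse_fcp_rsp()     sense data pulled from the RQ
 *            sc_cmd->scsi_done()
 *
 *   error handling:
 *      bnx2fc_eh_abort() / bnx2fc_eh_device_reset() / bnx2fc_eh_target_reset()
 *      build ABTS or TM requests via bnx2fc_elstm_alloc() and wait on
 *      io_req->tm_done.
 */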