/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * IO manager and SCSI IO processing.
 *
 * Copyright (c) 2008 - 2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

#define RESERVE_FREE_LIST_INDEX num_possible_cpus()

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq);

void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
			  unsigned int timer_msec)
{
	struct bnx2fc_interface *interface = io_req->port->priv;

	if (queue_delayed_work(interface->timer_work_queue,
			       &io_req->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		kref_get(&io_req->refcount);
}

static void bnx2fc_cmd_timeout(struct work_struct *work)
{
	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
						 timeout_work.work);
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u8 cmd_type = io_req->cmd_type;
	struct bnx2fc_rport *tgt = io_req->tgt;
	int logo_issued;
	int rc;

	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
		      "req_flags = %lx\n", cmd_type, io_req->req_flags);

	spin_lock_bh(&tgt->tgt_lock);
	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
		/*
		 * Ideally we should hold the io_req until the RRQ completes,
		 * and release io_req from timeout hold.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		bnx2fc_send_rrq(io_req);
		return;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
		goto done;
	}

	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags)) {
			/* Handle eh_abort timeout */
			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
			complete(&io_req->tm_done);
		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
				    &io_req->req_flags)) {
			/* Handle internally generated ABTS timeout */
			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
				      io_req->refcount.refcount.counter);
			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					       &io_req->req_flags))) {

				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						"logo - tgt flags = 0x%lx\n",
						tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/* Handle IO timeout */
			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
					     &io_req->req_flags)) {
				BNX2FC_IO_DBG(io_req, "IO completed before "
					      "timer expiry\n");
				goto done;
			}

			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &io_req->req_flags)) {
				rc = bnx2fc_initiate_abts(io_req);
				if (rc == SUCCESS)
					goto done;
				/*
				 * Explicitly logo the target if
				 * abts initiation fails
				 */
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						"logo - tgt flags = 0x%lx\n",
						tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			} else {
				BNX2FC_IO_DBG(io_req, "IO already in "
					      "ABTS processing\n");
			}
		}
		break;
	case BNX2FC_ELS:

		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");

			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					      &io_req->req_flags)) {
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicitly logo"
						      "(els)\n");
					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/*
			 * Handle ELS timeout.
			 * tgt_lock is used to sync compl path and timeout
			 * path. If els compl path is processing this IO, we
			 * have nothing to do here, just release the timer hold
			 */
			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
					     &io_req->req_flags))
				goto done;

			/* Indicate the cb_func that this ELS is timed out */
			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);

			if ((io_req->cb_func) && (io_req->cb_arg)) {
				io_req->cb_func(io_req->cb_arg);
				io_req->cb_arg = NULL;
			}
		}
		break;
	default:
		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
		       cmd_type);
		break;
	}

done:
	/* release the cmd that was held when timer was set */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
}

static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
{
	/* Called with host lock held */
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/*
	 * active_cmd_queue may have other command types as well,
	 * and during flush operation, we want to error back only
	 * scsi commands.
	 */
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		return;

	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
	if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
		/* Do not call scsi done for this IO */
		return;
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;
	if (!sc_cmd) {
		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
" 228 "IO(0x%x) already cleaned up\n", 229 io_req->xid); 230 return; 231 } 232 sc_cmd->result = err_code << 16; 233 234 BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n", 235 sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries, 236 sc_cmd->allowed); 237 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); 238 sc_cmd->SCp.ptr = NULL; 239 sc_cmd->scsi_done(sc_cmd); 240 } 241 242 struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, 243 u16 min_xid, u16 max_xid) 244 { 245 struct bnx2fc_cmd_mgr *cmgr; 246 struct io_bdt *bdt_info; 247 struct bnx2fc_cmd *io_req; 248 size_t len; 249 u32 mem_size; 250 u16 xid; 251 int i; 252 int num_ios, num_pri_ios; 253 size_t bd_tbl_sz; 254 int arr_sz = num_possible_cpus() + 1; 255 256 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { 257 printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ 258 and max_xid 0x%x\n", min_xid, max_xid); 259 return NULL; 260 } 261 BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid); 262 263 num_ios = max_xid - min_xid + 1; 264 len = (num_ios * (sizeof(struct bnx2fc_cmd *))); 265 len += sizeof(struct bnx2fc_cmd_mgr); 266 267 cmgr = kzalloc(len, GFP_KERNEL); 268 if (!cmgr) { 269 printk(KERN_ERR PFX "failed to alloc cmgr\n"); 270 return NULL; 271 } 272 273 cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * 274 arr_sz, GFP_KERNEL); 275 if (!cmgr->free_list) { 276 printk(KERN_ERR PFX "failed to alloc free_list\n"); 277 goto mem_err; 278 } 279 280 cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * 281 arr_sz, GFP_KERNEL); 282 if (!cmgr->free_list_lock) { 283 printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); 284 goto mem_err; 285 } 286 287 cmgr->hba = hba; 288 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); 289 290 for (i = 0; i < arr_sz; i++) { 291 INIT_LIST_HEAD(&cmgr->free_list[i]); 292 spin_lock_init(&cmgr->free_list_lock[i]); 293 } 294 295 /* 296 * Pre-allocated pool of bnx2fc_cmds. 297 * Last entry in the free list array is the free list 298 * of slow path requests. 
	 */
	xid = BNX2FC_MIN_XID;
	num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
	for (i = 0; i < num_ios; i++) {
		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);

		if (!io_req) {
			printk(KERN_ERR PFX "failed to alloc io_req\n");
			goto mem_err;
		}

		INIT_LIST_HEAD(&io_req->link);
		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);

		io_req->xid = xid++;
		if (i < num_pri_ios)
			list_add_tail(&io_req->link,
				      &cmgr->free_list[io_req->xid %
						       num_possible_cpus()]);
		else
			list_add_tail(&io_req->link,
				      &cmgr->free_list[num_possible_cpus()]);
		io_req++;
	}

	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
	mem_size = num_ios * sizeof(struct io_bdt *);
	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
		goto mem_err;
	}

	mem_size = sizeof(struct io_bdt);
	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			printk(KERN_ERR PFX "failed to alloc "
			       "io_bdt_pool[%d]\n", i);
			goto mem_err;
		}
	}

	/* Allocate and map fcoe_bdt_ctx structures */
	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						      bd_tbl_sz,
						      &bdt_info->bd_tbl_dma,
						      GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			printk(KERN_ERR PFX "failed to alloc "
			       "bdt_tbl[%d]\n", i);
			goto mem_err;
		}
	}

	return cmgr;

mem_err:
	bnx2fc_cmd_mgr_free(cmgr);
	return NULL;
}

void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct bnx2fc_hba *hba = cmgr->hba;
	size_t bd_tbl_sz;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = BNX2FC_MAX_XID;
	int num_ios;
	int i;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
					  bdt_info->bd_tbl,
					  bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	kfree(cmgr->free_list_lock);

	/* Destroy cmd pool */
	if (!cmgr->free_list)
		goto free_cmgr;

	for (i = 0; i < num_possible_cpus() + 1; i++) {
		struct list_head *list;
		struct list_head *tmp;

		list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
			struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
			list_del(&io_req->link);
			kfree(io_req);
		}
	}
	kfree(cmgr->free_list);
free_cmgr:
	/* Free command manager itself */
	kfree(cmgr);
}

struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	int index = RESERVE_FREE_LIST_INDEX;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;

	max_sqes = tgt->max_sqes;
	switch (type) {
	case BNX2FC_TASK_MGMT_CMD:
		max_sqes = BNX2FC_TM_MAX_SQES;
		break;
	case BNX2FC_ELS:
		max_sqes = BNX2FC_ELS_MAX_SQES;
		break;
	default:
		break;
	}

	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
			       "ios(%d):sqes(%d)\n",
			       tgt->num_active_ios.counter, tgt->max_sqes);
		if (list_empty(&(cmd_mgr->free_list[index])))
			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;
	io_req->cmd_type = type;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}

struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;
	int index = get_cpu();

	max_sqes = BNX2FC_SCSI_MAX_SQES;
	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&cmd_mgr->free_list[index])) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		put_cpu();
		return NULL;
	}

	listp = (struct list_head *)
		cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
	put_cpu();

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}

void bnx2fc_cmd_release(struct kref *ref)
{
	struct bnx2fc_cmd *io_req = container_of(ref,
						 struct bnx2fc_cmd, refcount);
	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	int index;

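	/*
	 * SCSI commands are returned to the per-CPU free list selected by
	 * their xid; all other (slow-path) commands go back to the
	 * reserved list.
	 */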
	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		index = io_req->xid % num_possible_cpus();
	else
		index = RESERVE_FREE_LIST_INDEX;

	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		bnx2fc_free_mp_resc(io_req);
	cmd_mgr->cmds[io_req->xid] = NULL;
	/* Delete IO from retire queue */
	list_del_init(&io_req->link);
	/* Add it to the free list */
	list_add(&io_req->link,
		 &cmd_mgr->free_list[index]);
	atomic_dec(&io_req->tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
}

static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	size_t sz = sizeof(struct fcoe_bd_ctx);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_req_bd,
				  mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_resp_bd,
				  mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->req_buf,
				  mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->resp_buf,
				  mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req;
	struct fcoe_bd_ctx *mp_req_bd;
	struct fcoe_bd_ctx *mp_resp_bd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	dma_addr_t addr;
	size_t sz;

	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));

	mp_req->req_len = sizeof(struct fcp_cmnd);
	io_req->data_xfer_len = mp_req->req_len;
	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					     &mp_req->req_buf_dma,
					     GFP_ATOMIC);
	if (!mp_req->req_buf) {
		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					      &mp_req->resp_buf_dma,
					      GFP_ATOMIC);
	if (!mp_req->resp_buf) {
		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	memset(mp_req->req_buf, 0, PAGE_SIZE);
	memset(mp_req->resp_buf, 0, PAGE_SIZE);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_bd_ctx);
	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
					       &mp_req->mp_req_bd_dma,
					       GFP_ATOMIC);
	if (!mp_req->mp_req_bd) {
		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						&mp_req->mp_resp_bd_dma,
						GFP_ATOMIC);
	if (!mp_req->mp_resp_bd) {
		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_req_bd->buf_len = PAGE_SIZE;
	mp_req_bd->flags = 0;
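	/*
	 * The firmware BD format carries the 64-bit DMA address as
	 * separate low/high 32-bit fields, hence the split above.
	 */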

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_resp_bd->buf_len = PAGE_SIZE;
	mp_resp_bd->flags = 0;

	return SUCCESS;
}

static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_lport *lport;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fcoe_port *port;
	struct bnx2fc_interface *interface;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_mp_req *tm_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_frame_header *fc_hdr;
	struct fcp_cmnd *fcp_cmnd;
	int task_idx, index;
	int rc = SUCCESS;
	u16 xid;
	u32 sid, did;
	unsigned long start = jiffies;

	lport = shost_priv(host);
	port = lport_priv(lport);
	interface = port->priv;

	if (rport == NULL) {
		printk(KERN_ERR PFX "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "device_reset: link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}
	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
		rc = FAILED;
		goto tmf_err;
	}
retry_tmf:
	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
	if (!io_req) {
		if (time_after(jiffies, start + HZ)) {
			printk(KERN_ERR PFX "tmf: Failed TMF");
			rc = FAILED;
			goto tmf_err;
		}
		msleep(20);
		goto retry_tmf;
	}
	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->port = port;
	io_req->tgt = tgt;

	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);

	rc = bnx2fc_init_mp_req(io_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		goto tmf_err;
	}

	/* Set TM flags */
	io_req->io_req_flags = 0;
	tm_req->tm_flags = tm_flags;

	/* Fill FCP_CMND */
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
	memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
	fcp_cmnd->fc_dl = 0;

	/* Fill FC header */
	fc_hdr = &(tm_req->req_fc_hdr);
	sid = tgt->sid;
	did = rport->port_id;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
			 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = io_req->xid;

	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(io_req, task);

	sc_cmd->SCp.ptr = (char *)io_req;

	/* Obtain free SQ entry */
	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_tm_queue */
	io_req->on_tmf_queue = 1;
	list_add_tail(&io_req->link, &tgt->active_tm_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

	rc = wait_for_completion_timeout(&io_req->tm_done,
					 BNX2FC_TM_TIMEOUT * HZ);
	spin_lock_bh(&tgt->tgt_lock);

	io_req->wait_for_comp = 0;
	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);

	spin_unlock_bh(&tgt->tgt_lock);

	if (!rc) {
		BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
		rc = SUCCESS;
	}
tmf_err:
	return rc;
}

int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport *rport = tgt->rport;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *abts_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fc_frame_header *fc_hdr;
	struct bnx2fc_mp_req *abts_req;
	int task_idx, index;
	u32 sid, did;
	u16 xid;
	int rc = SUCCESS;
	u32 r_a_tov = rdata->r_a_tov;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
		rc = FAILED;
		goto abts_err;
	}

	if (rport == NULL) {
		printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
		rc = FAILED;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
		rc = FAILED;
		goto abts_err;
	}

	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
	if (!abts_io_req) {
		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
		rc = FAILED;
		goto abts_err;
	}

	/* Initialize rest of io_req fields */
	abts_io_req->sc_cmd = NULL;
	abts_io_req->port = port;
	abts_io_req->tgt = tgt;
	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */

	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));

	/* Fill FC header */
	fc_hdr = &(abts_req->req_fc_hdr);

	/* Obtain oxid and rxid for the original exchange to be aborted */
	fc_hdr->fh_ox_id = htons(io_req->xid);
	fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);

	sid = tgt->sid;
	did = rport->port_id;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
			 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	xid = abts_io_req->xid;
	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(abts_io_req, task);

	/*
	 * ABTS task is a temporary task that will be cleaned up
	 * irrespective of ABTS response. We need to start the timer
	 * for the original exchange, as the CQE is posted for the original
	 * IO request.
	 *
	 * Timer for ABTS is started only when it is originated by a
	 * TM request. For the ABTS issued as part of ULP timeout,
	 * scsi-ml maintains the timers.
	 */

	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

abts_err:
	return rc;
}

int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
				enum fc_rctl r_ctl)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *seq_clnp_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	int task_idx, index;
	u16 xid;
	int rc = 0;

	BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
		      orig_io_req->xid);
	kref_get(&orig_io_req->refcount);

	port = orig_io_req->port;
	interface = port->priv;
	lport = port->lport;

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
		rc = -ENOMEM;
		goto cleanup_err;
	}

	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
	if (!seq_clnp_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -ENOMEM;
		kfree(cb_arg);
		goto cleanup_err;
	}
	/* Initialize rest of io_req fields */
	seq_clnp_req->sc_cmd = NULL;
	seq_clnp_req->port = port;
	seq_clnp_req->tgt = tgt;
	seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = seq_clnp_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	cb_arg->aborted_io_req = orig_io_req;
	cb_arg->io_req = seq_clnp_req;
	cb_arg->r_ctl = r_ctl;
	cb_arg->offset = offset;
	seq_clnp_req->cb_arg = cb_arg;

	printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
	bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
cleanup_err:
	return rc;
}

int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *cleanup_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int task_idx, index;
	u16 xid, orig_xid;
	int rc = 0;

	/* ASSUMPTION: called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
	if (!cleanup_io_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -1;
		goto cleanup_err;
	}

	/* Initialize rest of io_req fields */
	cleanup_io_req->sc_cmd = NULL;
	cleanup_io_req->port = port;
	cleanup_io_req->tgt = tgt;
	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = cleanup_io_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	orig_xid = io_req->xid;

	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);

	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

cleanup_err:
	return rc;
}

/**
 * bnx2fc_eh_target_reset: Reset a target
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}

/**
 * bnx2fc_eh_device_reset - Reset a single LUN
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}

/**
 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
 *			SCSI command
 *
 * @sc_cmd:	SCSI_ML command pointer
 *
 * SCSI abort request handler
 */
int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_cmd *io_req;
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	struct bnx2fc_rport *tgt;
	int logo_issued;
	int wait_cnt = 0;
	int rc = FAILED;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	lport = shost_priv(sc_cmd->device->host);
	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		printk(KERN_ERR PFX "eh_abort: link not ready\n");
		return rc;
	}

	tgt = (struct bnx2fc_rport *)&rp[1];

	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");

	spin_lock_bh(&tgt->tgt_lock);
	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
	if (!io_req) {
		/* Command might have just completed */
		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
		      io_req->refcount.refcount.counter);

	/* Hold IO request across abort processing */
	kref_get(&io_req->refcount);

	BUG_ON(tgt != io_req->tgt);

	/* Remove the io_req from the active_q. */
	/*
	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
	 * issue an ABTS on this particular IO req, as the
	 * io_req is no longer in the active_q.
	 */
	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "flush in progress\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	if (io_req->on_active_queue == 0) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "not on active_q\n", io_req->xid);
		/*
		 * This condition can happen only due to the FW bug,
		 * where we do not receive cleanup response from
		 * the FW. Handle this case gracefully by erroring
		 * back the IO request to SCSI-ml
		 */
		bnx2fc_scsi_done(io_req, DID_ABORT);

		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	/*
	 * Only eh_abort processing will remove the IO from
	 * active_cmd_q before processing the request. this is
	 * done to avoid race conditions between IOs aborted
	 * as part of task management completion and eh_abort
	 * processing
	 */
	list_del_init(&io_req->link);
	io_req->on_active_queue = 0;
	/* Move IO req to retire queue */
	list_add_tail(&io_req->link, &tgt->io_retire_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
		/* Cancel the current timer running on this io_req */
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
		rc = bnx2fc_initiate_abts(io_req);
	} else {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "already in abts processing\n", io_req->xid);
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		bnx2fc_initiate_cleanup(io_req);

		spin_unlock_bh(&tgt->tgt_lock);

		wait_for_completion(&io_req->tm_done);

		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_comp = 0;
		rdata = io_req->tgt->rdata;
		logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
					       &tgt->flags);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);

		if (!logo_issued) {
			BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
				      tgt->flags);
			mutex_lock(&lport->disc.disc_mutex);
			lport->tt.rport_logoff(rdata);
			mutex_unlock(&lport->disc.disc_mutex);
			do {
				msleep(BNX2FC_RELOGIN_WAIT_TIME);
				/*
				 * If session not recovered, let SCSI-ml
				 * escalate error recovery
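				 * by failing this abort request.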
				 */
				if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
					return FAILED;
			} while (!test_bit(BNX2FC_FLAG_SESSION_READY,
					   &tgt->flags));
		}
		return SUCCESS;
	}
	if (rc == FAILED) {
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return rc;
	}
	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_comp = 0;
	if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			       &io_req->req_flags))) {
		/* Let the scsi-ml try to recover this command */
		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
		       io_req->xid);
		rc = FAILED;
	} else {
		/*
		 * We come here even when there was a race condition
		 * between timeout and abts completion, and abts
		 * completion happens just in time.
		 */
		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
		rc = SUCCESS;
		bnx2fc_scsi_done(io_req, DID_ABORT);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	/* release the reference taken in eh_abort */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
	return rc;
}

void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
				      struct fcoe_task_ctx_entry *task,
				      u8 rx_state)
{
	struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
	struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
	u32 offset = cb_arg->offset;
	enum fc_rctl r_ctl = cb_arg->r_ctl;
	int rc = 0;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;

	BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x "
		      "cmd_type = %d\n",
		      seq_clnp_req->xid, seq_clnp_req->cmd_type);

	if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
		printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
		       seq_clnp_req->xid);
		goto free_cb_arg;
	}

	spin_unlock_bh(&tgt->tgt_lock);
	rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
	spin_lock_bh(&tgt->tgt_lock);

	if (rc)
		printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
		       " IO will abort\n");
	seq_clnp_req->cb_arg = NULL;
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
free_cb_arg:
	kfree(cb_arg);
	return;
}

void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
				  struct fcoe_task_ctx_entry *task,
				  u8 num_rq)
{
	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->refcount.refcount.counter, io_req->cmd_type);
	bnx2fc_scsi_done(io_req, DID_ERROR);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp)
		complete(&io_req->tm_done);
}

void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
			       struct fcoe_task_ctx_entry *task,
			       u8 num_rq)
{
	u32 r_ctl;
	u32 r_a_tov = FC_DEF_R_A_TOV;
	u8 issue_rrq = 0;
	struct bnx2fc_rport *tgt = io_req->tgt;

	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->xid,
		      io_req->refcount.refcount.counter, io_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			     &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
			      " this io\n");
		return;
	}

	/* Do not issue RRQ as this IO is already cleaned up */
	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
			     &io_req->req_flags))
		goto io_compl;

	/*
	 * For ABTS issued due to SCSI eh_abort_handler, timeout
	 * values are maintained by scsi-ml itself. Cancel timeout
	 * in case ABTS issued as part of task management function
	 * or due to FW error.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->tm_done);
	} else {
		/*
		 * We end up here when ABTS is issued as
		 * in asynchronous context, i.e., as part
		 * of task management completion, or
		 * when FW error is received or when the
		 * ABTS is issued when the IO is timed
		 * out.
		 */

		if (io_req->on_active_queue) {
			list_del_init(&io_req->link);
			io_req->on_active_queue = 0;
			/* Move IO req to retire queue */
			list_add_tail(&io_req->link, &tgt->io_retire_queue);
		}
		bnx2fc_scsi_done(io_req, DID_ERROR);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
}

static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2fc_cmd *cmd;
	int tm_lun = sc_cmd->device->lun;
	int rc = 0;
	int lun;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT the IO
	 * that matches with the LUN that was reset
	 */
	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
		cmd = (struct bnx2fc_cmd *)list;
		lun = cmd->sc_cmd->device->lun;
		if (lun == tm_lun) {
			/* Initiate ABTS on this cmd */
			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &cmd->req_flags)) {
				/* cancel the IO timeout */
				if (cancel_delayed_work(&io_req->timeout_work))
					kref_put(&io_req->refcount,
						 bnx2fc_cmd_release);
							/* timer hold */
				rc = bnx2fc_initiate_abts(cmd);
				/* abts shouldn't fail in this context */
				WARN_ON(rc != SUCCESS);
			} else
				printk(KERN_ERR PFX "lun_rst: abts already in"
				       " progress for this IO 0x%x\n",
				       cmd->xid);
		}
	}
}

static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2fc_cmd *cmd;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT every IO
	 * on the target that was reset
	 */
	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
		cmd = (struct bnx2fc_cmd *)list;
		/* Initiate ABTS */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &cmd->req_flags)) {
			/* cancel the IO timeout */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */
			rc = bnx2fc_initiate_abts(cmd);
			/* abts shouldn't fail in this context */
			WARN_ON(rc != SUCCESS);

		} else
			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
			       " for this IO 0x%x\n", cmd->xid);
	}
}

void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
			     struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *tm_req;
	struct fc_frame_header *fc_hdr;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	u64 *hdr;
	u64 *temp_hdr;
	void *rsp_buf;

	/* Called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");

	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
	else {
		/* TM has already timed out and we got
		 * delayed completion. Ignore completion
		 * processing.
		 */
		return;
	}

	tm_req = &(io_req->mp_req);
	fc_hdr = &(tm_req->resp_fc_hdr);
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	tm_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	rsp_buf = tm_req->resp_buf;

	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
		bnx2fc_parse_fcp_rsp(io_req,
				     (struct fcoe_fcp_rsp_payload *)
				     rsp_buf, num_rq);
		if (io_req->fcp_rsp_code == 0) {
			/* TM successful */
			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
				bnx2fc_lun_reset_cmpl(io_req);
			else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
				bnx2fc_tgt_reset_cmpl(io_req);
		}
	} else {
		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
		       fc_hdr->fh_r_ctl);
	}
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
		return;
	}
	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;

	default:
		BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
			      io_req->fcp_status);
		break;
	}

	sc_cmd = io_req->sc_cmd;
	io_req->sc_cmd = NULL;

	/* check if the io_req exists in tgt's tmf_q */
	if (io_req->on_tmf_queue) {

		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
	} else {

		printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
		return;
	}

	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);

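	/* drop a reference on the TM request now that it has completed */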
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp) {
		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
		complete(&io_req->tm_done);
	}
}

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index)
{
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len >= BNX2FC_BD_SPLIT_SZ)
			frag_size = BNX2FC_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
		bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
		bd[bd_index + sg_frags].buf_len = (u16)frag_size;
		bd[bd_index + sg_frags].flags = 0;

		addr += (u64) frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;

}

static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr;
	int i;

	sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
			      scsi_sg_count(sc), sc->sc_data_direction);
	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = sg_dma_address(sg);
		if (sg_len > BNX2FC_MAX_BD_LEN) {
			sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
						   bd_count);
		} else {

			sg_frags = 1;
			bd[bd_count].buf_addr_lo = addr & 0xffffffff;
			bd[bd_count].buf_addr_hi = addr >> 32;
			bd[bd_count].buf_len = (u16)sg_len;
			bd[bd_count].flags = 0;
		}
		bd_count += sg_frags;
		byte_count += sg_len;
	}
	if (byte_count != scsi_bufflen(sc))
		printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
		       "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
		       io_req->xid);
	return bd_count;
}

static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = bnx2fc_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
		bd[0].buf_len = bd[0].flags = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}

static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc) {
		scsi_dma_unmap(sc);
		io_req->bd_tbl->bd_valid = 0;
	}
}

void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
			   struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	char tag[2];

	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));

	int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);

	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	fcp_cmnd->fc_cmdref = 0;
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;

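	/*
	 * Map the block-layer queue tag, if any, onto the FCP task
	 * attribute (simple / ordered / head-of-queue).
	 */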
	if (scsi_populate_tag_msg(sc_cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
			break;
		default:
			fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
			break;
		}
	} else {
		fcp_cmnd->fc_pri_ta = 0;
	}
}

static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
	u32 rq_buff_len = 0;
	int i;
	unsigned char *rq_data;
	unsigned char *dummy;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
				fcp_rsp->scsi_status_code;

	/* Fetch fcp_rsp_info and fcp_sns_info if available */
	if (num_rq) {

		/*
		 * We do not anticipate num_rq > 1, as the linux defined
		 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO;
		 * a single 256-byte rq buffer is good enough to hold this.
		 */

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
			fcp_rsp_len = rq_buff_len
					= fcp_rsp->fcp_rsp_len;
		}

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
			fcp_sns_len = fcp_rsp->fcp_sns_len;
			rq_buff_len += fcp_rsp->fcp_sns_len;
		}

		io_req->fcp_rsp_len = fcp_rsp_len;
		io_req->fcp_sns_len = fcp_sns_len;

		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
			/* Invalid sense length. */
			printk(KERN_ERR PFX "invalid sns length %d\n",
			       rq_buff_len);
			/* reset rq_buff_len */
			rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
		}

		rq_data = bnx2fc_get_next_rqe(tgt, 1);

		if (num_rq > 1) {
			/* We do not need extra sense data */
			for (i = 1; i < num_rq; i++)
				dummy = bnx2fc_get_next_rqe(tgt, 1);
		}

		/* fetch fcp_rsp_code */
		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
			/* Only for task management function */
			io_req->fcp_rsp_code = rq_data[3];
			printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
			       io_req->fcp_rsp_code);
		}

		/* fetch sense data */
		rq_data += fcp_rsp_len;

		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
			printk(KERN_ERR PFX "Truncating sense buffer\n");
			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
		}

		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);

		/* return RQ entries */
		for (i = 0; i < num_rq; i++)
			bnx2fc_return_rqe(tgt, 1);
	}
}

/**
 * bnx2fc_queuecommand - Queuecommand function of the scsi template
 *
 * @host:	The Scsi_Host the command was issued to
 * @sc_cmd:	struct scsi_cmnd to be executed
 *
 * This is the IO strategy routine, called by SCSI-ML
 **/
int bnx2fc_queuecommand(struct Scsi_Host *host,
			struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	int rc = 0;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}

	io_req = bnx2fc_cmd_alloc(tgt);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}
	io_req->sc_cmd = sc_cmd;

	if (bnx2fc_post_io_req(tgt, io_req)) {
		printk(KERN_ERR PFX "Unable to post io_req\n");
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}
exit_qcmd:
	return rc;
}

void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
				   struct fcoe_task_ctx_entry *task,
				   u8 num_rq)
{
	struct fcoe_fcp_rsp_payload *fcp_rsp;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct scsi_cmnd *sc_cmd;
	struct Scsi_Host *host;

	/* scsi_cmd_cmpl is called with tgt lock held */

	if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		/* we will not receive ABTS response for this IO */
		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
			      "this scsi cmd\n");
	}

	/* Cancel the timeout_work, as we received IO completion */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	sc_cmd = io_req->sc_cmd;
	if (sc_cmd == NULL) {
		printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
		return;
	}

	/* Fetch fcp_rsp from task context and perform cmd completion */
	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);

	/* parse fcp_rsp and obtain sense data from RQ if available */
	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);

	host = sc_cmd->device->host;
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
		return;
	}

	if (io_req->on_active_queue) {
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		/* Move IO req to retire queue */
		list_add_tail(&io_req->link, &tgt->io_retire_queue);
	} else {
		/* This should not happen, but could have been pulled
		 * by bnx2fc_flush_active_ios(), or during a race
		 * between command abort and (late) completion.
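		 * In that case, just wake up any eh thread waiting on
		 * tm_done below.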
		 */
		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
		if (io_req->wait_for_comp)
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags))
				complete(&io_req->tm_done);
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
				      " fcp_resid = 0x%x\n",
				      io_req->cdb_status, io_req->fcp_resid);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
		       io_req->fcp_status);
		break;
	}
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}

int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
		       struct bnx2fc_cmd *io_req)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_lport *lport = port->lport;
	struct fcoe_dev_stats *stats;
	int task_idx, index;
	u16 xid;

	/* Initialize rest of io_req fields */
	io_req->cmd_type = BNX2FC_SCSI_CMD;
	io_req->port = port;
	io_req->tgt = tgt;
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;

	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		io_req->io_req_flags = BNX2FC_READ;
		stats->InputRequests++;
		stats->InputBytes += io_req->data_xfer_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		io_req->io_req_flags = BNX2FC_WRITE;
		stats->OutputRequests++;
		stats->OutputBytes += io_req->data_xfer_len;
	} else {
		io_req->io_req_flags = 0;
		stats->ControlRequests++;
	}
	put_cpu();

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (bnx2fc_build_bd_list_from_sg(io_req)) {
		printk(KERN_ERR PFX "BD list creation failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_task(io_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "Session not ready...post_io\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	/* Time IO req */
	if (tgt->io_timeout)
		bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_cmd_queue */

	io_req->on_active_queue = 1;
	/* move io_req from pending_queue to active_queue */
	list_add_tail(&io_req->link, &tgt->active_cmd_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);
	return 0;
}