/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * IO manager and SCSI IO processing.
 *
 * Copyright (c) 2008 - 2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

#define RESERVE_FREE_LIST_INDEX num_possible_cpus()

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
			      struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq);

void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
			  unsigned int timer_msec)
{
	struct bnx2fc_interface *interface = io_req->port->priv;

	if (queue_delayed_work(interface->timer_work_queue,
			       &io_req->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		kref_get(&io_req->refcount);
}

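/*
 * bnx2fc_cmd_timeout - delayed work handler for bnx2fc_cmd timeouts.
 *
 * Runs from the interface's timer_work_queue with the timer hold taken in
 * bnx2fc_cmd_timer_set(). Under tgt_lock it first handles the RRQ and
 * RETIRE_OXID cases, then dispatches on cmd_type: eh_abort and ABTS
 * timeouts for SCSI commands, and ABTS/ELS timeouts for ELS requests.
 */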
static void bnx2fc_cmd_timeout(struct work_struct *work)
{
	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
						 timeout_work.work);
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u8 cmd_type = io_req->cmd_type;
	struct bnx2fc_rport *tgt = io_req->tgt;
	int logo_issued;
	int rc;

	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
		      "req_flags = %lx\n", cmd_type, io_req->req_flags);

	spin_lock_bh(&tgt->tgt_lock);
	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
		/*
		 * Ideally we should hold the io_req until the RRQ completes,
		 * and release io_req from timeout hold.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		bnx2fc_send_rrq(io_req);
		return;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
		goto done;
	}

	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags)) {
			/* Handle eh_abort timeout */
			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
			complete(&io_req->tm_done);
		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
				    &io_req->req_flags)) {
			/* Handle internally generated ABTS timeout */
			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
				      io_req->refcount.refcount.counter);
			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					       &io_req->req_flags))) {

				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						   "logo - tgt flags = 0x%lx\n",
						   tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/* Handle IO timeout */
			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
					     &io_req->req_flags)) {
				BNX2FC_IO_DBG(io_req, "IO completed before "
					      "timer expiry\n");
				goto done;
			}

			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &io_req->req_flags)) {
				rc = bnx2fc_initiate_abts(io_req);
				if (rc == SUCCESS)
					goto done;
				/*
				 * Explicitly logo the target if
				 * ABTS initiation fails
				 */
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						   "logo - tgt flags = 0x%lx\n",
						   tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			} else {
				BNX2FC_IO_DBG(io_req, "IO already in "
					      "ABTS processing\n");
			}
		}
		break;
	case BNX2FC_ELS:

		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");

			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					      &io_req->req_flags)) {
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit logo "
						      "(els)\n");
					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/*
			 * Handle ELS timeout.
			 * tgt_lock is used to sync compl path and timeout
			 * path. If els compl path is processing this IO, we
			 * have nothing to do here, just release the timer hold
			 */
			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
					     &io_req->req_flags))
				goto done;

			/* Indicate to the cb_func that this ELS timed out */
			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);

			if ((io_req->cb_func) && (io_req->cb_arg)) {
				io_req->cb_func(io_req->cb_arg);
				io_req->cb_arg = NULL;
			}
		}
		break;
	default:
		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
		       cmd_type);
		break;
	}

done:
	/* release the cmd that was held when the timer was set */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
}

static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
{
	/* Called with tgt_lock held */
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/*
	 * active_cmd_queue may have other command types as well,
	 * and during flush operation, we want to error back only
	 * scsi commands.
	 */
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		return;

	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;
	if (!sc_cmd) {
		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
		       "IO(0x%x) already cleaned up\n",
		       io_req->xid);
		return;
	}
	sc_cmd->result = err_code << 16;

	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
		      sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
		      sc_cmd->allowed);
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
}

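/*
 * bnx2fc_cmd_mgr_alloc - allocate the per-hba command manager.
 *
 * The bnx2fc_cmd pointer array (indexed by xid) is carved out of the same
 * allocation as the cmd_mgr itself. Commands are pre-allocated into
 * num_possible_cpus() + 1 free lists: one per CPU for fast-path SCSI
 * commands, plus a reserve list (the last entry) for slow-path ELS/TM
 * requests.
 */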
" 225 "IO(0x%x) already cleaned up\n", 226 io_req->xid); 227 return; 228 } 229 sc_cmd->result = err_code << 16; 230 231 BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n", 232 sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries, 233 sc_cmd->allowed); 234 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); 235 sc_cmd->SCp.ptr = NULL; 236 sc_cmd->scsi_done(sc_cmd); 237 } 238 239 struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, 240 u16 min_xid, u16 max_xid) 241 { 242 struct bnx2fc_cmd_mgr *cmgr; 243 struct io_bdt *bdt_info; 244 struct bnx2fc_cmd *io_req; 245 size_t len; 246 u32 mem_size; 247 u16 xid; 248 int i; 249 int num_ios, num_pri_ios; 250 size_t bd_tbl_sz; 251 int arr_sz = num_possible_cpus() + 1; 252 253 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { 254 printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ 255 and max_xid 0x%x\n", min_xid, max_xid); 256 return NULL; 257 } 258 BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid); 259 260 num_ios = max_xid - min_xid + 1; 261 len = (num_ios * (sizeof(struct bnx2fc_cmd *))); 262 len += sizeof(struct bnx2fc_cmd_mgr); 263 264 cmgr = kzalloc(len, GFP_KERNEL); 265 if (!cmgr) { 266 printk(KERN_ERR PFX "failed to alloc cmgr\n"); 267 return NULL; 268 } 269 270 cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * 271 arr_sz, GFP_KERNEL); 272 if (!cmgr->free_list) { 273 printk(KERN_ERR PFX "failed to alloc free_list\n"); 274 goto mem_err; 275 } 276 277 cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * 278 arr_sz, GFP_KERNEL); 279 if (!cmgr->free_list_lock) { 280 printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); 281 goto mem_err; 282 } 283 284 cmgr->hba = hba; 285 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); 286 287 for (i = 0; i < arr_sz; i++) { 288 INIT_LIST_HEAD(&cmgr->free_list[i]); 289 spin_lock_init(&cmgr->free_list_lock[i]); 290 } 291 292 /* 293 * Pre-allocated pool of bnx2fc_cmds. 294 * Last entry in the free list array is the free list 295 * of slow path requests. 
	xid = BNX2FC_MIN_XID;
	num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
	for (i = 0; i < num_ios; i++) {
		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);

		if (!io_req) {
			printk(KERN_ERR PFX "failed to alloc io_req\n");
			goto mem_err;
		}

		INIT_LIST_HEAD(&io_req->link);
		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);

		io_req->xid = xid++;
		if (i < num_pri_ios)
			list_add_tail(&io_req->link,
				      &cmgr->free_list[io_req->xid %
						       num_possible_cpus()]);
		else
			list_add_tail(&io_req->link,
				      &cmgr->free_list[num_possible_cpus()]);
		io_req++;
	}

	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
	mem_size = num_ios * sizeof(struct io_bdt *);
	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
		goto mem_err;
	}

	mem_size = sizeof(struct io_bdt);
	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			printk(KERN_ERR PFX "failed to alloc "
			       "io_bdt_pool[%d]\n", i);
			goto mem_err;
		}
	}

	/* Allocate and map fcoe_bd_ctx structures */
	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						      bd_tbl_sz,
						      &bdt_info->bd_tbl_dma,
						      GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			printk(KERN_ERR PFX "failed to alloc "
			       "bdt_tbl[%d]\n", i);
			goto mem_err;
		}
	}

	return cmgr;

mem_err:
	bnx2fc_cmd_mgr_free(cmgr);
	return NULL;
}

void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct bnx2fc_hba *hba = cmgr->hba;
	size_t bd_tbl_sz;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = BNX2FC_MAX_XID;
	int num_ios;
	int i;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bd_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
					  bdt_info->bd_tbl,
					  bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	kfree(cmgr->free_list_lock);

	/* Destroy cmd pool */
	if (!cmgr->free_list)
		goto free_cmgr;

	for (i = 0; i < num_possible_cpus() + 1; i++) {
		struct list_head *list;
		struct list_head *tmp;

		list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
			struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
			list_del(&io_req->link);
			kfree(io_req);
		}
	}
	kfree(cmgr->free_list);
free_cmgr:
	/* Free command manager itself */
	kfree(cmgr);
}

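/*
 * bnx2fc_elstm_alloc - allocate a bnx2fc_cmd for an ELS/TM (slow path)
 * request.
 *
 * Slow path commands come from the reserve free list at index
 * RESERVE_FREE_LIST_INDEX (the last entry of the free list array), and the
 * send queue headroom check uses BNX2FC_TM_MAX_SQES or BNX2FC_ELS_MAX_SQES
 * depending on @type.
 */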
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	int index = RESERVE_FREE_LIST_INDEX;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;

	max_sqes = tgt->max_sqes;
	switch (type) {
	case BNX2FC_TASK_MGMT_CMD:
		max_sqes = BNX2FC_TM_MAX_SQES;
		break;
	case BNX2FC_ELS:
		max_sqes = BNX2FC_ELS_MAX_SQES;
		break;
	default:
		break;
	}

	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
			       "ios(%d):sqes(%d)\n",
			       tgt->num_active_ios.counter, tgt->max_sqes);
		if (list_empty(&(cmd_mgr->free_list[index])))
			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;
	io_req->cmd_type = type;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}

struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;
	int index = get_cpu();

	max_sqes = BNX2FC_SCSI_MAX_SQES;
	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&cmd_mgr->free_list[index])) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		put_cpu();
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
	put_cpu();

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}

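/*
 * bnx2fc_cmd_release - kref release callback for a bnx2fc_cmd.
 *
 * Returns the command to the free list it was allocated from (per-CPU
 * list for SCSI commands, reserve list otherwise), freeing any
 * middle-path resources first. Called when the last reference is dropped.
 */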
void bnx2fc_cmd_release(struct kref *ref)
{
	struct bnx2fc_cmd *io_req = container_of(ref,
						 struct bnx2fc_cmd, refcount);
	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	int index;

	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		index = io_req->xid % num_possible_cpus();
	else
		index = RESERVE_FREE_LIST_INDEX;

	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		bnx2fc_free_mp_resc(io_req);
	cmd_mgr->cmds[io_req->xid] = NULL;
	/* Delete IO from retire queue */
	list_del_init(&io_req->link);
	/* Add it to the free list */
	list_add(&io_req->link,
		 &cmd_mgr->free_list[index]);
	atomic_dec(&io_req->tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
}

static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	size_t sz = sizeof(struct fcoe_bd_ctx);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_req_bd,
				  mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_resp_bd,
				  mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->req_buf,
				  mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->resp_buf,
				  mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

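/*
 * bnx2fc_init_mp_req - set up DMA resources for a middle-path request.
 *
 * Allocates page-sized request/response buffers and a single BD for each,
 * since a TM or ELS payload is assumed to fit in one BD entry. Returns
 * SUCCESS or FAILED; on failure any partial allocations are released via
 * bnx2fc_free_mp_resc().
 */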
int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req;
	struct fcoe_bd_ctx *mp_req_bd;
	struct fcoe_bd_ctx *mp_resp_bd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	dma_addr_t addr;
	size_t sz;

	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));

	mp_req->req_len = sizeof(struct fcp_cmnd);
	io_req->data_xfer_len = mp_req->req_len;
	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					     &mp_req->req_buf_dma,
					     GFP_ATOMIC);
	if (!mp_req->req_buf) {
		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					      &mp_req->resp_buf_dma,
					      GFP_ATOMIC);
	if (!mp_req->resp_buf) {
		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	memset(mp_req->req_buf, 0, PAGE_SIZE);
	memset(mp_req->resp_buf, 0, PAGE_SIZE);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_bd_ctx);
	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
					       &mp_req->mp_req_bd_dma,
					       GFP_ATOMIC);
	if (!mp_req->mp_req_bd) {
		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						&mp_req->mp_resp_bd_dma,
						GFP_ATOMIC);
	if (!mp_req->mp_resp_bd) {
		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_req_bd->buf_len = PAGE_SIZE;
	mp_req_bd->flags = 0;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_resp_bd->buf_len = PAGE_SIZE;
	mp_resp_bd->flags = 0;

	return SUCCESS;
}

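/*
 * bnx2fc_initiate_tmf - send a SCSI task management request.
 *
 * Builds an FCP_CMND with the given tm_flags in the middle-path request
 * buffer, posts it to the send queue, and waits up to BNX2FC_TM_TIMEOUT
 * seconds for bnx2fc_process_tm_compl() to complete tm_done. Command
 * allocation is retried for up to a second before giving up.
 */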
static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_lport *lport;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fcoe_port *port;
	struct bnx2fc_interface *interface;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_mp_req *tm_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_frame_header *fc_hdr;
	struct fcp_cmnd *fcp_cmnd;
	int task_idx, index;
	int rc = SUCCESS;
	u16 xid;
	u32 sid, did;
	unsigned long start = jiffies;

	lport = shost_priv(host);
	port = lport_priv(lport);
	interface = port->priv;

	if (rport == NULL) {
		printk(KERN_ERR PFX "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "device_reset: link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}
	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
		rc = FAILED;
		goto tmf_err;
	}
retry_tmf:
	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
	if (!io_req) {
		if (time_after(jiffies, start + HZ)) {
			printk(KERN_ERR PFX "tmf: Failed TMF\n");
			rc = FAILED;
			goto tmf_err;
		}
		msleep(20);
		goto retry_tmf;
	}
	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->port = port;
	io_req->tgt = tgt;

	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);

	rc = bnx2fc_init_mp_req(io_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		goto tmf_err;
	}

	/* Set TM flags */
	io_req->io_req_flags = 0;
	tm_req->tm_flags = tm_flags;

	/* Fill FCP_CMND */
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
	memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
	fcp_cmnd->fc_dl = 0;

	/* Fill FC header */
	fc_hdr = &(tm_req->req_fc_hdr);
	sid = tgt->sid;
	did = rport->port_id;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
			 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = io_req->xid;

	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(io_req, task);

	sc_cmd->SCp.ptr = (char *)io_req;

	/* Obtain free SQ entry */
	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_tm_queue */
	io_req->on_tmf_queue = 1;
	list_add_tail(&io_req->link, &tgt->active_tm_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

	rc = wait_for_completion_timeout(&io_req->tm_done,
					 BNX2FC_TM_TIMEOUT * HZ);
	spin_lock_bh(&tgt->tgt_lock);

	io_req->wait_for_comp = 0;
	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);

	spin_unlock_bh(&tgt->tgt_lock);

	if (!rc) {
		BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
		rc = SUCCESS;
	}
tmf_err:
	return rc;
}

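/*
 * bnx2fc_initiate_abts - send an ABTS for the exchange owned by @io_req.
 *
 * The BLS frame reuses the OX_ID of the original exchange and the RX_ID
 * from its task context. The 2 * R_A_TOV timer is armed on the original
 * io_req, since the completion CQE is posted against the original
 * exchange. Called with tgt_lock held.
 */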
int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport *rport = tgt->rport;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *abts_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fc_frame_header *fc_hdr;
	struct bnx2fc_mp_req *abts_req;
	int task_idx, index;
	u32 sid, did;
	u16 xid;
	int rc = SUCCESS;
	u32 r_a_tov = rdata->r_a_tov;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
		rc = FAILED;
		goto abts_err;
	}

	if (rport == NULL) {
		printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
		rc = FAILED;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
		rc = FAILED;
		goto abts_err;
	}

	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
	if (!abts_io_req) {
		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
		rc = FAILED;
		goto abts_err;
	}

	/* Initialize rest of io_req fields */
	abts_io_req->sc_cmd = NULL;
	abts_io_req->port = port;
	abts_io_req->tgt = tgt;
	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */

	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));

	/* Fill FC header */
	fc_hdr = &(abts_req->req_fc_hdr);

	/* Obtain oxid and rxid for the original exchange to be aborted */
	fc_hdr->fh_ox_id = htons(io_req->xid);
	fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);

	sid = tgt->sid;
	did = rport->port_id;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
			 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	xid = abts_io_req->xid;
	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(abts_io_req, task);

	/*
	 * ABTS task is a temporary task that will be cleaned up
	 * irrespective of ABTS response. We need to start the timer
	 * for the original exchange, as the CQE is posted for the original
	 * IO request.
	 *
	 * Timer for ABTS is started only when it is originated by a
	 * TM request. For the ABTS issued as part of ULP timeout,
	 * scsi-ml maintains the timers.
	 */

	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

abts_err:
	return rc;
}

int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
				enum fc_rctl r_ctl)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *seq_clnp_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	int task_idx, index;
	u16 xid;
	int rc = 0;

	BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
		      orig_io_req->xid);
	kref_get(&orig_io_req->refcount);

	port = orig_io_req->port;
	interface = port->priv;
	lport = port->lport;

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
		rc = -ENOMEM;
		goto cleanup_err;
	}

	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
	if (!seq_clnp_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -ENOMEM;
		kfree(cb_arg);
		goto cleanup_err;
	}
	/* Initialize rest of io_req fields */
	seq_clnp_req->sc_cmd = NULL;
	seq_clnp_req->port = port;
	seq_clnp_req->tgt = tgt;
	seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = seq_clnp_req->xid;

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	cb_arg->aborted_io_req = orig_io_req;
	cb_arg->io_req = seq_clnp_req;
	cb_arg->r_ctl = r_ctl;
	cb_arg->offset = offset;
	seq_clnp_req->cb_arg = cb_arg;

	printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
	bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
cleanup_err:
	return rc;
}

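/*
 * bnx2fc_initiate_cleanup - post a firmware CLEANUP task for @io_req.
 *
 * A separate slow-path command is allocated to carry the cleanup task;
 * the original exchange is identified by its xid. Called with tgt_lock
 * held.
 */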
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *cleanup_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int task_idx, index;
	u16 xid, orig_xid;
	int rc = 0;

	/* ASSUMPTION: called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
	if (!cleanup_io_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -1;
		goto cleanup_err;
	}

	/* Initialize rest of io_req fields */
	cleanup_io_req->sc_cmd = NULL;
	cleanup_io_req->port = port;
	cleanup_io_req->tgt = tgt;
	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = cleanup_io_req->xid;

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	orig_xid = io_req->xid;

	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);

	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

cleanup_err:
	return rc;
}

/**
 * bnx2fc_eh_target_reset - Reset a target
 *
 * @sc_cmd: SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 * and wait for the response
 */
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}

/**
 * bnx2fc_eh_device_reset - Reset a single LUN
 *
 * @sc_cmd: SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 * and wait for the response
 */
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}

/**
 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
 *			SCSI command
 *
 * @sc_cmd: SCSI_ML command pointer
 *
 * SCSI abort request handler
 */
int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_cmd *io_req;
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt;
	int rc = FAILED;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	lport = shost_priv(sc_cmd->device->host);
	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		printk(KERN_ERR PFX "eh_abort: link not ready\n");
		return rc;
	}

	tgt = (struct bnx2fc_rport *)&rp[1];

	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");

	spin_lock_bh(&tgt->tgt_lock);
	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
	if (!io_req) {
		/* Command might have just completed */
		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
		      io_req->refcount.refcount.counter);

	/* Hold IO request across abort processing */
	kref_get(&io_req->refcount);

	BUG_ON(tgt != io_req->tgt);

	/* Remove the io_req from the active_q. */
	/*
	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
	 * issue an ABTS on this particular IO req, as the
	 * io_req is no longer in the active_q.
	 */
	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "flush in progress\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	if (io_req->on_active_queue == 0) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "not on active_q\n", io_req->xid);
		/*
		 * This condition can happen only due to the FW bug,
		 * where we do not receive cleanup response from
		 * the FW. Handle this case gracefully by erroring
		 * back the IO request to SCSI-ml
		 */
		bnx2fc_scsi_done(io_req, DID_ABORT);

		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	/*
	 * Only eh_abort processing will remove the IO from
	 * active_cmd_q before processing the request. This is
	 * done to avoid race conditions between IOs aborted
	 * as part of task management completion and eh_abort
	 * processing
	 */
	list_del_init(&io_req->link);
	io_req->on_active_queue = 0;
	/* Move IO req to retire queue */
	list_add_tail(&io_req->link, &tgt->io_retire_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
		/* Cancel the current timer running on this io_req */
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
		rc = bnx2fc_initiate_abts(io_req);
	} else {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "already in abts processing\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	if (rc == FAILED) {
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return rc;
	}
	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_comp = 0;
	if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			       &io_req->req_flags))) {
		/* Let the scsi-ml try to recover this command */
		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
		       io_req->xid);
		rc = FAILED;
	} else {
		/*
		 * We come here even when there was a race condition
		 * between timeout and abts completion, and abts
		 * completion happens just in time.
		 */
		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
		rc = SUCCESS;
		bnx2fc_scsi_done(io_req, DID_ABORT);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	/* release the reference taken in eh_abort */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
	return rc;
}

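/*
 * bnx2fc_process_seq_cleanup_compl - handle a sequence cleanup completion.
 *
 * Unless the firmware ignored the cleanup request, sends an SRR for the
 * original exchange at the recorded offset/r_ctl. tgt_lock is dropped
 * around bnx2fc_send_srr() and re-acquired, as the caller holds it.
 */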
void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
				      struct fcoe_task_ctx_entry *task,
				      u8 rx_state)
{
	struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
	struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
	u32 offset = cb_arg->offset;
	enum fc_rctl r_ctl = cb_arg->r_ctl;
	int rc = 0;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;

	BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x, "
		      "cmd_type = %d\n",
		      seq_clnp_req->xid, seq_clnp_req->cmd_type);

	if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
		printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
		       seq_clnp_req->xid);
		goto free_cb_arg;
	}
	kref_get(&orig_io_req->refcount);

	spin_unlock_bh(&tgt->tgt_lock);
	rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
	spin_lock_bh(&tgt->tgt_lock);

	if (rc)
		printk(KERN_ERR PFX "clnup_compl: Unable to send SRR, "
		       "IO will abort\n");
	seq_clnp_req->cb_arg = NULL;
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
free_cb_arg:
	kfree(cb_arg);
	return;
}

void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
				  struct fcoe_task_ctx_entry *task,
				  u8 num_rq)
{
	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->refcount.refcount.counter, io_req->cmd_type);
	bnx2fc_scsi_done(io_req, DID_ERROR);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}

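/*
 * bnx2fc_process_abts_compl - handle the ABTS completion CQE.
 *
 * Resolves the race with the timeout handler via BNX2FC_FLAG_ABTS_DONE.
 * On BA_ACC the exchange is retired and an RRQ is scheduled after
 * R_A_TOV via the command timer; eh_abort waiters are woken through
 * tm_done, otherwise the IO is errored back to scsi-ml.
 */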
void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
			       struct fcoe_task_ctx_entry *task,
			       u8 num_rq)
{
	u32 r_ctl;
	u32 r_a_tov = FC_DEF_R_A_TOV;
	u8 issue_rrq = 0;
	struct bnx2fc_rport *tgt = io_req->tgt;

	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x, "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->xid,
		      io_req->refcount.refcount.counter, io_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			     &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
			      " this io\n");
		return;
	}

	/* Do not issue RRQ as this IO is already cleaned up */
	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
			     &io_req->req_flags))
		goto io_compl;

	/*
	 * For ABTS issued due to SCSI eh_abort_handler, timeout
	 * values are maintained by scsi-ml itself. Cancel timeout
	 * in case ABTS issued as part of task management function
	 * or due to FW error.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->tm_done);
	} else {
		/*
		 * We end up here when ABTS is issued as
		 * in asynchronous context, i.e., as part
		 * of task management completion, or
		 * when FW error is received or when the
		 * ABTS is issued when the IO is timed
		 * out.
		 */

		if (io_req->on_active_queue) {
			list_del_init(&io_req->link);
			io_req->on_active_queue = 0;
			/* Move IO req to retire queue */
			list_add_tail(&io_req->link, &tgt->io_retire_queue);
		}
		bnx2fc_scsi_done(io_req, DID_ERROR);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
}

static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2fc_cmd *cmd;
	int tm_lun = sc_cmd->device->lun;
	int rc = 0;
	int lun;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
	/*
	 * Walk through the active_ios queue and ABORT the IOs
	 * that match the LUN that was reset
	 */
	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
		cmd = (struct bnx2fc_cmd *)list;
		lun = cmd->sc_cmd->device->lun;
		if (lun == tm_lun) {
			/* Initiate ABTS on this cmd */
			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &cmd->req_flags)) {
				/* cancel the IO timeout */
				if (cancel_delayed_work(&io_req->timeout_work))
					kref_put(&io_req->refcount,
						 bnx2fc_cmd_release);
							/* timer hold */
				rc = bnx2fc_initiate_abts(cmd);
				/* abts shouldn't fail in this context */
				WARN_ON(rc != SUCCESS);
			} else
				printk(KERN_ERR PFX "lun_rst: abts already in"
				       " progress for this IO 0x%x\n",
				       cmd->xid);
		}
	}
}

static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2fc_cmd *cmd;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
	/*
	 * Walk through the active_ios queue and ABORT every IO
	 * on the target that was reset
	 */
	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
		cmd = (struct bnx2fc_cmd *)list;
		/* Initiate ABTS */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &cmd->req_flags)) {
			/* cancel the IO timeout */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */
			rc = bnx2fc_initiate_abts(cmd);
			/* abts shouldn't fail in this context */
			WARN_ON(rc != SUCCESS);

		} else
			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
			       " for this IO 0x%x\n", cmd->xid);
	}
}

void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
			     struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *tm_req;
	struct fc_frame_header *fc_hdr;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	u64 *hdr;
	u64 *temp_hdr;
	void *rsp_buf;

	/* Called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");

	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
	else {
		/* TM has already timed out and we got
		 * delayed completion. Ignore completion
		 * processing.
		 */
		return;
	}

	tm_req = &(io_req->mp_req);
	fc_hdr = &(tm_req->resp_fc_hdr);
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	tm_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	rsp_buf = tm_req->resp_buf;

	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
		bnx2fc_parse_fcp_rsp(io_req,
				     (struct fcoe_fcp_rsp_payload *)
				     rsp_buf, num_rq);
		if (io_req->fcp_rsp_code == 0) {
			/* TM successful */
			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
				bnx2fc_lun_reset_cmpl(io_req);
			else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
				bnx2fc_tgt_reset_cmpl(io_req);
		}
	} else {
		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
		       fc_hdr->fh_r_ctl);
	}
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
		return;
	}
	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;

	default:
		BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
			      io_req->fcp_status);
		break;
	}

	sc_cmd = io_req->sc_cmd;
	io_req->sc_cmd = NULL;

	/* check if the io_req exists in tgt's tmf_q */
	if (io_req->on_tmf_queue) {

		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
	} else {

		printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
		return;
	}

	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);

	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp) {
		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
		complete(&io_req->tm_done);
	}
}

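/*
 * bnx2fc_split_bd - split one SG element into multiple BDs.
 *
 * Each hardware BD carries a 16-bit length, so an SG element longer than
 * BNX2FC_MAX_BD_LEN is chopped into BNX2FC_BD_SPLIT_SZ sized pieces
 * starting at @bd_index; the number of BDs written is returned. For
 * example, assuming a 32KB split size, a 100KB element would produce
 * four BDs (32K + 32K + 32K + 4K).
 */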
static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index)
{
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len >= BNX2FC_BD_SPLIT_SZ)
			frag_size = BNX2FC_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
		bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
		bd[bd_index + sg_frags].buf_len = (u16)frag_size;
		bd[bd_index + sg_frags].flags = 0;

		addr += (u64) frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;
}

static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr;
	int i;

	sg_count = scsi_dma_map(sc);
	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = sg_dma_address(sg);
		if (sg_len > BNX2FC_MAX_BD_LEN) {
			sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
						   bd_count);
		} else {

			sg_frags = 1;
			bd[bd_count].buf_addr_lo = addr & 0xffffffff;
			bd[bd_count].buf_addr_hi = addr >> 32;
			bd[bd_count].buf_len = (u16)sg_len;
			bd[bd_count].flags = 0;
		}
		bd_count += sg_frags;
		byte_count += sg_len;
	}
	if (byte_count != scsi_bufflen(sc))
		printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
		       "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
		       io_req->xid);
	return bd_count;
}

static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc))
		bd_count = bnx2fc_map_sg(io_req);
	else {
		bd_count = 0;
		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
		bd[0].buf_len = bd[0].flags = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;
}

static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc) {
		scsi_dma_unmap(sc);
		io_req->bd_tbl->bd_valid = 0;
	}
}

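/*
 * bnx2fc_build_fcp_cmnd - fill an FCP_CMND payload from the SCSI command.
 *
 * Copies the LUN, data length and CDB, carries the TM flags from the
 * middle-path request, and maps the block-layer tag message to the FCP
 * task attribute (HEAD OF QUEUE, ORDERED or SIMPLE).
 */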
void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
			   struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	char tag[2];

	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));

	int_to_scsilun(sc_cmd->device->lun,
		       (struct scsi_lun *) fcp_cmnd->fc_lun);

	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	fcp_cmnd->fc_cmdref = 0;
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;

	if (scsi_populate_tag_msg(sc_cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
			break;
		default:
			fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
			break;
		}
	} else {
		fcp_cmnd->fc_pri_ta = 0;
	}
}

static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
	u32 rq_buff_len = 0;
	int i;
	unsigned char *rq_data;
	unsigned char *dummy;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
				fcp_rsp->scsi_status_code;

	/* Fetch fcp_rsp_info and fcp_sns_info if available */
	if (num_rq) {

		/*
		 * We do not anticipate num_rq > 1, as the linux defined
		 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of
		 * FCP_RSP_INFO; 256 bytes of single rq buffer is good
		 * enough to hold this.
		 */

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
			fcp_rsp_len = rq_buff_len
					= fcp_rsp->fcp_rsp_len;
		}

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
			fcp_sns_len = fcp_rsp->fcp_sns_len;
			rq_buff_len += fcp_rsp->fcp_sns_len;
		}

		io_req->fcp_rsp_len = fcp_rsp_len;
		io_req->fcp_sns_len = fcp_sns_len;

		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
			/* Invalid sense length. */
			printk(KERN_ERR PFX "invalid sns length %d\n",
			       rq_buff_len);
			/* reset rq_buff_len */
			rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
		}

		rq_data = bnx2fc_get_next_rqe(tgt, 1);

		if (num_rq > 1) {
			/* We do not need extra sense data */
			for (i = 1; i < num_rq; i++)
				dummy = bnx2fc_get_next_rqe(tgt, 1);
		}

		/* fetch fcp_rsp_code */
		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
			/* Only for task management function */
			io_req->fcp_rsp_code = rq_data[3];
			printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
			       io_req->fcp_rsp_code);
		}

		/* fetch sense data */
		rq_data += fcp_rsp_len;

		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
			printk(KERN_ERR PFX "Truncating sense buffer\n");
			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
		}

		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);

		/* return RQ entries */
		for (i = 0; i < num_rq; i++)
			bnx2fc_return_rqe(tgt, 1);
	}
}

/**
 * bnx2fc_queuecommand - Queuecommand function of the scsi template
 *
 * @host: The Scsi_Host the command was issued to
 * @sc_cmd: struct scsi_cmnd to be executed
 *
 * This is the IO strategy routine, called by SCSI-ML
 **/
int bnx2fc_queuecommand(struct Scsi_Host *host,
			struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	int rc = 0;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		if (test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags)) {
			sc_cmd->result = DID_NO_CONNECT << 16;
			sc_cmd->scsi_done(sc_cmd);
			return 0;

		}
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}

	io_req = bnx2fc_cmd_alloc(tgt);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}
	io_req->sc_cmd = sc_cmd;

	if (bnx2fc_post_io_req(tgt, io_req)) {
		printk(KERN_ERR PFX "Unable to post io_req\n");
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}
exit_qcmd:
	return rc;
}

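/*
 * bnx2fc_process_scsi_cmd_compl - complete a fast-path SCSI command.
 *
 * Called with tgt_lock held. Cancels any pending command timer, parses
 * the FCP_RSP from the task context (and RQ, if sense data is present),
 * moves the request to the retire queue and completes it to scsi-ml.
 */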
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
				   struct fcoe_task_ctx_entry *task,
				   u8 num_rq)
{
	struct fcoe_fcp_rsp_payload *fcp_rsp;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct scsi_cmnd *sc_cmd;
	struct Scsi_Host *host;

	/* scsi_cmd_cmpl is called with tgt lock held */

	if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		/* we will not receive ABTS response for this IO */
		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
			      "this scsi cmd\n");
	}

	/* Cancel the timeout_work, as we received IO completion */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	sc_cmd = io_req->sc_cmd;
	if (sc_cmd == NULL) {
		printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
		return;
	}

	/* Fetch fcp_rsp from task context and perform cmd completion */
	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
		  &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);

	/* parse fcp_rsp and obtain sense data from RQ if available */
	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);

	host = sc_cmd->device->host;
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
		return;
	}

	if (io_req->on_active_queue) {
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		/* Move IO req to retire queue */
		list_add_tail(&io_req->link, &tgt->io_retire_queue);
	} else {
		/* This should not happen, but could have been pulled
		 * by bnx2fc_flush_active_ios(), or during a race
		 * between command abort and (late) completion.
		 */
		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
		if (io_req->wait_for_comp)
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags))
				complete(&io_req->tm_done);
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
				      " fcp_resid = 0x%x\n",
				      io_req->cdb_status, io_req->fcp_resid);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
		       io_req->fcp_status);
		break;
	}
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}

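/*
 * bnx2fc_post_io_req - post a SCSI command to the target's send queue.
 *
 * Fills the remaining io_req fields, updates the libfc per-CPU stats,
 * builds the BD list from the scatter-gather list and initializes the
 * task context, then (under tgt_lock) queues the request on
 * active_cmd_queue, adds it to the SQ and rings the doorbell. Fails with
 * -EAGAIN if a flush is in progress or the session is not ready.
 */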
static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
			      struct bnx2fc_cmd *io_req)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_lport *lport = port->lport;
	struct fcoe_dev_stats *stats;
	int task_idx, index;
	u16 xid;

	/* Initialize rest of io_req fields */
	io_req->cmd_type = BNX2FC_SCSI_CMD;
	io_req->port = port;
	io_req->tgt = tgt;
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;

	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		io_req->io_req_flags = BNX2FC_READ;
		stats->InputRequests++;
		stats->InputBytes += io_req->data_xfer_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		io_req->io_req_flags = BNX2FC_WRITE;
		stats->OutputRequests++;
		stats->OutputBytes += io_req->data_xfer_len;
	} else {
		io_req->io_req_flags = 0;
		stats->ControlRequests++;
	}
	put_cpu();

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	bnx2fc_build_bd_list_from_sg(io_req);

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_task(io_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "Session not ready...post_io\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	/* Time IO req */
	if (tgt->io_timeout)
		bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_cmd_queue */

	io_req->on_active_queue = 1;
	/* move io_req from pending_queue to active_queue */
	list_add_tail(&io_req->link, &tgt->active_cmd_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);
	return 0;
}