/*
 * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * This file contains helper routines that handle ELS requests
 * and responses.
 *
 * Copyright (c) 2008 - 2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg);
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg);
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			       void *data, u32 data_len,
			       void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			       struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);

static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req;
	struct bnx2fc_cmd *rrq_req;
	int rc = 0;

	BUG_ON(!cb_arg);
	rrq_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BUG_ON(!orig_io_req);
	BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
		       orig_io_req->xid, rrq_req->xid);

	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);

	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. Remove from active_cmd_queue.
		 */
		BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
			       rrq_req->xid);

		if (rrq_req->on_active_queue) {
			list_del_init(&rrq_req->link);
			rrq_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(rrq_req);
			BUG_ON(rc);
		}
	}
	kfree(cb_arg);
}

int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct bnx2fc_rport *tgt = aborted_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	unsigned long start = jiffies;
	int rc;

	BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
		       aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);

retry_rrq:
	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
				 bnx2fc_rrq_compl, cb_arg,
				 r_a_tov);
	if (rc == -ENOMEM) {
		if (time_after(jiffies, start + (10 * HZ))) {
			BNX2FC_ELS_DBG("rrq Failed\n");
			rc = FAILED;
			goto rrq_err;
		}
		msleep(20);
		goto retry_rrq;
	}
rrq_err:
	if (rc) {
		BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
			       aborted_io_req->xid);
		kfree(cb_arg);
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
	}
	return rc;
}
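
/*
 * bnx2fc_l2_els_compl - completion handler for ELS requests relayed from
 * libfc (ADISC, LOGO, RLS).
 *
 * Reassembles the response FC header and payload into a single buffer and
 * hands it to bnx2fc_process_l2_frame_compl() along with the original libfc
 * OX_ID. A timed-out request is instead cleaned up with the firmware and the
 * completion is dropped.
 */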
static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u16 l2_oxid;
	int frame_len;
	int rc = 0;

	l2_oxid = cb_arg->l2_oxid;
	BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);

	els_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. libfc will handle the els timeout
		 */
		if (els_req->on_active_queue) {
			list_del_init(&els_req->link);
			els_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(els_req);
			BUG_ON(rc);
		}
		goto free_arg;
	}

	tgt = els_req->tgt;
	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "Unable to alloc mp buf\n");
		goto free_arg;
	}
	hdr_len = sizeof(*fc_hdr);
	if (hdr_len + resp_len > PAGE_SIZE) {
		printk(KERN_ERR PFX "l2_els_compl: resp len is "
				    "beyond page size\n");
		goto free_buf;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);
	frame_len = hdr_len + resp_len;

	bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);

free_buf:
	kfree(buf);
free_arg:
	kfree(cb_arg);
}

int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	/* adisc is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}

int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_logo *logo;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
	logo = fc_frame_payload_get(fp, sizeof(*logo));
	/* logo is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}
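
/*
 * bnx2fc_send_rls - offload an RLS request that libfc has already built.
 *
 * The OX_ID of the libfc frame is saved in the callback argument so that
 * bnx2fc_l2_els_compl() can route the response back to the originating
 * libfc exchange.
 */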
int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_rls *rls;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RLS\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	rls = fc_frame_payload_get(fp, sizeof(*rls));
	/* rls is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}
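
/*
 * bnx2fc_srr_compl - completion handler for an offloaded SRR request.
 *
 * A timed-out SRR is aborted and resent up to SRR_RETRY_COUNT times; once
 * the retries are exhausted, or the target rejects the SRR, the original
 * I/O request is aborted instead. tgt->tgt_lock is dropped temporarily
 * around the SRR resend.
 */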
void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct bnx2fc_cmd *srr_req;
	struct bnx2fc_cmd *orig_io_req;
	struct fc_frame *fp;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u8 opcode;
	int rc = 0;

	orig_io_req = cb_arg->aborted_io_req;
	srr_req = cb_arg->io_req;
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed",
			      orig_io_req->xid);
		goto srr_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(srr_req, "rec abts in prog "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		goto srr_compl_done;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
		/* SRR timed out */
		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		rc = bnx2fc_initiate_abts(srr_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				      "failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(srr_req);
		}
		orig_io_req->srr_retry++;
		if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
			struct bnx2fc_rport *tgt = orig_io_req->tgt;
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req,
					     orig_io_req->srr_offset,
					     orig_io_req->srr_rctl);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto srr_compl_done;
		}

		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				      "failed xid = 0x%x. issue cleanup\n",
				      orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto srr_compl_done;
	}
	mp_req = &(srr_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);
	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
		goto srr_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		BNX2FC_IO_DBG(srr_req, "SRR success\n");
		break;
	case ELS_LS_RJT:
		BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				      "failed xid = 0x%x. issue cleanup\n",
				      orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		break;
	default:
		BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
			      opcode);
		break;
	}
	fc_frame_free(fp);
free_buf:
	kfree(buf);
srr_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
}
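
/*
 * bnx2fc_rec_compl - completion handler for an offloaded REC request.
 *
 * Uses the REC response to recover a hung exchange: a reject with an
 * unknown OX_ID/RX_ID means the command itself was lost, so the original
 * request is cleaned up and reissued with the same scsi_cmnd; an accept is
 * examined to decide whether to retransmit data via sequence cleanup,
 * request the FCP_RSP again via SRR, or abort the original I/O.
 */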
void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req, *new_io_req;
	struct bnx2fc_cmd *rec_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	struct bnx2fc_rport *tgt;
	struct fcoe_err_report_entry *err_entry;
	struct scsi_cmnd *sc_cmd;
	enum fc_rctl r_ctl;
	unsigned char *buf;
	void *resp_buf;
	struct fc_frame *fp;
	u8 opcode;
	u32 offset;
	u32 e_stat;
	u32 resp_len, hdr_len;
	int rc = 0;
	bool send_seq_clnp = false;
	bool abort_io = false;

	BNX2FC_MISC_DBG("Entered rec_compl callback\n");
	rec_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
	tgt = orig_io_req->tgt;

	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "completed "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		goto rec_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "abts in prog "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		goto rec_compl_done;
	}
	/* Handle REC timeout case */
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "timed out, abort "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		/* els req is timed out. send abts for els */
		rc = bnx2fc_initiate_abts(rec_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				      "failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(rec_req);
		}
		orig_io_req->rec_retry++;
		/* REC timed out. Retry, then send ABTS to the orig IO req */
		if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_rec(orig_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto rec_compl_done;
		}
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				      "failed xid = 0x%x. issue cleanup\n",
				      orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto rec_compl_done;
	}
	mp_req = &(rec_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
		goto rec_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		     rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
			new_io_req = bnx2fc_cmd_alloc(tgt);
			if (!new_io_req)
				goto abort_io;
			new_io_req->sc_cmd = orig_io_req->sc_cmd;
			/* cleanup orig_io_req that is with the FW */
			set_bit(BNX2FC_FLAG_CMD_LOST,
				&orig_io_req->req_flags);
			bnx2fc_initiate_cleanup(orig_io_req);
			/* Post a new IO req with the same sc_cmd */
			BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_post_io_req(tgt, new_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto free_frame;
			BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
		}
abort_io:
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				      "failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		/* REVISIT: Check if the exchange is already aborted */
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
			goto free_frame;
		}
		BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
			      e_stat, offset);
		/* Seq initiative is with us */
		err_entry = (struct fcoe_err_report_entry *)
			     &orig_io_req->err_entry;
		sc_cmd = orig_io_req->sc_cmd;
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			/* SCSI WRITE command */
			if (offset == orig_io_req->data_xfer_len) {
				BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
				/* FCP_RSP lost */
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				/* start transmitting from offset */
				BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
				send_seq_clnp = true;
				r_ctl = FC_RCTL_DD_DATA_DESC;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
				/* XFER_RDY */
			}
		} else {
			/* SCSI READ command */
			if (err_entry->data.rx_buf_off ==
					orig_io_req->data_xfer_len) {
				/* FCP_RSP lost */
				BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				/* request retransmission from this offset */
				send_seq_clnp = true;
				offset = err_entry->data.rx_buf_off;
				BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
				/* FCP_DATA lost */
				r_ctl = FC_RCTL_DD_SOL_DATA;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
			}
		}
		if (abort_io) {
			rc = bnx2fc_initiate_abts(orig_io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts"
					      " failed. issue cleanup\n");
				bnx2fc_initiate_cleanup(orig_io_req);
			}
		} else if (!send_seq_clnp) {
			BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
			spin_lock_bh(&tgt->tgt_lock);

			if (rc) {
				BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
					      " IO will abort\n");
			}
		}
	}
free_frame:
	fc_frame_free(fp);
free_buf:
	kfree(buf);
rec_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
	kfree(cb_arg);
}
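
/*
 * bnx2fc_send_rec - send a REC (Read Exchange Concise) for a hung exchange.
 *
 * Takes an extra reference on the original I/O request; the reference is
 * dropped here on failure, otherwise in bnx2fc_rec_compl().
 */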
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
{
	struct fc_els_rec rec;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);

	rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
				 bnx2fc_rec_compl, cb_arg,
				 r_a_tov);
rec_err:
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	}
	return rc;
}

int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;
	orig_io_req->srr_offset = offset;
	orig_io_req->srr_rctl = r_ctl;

	rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
				 bnx2fc_srr_compl, cb_arg,
				 r_a_tov);
srr_err:
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	} else
		set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);

	return rc;
}
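
/*
 * bnx2fc_initiate_els - issue an ELS request as a firmware middle-path task.
 *
 * Copies the caller-built payload into a middle-path request, fills in the
 * FC header (SRR is sent as an FC-4 ELS on the FCP type), initializes the
 * task context, queues the request to the connection's send queue with an
 * optional timer, and rings the doorbell. Returns 0 on success or a
 * negative errno.
 */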
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			       void *data, u32 data_len,
			       void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			       struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct fc_rport *rport = tgt->rport;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int rc = 0;
	int task_idx, index;
	u32 did, sid;
	u16 xid;

	rc = fc_remote_port_chkready(rport);
	if (rc) {
		printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
	     (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
	if (!els_req) {
		rc = -ENOMEM;
		goto els_err;
	}

	els_req->sc_cmd = NULL;
	els_req->port = port;
	els_req->tgt = tgt;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;

	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
	rc = bnx2fc_init_mp_req(els_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "ELS MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -ENOMEM;
		goto els_err;
	} else {
		/* rc SUCCESS */
		rc = 0;
	}

	/* Set the data_xfer_len to the size of ELS payload */
	mp_req->req_len = data_len;
	els_req->data_xfer_len = mp_req->req_len;

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = tgt->rport->port_id;
	sid = tgt->sid;

	if (op == ELS_SRR)
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
				 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	else
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
				 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(els_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EINVAL;
	}

	if (timer_msec)
		bnx2fc_cmd_timer_set(els_req, timer_msec);
	bnx2fc_add_2_sq(tgt, xid);

	els_req->on_active_queue = 1;
	list_add_tail(&els_req->link, &tgt->els_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

els_err:
	return rc;
}
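
/*
 * bnx2fc_process_els_compl - handle an ELS middle-path completion.
 *
 * Copies the response FC header and payload length out of the task context,
 * cancels the ELS timer and invokes the per-request callback, unless the
 * timer context has already claimed and completed this request.
 */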
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
			      struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	u64 *hdr;
	u64 *temp_hdr;

	BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x "
		       "cmd_type = %d\n", els_req->xid, els_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
			     &els_req->req_flags)) {
		BNX2FC_ELS_DBG("Timer context finished processing this "
			       "els - 0x%x\n", els_req->xid);
		/* This IO doesn't receive cleanup completion */
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		return;
	}

	/* Cancel the timeout_work, as we received the response */
	if (cancel_delayed_work(&els_req->timeout_work))
		kref_put(&els_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	if (els_req->on_active_queue) {
		list_del_init(&els_req->link);
		els_req->on_active_queue = 0;
	}

	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);

	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	mp_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, bnx2fc_cmd_release);
}

static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	u8 *mac;
	struct fc_frame_header *fh;
	u8 op;

	if (IS_ERR(fp))
		goto done;

	mac = fr_cb(fp)->granted_mac;
	if (is_zero_ether_addr(mac)) {
		fh = fc_frame_header_get(fp);
		if (fh->fh_type != FC_TYPE_ELS) {
			printk(KERN_ERR PFX "bnx2fc_flogi_resp: "
			       "fh_type != FC_TYPE_ELS\n");
			fc_frame_free(fp);
			return;
		}
		op = fc_frame_payload_op(fp);
		if (lport->vport) {
			if (op == ELS_LS_RJT) {
				printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
				fc_vport_terminate(lport->vport);
				fc_frame_free(fp);
				return;
			}
		}
		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
			fc_frame_free(fp);
			return;
		}
	}
	fip->update_mac(lport, mac);
done:
	fc_lport_flogi_resp(seq, fp, lport);
}

static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	static u8 zero_mac[ETH_ALEN] = { 0 };

	if (!IS_ERR(fp))
		fip->update_mac(lport, zero_mac);
	fc_lport_logo_resp(seq, fp, lport);
}
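
/*
 * bnx2fc_elsct_send - libfc elsct_send hook.
 *
 * Intercepts fabric logins (FLOGI/FDISC) and fabric logouts so the FCoE
 * controller can learn or clear the granted MAC address; every other
 * ELS/CT request is passed straight through to fc_elsct_send().
 */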
struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
				 struct fc_frame *fp, unsigned int op,
				 void (*resp)(struct fc_seq *,
					      struct fc_frame *,
					      void *),
				 void *arg, u32 timeout)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *fip = &interface->ctlr;
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	switch (op) {
	case ELS_FLOGI:
	case ELS_FDISC:
		return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
				     fip, timeout);
	case ELS_LOGO:
		/* only hook onto fabric logouts, not port logouts */
		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
			break;
		return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
				     fip, timeout);
	}
	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}