/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * Fibre Channel exchange and sequence handling.
 */

#include <linux/timer.h>
#include <linux/gfp.h>
#include <linux/err.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

u16 fc_cpu_mask;		/* cpu mask for possible cpus */
EXPORT_SYMBOL(fc_cpu_mask);
static u16 fc_cpu_order;	/* 2's power to represent total possible cpus */
static struct kmem_cache *fc_em_cachep;	/* cache for exchanges */

/*
 * Structure and function definitions for managing Fibre Channel Exchanges
 * and Sequences.
 *
 * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
 *
 * fc_exch_mgr holds the exchange state for an N port.
 *
 * fc_exch holds state for one exchange and links to its active sequence.
 *
 * fc_seq holds the state for an individual sequence.
 */

/*
 * Per-cpu exchange pool.
 *
 * This structure manages per-cpu exchanges in an array of exchange pointers.
 * The array is allocated directly after the struct fc_exch_pool memory,
 * sized for the range of exchanges assigned to the per-cpu pool.
 */
struct fc_exch_pool {
	u16		next_index;	/* next possible free exchange index */
	u16		total_exches;	/* total allocated exchanges */
	spinlock_t	lock;		/* exch pool lock */
	struct list_head ex_list;	/* allocated exchanges list */
};
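/*
 * To illustrate the layout described above (a sketch, not code used by
 * this file): the pool's exchange-pointer array lives directly after
 * the structure itself, so a slot lookup is plain pointer arithmetic,
 * which is what fc_exch_ptr_get() further below does; pool and index
 * stand for any valid pool and slot:
 *
 *	struct fc_exch **exches = (struct fc_exch **)(pool + 1);
 *	struct fc_exch *ep = exches[index];
 */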
/*
 * Exchange manager.
 *
 * This structure is the center for creating exchanges and sequences.
 * It manages the allocation of exchange IDs.
 */
struct fc_exch_mgr {
	enum fc_class	class;		/* default class for sequences */
	struct kref	kref;		/* exchange mgr reference count */
	u16		min_xid;	/* min exchange ID */
	u16		max_xid;	/* max exchange ID */
	struct list_head ex_list;	/* allocated exchanges list */
	mempool_t	*ep_pool;	/* reserve ep's */
	u16		pool_max_index;	/* max exch array index in exch pool */
	struct fc_exch_pool *pool;	/* per cpu exch pool */

	/*
	 * currently exchange mgr stats are updated but not used.
	 * either the stats can be exposed via sysfs or removed
	 * altogether if not used XXX
	 */
	struct {
		atomic_t no_free_exch;
		atomic_t no_free_exch_xid;
		atomic_t xid_not_found;
		atomic_t xid_busy;
		atomic_t seq_not_found;
		atomic_t non_bls_resp;
	} stats;
};
#define	fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)

struct fc_exch_mgr_anchor {
	struct list_head ema_list;
	struct fc_exch_mgr *mp;
	bool (*match)(struct fc_frame *);
};

static void fc_exch_rrq(struct fc_exch *);
static void fc_seq_ls_acc(struct fc_seq *);
static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
			  enum fc_els_rjt_explan);
static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);

/*
 * Internal implementation notes.
 *
 * The exchange manager is one by default in libfc but an LLD may choose
 * to have one per CPU.  The sequence manager is one per exchange manager
 * and currently never separated.
 *
 * Section 9.8 in FC-FS-2 specifies:  "The SEQ_ID is a one-byte field
 * assigned by the Sequence Initiator that shall be unique for a specific
 * D_ID and S_ID pair while the Sequence is open."  Note that it isn't
 * qualified by exchange ID, which one might think it would be.
 * In practice this limits the number of open sequences and exchanges to 256
 * per session.  For most targets we could treat this limit as per exchange.
 *
 * The exchange and its sequence are freed when the last sequence is received.
 * It's possible for the remote port to leave an exchange open without
 * sending any sequences.
 *
 * Notes on reference counts:
 *
 * Exchanges are reference counted and freed when the reference
 * count becomes zero.
 *
 * Timeouts:
 * Sequences are timed out for E_D_TOV and R_A_TOV.
 *
 * Sequence event handling:
 *
 * The following events may occur on initiator sequences:
 *
 *	Send.
 *	    For now, the whole thing is sent.
 *	Receive ACK
 *	    This applies only to class F.
 *	    The sequence is marked complete.
 *	ULP completion.
 *	    The upper layer calls fc_exch_done() when done
 *	    with the exchange and sequence tuple.
 *	RX-inferred completion.
 *	    When we receive the next sequence on the same exchange, we can
 *	    retire the previous sequence ID.  (XXX not implemented).
 *	Timeout.
 *	    R_A_TOV frees the sequence ID.  If we're waiting for ACK,
 *	    E_D_TOV causes abort and calls upper layer response handler
 *	    with FC_EX_TIMEOUT error.
 *	Receive RJT
 *	    XXX defer.
 *	Send ABTS
 *	    On timeout.
 *
 * The following events may occur on recipient sequences:
 *
 *	Receive
 *	    Allocate sequence for first frame received.
 *	    Hold during receive handler.
 *	    Release when final frame received.
 *	    Keep status of last N of these for the ELS RES command.  XXX TBD.
 *	Receive ABTS
 *	    Deallocate sequence
 *	Send RJT
 *	    Deallocate
 *
 * For now, we neglect conditions where only part of a sequence was
 * received or transmitted, or where out-of-order receipt is detected.
 */

/*
 * Locking notes:
 *
 * The EM code runs in a per-CPU worker thread.
 *
 * To protect against concurrency between a worker thread's code and timers,
 * sequence allocation and deallocation must be locked.
 *  - exchange refcnt can be done atomically without locks.
 *  - sequence allocation must be locked by the exch lock.
 *  - If the EM pool lock and ex_lock must be taken at the same time, then the
 *    EM pool lock must be taken before the ex_lock.
 */
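/*
 * A sketch of the ordering rule above (pool and ep here are
 * hypothetical locals, not code from this file): when both locks are
 * needed, nest the exchange lock inside the pool lock, never the
 * reverse:
 *
 *	spin_lock_bh(&pool->lock);
 *	spin_lock_bh(&ep->ex_lock);
 *	...
 *	spin_unlock_bh(&ep->ex_lock);
 *	spin_unlock_bh(&pool->lock);
 */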
/*
 * opcode names for debugging.
 */
static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;

#define FC_TABLE_SIZE(x)   (sizeof(x) / sizeof(x[0]))

static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
					      unsigned int max_index)
{
	const char *name = NULL;

	if (op < max_index)
		name = table[op];
	if (!name)
		name = "unknown";
	return name;
}

static const char *fc_exch_rctl_name(unsigned int op)
{
	return fc_exch_name_lookup(op, fc_exch_rctl_names,
				   FC_TABLE_SIZE(fc_exch_rctl_names));
}

/*
 * Hold an exchange - keep it from being freed.
 */
static void fc_exch_hold(struct fc_exch *ep)
{
	atomic_inc(&ep->ex_refcnt);
}

/*
 * Set up the FC header by initializing a few more header fields and sof/eof.
 * Fields initialized by this function:
 *	- fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt
 *	- sof and eof
 */
static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
			      u32 f_ctl)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	u16 fill;

	fr_sof(fp) = ep->class;
	if (ep->seq.cnt)
		fr_sof(fp) = fc_sof_normal(ep->class);

	if (f_ctl & FC_FC_END_SEQ) {
		fr_eof(fp) = FC_EOF_T;
		if (fc_sof_needs_ack(ep->class))
			fr_eof(fp) = FC_EOF_N;
		/*
		 * Form f_ctl.
		 * The number of fill bytes to make the length a 4-byte
		 * multiple is the low order 2-bits of the f_ctl.
		 * The fill itself will have been cleared by the frame
		 * allocation.
		 * After this, the length will be even, as expected by
		 * the transport.
		 */
		fill = fr_len(fp) & 3;
		if (fill) {
			fill = 4 - fill;
			/* TODO, this may be a problem with fragmented skb */
			skb_put(fp_skb(fp), fill);
			hton24(fh->fh_f_ctl, f_ctl | fill);
		}
	} else {
		WARN_ON(fr_len(fp) % 4 != 0);	/* no pad to non-last frame */
		fr_eof(fp) = FC_EOF_N;
	}

	/*
	 * Initialize remaining fh fields from fc_fill_fc_hdr.
	 */
	fh->fh_ox_id = htons(ep->oxid);
	fh->fh_rx_id = htons(ep->rxid);
	fh->fh_seq_id = ep->seq.id;
	fh->fh_seq_cnt = htons(ep->seq.cnt);
}
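/*
 * Worked example for the fill computation above (the numbers are
 * illustrative, not taken from a real trace): for a frame of length
 * 1021, fr_len(fp) & 3 == 1, so fill becomes 4 - 1 = 3.  Three pad
 * bytes are appended and the value 3 is encoded in the low two f_ctl
 * bits, letting the receiver trim the frame back to 1021 bytes.
 */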
/*
 * Release a reference to an exchange.
 * If the refcnt goes to zero and the exchange is complete, it is freed.
 */
static void fc_exch_release(struct fc_exch *ep)
{
	struct fc_exch_mgr *mp;

	if (atomic_dec_and_test(&ep->ex_refcnt)) {
		mp = ep->em;
		if (ep->destructor)
			ep->destructor(&ep->seq, ep->arg);
		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
		mempool_free(ep, mp->ep_pool);
	}
}

static int fc_exch_done_locked(struct fc_exch *ep)
{
	int rc = 1;

	/*
	 * We must check for completion in case there are two threads
	 * trying to complete this.  But the rrq code will reuse the
	 * ep, and in that case we only clear the resp and set it as
	 * complete, so it can be reused by the timer to send the rrq.
	 */
	ep->resp = NULL;
	if (ep->state & FC_EX_DONE)
		return rc;
	ep->esb_stat |= ESB_ST_COMPLETE;

	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
		ep->state |= FC_EX_DONE;
		if (cancel_delayed_work(&ep->timeout_work))
			atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
		rc = 0;
	}
	return rc;
}

static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
					      u16 index)
{
	struct fc_exch **exches = (struct fc_exch **)(pool + 1);
	return exches[index];
}

static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
				   struct fc_exch *ep)
{
	((struct fc_exch **)(pool + 1))[index] = ep;
}

static void fc_exch_delete(struct fc_exch *ep)
{
	struct fc_exch_pool *pool;

	pool = ep->pool;
	spin_lock_bh(&pool->lock);
	WARN_ON(pool->total_exches <= 0);
	pool->total_exches--;
	fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order,
			NULL);
	list_del(&ep->ex_list);
	spin_unlock_bh(&pool->lock);
	fc_exch_release(ep);	/* drop hold for exch in mp */
}

/*
 * Internal version of fc_exch_timer_set - used with lock held.
 */
static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
					    unsigned int timer_msec)
{
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		return;

	FC_EXCH_DBG(ep, "Exchange timer armed\n");

	if (schedule_delayed_work(&ep->timeout_work,
				  msecs_to_jiffies(timer_msec)))
		fc_exch_hold(ep);		/* hold for timer */
}

/*
 * Set timer for an exchange.
 * The time is a minimum delay in milliseconds until the timer fires.
 * Used by upper level protocols to time out the exchange.
 * The timer is cancelled when it fires or when the exchange completes.
 */
static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
{
	spin_lock_bh(&ep->ex_lock);
	fc_exch_timer_set_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);
}

int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
{
	struct fc_seq *sp;
	struct fc_exch *ep;
	struct fc_frame *fp;
	int error;

	ep = fc_seq_exch(req_sp);

	spin_lock_bh(&ep->ex_lock);
	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
		spin_unlock_bh(&ep->ex_lock);
		return -ENXIO;
	}

	/*
	 * Send the abort on a new sequence if possible.
	 */
	sp = fc_seq_start_next_locked(&ep->seq);
	if (!sp) {
		spin_unlock_bh(&ep->ex_lock);
		return -ENOMEM;
	}

	ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
	if (timer_msec)
		fc_exch_timer_set_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);

	/*
	 * If not logged into the fabric, don't send ABTS but leave
	 * the sequence active until the next timeout.
	 */
	if (!ep->sid)
		return 0;

	/*
	 * Send an abort for the sequence that timed out.
	 */
	fp = fc_frame_alloc(ep->lp, 0);
	if (fp) {
		fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
			       FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
		error = fc_seq_send(ep->lp, sp, fp);
	} else
		error = -ENOBUFS;
	return error;
}
EXPORT_SYMBOL(fc_seq_exch_abort);
/*
 * Exchange timeout - handle exchange timer expiration.
 * The timer will have been cancelled before this is called.
 */
static void fc_exch_timeout(struct work_struct *work)
{
	struct fc_exch *ep = container_of(work, struct fc_exch,
					  timeout_work.work);
	struct fc_seq *sp = &ep->seq;
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *arg;
	u32 e_stat;
	int rc = 1;

	FC_EXCH_DBG(ep, "Exchange timed out\n");

	spin_lock_bh(&ep->ex_lock);
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		goto unlock;

	e_stat = ep->esb_stat;
	if (e_stat & ESB_ST_COMPLETE) {
		ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
		spin_unlock_bh(&ep->ex_lock);
		if (e_stat & ESB_ST_REC_QUAL)
			fc_exch_rrq(ep);
		goto done;
	} else {
		resp = ep->resp;
		arg = ep->arg;
		ep->resp = NULL;
		if (e_stat & ESB_ST_ABNORMAL)
			rc = fc_exch_done_locked(ep);
		spin_unlock_bh(&ep->ex_lock);
		if (!rc)
			fc_exch_delete(ep);
		if (resp)
			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
		goto done;
	}
unlock:
	spin_unlock_bh(&ep->ex_lock);
done:
	/*
	 * This release matches the hold taken when the timer was set.
	 */
	fc_exch_release(ep);
}

/*
 * Allocate a sequence.
 *
 * We don't support multiple originated sequences on the same exchange.
 * By implication, any previously originated sequence on this exchange
 * is complete, and we reallocate the same sequence.
 */
static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
{
	struct fc_seq *sp;

	sp = &ep->seq;
	sp->ssb_stat = 0;
	sp->cnt = 0;
	sp->id = seq_id;
	return sp;
}

/**
 * fc_exch_em_alloc() - allocate an exchange from a specified EM.
 * @lport:	ptr to the local port
 * @mp:		ptr to the exchange manager
 *
 * Returns pointer to allocated fc_exch with exch lock held.
 */
static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
					struct fc_exch_mgr *mp)
{
	struct fc_exch *ep;
	unsigned int cpu;
	u16 index;
	struct fc_exch_pool *pool;

	/* allocate memory for exchange */
	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
	if (!ep) {
		atomic_inc(&mp->stats.no_free_exch);
		goto out;
	}
	memset(ep, 0, sizeof(*ep));

	cpu = smp_processor_id();
	pool = per_cpu_ptr(mp->pool, cpu);
	spin_lock_bh(&pool->lock);
	index = pool->next_index;
	/* allocate new exch from pool */
	while (fc_exch_ptr_get(pool, index)) {
		index = index == mp->pool_max_index ? 0 : index + 1;
		if (index == pool->next_index)
			goto err;
	}
	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;

	fc_exch_hold(ep);	/* hold for exch in mp */
	spin_lock_init(&ep->ex_lock);
	/*
	 * Hold exch lock for caller to prevent fc_exch_reset()
	 * from releasing exch while fc_exch_alloc() caller is
	 * still working on exch.
	 */
	spin_lock_bh(&ep->ex_lock);

	fc_exch_ptr_set(pool, index, ep);
	list_add_tail(&ep->ex_list, &pool->ex_list);
	fc_seq_alloc(ep, ep->seq_id++);
	pool->total_exches++;
	spin_unlock_bh(&pool->lock);

	/*
	 * update exchange
	 */
	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
	ep->em = mp;
	ep->pool = pool;
	ep->lp = lport;
	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
	ep->rxid = FC_XID_UNKNOWN;
	ep->class = mp->class;
	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
out:
	return ep;
err:
	spin_unlock_bh(&pool->lock);
	atomic_inc(&mp->stats.no_free_exch_xid);
	mempool_free(ep, mp->ep_pool);
	return NULL;
}
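/*
 * To make the XID encoding above concrete (the numbers are made up for
 * illustration): with 4 possible cpus, fc_exch_init() sets fc_cpu_order
 * to 2 and fc_cpu_mask to 3.  An exchange at pool index 5 on cpu 2 with
 * min_xid 0 gets xid = (5 << 2 | 2) + 0 = 22.  The receive path inverts
 * this the same way fc_exch_find() does:
 *
 *	cpu   = xid & fc_cpu_mask;			(22 & 3  == 2)
 *	index = (xid - mp->min_xid) >> fc_cpu_order;	(22 >> 2 == 5)
 */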
/**
 * fc_exch_alloc() - allocate an exchange.
 * @lport:	ptr to the local port
 * @fp:		ptr to the FC frame
 *
 * This function walks the list of exchange manager (EM) anchors
 * to select an EM for the new exchange allocation.  The selected
 * EM is the first one with either a NULL match function pointer
 * or a match function that returns true for the frame.
 */
struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_exch_mgr_anchor *ema;
	struct fc_exch *ep;

	list_for_each_entry(ema, &lport->ema_list, ema_list) {
		if (!ema->match || ema->match(fp)) {
			ep = fc_exch_em_alloc(lport, ema->mp);
			if (ep)
				return ep;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(fc_exch_alloc);
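/*
 * A minimal sketch of a match callback as used above.  This function
 * is hypothetical, not part of libfc: an LLD registering an EM
 * reserved for FCP traffic via fc_exch_mgr_add() might pass something
 * like:
 *
 *	static bool fcp_only_match(struct fc_frame *fp)
 *	{
 *		return fc_frame_header_get(fp)->fh_type == FC_TYPE_FCP;
 *	}
 *
 * Frames that don't match simply fall through to the next anchor on
 * the lport's ema_list.
 */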
/*
 * Lookup and hold an exchange.
 */
static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
{
	struct fc_exch_pool *pool;
	struct fc_exch *ep = NULL;

	if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
		pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
		spin_lock_bh(&pool->lock);
		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
		if (ep) {
			fc_exch_hold(ep);
			WARN_ON(ep->xid != xid);
		}
		spin_unlock_bh(&pool->lock);
	}
	return ep;
}

void fc_exch_done(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);
	int rc;

	spin_lock_bh(&ep->ex_lock);
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);
}
EXPORT_SYMBOL(fc_exch_done);

/*
 * Allocate a new exchange as responder.
 * Sets the responder ID in the frame header.
 */
static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
				    struct fc_exch_mgr *mp,
				    struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh;

	ep = fc_exch_alloc(lport, fp);
	if (ep) {
		ep->class = fc_frame_class(fp);

		/*
		 * Set EX_CTX indicating we're responding on this exchange.
		 */
		ep->f_ctl |= FC_FC_EX_CTX;	/* we're responding */
		ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not new */
		fh = fc_frame_header_get(fp);
		ep->sid = ntoh24(fh->fh_d_id);
		ep->did = ntoh24(fh->fh_s_id);
		ep->oid = ep->did;

		/*
		 * Allocated exchange has placed the XID in the
		 * originator field.  Move it to the responder field,
		 * and set the originator XID from the frame.
		 */
		ep->rxid = ep->xid;
		ep->oxid = ntohs(fh->fh_ox_id);
		ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
		if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
			ep->esb_stat &= ~ESB_ST_SEQ_INIT;

		fc_exch_hold(ep);	/* hold for caller */
		spin_unlock_bh(&ep->ex_lock);	/* lock from fc_exch_alloc */
	}
	return ep;
}

/*
 * Find a sequence for receive where the other end is originating the sequence.
 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
 * on the ep that should be released by the caller.
 */
static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
						 struct fc_exch_mgr *mp,
						 struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch *ep = NULL;
	struct fc_seq *sp = NULL;
	enum fc_pf_rjt_reason reject = FC_RJT_NONE;
	u32 f_ctl;
	u16 xid;

	f_ctl = ntoh24(fh->fh_f_ctl);
	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);

	/*
	 * Lookup or create the exchange if we will be creating the sequence.
	 */
	if (f_ctl & FC_FC_EX_CTX) {
		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
		ep = fc_exch_find(mp, xid);
		if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_OX_ID;
			goto out;
		}
		if (ep->rxid == FC_XID_UNKNOWN)
			ep->rxid = ntohs(fh->fh_rx_id);
		else if (ep->rxid != ntohs(fh->fh_rx_id)) {
			reject = FC_RJT_OX_ID;
			goto rel;
		}
	} else {
		xid = ntohs(fh->fh_rx_id);	/* we are the responder */

		/*
		 * Special case for MDS issuing an ELS TEST with a
		 * bad rxid of 0.
		 * XXX take this out once we do the proper reject.
		 */
		if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
		    fc_frame_payload_op(fp) == ELS_TEST) {
			fh->fh_rx_id = htons(FC_XID_UNKNOWN);
			xid = FC_XID_UNKNOWN;
		}

		/*
		 * new sequence - find the exchange
		 */
		ep = fc_exch_find(mp, xid);
		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
			if (ep) {
				atomic_inc(&mp->stats.xid_busy);
				reject = FC_RJT_RX_ID;
				goto rel;
			}
			ep = fc_exch_resp(lport, mp, fp);
			if (!ep) {
				reject = FC_RJT_EXCH_EST;	/* XXX */
				goto out;
			}
			xid = ep->xid;	/* get our XID */
		} else if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_RX_ID;	/* XID not found */
			goto out;
		}
	}

	/*
	 * At this point, we have the exchange held.
	 * Find or create the sequence.
	 */
	if (fc_sof_is_init(fr_sof(fp))) {
		sp = fc_seq_start_next(&ep->seq);
		if (!sp) {
			reject = FC_RJT_SEQ_XS;	/* exchange shortage */
			goto rel;
		}
		sp->id = fh->fh_seq_id;
		sp->ssb_stat |= SSB_ST_RESP;
	} else {
		sp = &ep->seq;
		if (sp->id != fh->fh_seq_id) {
			atomic_inc(&mp->stats.seq_not_found);
			reject = FC_RJT_SEQ_ID;	/* sequence/exch should exist */
			goto rel;
		}
	}
	WARN_ON(ep != fc_seq_exch(sp));

	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat |= ESB_ST_SEQ_INIT;

	fr_seq(fp) = sp;
out:
	return reject;
rel:
	fc_exch_done(&ep->seq);
	fc_exch_release(ep);	/* hold from fc_exch_find/fc_exch_resp */
	return reject;
}

/*
 * Find the sequence for a frame being received.
 * We originated the sequence, so it should be found.
 * We may or may not have originated the exchange.
 * Does not hold the sequence for the caller.
 */
static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
					 struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch *ep;
	struct fc_seq *sp = NULL;
	u32 f_ctl;
	u16 xid;

	f_ctl = ntoh24(fh->fh_f_ctl);
	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
	xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
	ep = fc_exch_find(mp, xid);
	if (!ep)
		return NULL;
	if (ep->seq.id == fh->fh_seq_id) {
		/*
		 * Save the RX_ID if we didn't previously know it.
		 */
		sp = &ep->seq;
		if ((f_ctl & FC_FC_EX_CTX) != 0 &&
		    ep->rxid == FC_XID_UNKNOWN) {
			ep->rxid = ntohs(fh->fh_rx_id);
		}
	}
	fc_exch_release(ep);
	return sp;
}

/*
 * Set addresses for an exchange.
 * Note this must be done before the first sequence of the exchange is sent.
 */
static void fc_exch_set_addr(struct fc_exch *ep,
			     u32 orig_id, u32 resp_id)
{
	ep->oid = orig_id;
	if (ep->esb_stat & ESB_ST_RESP) {
		ep->sid = resp_id;
		ep->did = orig_id;
	} else {
		ep->sid = orig_id;
		ep->did = resp_id;
	}
}

static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	sp = fc_seq_alloc(ep, ep->seq_id++);
	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
		    ep->f_ctl, sp->id);
	return sp;
}

/*
 * Allocate a new sequence on the same exchange as the supplied sequence.
 * This will never return NULL.
 */
struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	spin_lock_bh(&ep->ex_lock);
	sp = fc_seq_start_next_locked(sp);
	spin_unlock_bh(&ep->ex_lock);

	return sp;
}
EXPORT_SYMBOL(fc_seq_start_next);

int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	int error;
	u32 f_ctl;

	ep = fc_seq_exch(sp);
	WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);

	f_ctl = ntoh24(fh->fh_f_ctl);
	fc_exch_setup_hdr(ep, fp, f_ctl);

	/*
	 * Update the sequence count if this frame carries
	 * multiple FC frames, i.e. when sequence offload is
	 * enabled by the LLD.
	 */
	if (fr_max_payload(fp))
		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
					fr_max_payload(fp));
	else
		sp->cnt++;

	/*
	 * Send the frame.
	 */
	error = lp->tt.frame_send(lp, fp);

	/*
	 * Update the exchange and sequence flags,
	 * assuming all frames for the sequence have been sent.
	 * We can only be called to send once for each sequence.
	 */
	spin_lock_bh(&ep->ex_lock);
	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
	if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);
	return error;
}
EXPORT_SYMBOL(fc_seq_send);
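/*
 * A worked example for the sequence-count update above (illustrative
 * values only): with sequence offload, a single large send of
 * fr_len(fp) = 8216 bytes and fr_max_payload(fp) = 2048 covers
 * (8216 - 24) / 2048 = 4 wire frames, so sp->cnt advances by
 * DIV_ROUND_UP(8192, 2048) = 4 rather than by 1.
 */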
void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
			 struct fc_seq_els_data *els_data)
{
	switch (els_cmd) {
	case ELS_LS_RJT:
		fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
		break;
	case ELS_LS_ACC:
		fc_seq_ls_acc(sp);
		break;
	case ELS_RRQ:
		fc_exch_els_rrq(sp, els_data->fp);
		break;
	case ELS_REC:
		fc_exch_els_rec(sp, els_data->fp);
		break;
	default:
		FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd);
	}
}
EXPORT_SYMBOL(fc_seq_els_rsp_send);

/*
 * Send a sequence, which is also the last sequence in the exchange.
 */
static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
			     enum fc_rctl rctl, enum fc_fh_type fh_type)
{
	u32 f_ctl;
	struct fc_exch *ep = fc_seq_exch(sp);

	f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
	f_ctl |= ep->f_ctl;
	fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
	fc_seq_send(ep->lp, sp, fp);
}

/*
 * Send ACK_1 (or equiv.) indicating we received something.
 * The frame we're acking is supplied.
 */
static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_exch *ep = fc_seq_exch(sp);
	struct fc_lport *lp = ep->lp;
	unsigned int f_ctl;

	/*
	 * Don't send ACKs for class 3.
	 */
	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
		fp = fc_frame_alloc(lp, 0);
		if (!fp)
			return;

		fh = fc_frame_header_get(fp);
		fh->fh_r_ctl = FC_RCTL_ACK_1;
		fh->fh_type = FC_TYPE_BLS;

		/*
		 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
		 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
		 * Bits 9-8 are meaningful (retransmitted or unidirectional).
		 * Last ACK uses bits 7-6 (continue sequence),
		 * bits 5-4 are meaningful (what kind of ACK to use).
		 */
		rx_fh = fc_frame_header_get(rx_fp);
		f_ctl = ntoh24(rx_fh->fh_f_ctl);
		f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
			FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
			FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
			FC_FC_RETX_SEQ | FC_FC_UNI_TX;
		f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
		hton24(fh->fh_f_ctl, f_ctl);

		fc_exch_setup_hdr(ep, fp, f_ctl);
		fh->fh_seq_id = rx_fh->fh_seq_id;
		fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
		fh->fh_parm_offset = htonl(1);	/* ack single frame */

		fr_sof(fp) = fr_sof(rx_fp);
		if (f_ctl & FC_FC_END_SEQ)
			fr_eof(fp) = FC_EOF_T;
		else
			fr_eof(fp) = FC_EOF_N;

		(void) lp->tt.frame_send(lp, fp);
	}
}
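/*
 * The EX_CTX/SEQ_CTX flip above is easiest to see on concrete bits
 * (hypothetical values): a received request has EX_CTX = 0 and
 * SEQ_CTX = 0, since the sender originated both exchange and sequence.
 * XORing with FC_FC_EX_CTX | FC_FC_SEQ_CTX sets both to 1 in our ACK,
 * correctly marking us as exchange responder and sequence recipient,
 * while the masked-in FIRST_SEQ/LAST_SEQ/END_SEQ/etc. bits are echoed
 * unchanged.
 */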
/*
 * Send BLS Reject.
 * This is for rejecting BA_ABTS only.
 */
static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
				enum fc_ba_rjt_reason reason,
				enum fc_ba_rjt_explan explan)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_ba_rjt *rp;
	struct fc_lport *lp;
	unsigned int f_ctl;

	lp = fr_dev(rx_fp);
	fp = fc_frame_alloc(lp, sizeof(*rp));
	if (!fp)
		return;
	fh = fc_frame_header_get(fp);
	rx_fh = fc_frame_header_get(rx_fp);

	memset(fh, 0, sizeof(*fh) + sizeof(*rp));

	rp = fc_frame_payload_get(fp, sizeof(*rp));
	rp->br_reason = reason;
	rp->br_explan = explan;

	/*
	 * seq_id, cs_ctl, df_ctl and param/offset are zero.
	 */
	memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
	memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
	fh->fh_ox_id = rx_fh->fh_ox_id;
	fh->fh_rx_id = rx_fh->fh_rx_id;
	fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
	fh->fh_r_ctl = FC_RCTL_BA_RJT;
	fh->fh_type = FC_TYPE_BLS;

	/*
	 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
	 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
	 * Bits 9-8 are meaningful (retransmitted or unidirectional).
	 * Last ACK uses bits 7-6 (continue sequence),
	 * bits 5-4 are meaningful (what kind of ACK to use).
	 * Always set LAST_SEQ, END_SEQ.
	 */
	f_ctl = ntoh24(rx_fh->fh_f_ctl);
	f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
		FC_FC_END_CONN | FC_FC_SEQ_INIT |
		FC_FC_RETX_SEQ | FC_FC_UNI_TX;
	f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
	f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
	f_ctl &= ~FC_FC_FIRST_SEQ;
	hton24(fh->fh_f_ctl, f_ctl);

	fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
	fr_eof(fp) = FC_EOF_T;
	if (fc_sof_needs_ack(fr_sof(fp)))
		fr_eof(fp) = FC_EOF_N;

	(void) lp->tt.frame_send(lp, fp);
}

/*
 * Handle an incoming ABTS.  This would be for target mode usually,
 * but could be due to lost FCP transfer ready, confirm or RRQ.
 * We always handle this as an exchange abort, ignoring the parameter.
 */
static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_ba_acc *ap;
	struct fc_frame_header *fh;
	struct fc_seq *sp;

	if (!ep)
		goto reject;
	spin_lock_bh(&ep->ex_lock);
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		spin_unlock_bh(&ep->ex_lock);
		goto reject;
	}
	if (!(ep->esb_stat & ESB_ST_REC_QUAL))
		fc_exch_hold(ep);		/* hold for REC_QUAL */
	ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
	fc_exch_timer_set_locked(ep, ep->r_a_tov);

	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
	if (!fp) {
		spin_unlock_bh(&ep->ex_lock);
		goto free;
	}
	fh = fc_frame_header_get(fp);
	ap = fc_frame_payload_get(fp, sizeof(*ap));
	memset(ap, 0, sizeof(*ap));
	sp = &ep->seq;
	ap->ba_high_seq_cnt = htons(0xffff);
	if (sp->ssb_stat & SSB_ST_RESP) {
		ap->ba_seq_id = sp->id;
		ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
		ap->ba_high_seq_cnt = fh->fh_seq_cnt;
		ap->ba_low_seq_cnt = htons(sp->cnt);
	}
	sp = fc_seq_start_next_locked(sp);
	spin_unlock_bh(&ep->ex_lock);
	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
	fc_frame_free(rx_fp);
	return;

reject:
	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
free:
	fc_frame_free(rx_fp);
}

/*
 * Handle receive where the other end is originating the sequence.
 */
static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
			     struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp = NULL;
	struct fc_exch *ep = NULL;
	enum fc_sof sof;
	enum fc_eof eof;
	u32 f_ctl;
	enum fc_pf_rjt_reason reject;

	fr_seq(fp) = NULL;
	reject = fc_seq_lookup_recip(lp, mp, fp);
	if (reject == FC_RJT_NONE) {
		sp = fr_seq(fp);	/* sequence will be held */
		ep = fc_seq_exch(sp);
		sof = fr_sof(fp);
		eof = fr_eof(fp);
		f_ctl = ntoh24(fh->fh_f_ctl);
		fc_seq_send_ack(sp, fp);

		/*
		 * Call the receive function.
		 *
		 * The receive function may allocate a new sequence
		 * over the old one, so we shouldn't change the
		 * sequence after this.
		 *
		 * The frame will be freed by the receive function.
		 * If a new exch resp handler is valid then call that
		 * first.
		 */
		if (ep->resp)
			ep->resp(sp, fp, ep->arg);
		else
			lp->tt.lport_recv(lp, sp, fp);
		fc_exch_release(ep);	/* release from lookup */
	} else {
		FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject);
		fc_frame_free(fp);
	}
}

/*
 * Handle receive where the other end is originating the sequence in
 * response to our exchange.
 */
static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp;
	struct fc_exch *ep;
	enum fc_sof sof;
	u32 f_ctl;
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *ex_resp_arg;
	int rc;

	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
	if (!ep) {
		atomic_inc(&mp->stats.xid_not_found);
		goto out;
	}
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		atomic_inc(&mp->stats.xid_not_found);
		goto out;
	}
	if (ep->rxid == FC_XID_UNKNOWN)
		ep->rxid = ntohs(fh->fh_rx_id);
	if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	if (ep->did != ntoh24(fh->fh_s_id) &&
	    ep->did != FC_FID_FLOGI) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	sof = fr_sof(fp);
	if (fc_sof_is_init(sof)) {
		sp = fc_seq_start_next(&ep->seq);
		sp->id = fh->fh_seq_id;
		sp->ssb_stat |= SSB_ST_RESP;
	} else {
		sp = &ep->seq;
		if (sp->id != fh->fh_seq_id) {
			atomic_inc(&mp->stats.seq_not_found);
			goto rel;
		}
	}
	f_ctl = ntoh24(fh->fh_f_ctl);
	fr_seq(fp) = sp;
	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat |= ESB_ST_SEQ_INIT;

	if (fc_sof_needs_ack(sof))
		fc_seq_send_ack(sp, fp);
	resp = ep->resp;
	ex_resp_arg = ep->arg;

	if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		spin_lock_bh(&ep->ex_lock);
		rc = fc_exch_done_locked(ep);
		WARN_ON(fc_seq_exch(sp) != ep);
		spin_unlock_bh(&ep->ex_lock);
		if (!rc)
			fc_exch_delete(ep);
	}

	/*
	 * Call the receive function.
	 * The sequence is held (has a refcnt) for us,
	 * but not for the receive function.
	 *
	 * The receive function may allocate a new sequence
	 * over the old one, so we shouldn't change the
	 * sequence after this.
	 *
	 * The frame will be freed by the receive function.
	 * If a new exch resp handler is valid then call that
	 * first.
	 */
	if (resp)
		resp(sp, fp, ex_resp_arg);
	else
		fc_frame_free(fp);
	fc_exch_release(ep);
	return;
rel:
	fc_exch_release(ep);
out:
	fc_frame_free(fp);
}

/*
 * Handle receive for a sequence where the other end is responding to
 * our sequence.
 */
static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_seq *sp;

	sp = fc_seq_lookup_orig(mp, fp);	/* doesn't hold sequence */

	if (!sp)
		atomic_inc(&mp->stats.xid_not_found);
	else
		atomic_inc(&mp->stats.non_bls_resp);

	fc_frame_free(fp);
}

/*
 * Handle the response to an ABTS for the exchange or sequence.
 * This can be BA_ACC or BA_RJT.
 */
static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
{
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *ex_resp_arg;
	struct fc_frame_header *fh;
	struct fc_ba_acc *ap;
	struct fc_seq *sp;
	u16 low;
	u16 high;
	int rc = 1, has_rec = 0;

	fh = fc_frame_header_get(fp);
	FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
		    fc_exch_rctl_name(fh->fh_r_ctl));

	if (cancel_delayed_work_sync(&ep->timeout_work))
		fc_exch_release(ep);	/* release from pending timer hold */

	spin_lock_bh(&ep->ex_lock);
	switch (fh->fh_r_ctl) {
	case FC_RCTL_BA_ACC:
		ap = fc_frame_payload_get(fp, sizeof(*ap));
		if (!ap)
			break;

		/*
		 * Decide whether to establish a Recovery Qualifier.
		 * We do this if there is a non-empty SEQ_CNT range and
		 * SEQ_ID is the same as the one we aborted.
		 */
		low = ntohs(ap->ba_low_seq_cnt);
		high = ntohs(ap->ba_high_seq_cnt);
		if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
		    (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
		     ap->ba_seq_id == ep->seq_id) && low != high) {
			ep->esb_stat |= ESB_ST_REC_QUAL;
			fc_exch_hold(ep);  /* hold for recovery qualifier */
			has_rec = 1;
		}
		break;
	case FC_RCTL_BA_RJT:
		break;
	default:
		break;
	}

	resp = ep->resp;
	ex_resp_arg = ep->arg;

	/*
	 * XXX do we need to do some other checks here?  Can we reuse
	 * more of fc_exch_recv_seq_resp?  Do we want to check END_SEQ
	 * as well as LAST_SEQ here?
	 */
	sp = &ep->seq;
	if (ep->fh_type != FC_TYPE_FCP &&
	    ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
		rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);

	if (resp)
		resp(sp, fp, ex_resp_arg);
	else
		fc_frame_free(fp);

	if (has_rec)
		fc_exch_timer_set(ep, ep->r_a_tov);
}

/*
 * Receive BLS sequence.
 * This is always a sequence initiated by the remote side.
 * We may be either the originator or recipient of the exchange.
 */
static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_exch *ep;
	u32 f_ctl;

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);
	fr_seq(fp) = NULL;

	ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
			  ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
	if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
		spin_lock_bh(&ep->ex_lock);
		ep->esb_stat |= ESB_ST_SEQ_INIT;
		spin_unlock_bh(&ep->ex_lock);
	}
	if (f_ctl & FC_FC_SEQ_CTX) {
		/*
		 * A response to a sequence we initiated.
		 * This should only be ACKs for class 2 or F.
		 */
		switch (fh->fh_r_ctl) {
		case FC_RCTL_ACK_1:
		case FC_RCTL_ACK_0:
			break;
		default:
			FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
				    fh->fh_r_ctl,
				    fc_exch_rctl_name(fh->fh_r_ctl));
			break;
		}
		fc_frame_free(fp);
	} else {
		switch (fh->fh_r_ctl) {
		case FC_RCTL_BA_RJT:
		case FC_RCTL_BA_ACC:
			if (ep)
				fc_exch_abts_resp(ep, fp);
			else
				fc_frame_free(fp);
			break;
		case FC_RCTL_BA_ABTS:
			fc_exch_recv_abts(ep, fp);
			break;
		default:			/* ignore junk */
			fc_frame_free(fp);
			break;
		}
	}
	if (ep)
		fc_exch_release(ep);	/* release hold taken by fc_exch_find */
}

/*
 * Accept sequence with LS_ACC.
 * If this fails due to allocation or transmit congestion, assume the
 * originator will repeat the sequence.
 */
static void fc_seq_ls_acc(struct fc_seq *req_sp)
{
	struct fc_seq *sp;
	struct fc_els_ls_acc *acc;
	struct fc_frame *fp;

	sp = fc_seq_start_next(req_sp);
	fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
	if (fp) {
		acc = fc_frame_payload_get(fp, sizeof(*acc));
		memset(acc, 0, sizeof(*acc));
		acc->la_cmd = ELS_LS_ACC;
		fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
	}
}

/*
 * Reject sequence with ELS LS_RJT.
 * If this fails due to allocation or transmit congestion, assume the
 * originator will repeat the sequence.
 */
static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
			  enum fc_els_rjt_explan explan)
{
	struct fc_seq *sp;
	struct fc_els_ls_rjt *rjt;
	struct fc_frame *fp;

	sp = fc_seq_start_next(req_sp);
	fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt));
	if (fp) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		memset(rjt, 0, sizeof(*rjt));
		rjt->er_cmd = ELS_LS_RJT;
		rjt->er_reason = reason;
		rjt->er_explan = explan;
		fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
	}
}

static void fc_exch_reset(struct fc_exch *ep)
{
	struct fc_seq *sp;
	void (*resp)(struct fc_seq *, struct fc_frame *, void *);
	void *arg;
	int rc = 1;

	spin_lock_bh(&ep->ex_lock);
	ep->state |= FC_EX_RST_CLEANUP;
	/*
	 * We really want to call del_timer_sync, but cannot due
	 * to the lport calling with the lport lock held (some resp
	 * functions can also grab the lport lock, which could cause
	 * a deadlock).
	 */
	if (cancel_delayed_work(&ep->timeout_work))
		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
	resp = ep->resp;
	ep->resp = NULL;
	if (ep->esb_stat & ESB_ST_REC_QUAL)
		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec_qual */
	ep->esb_stat &= ~ESB_ST_REC_QUAL;
	arg = ep->arg;
	sp = &ep->seq;
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);

	if (resp)
		resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
}

/**
 * fc_exch_pool_reset() - Reset a per-cpu exchange pool.
 * @lport:	ptr to the local port
 * @pool:	ptr to the per-cpu exchange pool
 * @sid:	source FC ID
 * @did:	destination FC ID
 *
 * Resets a per-cpu exchange pool, releasing all of its sequences
 * and exchanges.  If sid is non-zero, reset only exchanges we
 * sourced from that FID.  If did is non-zero, reset only
 * exchanges destined to that FID.
 */
static void fc_exch_pool_reset(struct fc_lport *lport,
			       struct fc_exch_pool *pool,
			       u32 sid, u32 did)
{
	struct fc_exch *ep;
	struct fc_exch *next;

	spin_lock_bh(&pool->lock);
restart:
	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
		if ((lport == ep->lp) &&
		    (sid == 0 || sid == ep->sid) &&
		    (did == 0 || did == ep->did)) {
			fc_exch_hold(ep);
			spin_unlock_bh(&pool->lock);

			fc_exch_reset(ep);

			fc_exch_release(ep);
			spin_lock_bh(&pool->lock);

			/*
			 * must restart loop in case, while the lock
			 * was dropped, multiple eps were released.
			 */
			goto restart;
		}
	}
	spin_unlock_bh(&pool->lock);
}

/**
 * fc_exch_mgr_reset() - Reset all EMs of an lport.
 * @lport:	ptr to the local port
 * @sid:	source FC ID
 * @did:	destination FC ID
 *
 * Resets all EMs of an lport, releasing all of their sequences and
 * exchanges.  If sid is non-zero, reset only exchanges we sourced
 * from that FID.  If did is non-zero, reset only exchanges destined
 * to that FID.
 */
void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
{
	struct fc_exch_mgr_anchor *ema;
	unsigned int cpu;

	list_for_each_entry(ema, &lport->ema_list, ema_list) {
		for_each_possible_cpu(cpu)
			fc_exch_pool_reset(lport,
					   per_cpu_ptr(ema->mp->pool, cpu),
					   sid, did);
	}
}
EXPORT_SYMBOL(fc_exch_mgr_reset);
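/*
 * Typical usage sketch (the call sites described here are illustrative,
 * not quotes from callers of this API): on a full lport reset a caller
 * would pass zero for both filters, while tearing down state for one
 * remote port would filter by its FID:
 *
 *	fc_exch_mgr_reset(lport, 0, 0);		reset everything
 *	fc_exch_mgr_reset(lport, 0, did);	only exchanges to did
 */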
/*
 * Handle incoming ELS REC - Read Exchange Concise.
 * Note that the requesting port may be different from the S_ID in the request.
 */
static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
{
	struct fc_frame *fp;
	struct fc_exch *ep;
	struct fc_exch_mgr *em;
	struct fc_els_rec *rp;
	struct fc_els_rec_acc *acc;
	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
	enum fc_els_rjt_explan explan;
	u32 sid;
	u16 rxid;
	u16 oxid;

	rp = fc_frame_payload_get(rfp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;
	sid = ntoh24(rp->rec_s_id);
	rxid = ntohs(rp->rec_rx_id);
	oxid = ntohs(rp->rec_ox_id);

	/*
	 * Currently it's hard to find the local S_ID from the exchange
	 * manager.  This will eventually be fixed, but for now it's easier
	 * to lookup the subject exchange twice, once as if we were
	 * the initiator, and then again if we weren't.
	 */
	em = fc_seq_exch(sp)->em;
	ep = fc_exch_find(em, oxid);
	explan = ELS_EXPL_OXID_RXID;
	if (ep && ep->oid == sid) {
		if (ep->rxid != FC_XID_UNKNOWN &&
		    rxid != FC_XID_UNKNOWN &&
		    ep->rxid != rxid)
			goto rel;
	} else {
		if (ep)
			fc_exch_release(ep);
		ep = NULL;
		if (rxid != FC_XID_UNKNOWN)
			ep = fc_exch_find(em, rxid);
		if (!ep)
			goto reject;
	}

	fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
	if (!fp) {
		fc_exch_done(sp);
		goto out;
	}
	sp = fc_seq_start_next(sp);
	acc = fc_frame_payload_get(fp, sizeof(*acc));
	memset(acc, 0, sizeof(*acc));
	acc->reca_cmd = ELS_LS_ACC;
	acc->reca_ox_id = rp->rec_ox_id;
	memcpy(acc->reca_ofid, rp->rec_s_id, 3);
	acc->reca_rx_id = htons(ep->rxid);
	if (ep->sid == ep->oid)
		hton24(acc->reca_rfid, ep->did);
	else
		hton24(acc->reca_rfid, ep->sid);
	acc->reca_fc4value = htonl(ep->seq.rec_data);
	acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
						 ESB_ST_SEQ_INIT |
						 ESB_ST_COMPLETE));
	sp = fc_seq_start_next(sp);
	fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
out:
	fc_exch_release(ep);
	fc_frame_free(rfp);
	return;

rel:
	fc_exch_release(ep);
reject:
	fc_seq_ls_rjt(sp, reason, explan);
	fc_frame_free(rfp);
}

/*
 * Handle response from RRQ.
 * Not much to do here, really.
 * Should report errors.
 *
 * TODO: fix error handler.
 */
static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	struct fc_exch *aborted_ep = arg;
	unsigned int op;

	if (IS_ERR(fp)) {
		int err = PTR_ERR(fp);

		if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
			goto cleanup;
		FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
			    "frame error %d\n", err);
		return;
	}

	op = fc_frame_payload_op(fp);
	fc_frame_free(fp);

	switch (op) {
	case ELS_LS_RJT:
		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
		/* fall through */
	case ELS_LS_ACC:
		goto cleanup;
	default:
		FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
			    "for RRQ", op);
		return;
	}

cleanup:
	fc_exch_done(&aborted_ep->seq);
	/* drop hold for rec qual */
	fc_exch_release(aborted_ep);
}

/*
 * Send ELS RRQ - Reinstate Recovery Qualifier.
 * This tells the remote port to stop blocking the use of
 * the exchange and the seq_cnt range.
 */
static void fc_exch_rrq(struct fc_exch *ep)
{
	struct fc_lport *lp;
	struct fc_els_rrq *rrq;
	struct fc_frame *fp;
	u32 did;

	lp = ep->lp;

	fp = fc_frame_alloc(lp, sizeof(*rrq));
	if (!fp)
		goto retry;

	rrq = fc_frame_payload_get(fp, sizeof(*rrq));
	memset(rrq, 0, sizeof(*rrq));
	rrq->rrq_cmd = ELS_RRQ;
	hton24(rrq->rrq_s_id, ep->sid);
	rrq->rrq_ox_id = htons(ep->oxid);
	rrq->rrq_rx_id = htons(ep->rxid);

	did = ep->did;
	if (ep->esb_stat & ESB_ST_RESP)
		did = ep->sid;

	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
		       fc_host_port_id(lp->host), FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	if (fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, lp->e_d_tov))
		return;

retry:
	spin_lock_bh(&ep->ex_lock);
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
		spin_unlock_bh(&ep->ex_lock);
		/* drop hold for rec qual */
		fc_exch_release(ep);
		return;
	}
	ep->esb_stat |= ESB_ST_REC_QUAL;
	fc_exch_timer_set_locked(ep, ep->r_a_tov);
	spin_unlock_bh(&ep->ex_lock);
}


/*
 * Handle incoming ELS RRQ - Reinstate Recovery Qualifier.
 */
static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
{
	struct fc_exch *ep;	/* request or subject exchange */
	struct fc_els_rrq *rp;
	u32 sid;
	u16 xid;
	enum fc_els_rjt_explan explan;

	rp = fc_frame_payload_get(fp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;

	/*
	 * lookup subject exchange.
	 */
	ep = fc_seq_exch(sp);
	sid = ntoh24(rp->rrq_s_id);	/* subject source */
	xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
	ep = fc_exch_find(ep->em, xid);

	explan = ELS_EXPL_OXID_RXID;
	if (!ep)
		goto reject;
	spin_lock_bh(&ep->ex_lock);
	if (ep->oxid != ntohs(rp->rrq_ox_id))
		goto unlock_reject;
	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
	    ep->rxid != FC_XID_UNKNOWN)
		goto unlock_reject;
	explan = ELS_EXPL_SID;
	if (ep->sid != sid)
		goto unlock_reject;

	/*
	 * Clear Recovery Qualifier state, and cancel timer if complete.
	 */
	if (ep->esb_stat & ESB_ST_REC_QUAL) {
		ep->esb_stat &= ~ESB_ST_REC_QUAL;
		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
	}
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		if (cancel_delayed_work(&ep->timeout_work))
			atomic_dec(&ep->ex_refcnt);	/* drop timer hold */
	}

	spin_unlock_bh(&ep->ex_lock);

	/*
	 * Send LS_ACC.
	 */
	fc_seq_ls_acc(sp);
	fc_frame_free(fp);
	return;

unlock_reject:
	spin_unlock_bh(&ep->ex_lock);
	fc_exch_release(ep);	/* drop hold from fc_exch_find */
reject:
	fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
	fc_frame_free(fp);
}

struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
					   struct fc_exch_mgr *mp,
					   bool (*match)(struct fc_frame *))
{
	struct fc_exch_mgr_anchor *ema;

	ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
	if (!ema)
		return ema;

	ema->mp = mp;
	ema->match = match;
	/* add EM anchor to EM anchors list */
	list_add_tail(&ema->ema_list, &lport->ema_list);
	kref_get(&mp->kref);
	return ema;
}
EXPORT_SYMBOL(fc_exch_mgr_add);

static void fc_exch_mgr_destroy(struct kref *kref)
{
	struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);

	mempool_destroy(mp->ep_pool);
	free_percpu(mp->pool);
	kfree(mp);
}

void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
{
	/* remove EM anchor from EM anchors list */
	list_del(&ema->ema_list);
	kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
	kfree(ema);
}
EXPORT_SYMBOL(fc_exch_mgr_del);

struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
				      enum fc_class class,
				      u16 min_xid, u16 max_xid,
				      bool (*match)(struct fc_frame *))
{
	struct fc_exch_mgr *mp;
	u16 pool_exch_range;
	size_t pool_size;
	unsigned int cpu;
	struct fc_exch_pool *pool;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
	    (min_xid & fc_cpu_mask) != 0) {
		FC_LPORT_DBG(lp, "Invalid min_xid 0x%x and max_xid 0x%x\n",
			     min_xid, max_xid);
		return NULL;
	}

	/*
	 * allocate memory for EM
	 */
	mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
	if (!mp)
		return NULL;

	mp->class = class;
	/* adjust em exch xid range for offload */
	mp->min_xid = min_xid;
	mp->max_xid = max_xid;

	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
	if (!mp->ep_pool)
		goto free_mp;

	/*
	 * Set up the per-cpu exch pools with the entire exchange id range
	 * divided equally across all cpus.  The exch pointer array memory
	 * is allocated for the exch range of each pool.
	 */
	pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1);
	mp->pool_max_index = pool_exch_range - 1;

	/*
	 * Allocate and initialize per cpu exch pool
	 */
	pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
	mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
	if (!mp->pool)
		goto free_mempool;
	for_each_possible_cpu(cpu) {
		pool = per_cpu_ptr(mp->pool, cpu);
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->ex_list);
	}

	kref_init(&mp->kref);
	if (!fc_exch_mgr_add(lp, mp, match)) {
		free_percpu(mp->pool);
		goto free_mempool;
	}

	/*
	 * The kref_init() above sets mp->kref to 1 and then the call to
	 * fc_exch_mgr_add incremented mp->kref again, so adjust for that
	 * extra increment.
	 */
	kref_put(&mp->kref, fc_exch_mgr_destroy);
	return mp;

free_mempool:
	mempool_destroy(mp->ep_pool);
free_mp:
	kfree(mp);
	return NULL;
}
EXPORT_SYMBOL(fc_exch_mgr_alloc);
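/*
 * Worked example for the pool sizing above (numbers chosen only for
 * illustration): with min_xid 0, max_xid 0x07ff and fc_cpu_mask 3
 * (4 possible cpus), the 2048 XIDs split into pool_exch_range =
 * 2048 / 4 = 512 exchanges per cpu, so each per-cpu allocation is
 * sizeof(struct fc_exch_pool) + 512 * sizeof(struct fc_exch *) bytes
 * and pool_max_index is 511.  The min_xid & fc_cpu_mask check above
 * keeps the cpu bits of the first XID zero, so the encoding done in
 * fc_exch_em_alloc() stays within range.
 */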
void fc_exch_mgr_free(struct fc_lport *lport)
{
	struct fc_exch_mgr_anchor *ema, *next;

	list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
		fc_exch_mgr_del(ema);
}
EXPORT_SYMBOL(fc_exch_mgr_free);


struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
				struct fc_frame *fp,
				void (*resp)(struct fc_seq *,
					     struct fc_frame *fp,
					     void *arg),
				void (*destructor)(struct fc_seq *, void *),
				void *arg, u32 timer_msec)
{
	struct fc_exch *ep;
	struct fc_seq *sp = NULL;
	struct fc_frame_header *fh;
	int rc = 1;

	ep = fc_exch_alloc(lp, fp);
	if (!ep) {
		fc_frame_free(fp);
		return NULL;
	}
	ep->esb_stat |= ESB_ST_SEQ_INIT;
	fh = fc_frame_header_get(fp);
	fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
	ep->resp = resp;
	ep->destructor = destructor;
	ep->arg = arg;
	ep->r_a_tov = FC_DEF_R_A_TOV;
	ep->lp = lp;
	sp = &ep->seq;

	ep->fh_type = fh->fh_type; /* save for possible timeout handling */
	ep->f_ctl = ntoh24(fh->fh_f_ctl);
	fc_exch_setup_hdr(ep, fp, ep->f_ctl);
	sp->cnt++;

	if (ep->xid <= lp->lro_xid)
		fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);

	if (unlikely(lp->tt.frame_send(lp, fp)))
		goto err;

	if (timer_msec)
		fc_exch_timer_set_locked(ep, timer_msec);
	ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not first seq */

	if (ep->f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);
	return sp;
err:
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);
	return NULL;
}
EXPORT_SYMBOL(fc_exch_seq_send);

/*
 * Receive a frame.
 */
void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch_mgr_anchor *ema;
	u32 f_ctl, found = 0;
	u16 oxid;

	/* lport lock ? */
	if (!lp || lp->state == LPORT_ST_DISABLED) {
		FC_LPORT_DBG(lp, "Receiving frames for an lport that "
			     "has not been initialized correctly\n");
		fc_frame_free(fp);
		return;
	}

	f_ctl = ntoh24(fh->fh_f_ctl);
	oxid = ntohs(fh->fh_ox_id);
	if (f_ctl & FC_FC_EX_CTX) {
		list_for_each_entry(ema, &lp->ema_list, ema_list) {
			if ((oxid >= ema->mp->min_xid) &&
			    (oxid <= ema->mp->max_xid)) {
				found = 1;
				break;
			}
		}

		if (!found) {
			FC_LPORT_DBG(lp, "Received response for out "
				     "of range oxid:%hx\n", oxid);
			fc_frame_free(fp);
			return;
		}
	} else
		ema = list_entry(lp->ema_list.prev, typeof(*ema), ema_list);

	/*
	 * If frame is marked invalid, just drop it.
	 */
	switch (fr_eof(fp)) {
	case FC_EOF_T:
		if (f_ctl & FC_FC_END_SEQ)
			skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
		/* fall through */
	case FC_EOF_N:
		if (fh->fh_type == FC_TYPE_BLS)
			fc_exch_recv_bls(ema->mp, fp);
		else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
			 FC_FC_EX_CTX)
			fc_exch_recv_seq_resp(ema->mp, fp);
		else if (f_ctl & FC_FC_SEQ_CTX)
			fc_exch_recv_resp(ema->mp, fp);
		else
			fc_exch_recv_req(lp, ema->mp, fp);
		break;
	default:
		FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp));
		fc_frame_free(fp);
	}
}
EXPORT_SYMBOL(fc_exch_recv);

int fc_exch_init(struct fc_lport *lp)
{
	if (!lp->tt.seq_start_next)
		lp->tt.seq_start_next = fc_seq_start_next;

	if (!lp->tt.exch_seq_send)
		lp->tt.exch_seq_send = fc_exch_seq_send;

	if (!lp->tt.seq_send)
		lp->tt.seq_send = fc_seq_send;

	if (!lp->tt.seq_els_rsp_send)
		lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;

	if (!lp->tt.exch_done)
		lp->tt.exch_done = fc_exch_done;

	if (!lp->tt.exch_mgr_reset)
		lp->tt.exch_mgr_reset = fc_exch_mgr_reset;

	if (!lp->tt.seq_exch_abort)
		lp->tt.seq_exch_abort = fc_seq_exch_abort;

	/*
	 * Initialize fc_cpu_mask and fc_cpu_order.  fc_cpu_mask is set
	 * for nr_cpu_ids rounded up to the next power of 2, and the order
	 * of that power is stored in fc_cpu_order, as this is later
	 * required for mapping between an exch id and an exch array index
	 * in a per-cpu exch pool.
	 *
	 * This round-up is required to align fc_cpu_mask with the
	 * exchange id's lower bits such that all incoming frames of
	 * an exchange get delivered to the same cpu on which the
	 * exchange originated, by a simple bitwise AND operation
	 * between fc_cpu_mask and the exchange id.
	 */
	fc_cpu_mask = 1;
	fc_cpu_order = 0;
	while (fc_cpu_mask < nr_cpu_ids) {
		fc_cpu_mask <<= 1;
		fc_cpu_order++;
	}
	fc_cpu_mask--;

	return 0;
}
EXPORT_SYMBOL(fc_exch_init);
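/*
 * For example (values illustrative): with nr_cpu_ids == 6 the loop
 * above runs until fc_cpu_mask reaches 8, leaving fc_cpu_order == 3
 * and fc_cpu_mask == 7 after the final decrement.  XIDs then carry
 * the originating cpu in their low 3 bits, so the receive path's
 * "xid & fc_cpu_mask" lands every frame of an exchange back on the
 * cpu (0-5) where the exchange was allocated.
 */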
int fc_setup_exch_mgr(void)
{
	fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
					 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fc_em_cachep)
		return -ENOMEM;
	return 0;
}

void fc_destroy_exch_mgr(void)
{
	kmem_cache_destroy(fc_em_cachep);
}