/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * Fibre Channel exchange and sequence handling.
 */

#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

#include "fc_libfc.h"

u16 fc_cpu_mask;		/* cpu mask for possible cpus */
EXPORT_SYMBOL(fc_cpu_mask);
static u16 fc_cpu_order;	/* 2's power to represent total possible cpus */
static struct kmem_cache *fc_em_cachep;	/* cache for exchanges */
static struct workqueue_struct *fc_exch_workqueue;

/*
 * Structure and function definitions for managing Fibre Channel Exchanges
 * and Sequences.
 *
 * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
 *
 * fc_exch_mgr holds the exchange state for an N port.
 *
 * fc_exch holds state for one exchange and links to its active sequence.
 *
 * fc_seq holds the state for an individual sequence.
 */

/**
 * struct fc_exch_pool - Per cpu exchange pool
 * @next_index: Next possible free exchange index
 * @total_exches: Total allocated exchanges
 * @lock: Exch pool lock
 * @ex_list: List of exchanges
 *
 * This structure manages per cpu exchanges in an array of exchange pointers.
 * The array is allocated immediately after the struct fc_exch_pool memory
 * for the range of exchanges assigned to the per cpu pool.
 */
struct fc_exch_pool {
	spinlock_t	 lock;
	struct list_head ex_list;
	u16		 next_index;
	u16		 total_exches;

	/* two caches of free slots in the exch array */
	u16		 left;
	u16		 right;
} ____cacheline_aligned_in_smp;

/**
 * struct fc_exch_mgr - The Exchange Manager (EM).
 * @class: Default class for new sequences
 * @kref: Reference counter
 * @min_xid: Minimum exchange ID
 * @max_xid: Maximum exchange ID
 * @ep_pool: Reserved exchange pointers
 * @pool_max_index: Max exch array index in exch pool
 * @pool: Per cpu exch pool
 * @stats: Statistics structure
 *
 * This structure is the center for creating exchanges and sequences.
 * It manages the allocation of exchange IDs.
 */
struct fc_exch_mgr {
	struct fc_exch_pool __percpu *pool;
	mempool_t	*ep_pool;
	enum fc_class	class;
	struct kref	kref;
	u16		min_xid;
	u16		max_xid;
	u16		pool_max_index;

	struct {
		atomic_t no_free_exch;
		atomic_t no_free_exch_xid;
		atomic_t xid_not_found;
		atomic_t xid_busy;
		atomic_t seq_not_found;
		atomic_t non_bls_resp;
	} stats;
};

/**
 * struct fc_exch_mgr_anchor - primary structure for list of EMs
 * @ema_list: Exchange Manager Anchor list
 * @mp: Exchange Manager associated with this anchor
 * @match: Routine to determine if this anchor's EM should be used
 *
 * When walking the list of anchors the match routine will be called
 * for each anchor to determine if that EM should be used. The last
 * anchor in the list will always match to handle any exchanges not
 * handled by other EMs. The non-default EMs would be added to the
 * anchor list by HW that provides offloads.
 */
struct fc_exch_mgr_anchor {
	struct list_head ema_list;
	struct fc_exch_mgr *mp;
	bool (*match)(struct fc_frame *);
};

static void fc_exch_rrq(struct fc_exch *);
static void fc_seq_ls_acc(struct fc_frame *);
static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
			  enum fc_els_rjt_explan);
static void fc_exch_els_rec(struct fc_frame *);
static void fc_exch_els_rrq(struct fc_frame *);

/*
 * Internal implementation notes.
 *
 * There is one exchange manager by default in libfc, but an LLD may choose
 * to have one per CPU. The sequence manager is one per exchange manager
 * and currently never separated.
 *
 * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
 * assigned by the Sequence Initiator that shall be unique for a specific
 * D_ID and S_ID pair while the Sequence is open." Note that it isn't
 * qualified by exchange ID, which one might think it would be.
 * In practice this limits the number of open sequences and exchanges to 256
 * per session. For most targets we could treat this limit as per exchange.
 *
 * The exchange and its sequence are freed when the last sequence is received.
 * It's possible for the remote port to leave an exchange open without
 * sending any sequences.
 *
 * Notes on reference counts:
 *
 * Exchanges are reference counted and an exchange is freed when its
 * reference count becomes zero.
 *
 * Timeouts:
 * Sequences are timed out for E_D_TOV and R_A_TOV.
 *
 * Sequence event handling:
 *
 * The following events may occur on initiator sequences:
 *
 *	Send.
 *	    For now, the whole thing is sent.
 *	Receive ACK
 *	    This applies only to class F.
 *	    The sequence is marked complete.
 *	ULP completion.
 *	    The upper layer calls fc_exch_done() when done
 *	    with exchange and sequence tuple.
 *	RX-inferred completion.
 *	    When we receive the next sequence on the same exchange, we can
 *	    retire the previous sequence ID. (XXX not implemented).
 *	Timeout.
 *	    R_A_TOV frees the sequence ID. If we're waiting for ACK,
 *	    E_D_TOV causes abort and calls upper layer response handler
 *	    with FC_EX_TIMEOUT error.
 *	Receive RJT
 *	    XXX defer.
 *	Send ABTS
 *	    On timeout.
 *
 * The following events may occur on recipient sequences:
 *
 *	Receive
 *	    Allocate sequence for first frame received.
 *	    Hold during receive handler.
 *	    Release when final frame received.
 *	    Keep status of last N of these for the ELS RES command. XXX TBD.
 *	Receive ABTS
 *	    Deallocate sequence
 *	Send RJT
 *	    Deallocate
 *
 * For now, we neglect conditions where only part of a sequence was
 * received or transmitted, or where out-of-order receipt is detected.
 */

/*
 * Locking notes:
 *
 * The EM code runs in a per-CPU worker thread.
 *
 * To protect against concurrency between worker thread code and timers,
 * sequence allocation and deallocation must be locked.
 *  - exchange refcnt can be done atomically without locks.
 *  - sequence allocation must be locked by exch lock.
 *  - If the EM pool lock and ex_lock must be taken at the same time, then the
 *    EM pool lock must be taken before the ex_lock.
 */

/*
 * opcode names for debugging.
 */
static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;

/**
 * fc_exch_name_lookup() - Lookup name by opcode
 * @op: Opcode to be looked up
 * @table: Opcode/name table
 * @max_index: Index not to be exceeded
 *
 * This routine is used to determine a human-readable string identifying
 * a R_CTL opcode.
 */
static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
					      unsigned int max_index)
{
	const char *name = NULL;

	if (op < max_index)
		name = table[op];
	if (!name)
		name = "unknown";
	return name;
}

/**
 * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup()
 * @op: The opcode to be looked up
 */
static const char *fc_exch_rctl_name(unsigned int op)
{
	return fc_exch_name_lookup(op, fc_exch_rctl_names,
				   ARRAY_SIZE(fc_exch_rctl_names));
}

/**
 * fc_exch_hold() - Increment an exchange's reference count
 * @ep: Exchange to be held
 */
static inline void fc_exch_hold(struct fc_exch *ep)
{
	atomic_inc(&ep->ex_refcnt);
}

/**
 * fc_exch_setup_hdr() - Initialize a FC header by initializing some fields
 *			 and determine SOF and EOF.
 * @ep: The exchange that will use the header
 * @fp: The frame whose header is to be modified
 * @f_ctl: F_CTL bits that will be used for the frame header
 *
 * The fields initialized by this routine are: fh_ox_id, fh_rx_id,
 * fh_seq_id, fh_seq_cnt and the SOF and EOF.
 */
static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
			      u32 f_ctl)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	u16 fill;

	fr_sof(fp) = ep->class;
	if (ep->seq.cnt)
		fr_sof(fp) = fc_sof_normal(ep->class);

	if (f_ctl & FC_FC_END_SEQ) {
		fr_eof(fp) = FC_EOF_T;
		if (fc_sof_needs_ack(ep->class))
			fr_eof(fp) = FC_EOF_N;
		/*
		 * From F_CTL.
		 * The number of fill bytes to make the length a 4-byte
		 * multiple is the low order 2-bits of the f_ctl.
		 * The fill itself will have been cleared by the frame
		 * allocation.
		 * After this, the length will be even, as expected by
		 * the transport.
		 */
		fill = fr_len(fp) & 3;
		if (fill) {
			fill = 4 - fill;
			/* TODO, this may be a problem with fragmented skb */
			skb_put(fp_skb(fp), fill);
			hton24(fh->fh_f_ctl, f_ctl | fill);
		}
	} else {
		WARN_ON(fr_len(fp) % 4 != 0);	/* no pad to non last frame */
		fr_eof(fp) = FC_EOF_N;
	}

	/*
	 * Initialize remaining fh fields
	 * from fc_fill_fc_hdr
	 */
	fh->fh_ox_id = htons(ep->oxid);
	fh->fh_rx_id = htons(ep->rxid);
	fh->fh_seq_id = ep->seq.id;
	fh->fh_seq_cnt = htons(ep->seq.cnt);
}

/**
 * fc_exch_release() - Decrement an exchange's reference count
 * @ep: Exchange to be released
 *
 * If the reference count reaches zero and the exchange is complete,
 * it is freed.
 */
static void fc_exch_release(struct fc_exch *ep)
{
	struct fc_exch_mgr *mp;

	if (atomic_dec_and_test(&ep->ex_refcnt)) {
		mp = ep->em;
		if (ep->destructor)
			ep->destructor(&ep->seq, ep->arg);
		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
		mempool_free(ep, mp->ep_pool);
	}
}

/**
 * fc_exch_timer_cancel() - cancel exch timer
 * @ep: The exchange whose timer is to be canceled
 */
static inline void fc_exch_timer_cancel(struct fc_exch *ep)
{
	if (cancel_delayed_work(&ep->timeout_work)) {
		FC_EXCH_DBG(ep, "Exchange timer canceled\n");
		atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
	}
}

/**
 * fc_exch_timer_set_locked() - Start a timer for an exchange with the
 *				exchange lock held
 * @ep: The exchange whose timer will start
 * @timer_msec: The timeout period
 *
 * Used for upper level protocols to time out the exchange.
 * The timer is cancelled when it fires or when the exchange completes.
 */
static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
					    unsigned int timer_msec)
{
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		return;

	FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);

	if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		fc_exch_hold(ep);	/* hold for timer */
}

/**
 * fc_exch_timer_set() - Lock the exchange and set the timer
 * @ep: The exchange whose timer will start
 * @timer_msec: The timeout period
 */
static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
{
	spin_lock_bh(&ep->ex_lock);
	fc_exch_timer_set_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);
}

/**
 * fc_exch_done_locked() - Complete an exchange with the exchange lock held
 * @ep: The exchange that is complete
 */
static int fc_exch_done_locked(struct fc_exch *ep)
{
	int rc = 1;

	/*
	 * We must check for completion in case there are two threads
	 * trying to complete this. But the rrq code will reuse the
	 * ep, and in that case we only clear the resp and set it as
	 * complete, so it can be reused by the timer to send the rrq.
	 */
	ep->resp = NULL;
	if (ep->state & FC_EX_DONE)
		return rc;
	ep->esb_stat |= ESB_ST_COMPLETE;

	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
		ep->state |= FC_EX_DONE;
		fc_exch_timer_cancel(ep);
		rc = 0;
	}
	return rc;
}

/**
 * fc_exch_ptr_get() - Return an exchange from an exchange pool
 * @pool: Exchange Pool to get an exchange from
 * @index: Index of the exchange within the pool
 *
 * Use the index to get an exchange from within an exchange pool.
 * exches will point to an array of exchange pointers. The index will
 * select the exchange within the array.
 */
static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
					      u16 index)
{
	struct fc_exch **exches = (struct fc_exch **)(pool + 1);
	return exches[index];
}

/**
 * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool
 * @pool: The pool to assign the exchange to
 * @index: The index in the pool where the exchange will be assigned
 * @ep: The exchange to assign to the pool
 */
static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
				   struct fc_exch *ep)
{
	((struct fc_exch **)(pool + 1))[index] = ep;
}

/**
 * fc_exch_delete() - Delete an exchange
 * @ep: The exchange to be deleted
 */
static void fc_exch_delete(struct fc_exch *ep)
{
	struct fc_exch_pool *pool;
	u16 index;

	pool = ep->pool;
	spin_lock_bh(&pool->lock);
	WARN_ON(pool->total_exches <= 0);
	pool->total_exches--;

	/* update cache of free slot */
	index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
	if (pool->left == FC_XID_UNKNOWN)
		pool->left = index;
	else if (pool->right == FC_XID_UNKNOWN)
		pool->right = index;
	else
		pool->next_index = index;

	fc_exch_ptr_set(pool, index, NULL);
	list_del(&ep->ex_list);
	spin_unlock_bh(&pool->lock);
	fc_exch_release(ep);	/* drop hold for exch in mp */
}

static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
			      struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	int error;
	u32 f_ctl;
	u8 fh_type = fh->fh_type;

	ep = fc_seq_exch(sp);
	WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));

	f_ctl = ntoh24(fh->fh_f_ctl);
	fc_exch_setup_hdr(ep, fp, f_ctl);
	fr_encaps(fp) = ep->encaps;

	/*
	 * update sequence count if this frame is carrying
	 * multiple FC frames when sequence offload is enabled
	 * by LLD.
	 */
	if (fr_max_payload(fp))
		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
					fr_max_payload(fp));
	else
		sp->cnt++;

	/*
	 * Send the frame.
	 */
	error = lport->tt.frame_send(lport, fp);

	if (fh_type == FC_TYPE_BLS)
		goto out;

	/*
	 * Update the exchange and sequence flags,
	 * assuming all frames for the sequence have been sent.
	 * We can only be called to send once for each sequence.
	 */
	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
out:
	return error;
}

/**
 * fc_seq_send() - Send a frame using existing sequence/exchange pair
 * @lport: The local port that the exchange will be sent on
 * @sp: The sequence to be sent
 * @fp: The frame to be sent on the exchange
 */
static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
		       struct fc_frame *fp)
{
	struct fc_exch *ep;
	int error;

	ep = fc_seq_exch(sp);
	spin_lock_bh(&ep->ex_lock);
	error = fc_seq_send_locked(lport, sp, fp);
	spin_unlock_bh(&ep->ex_lock);
	return error;
}

/**
 * fc_seq_alloc() - Allocate a sequence for a given exchange
 * @ep: The exchange to allocate a new sequence for
 * @seq_id: The sequence ID to be used
 *
 * We don't support multiple originated sequences on the same exchange.
 * By implication, any previously originated sequence on this exchange
 * is complete, and we reallocate the same sequence.
 */
static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
{
	struct fc_seq *sp;

	sp = &ep->seq;
	sp->ssb_stat = 0;
	sp->cnt = 0;
	sp->id = seq_id;
	return sp;
}

/**
 * fc_seq_start_next_locked() - Allocate a new sequence on the same
 *				exchange as the supplied sequence
 * @sp: The sequence/exchange to get a new sequence for
 */
static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	sp = fc_seq_alloc(ep, ep->seq_id++);
	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
		    ep->f_ctl, sp->id);
	return sp;
}

/**
 * fc_seq_start_next() - Lock the exchange and get a new sequence
 *			 for a given sequence/exchange pair
 * @sp: The sequence/exchange to get a new sequence for
 */
static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	spin_lock_bh(&ep->ex_lock);
	sp = fc_seq_start_next_locked(sp);
	spin_unlock_bh(&ep->ex_lock);

	return sp;
}

/*
 * Set the response handler for the exchange associated with a sequence.
 */
static void fc_seq_set_resp(struct fc_seq *sp,
			    void (*resp)(struct fc_seq *, struct fc_frame *,
					 void *),
			    void *arg)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	spin_lock_bh(&ep->ex_lock);
	ep->resp = resp;
	ep->arg = arg;
	spin_unlock_bh(&ep->ex_lock);
}

/**
 * fc_exch_abort_locked() - Abort an exchange
 * @ep: The exchange to be aborted
 * @timer_msec: The period of time to wait before aborting
 *
 * Locking notes: Called with exch lock held
 *
 * Return value: 0 on success else error code
 */
static int fc_exch_abort_locked(struct fc_exch *ep,
				unsigned int timer_msec)
{
	struct fc_seq *sp;
	struct fc_frame *fp;
	int error;

	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
		return -ENXIO;

	/*
	 * Send the abort on a new sequence if possible.
	 */
	sp = fc_seq_start_next_locked(&ep->seq);
	if (!sp)
		return -ENOMEM;

	ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
	if (timer_msec)
		fc_exch_timer_set_locked(ep, timer_msec);

	/*
	 * If not logged into the fabric, don't send ABTS but leave
	 * sequence active until next timeout.
	 */
	if (!ep->sid)
		return 0;

	/*
	 * Send an abort for the sequence that timed out.
	 */
	fp = fc_frame_alloc(ep->lp, 0);
	if (fp) {
		fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
			       FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
		error = fc_seq_send_locked(ep->lp, sp, fp);
	} else
		error = -ENOBUFS;
	return error;
}

/**
 * fc_seq_exch_abort() - Abort an exchange and sequence
 * @req_sp: The sequence to be aborted
 * @timer_msec: The period of time to wait before aborting
 *
 * Generally called because of a timeout or an abort from the upper layer.
 *
 * Return value: 0 on success else error code
 */
static int fc_seq_exch_abort(const struct fc_seq *req_sp,
			     unsigned int timer_msec)
{
	struct fc_exch *ep;
	int error;

	ep = fc_seq_exch(req_sp);
	spin_lock_bh(&ep->ex_lock);
	error = fc_exch_abort_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);
	return error;
}

/**
 * fc_exch_timeout() - Handle exchange timer expiration
 * @work: The work_struct identifying the exchange that timed out
 */
static void fc_exch_timeout(struct work_struct *work)
{
	struct fc_exch *ep = container_of(work, struct fc_exch,
					  timeout_work.work);
	struct fc_seq *sp = &ep->seq;
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *arg;
	u32 e_stat;
	int rc = 1;

	FC_EXCH_DBG(ep, "Exchange timed out\n");

	spin_lock_bh(&ep->ex_lock);
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		goto unlock;

	e_stat = ep->esb_stat;
	if (e_stat & ESB_ST_COMPLETE) {
		ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
		spin_unlock_bh(&ep->ex_lock);
		if (e_stat & ESB_ST_REC_QUAL)
			fc_exch_rrq(ep);
		goto done;
	} else {
		resp = ep->resp;
		arg = ep->arg;
		ep->resp = NULL;
		if (e_stat & ESB_ST_ABNORMAL)
			rc = fc_exch_done_locked(ep);
		spin_unlock_bh(&ep->ex_lock);
		if (!rc)
			fc_exch_delete(ep);
		if (resp)
			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
		goto done;
	}
unlock:
	spin_unlock_bh(&ep->ex_lock);
done:
	/*
	 * This release matches the hold taken when the timer was set.
	 */
	fc_exch_release(ep);
}

/**
 * fc_exch_em_alloc() - Allocate an exchange from a specified EM.
 * @lport: The local port that the exchange is for
 * @mp: The exchange manager that will allocate the exchange
 *
 * Returns pointer to allocated fc_exch with exch lock held.
 */
static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
					struct fc_exch_mgr *mp)
{
	struct fc_exch *ep;
	unsigned int cpu;
	u16 index;
	struct fc_exch_pool *pool;

	/* allocate memory for exchange */
	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
	if (!ep) {
		atomic_inc(&mp->stats.no_free_exch);
		goto out;
	}
	memset(ep, 0, sizeof(*ep));

	cpu = get_cpu();
	pool = per_cpu_ptr(mp->pool, cpu);
	spin_lock_bh(&pool->lock);
	put_cpu();

	/* peek cache of free slot */
	if (pool->left != FC_XID_UNKNOWN) {
		index = pool->left;
		pool->left = FC_XID_UNKNOWN;
		goto hit;
	}
	if (pool->right != FC_XID_UNKNOWN) {
		index = pool->right;
		pool->right = FC_XID_UNKNOWN;
		goto hit;
	}

	index = pool->next_index;
	/* allocate new exch from pool */
	while (fc_exch_ptr_get(pool, index)) {
		index = index == mp->pool_max_index ? 0 : index + 1;
		if (index == pool->next_index)
			goto err;
	}
	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
hit:
	fc_exch_hold(ep);	/* hold for exch in mp */
	spin_lock_init(&ep->ex_lock);
	/*
	 * Hold exch lock for caller to prevent fc_exch_reset()
	 * from releasing exch while fc_exch_alloc() caller is
	 * still working on exch.
	 */
	spin_lock_bh(&ep->ex_lock);

	fc_exch_ptr_set(pool, index, ep);
	list_add_tail(&ep->ex_list, &pool->ex_list);
	fc_seq_alloc(ep, ep->seq_id++);
	pool->total_exches++;
	spin_unlock_bh(&pool->lock);

	/*
	 * update exchange
	 */
	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
	ep->em = mp;
	ep->pool = pool;
	ep->lp = lport;
	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
	ep->rxid = FC_XID_UNKNOWN;
	ep->class = mp->class;
	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
out:
	return ep;
err:
	spin_unlock_bh(&pool->lock);
	atomic_inc(&mp->stats.no_free_exch_xid);
	mempool_free(ep, mp->ep_pool);
	return NULL;
}

/**
 * fc_exch_alloc() - Allocate an exchange from an EM on a
 *		     local port's list of EMs.
 * @lport: The local port that will own the exchange
 * @fp: The FC frame that the exchange will be for
 *
 * This function walks the list of exchange manager (EM)
 * anchors to select an EM for a new exchange allocation. The
 * EM is selected when a NULL match function pointer is encountered
 * or when a call to a match function returns true.
 */
static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
					    struct fc_frame *fp)
{
	struct fc_exch_mgr_anchor *ema;

	list_for_each_entry(ema, &lport->ema_list, ema_list)
		if (!ema->match || ema->match(fp))
			return fc_exch_em_alloc(lport, ema->mp);
	return NULL;
}

/**
 * fc_exch_find() - Lookup and hold an exchange
 * @mp: The exchange manager to lookup the exchange from
 * @xid: The XID of the exchange to look up
 */
static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
{
	struct fc_exch_pool *pool;
	struct fc_exch *ep = NULL;

	if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
		pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
		spin_lock_bh(&pool->lock);
		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
		if (ep && ep->xid == xid)
			fc_exch_hold(ep);
		spin_unlock_bh(&pool->lock);
	}
	return ep;
}


/**
 * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
 *		    the memory allocated for the related objects may be freed.
 * @sp: The sequence that has completed
 */
static void fc_exch_done(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);
	int rc;

	spin_lock_bh(&ep->ex_lock);
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);
}

/**
 * fc_exch_resp() - Allocate a new exchange for a response frame
 * @lport: The local port that the exchange was for
 * @mp: The exchange manager to allocate the exchange from
 * @fp: The response frame
 *
 * Sets the responder ID in the frame header.
 */
static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
				    struct fc_exch_mgr *mp,
				    struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh;

	ep = fc_exch_alloc(lport, fp);
	if (ep) {
		ep->class = fc_frame_class(fp);

		/*
		 * Set EX_CTX indicating we're responding on this exchange.
		 */
		ep->f_ctl |= FC_FC_EX_CTX;	/* we're responding */
		ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not new */
		fh = fc_frame_header_get(fp);
		ep->sid = ntoh24(fh->fh_d_id);
		ep->did = ntoh24(fh->fh_s_id);
		ep->oid = ep->did;

		/*
		 * Allocated exchange has placed the XID in the
		 * originator field. Move it to the responder field,
		 * and set the originator XID from the frame.
		 */
		ep->rxid = ep->xid;
		ep->oxid = ntohs(fh->fh_ox_id);
		ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
		if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
			ep->esb_stat &= ~ESB_ST_SEQ_INIT;

		fc_exch_hold(ep);	/* hold for caller */
		spin_unlock_bh(&ep->ex_lock);	/* lock from fc_exch_alloc */
	}
	return ep;
}

/**
 * fc_seq_lookup_recip() - Find a sequence where the other end
 *			   originated the sequence
 * @lport: The local port that the frame was sent to
 * @mp: The Exchange Manager to lookup the exchange from
 * @fp: The frame associated with the sequence we're looking for
 *
 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
 * on the ep that should be released by the caller.
 */
static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
						 struct fc_exch_mgr *mp,
						 struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch *ep = NULL;
	struct fc_seq *sp = NULL;
	enum fc_pf_rjt_reason reject = FC_RJT_NONE;
	u32 f_ctl;
	u16 xid;

	f_ctl = ntoh24(fh->fh_f_ctl);
	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);

	/*
	 * Lookup or create the exchange if we will be creating the sequence.
	 */
	if (f_ctl & FC_FC_EX_CTX) {
		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
		ep = fc_exch_find(mp, xid);
		if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_OX_ID;
			goto out;
		}
		if (ep->rxid == FC_XID_UNKNOWN)
			ep->rxid = ntohs(fh->fh_rx_id);
		else if (ep->rxid != ntohs(fh->fh_rx_id)) {
			reject = FC_RJT_OX_ID;
			goto rel;
		}
	} else {
		xid = ntohs(fh->fh_rx_id);	/* we are the responder */

		/*
		 * Special case for MDS issuing an ELS TEST with a
		 * bad rxid of 0.
		 * XXX take this out once we do the proper reject.
		 */
		if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
		    fc_frame_payload_op(fp) == ELS_TEST) {
			fh->fh_rx_id = htons(FC_XID_UNKNOWN);
			xid = FC_XID_UNKNOWN;
		}

		/*
		 * new sequence - find the exchange
		 */
		ep = fc_exch_find(mp, xid);
		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
			if (ep) {
				atomic_inc(&mp->stats.xid_busy);
				reject = FC_RJT_RX_ID;
				goto rel;
			}
			ep = fc_exch_resp(lport, mp, fp);
			if (!ep) {
				reject = FC_RJT_EXCH_EST;	/* XXX */
				goto out;
			}
			xid = ep->xid;	/* get our XID */
		} else if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_RX_ID;	/* XID not found */
			goto out;
		}
	}

	/*
	 * At this point, we have the exchange held.
	 * Find or create the sequence.
	 */
	if (fc_sof_is_init(fr_sof(fp))) {
		sp = &ep->seq;
		sp->ssb_stat |= SSB_ST_RESP;
		sp->id = fh->fh_seq_id;
	} else {
		sp = &ep->seq;
		if (sp->id != fh->fh_seq_id) {
			atomic_inc(&mp->stats.seq_not_found);
			if (f_ctl & FC_FC_END_SEQ) {
				/*
				 * Update sequence_id based on incoming last
				 * frame of sequence exchange.
				 * This is needed for FC targets using DDP,
				 * where the stack is notified only of the
				 * last frame's (payload) header. The seq_id
				 * in that frame header was assigned by the
				 * initiator and is different from the seq_id
				 * assigned when the target sent XFER_RDY.
				 * Without this update the lookup would fail,
				 * no RSP would be sent, and the write request
				 * on the other end would never finish.
				 */
				spin_lock_bh(&ep->ex_lock);
				sp->ssb_stat |= SSB_ST_RESP;
				sp->id = fh->fh_seq_id;
				spin_unlock_bh(&ep->ex_lock);
			} else {
				/* sequence/exch should exist */
				reject = FC_RJT_SEQ_ID;
				goto rel;
			}
		}
	}
	WARN_ON(ep != fc_seq_exch(sp));

	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat |= ESB_ST_SEQ_INIT;

	fr_seq(fp) = sp;
out:
	return reject;
rel:
	fc_exch_done(&ep->seq);
	fc_exch_release(ep);	/* hold from fc_exch_find/fc_exch_resp */
	return reject;
}

/**
 * fc_seq_lookup_orig() - Find a sequence where this end
 *			  originated the sequence
 * @mp: The Exchange Manager to lookup the exchange from
 * @fp: The frame associated with the sequence we're looking for
 *
 * Does not hold the sequence for the caller.
 */
static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
					 struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch *ep;
	struct fc_seq *sp = NULL;
	u32 f_ctl;
	u16 xid;

	f_ctl = ntoh24(fh->fh_f_ctl);
	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
	xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
	ep = fc_exch_find(mp, xid);
	if (!ep)
		return NULL;
	if (ep->seq.id == fh->fh_seq_id) {
		/*
		 * Save the RX_ID if we didn't previously know it.
		 */
		sp = &ep->seq;
		if ((f_ctl & FC_FC_EX_CTX) != 0 &&
		    ep->rxid == FC_XID_UNKNOWN) {
			ep->rxid = ntohs(fh->fh_rx_id);
		}
	}
	fc_exch_release(ep);
	return sp;
}

/**
 * fc_exch_set_addr() - Set the source and destination IDs for an exchange
 * @ep: The exchange to set the addresses for
 * @orig_id: The originator's ID
 * @resp_id: The responder's ID
 *
 * Note this must be done before the first sequence of the exchange is sent.
 */
static void fc_exch_set_addr(struct fc_exch *ep,
			     u32 orig_id, u32 resp_id)
{
	ep->oid = orig_id;
	if (ep->esb_stat & ESB_ST_RESP) {
		ep->sid = resp_id;
		ep->did = orig_id;
	} else {
		ep->sid = orig_id;
		ep->did = resp_id;
	}
}

/**
 * fc_seq_els_rsp_send() - Send an ELS response using information from
 *			   the existing sequence/exchange.
 * @fp: The received frame
 * @els_cmd: The ELS command to be sent
 * @els_data: The ELS data to be sent
 *
 * The received frame is not freed.
 */
static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
				struct fc_seq_els_data *els_data)
{
	switch (els_cmd) {
	case ELS_LS_RJT:
		fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
		break;
	case ELS_LS_ACC:
		fc_seq_ls_acc(fp);
		break;
	case ELS_RRQ:
		fc_exch_els_rrq(fp);
		break;
	case ELS_REC:
		fc_exch_els_rec(fp);
		break;
	default:
		FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
	}
}

/**
 * fc_seq_send_last() - Send a sequence that is the last in the exchange
 * @sp: The sequence that is to be sent
 * @fp: The frame that will be sent on the sequence
 * @rctl: The R_CTL information to be sent
 * @fh_type: The frame header type
 */
static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
			     enum fc_rctl rctl, enum fc_fh_type fh_type)
{
	u32 f_ctl;
	struct fc_exch *ep = fc_seq_exch(sp);

	f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
	f_ctl |= ep->f_ctl;
	fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
	fc_seq_send_locked(ep->lp, sp, fp);
}

/**
 * fc_seq_send_ack() - Send an acknowledgement that we've received a frame
 * @sp: The sequence to send the ACK on
 * @rx_fp: The received frame that is being acknowledged
 *
 * Send ACK_1 (or equiv.) indicating we received something.
 */
static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_exch *ep = fc_seq_exch(sp);
	struct fc_lport *lport = ep->lp;
	unsigned int f_ctl;

	/*
	 * Don't send ACKs for class 3.
	 */
	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
		fp = fc_frame_alloc(lport, 0);
		if (!fp)
			return;

		fh = fc_frame_header_get(fp);
		fh->fh_r_ctl = FC_RCTL_ACK_1;
		fh->fh_type = FC_TYPE_BLS;

		/*
		 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
		 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
		 * Bits 9-8 are meaningful (retransmitted or unidirectional).
		 * Last ACK uses bits 7-6 (continue sequence),
		 * bits 5-4 are meaningful (what kind of ACK to use).
		 */
		rx_fh = fc_frame_header_get(rx_fp);
		f_ctl = ntoh24(rx_fh->fh_f_ctl);
		f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
			FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
			FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
			FC_FC_RETX_SEQ | FC_FC_UNI_TX;
		f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
		hton24(fh->fh_f_ctl, f_ctl);

		fc_exch_setup_hdr(ep, fp, f_ctl);
		fh->fh_seq_id = rx_fh->fh_seq_id;
		fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
		fh->fh_parm_offset = htonl(1);	/* ack single frame */

		fr_sof(fp) = fr_sof(rx_fp);
		if (f_ctl & FC_FC_END_SEQ)
			fr_eof(fp) = FC_EOF_T;
		else
			fr_eof(fp) = FC_EOF_N;

		lport->tt.frame_send(lport, fp);
	}
}

/**
 * fc_exch_send_ba_rjt() - Send BLS Reject
 * @rx_fp: The frame being rejected
 * @reason: The reason the frame is being rejected
 * @explan: The explanation for the rejection
 *
 * This is for rejecting BA_ABTS only.
 */
static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
				enum fc_ba_rjt_reason reason,
				enum fc_ba_rjt_explan explan)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_ba_rjt *rp;
	struct fc_lport *lport;
	unsigned int f_ctl;

	lport = fr_dev(rx_fp);
	fp = fc_frame_alloc(lport, sizeof(*rp));
	if (!fp)
		return;
	fh = fc_frame_header_get(fp);
	rx_fh = fc_frame_header_get(rx_fp);

	memset(fh, 0, sizeof(*fh) + sizeof(*rp));

	rp = fc_frame_payload_get(fp, sizeof(*rp));
	rp->br_reason = reason;
	rp->br_explan = explan;

	/*
	 * seq_id, cs_ctl, df_ctl and param/offset are zero.
	 */
	memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
	memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
	fh->fh_ox_id = rx_fh->fh_ox_id;
	fh->fh_rx_id = rx_fh->fh_rx_id;
	fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
	fh->fh_r_ctl = FC_RCTL_BA_RJT;
	fh->fh_type = FC_TYPE_BLS;

	/*
	 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
	 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
	 * Bits 9-8 are meaningful (retransmitted or unidirectional).
	 * Last ACK uses bits 7-6 (continue sequence),
	 * bits 5-4 are meaningful (what kind of ACK to use).
	 * Always set LAST_SEQ, END_SEQ.
	 */
	f_ctl = ntoh24(rx_fh->fh_f_ctl);
	f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
		FC_FC_END_CONN | FC_FC_SEQ_INIT |
		FC_FC_RETX_SEQ | FC_FC_UNI_TX;
	f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
	f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
	f_ctl &= ~FC_FC_FIRST_SEQ;
	hton24(fh->fh_f_ctl, f_ctl);

	fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
	fr_eof(fp) = FC_EOF_T;
	if (fc_sof_needs_ack(fr_sof(fp)))
		fr_eof(fp) = FC_EOF_N;

	lport->tt.frame_send(lport, fp);
}

/**
 * fc_exch_recv_abts() - Handle an incoming ABTS
 * @ep: The exchange the abort was on
 * @rx_fp: The ABTS frame
 *
 * This would be for target mode usually, but could be due to lost
 * FCP transfer ready, confirm or RRQ. We always handle this as an
 * exchange abort, ignoring the parameter.
 */
static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_ba_acc *ap;
	struct fc_frame_header *fh;
	struct fc_seq *sp;

	if (!ep)
		goto reject;
	spin_lock_bh(&ep->ex_lock);
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		spin_unlock_bh(&ep->ex_lock);
		goto reject;
	}
	if (!(ep->esb_stat & ESB_ST_REC_QUAL))
		fc_exch_hold(ep);	/* hold for REC_QUAL */
	ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
	fc_exch_timer_set_locked(ep, ep->r_a_tov);

	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
	if (!fp) {
		spin_unlock_bh(&ep->ex_lock);
		goto free;
	}
	fh = fc_frame_header_get(fp);
	ap = fc_frame_payload_get(fp, sizeof(*ap));
	memset(ap, 0, sizeof(*ap));
	sp = &ep->seq;
	ap->ba_high_seq_cnt = htons(0xffff);
	if (sp->ssb_stat & SSB_ST_RESP) {
		ap->ba_seq_id = sp->id;
		ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
		ap->ba_high_seq_cnt = fh->fh_seq_cnt;
		ap->ba_low_seq_cnt = htons(sp->cnt);
	}
	sp = fc_seq_start_next_locked(sp);
	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
	spin_unlock_bh(&ep->ex_lock);
	fc_frame_free(rx_fp);
	return;

reject:
	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
free:
	fc_frame_free(rx_fp);
}

/**
 * fc_seq_assign() - Assign exchange and sequence for incoming request
 * @lport: The local port that received the request
 * @fp: The request frame
 *
 * On success, the sequence pointer will be returned and also in fr_seq(@fp).
 * A reference will be held on the exchange/sequence for the caller, which
 * must call fc_seq_release().
 */
static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_exch_mgr_anchor *ema;

	WARN_ON(lport != fr_dev(fp));
	WARN_ON(fr_seq(fp));
	fr_seq(fp) = NULL;

	list_for_each_entry(ema, &lport->ema_list, ema_list)
		if ((!ema->match || ema->match(fp)) &&
		    fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
			break;
	return fr_seq(fp);
}

/**
 * fc_seq_release() - Release the hold
 * @sp: The sequence.
 */
static void fc_seq_release(struct fc_seq *sp)
{
	fc_exch_release(fc_seq_exch(sp));
}

/**
 * fc_exch_recv_req() - Handler for an incoming request
 * @lport: The local port that received the request
 * @mp: The EM that the exchange is on
 * @fp: The request frame
 *
 * This is used when the other end is originating the exchange
 * and the sequence.
 */
static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
			     struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp = NULL;
	struct fc_exch *ep = NULL;
	enum fc_pf_rjt_reason reject;

	/* We can have the wrong fc_lport at this point with NPIV, which is a
	 * problem now that we know a new exchange needs to be allocated
	 */
	lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
	if (!lport) {
		fc_frame_free(fp);
		return;
	}
	fr_dev(fp) = lport;

	BUG_ON(fr_seq(fp));	/* XXX remove later */

	/*
	 * If the RX_ID is 0xffff, don't allocate an exchange.
	 * The upper-level protocol may request one later, if needed.
	 */
	if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
		return lport->tt.lport_recv(lport, fp);

	reject = fc_seq_lookup_recip(lport, mp, fp);
	if (reject == FC_RJT_NONE) {
		sp = fr_seq(fp);	/* sequence will be held */
		ep = fc_seq_exch(sp);
		fc_seq_send_ack(sp, fp);
		ep->encaps = fr_encaps(fp);

		/*
		 * Call the receive function.
		 *
		 * The receive function may allocate a new sequence
		 * over the old one, so we shouldn't change the
		 * sequence after this.
		 *
		 * The frame will be freed by the receive function.
		 * If new exch resp handler is valid then call that
		 * first.
		 */
		if (ep->resp)
			ep->resp(sp, fp, ep->arg);
		else
			lport->tt.lport_recv(lport, fp);
		fc_exch_release(ep);	/* release from lookup */
	} else {
		FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
			     reject);
		fc_frame_free(fp);
	}
}

/**
 * fc_exch_recv_seq_resp() - Handler for an incoming response where the other
 *			     end is the originator of the sequence that is a
 *			     response to our initial exchange
 * @mp: The EM that the exchange is on
 * @fp: The response frame
 */
static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp;
	struct fc_exch *ep;
	enum fc_sof sof;
	u32 f_ctl;
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *ex_resp_arg;
	int rc;

	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
	if (!ep) {
		atomic_inc(&mp->stats.xid_not_found);
		goto out;
	}
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	if (ep->rxid == FC_XID_UNKNOWN)
		ep->rxid = ntohs(fh->fh_rx_id);
	if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	if (ep->did != ntoh24(fh->fh_s_id) &&
	    ep->did != FC_FID_FLOGI) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	sof = fr_sof(fp);
	sp = &ep->seq;
	if (fc_sof_is_init(sof)) {
		sp->ssb_stat |= SSB_ST_RESP;
		sp->id = fh->fh_seq_id;
	} else if (sp->id != fh->fh_seq_id) {
		atomic_inc(&mp->stats.seq_not_found);
		goto rel;
	}

	f_ctl = ntoh24(fh->fh_f_ctl);
	fr_seq(fp) = sp;
	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat |= ESB_ST_SEQ_INIT;

	if (fc_sof_needs_ack(sof))
		fc_seq_send_ack(sp, fp);
	resp = ep->resp;
	ex_resp_arg = ep->arg;

	if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		spin_lock_bh(&ep->ex_lock);
		resp = ep->resp;
		rc = fc_exch_done_locked(ep);
		WARN_ON(fc_seq_exch(sp) != ep);
		spin_unlock_bh(&ep->ex_lock);
		if (!rc)
			fc_exch_delete(ep);
	}

	/*
	 * Call the receive function.
	 * The sequence is held (has a refcnt) for us,
	 * but not for the receive function.
	 *
	 * The receive function may allocate a new sequence
	 * over the old one, so we shouldn't change the
	 * sequence after this.
	 *
	 * The frame will be freed by the receive function.
	 * If new exch resp handler is valid then call that
	 * first.
	 */
	if (resp)
		resp(sp, fp, ex_resp_arg);
	else
		fc_frame_free(fp);
	fc_exch_release(ep);
	return;
rel:
	fc_exch_release(ep);
out:
	fc_frame_free(fp);
}

/**
 * fc_exch_recv_resp() - Handler for a sequence where other end is
 *			 responding to our sequence
 * @mp: The EM that the exchange is on
 * @fp: The response frame
 */
static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_seq *sp;

	sp = fc_seq_lookup_orig(mp, fp);	/* doesn't hold sequence */

	if (!sp)
		atomic_inc(&mp->stats.xid_not_found);
	else
		atomic_inc(&mp->stats.non_bls_resp);

	fc_frame_free(fp);
}

/**
 * fc_exch_abts_resp() - Handler for a response to an ABTS
 * @ep: The exchange that the frame is on
 * @fp: The response frame
 *
 * This response would be to an ABTS cancelling an exchange or sequence.
 * The response can be either BA_ACC or BA_RJT.
 */
static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
{
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *ex_resp_arg;
	struct fc_frame_header *fh;
	struct fc_ba_acc *ap;
	struct fc_seq *sp;
	u16 low;
	u16 high;
	int rc = 1, has_rec = 0;

	fh = fc_frame_header_get(fp);
	FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
		    fc_exch_rctl_name(fh->fh_r_ctl));

	if (cancel_delayed_work_sync(&ep->timeout_work)) {
		FC_EXCH_DBG(ep, "Exchange timer canceled\n");
		fc_exch_release(ep);	/* release from pending timer hold */
	}

	spin_lock_bh(&ep->ex_lock);
	switch (fh->fh_r_ctl) {
	case FC_RCTL_BA_ACC:
		ap = fc_frame_payload_get(fp, sizeof(*ap));
		if (!ap)
			break;

		/*
		 * Decide whether to establish a Recovery Qualifier.
		 * We do this if there is a non-empty SEQ_CNT range and
		 * SEQ_ID is the same as the one we aborted.
		 */
		low = ntohs(ap->ba_low_seq_cnt);
		high = ntohs(ap->ba_high_seq_cnt);
		if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
		    (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
		     ap->ba_seq_id == ep->seq_id) && low != high) {
			ep->esb_stat |= ESB_ST_REC_QUAL;
			fc_exch_hold(ep);	/* hold for recovery qualifier */
			has_rec = 1;
		}
		break;
	case FC_RCTL_BA_RJT:
		break;
	default:
		break;
	}

	resp = ep->resp;
	ex_resp_arg = ep->arg;

	/* do we need to do some other checks here. Can we reuse more of
	 * fc_exch_recv_seq_resp
	 */
	sp = &ep->seq;
	/*
	 * do we want to check END_SEQ as well as LAST_SEQ here?
	 */
	if (ep->fh_type != FC_TYPE_FCP &&
	    ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
		rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);

	if (resp)
		resp(sp, fp, ex_resp_arg);
	else
		fc_frame_free(fp);

	if (has_rec)
		fc_exch_timer_set(ep, ep->r_a_tov);

}

/**
 * fc_exch_recv_bls() - Handler for a BLS sequence
 * @mp: The EM that the exchange is on
 * @fp: The request frame
 *
 * The BLS frame is always a sequence initiated by the remote side.
 * We may be either the originator or recipient of the exchange.
 */
static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_exch *ep;
	u32 f_ctl;

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);
	fr_seq(fp) = NULL;

	ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
			  ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
	if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
		spin_lock_bh(&ep->ex_lock);
		ep->esb_stat |= ESB_ST_SEQ_INIT;
		spin_unlock_bh(&ep->ex_lock);
	}
	if (f_ctl & FC_FC_SEQ_CTX) {
		/*
		 * A response to a sequence we initiated.
		 * This should only be ACKs for class 2 or F.
		 */
		switch (fh->fh_r_ctl) {
		case FC_RCTL_ACK_1:
		case FC_RCTL_ACK_0:
			break;
		default:
			if (ep)
				FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
					    fh->fh_r_ctl,
					    fc_exch_rctl_name(fh->fh_r_ctl));
			break;
		}
		fc_frame_free(fp);
	} else {
		switch (fh->fh_r_ctl) {
		case FC_RCTL_BA_RJT:
		case FC_RCTL_BA_ACC:
			if (ep)
				fc_exch_abts_resp(ep, fp);
			else
				fc_frame_free(fp);
			break;
		case FC_RCTL_BA_ABTS:
			fc_exch_recv_abts(ep, fp);
			break;
		default:	/* ignore junk */
			fc_frame_free(fp);
			break;
		}
	}
	if (ep)
		fc_exch_release(ep);	/* release hold taken by fc_exch_find */
}

/**
 * fc_seq_ls_acc() - Accept sequence with LS_ACC
 * @rx_fp: The received frame, not freed here.
 *
 * If this fails due to allocation or transmit congestion, assume the
 * originator will repeat the sequence.
 */
static void fc_seq_ls_acc(struct fc_frame *rx_fp)
{
	struct fc_lport *lport;
	struct fc_els_ls_acc *acc;
	struct fc_frame *fp;

	lport = fr_dev(rx_fp);
	fp = fc_frame_alloc(lport, sizeof(*acc));
	if (!fp)
		return;
	acc = fc_frame_payload_get(fp, sizeof(*acc));
	memset(acc, 0, sizeof(*acc));
	acc->la_cmd = ELS_LS_ACC;
	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
}

/**
 * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT
 * @rx_fp: The received frame, not freed here.
 * @reason: The reason the sequence is being rejected
 * @explan: The explanation for the rejection
 *
 * If this fails due to allocation or transmit congestion, assume the
 * originator will repeat the sequence.
 */
static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
			  enum fc_els_rjt_explan explan)
{
	struct fc_lport *lport;
	struct fc_els_ls_rjt *rjt;
	struct fc_frame *fp;

	lport = fr_dev(rx_fp);
	fp = fc_frame_alloc(lport, sizeof(*rjt));
	if (!fp)
		return;
	rjt = fc_frame_payload_get(fp, sizeof(*rjt));
	memset(rjt, 0, sizeof(*rjt));
	rjt->er_cmd = ELS_LS_RJT;
	rjt->er_reason = reason;
	rjt->er_explan = explan;
	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
}

/**
 * fc_exch_reset() - Reset an exchange
 * @ep: The exchange to be reset
 */
static void fc_exch_reset(struct fc_exch *ep)
{
	struct fc_seq *sp;
	void (*resp)(struct fc_seq *, struct fc_frame *, void *);
	void *arg;
	int rc = 1;

	spin_lock_bh(&ep->ex_lock);
	fc_exch_abort_locked(ep, 0);
	ep->state |= FC_EX_RST_CLEANUP;
	fc_exch_timer_cancel(ep);
	resp = ep->resp;
	ep->resp = NULL;
	if (ep->esb_stat & ESB_ST_REC_QUAL)
		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec_qual */
	ep->esb_stat &= ~ESB_ST_REC_QUAL;
	arg = ep->arg;
	sp = &ep->seq;
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);

	if (resp)
		resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
}

/**
 * fc_exch_pool_reset() - Reset a per cpu exchange pool
 * @lport: The local port that the exchange pool is on
 * @pool: The exchange pool to be reset
 * @sid: The source ID
 * @did: The destination ID
 *
 * Resets a per cpu exchange pool, releasing all of its sequences
 * and exchanges. If sid is non-zero then reset only exchanges
 * we sourced from the local port's FID. If did is non-zero then
 * only reset exchanges destined for the local port's FID.
 */
static void fc_exch_pool_reset(struct fc_lport *lport,
			       struct fc_exch_pool *pool,
			       u32 sid, u32 did)
{
	struct fc_exch *ep;
	struct fc_exch *next;

	spin_lock_bh(&pool->lock);
restart:
	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
		if ((lport == ep->lp) &&
		    (sid == 0 || sid == ep->sid) &&
		    (did == 0 || did == ep->did)) {
			fc_exch_hold(ep);
			spin_unlock_bh(&pool->lock);

			fc_exch_reset(ep);

			fc_exch_release(ep);
			spin_lock_bh(&pool->lock);

			/*
			 * must restart loop in case, while the lock
			 * was dropped, multiple eps were released.
			 */
			goto restart;
		}
	}
	pool->next_index = 0;
	pool->left = FC_XID_UNKNOWN;
	pool->right = FC_XID_UNKNOWN;
	spin_unlock_bh(&pool->lock);
}

/**
 * fc_exch_mgr_reset() - Reset all EMs of a local port
 * @lport: The local port whose EMs are to be reset
 * @sid: The source ID
 * @did: The destination ID
 *
 * Reset all EMs associated with a given local port. Release all
 * sequences and exchanges. If sid is non-zero then reset only the
 * exchanges sent from the local port's FID. If did is non-zero then
 * reset only exchanges destined for the local port's FID.
 */
void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
{
	struct fc_exch_mgr_anchor *ema;
	unsigned int cpu;

	list_for_each_entry(ema, &lport->ema_list, ema_list) {
		for_each_possible_cpu(cpu)
			fc_exch_pool_reset(lport,
					   per_cpu_ptr(ema->mp->pool, cpu),
					   sid, did);
	}
}
EXPORT_SYMBOL(fc_exch_mgr_reset);

/**
 * fc_exch_lookup() - find an exchange
 * @lport: The local port
 * @xid: The exchange ID
 *
 * Returns exchange pointer with hold for caller, or NULL if not found.
 */
static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
{
	struct fc_exch_mgr_anchor *ema;

	list_for_each_entry(ema, &lport->ema_list, ema_list)
		if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
			return fc_exch_find(ema->mp, xid);
	return NULL;
}

/**
 * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
 * @rfp: The REC frame, not freed here.
 *
 * Note that the requesting port may be different than the S_ID in the request.
 */
static void fc_exch_els_rec(struct fc_frame *rfp)
{
	struct fc_lport *lport;
	struct fc_frame *fp;
	struct fc_exch *ep;
	struct fc_els_rec *rp;
	struct fc_els_rec_acc *acc;
	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
	enum fc_els_rjt_explan explan;
	u32 sid;
	u16 rxid;
	u16 oxid;

	lport = fr_dev(rfp);
	rp = fc_frame_payload_get(rfp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;
	sid = ntoh24(rp->rec_s_id);
	rxid = ntohs(rp->rec_rx_id);
	oxid = ntohs(rp->rec_ox_id);

	ep = fc_exch_lookup(lport,
			    sid == fc_host_port_id(lport->host) ? oxid : rxid);
	explan = ELS_EXPL_OXID_RXID;
	if (!ep)
		goto reject;
	if (ep->oid != sid || oxid != ep->oxid)
		goto rel;
	if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
		goto rel;
	fp = fc_frame_alloc(lport, sizeof(*acc));
	if (!fp)
		goto out;

	acc = fc_frame_payload_get(fp, sizeof(*acc));
	memset(acc, 0, sizeof(*acc));
	acc->reca_cmd = ELS_LS_ACC;
	acc->reca_ox_id = rp->rec_ox_id;
	memcpy(acc->reca_ofid, rp->rec_s_id, 3);
	acc->reca_rx_id = htons(ep->rxid);
	if (ep->sid == ep->oid)
		hton24(acc->reca_rfid, ep->did);
	else
		hton24(acc->reca_rfid, ep->sid);
	acc->reca_fc4value = htonl(ep->seq.rec_data);
	acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
						 ESB_ST_SEQ_INIT |
						 ESB_ST_COMPLETE));
	fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
out:
	fc_exch_release(ep);
	return;

rel:
	fc_exch_release(ep);
reject:
	fc_seq_ls_rjt(rfp, reason, explan);
}

/**
 * fc_exch_rrq_resp() - Handler for RRQ responses
 * @sp: The sequence that the RRQ is on
 * @fp: The RRQ frame
 * @arg: The exchange that the RRQ is on
 *
 * TODO: fix error handler.
/**
 * fc_exch_rrq_resp() - Handler for RRQ responses
 * @sp:  The sequence that the RRQ is on
 * @fp:  The RRQ response frame
 * @arg: The exchange that the RRQ is on
 *
 * TODO: fix error handler.
 */
static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
        struct fc_exch *aborted_ep = arg;
        unsigned int op;

        if (IS_ERR(fp)) {
                int err = PTR_ERR(fp);

                if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
                        goto cleanup;
                FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
                            "frame error %d\n", err);
                return;
        }

        op = fc_frame_payload_op(fp);
        fc_frame_free(fp);

        switch (op) {
        case ELS_LS_RJT:
                FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
                /* fall through */
        case ELS_LS_ACC:
                goto cleanup;
        default:
                FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
                            "for RRQ\n", op);
                return;
        }

cleanup:
        fc_exch_done(&aborted_ep->seq);
        /* drop hold for rec qual */
        fc_exch_release(aborted_ep);
}


/**
 * fc_exch_seq_send() - Send a frame using a new exchange and sequence
 * @lport:      The local port to send the frame on
 * @fp:         The frame to be sent
 * @resp:       The response handler for this request
 * @destructor: The destructor for the exchange
 * @arg:        The argument to be passed to the response handler
 * @timer_msec: The timeout period for the exchange
 *
 * Some of the frame header's fields must be filled in before calling
 * this routine; those fields are:
 *
 * - routing control
 * - FC port did
 * - FC port sid
 * - FC header type
 * - frame control
 * - parameter or relative offset
 */
static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
                                       struct fc_frame *fp,
                                       void (*resp)(struct fc_seq *,
                                                    struct fc_frame *fp,
                                                    void *arg),
                                       void (*destructor)(struct fc_seq *,
                                                          void *),
                                       void *arg, u32 timer_msec)
{
        struct fc_exch *ep;
        struct fc_seq *sp = NULL;
        struct fc_frame_header *fh;
        struct fc_fcp_pkt *fsp = NULL;
        int rc = 1;

        ep = fc_exch_alloc(lport, fp);
        if (!ep) {
                fc_frame_free(fp);
                return NULL;
        }
        ep->esb_stat |= ESB_ST_SEQ_INIT;
        fh = fc_frame_header_get(fp);
        fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
        ep->resp = resp;
        ep->destructor = destructor;
        ep->arg = arg;
        ep->r_a_tov = FC_DEF_R_A_TOV;
        ep->lp = lport;
        sp = &ep->seq;

        ep->fh_type = fh->fh_type;      /* save for possible timeout handling */
        ep->f_ctl = ntoh24(fh->fh_f_ctl);
        fc_exch_setup_hdr(ep, fp, ep->f_ctl);
        sp->cnt++;

        if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
                fsp = fr_fsp(fp);
                fc_fcp_ddp_setup(fsp, ep->xid);
        }

        if (unlikely(lport->tt.frame_send(lport, fp)))
                goto err;

        spin_lock_bh(&ep->ex_lock);
        if (timer_msec)
                fc_exch_timer_set_locked(ep, timer_msec);
        ep->f_ctl &= ~FC_FC_FIRST_SEQ;  /* not first seq */

        if (ep->f_ctl & FC_FC_SEQ_INIT)
                ep->esb_stat &= ~ESB_ST_SEQ_INIT;
        spin_unlock_bh(&ep->ex_lock);
        return sp;
err:
        if (fsp)
                fc_fcp_ddp_done(fsp);
        spin_lock_bh(&ep->ex_lock);
        rc = fc_exch_done_locked(ep);
        spin_unlock_bh(&ep->ex_lock);
        if (!rc)
                fc_exch_delete(ep);
        return NULL;
}
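/*
 * Illustrative sketch (the 'my_resp'/'my_arg' names are hypothetical): a
 * caller of fc_exch_seq_send() allocates a frame, builds the payload,
 * fills in the header fields listed above (typically via fc_fill_fc_hdr())
 * and only then hands the frame off; fc_exch_rrq() below follows exactly
 * this pattern:
 *
 *      fp = fc_frame_alloc(lport, payload_len);
 *      if (!fp)
 *              return;
 *      ... build the payload ...
 *      fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, lport->port_id, FC_TYPE_ELS,
 *                     FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
 *      if (!fc_exch_seq_send(lport, fp, my_resp, NULL, my_arg, lport->e_d_tov))
 *              ... the frame was consumed; recover or retry ...
 */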
/**
 * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
 * @ep: The exchange to send the RRQ on
 *
 * This tells the remote port to stop blocking the use of
 * the exchange and the seq_cnt range.
 */
static void fc_exch_rrq(struct fc_exch *ep)
{
        struct fc_lport *lport;
        struct fc_els_rrq *rrq;
        struct fc_frame *fp;
        u32 did;

        lport = ep->lp;

        fp = fc_frame_alloc(lport, sizeof(*rrq));
        if (!fp)
                goto retry;

        rrq = fc_frame_payload_get(fp, sizeof(*rrq));
        memset(rrq, 0, sizeof(*rrq));
        rrq->rrq_cmd = ELS_RRQ;
        hton24(rrq->rrq_s_id, ep->sid);
        rrq->rrq_ox_id = htons(ep->oxid);
        rrq->rrq_rx_id = htons(ep->rxid);

        did = ep->did;
        if (ep->esb_stat & ESB_ST_RESP)
                did = ep->sid;

        fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
                       lport->port_id, FC_TYPE_ELS,
                       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

        if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
                             lport->e_d_tov))
                return;

retry:
        spin_lock_bh(&ep->ex_lock);
        if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
                spin_unlock_bh(&ep->ex_lock);
                /* drop hold for rec qual */
                fc_exch_release(ep);
                return;
        }
        ep->esb_stat |= ESB_ST_REC_QUAL;
        fc_exch_timer_set_locked(ep, ep->r_a_tov);
        spin_unlock_bh(&ep->ex_lock);
}
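/*
 * Note on reference accounting (an illustrative pairing, mirroring the
 * "drop hold for rec qual" comments in this file): while ESB_ST_REC_QUAL
 * is set on an exchange, an extra reference is held for the recovery
 * qualifier, so clearing the flag must always be paired with dropping
 * that reference:
 *
 *      ep->esb_stat |= ESB_ST_REC_QUAL;        // a hold is kept for the qualifier
 *      ...
 *      if (ep->esb_stat & ESB_ST_REC_QUAL) {
 *              ep->esb_stat &= ~ESB_ST_REC_QUAL;
 *              fc_exch_release(ep);    // or atomic_dec(&ep->ex_refcnt) under ex_lock
 *      }
 */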
/**
 * fc_exch_els_rrq() - Handler for ELS RRQ (Reinstate Recovery Qualifier) requests
 * @fp: The RRQ frame, not freed here
 */
static void fc_exch_els_rrq(struct fc_frame *fp)
{
        struct fc_lport *lport;
        struct fc_exch *ep = NULL;      /* request or subject exchange */
        struct fc_els_rrq *rp;
        u32 sid;
        u16 xid;
        enum fc_els_rjt_explan explan;

        lport = fr_dev(fp);
        rp = fc_frame_payload_get(fp, sizeof(*rp));
        explan = ELS_EXPL_INV_LEN;
        if (!rp)
                goto reject;

        /*
         * Look up the subject exchange.
         */
        sid = ntoh24(rp->rrq_s_id);             /* subject source */
        xid = fc_host_port_id(lport->host) == sid ?
                ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
        ep = fc_exch_lookup(lport, xid);
        explan = ELS_EXPL_OXID_RXID;
        if (!ep)
                goto reject;
        spin_lock_bh(&ep->ex_lock);
        if (ep->oxid != ntohs(rp->rrq_ox_id))
                goto unlock_reject;
        if (ep->rxid != ntohs(rp->rrq_rx_id) &&
            ep->rxid != FC_XID_UNKNOWN)
                goto unlock_reject;
        explan = ELS_EXPL_SID;
        if (ep->sid != sid)
                goto unlock_reject;

        /*
         * Clear Recovery Qualifier state, and cancel timer if complete.
         */
        if (ep->esb_stat & ESB_ST_REC_QUAL) {
                ep->esb_stat &= ~ESB_ST_REC_QUAL;
                atomic_dec(&ep->ex_refcnt);     /* drop hold for rec qual */
        }
        if (ep->esb_stat & ESB_ST_COMPLETE)
                fc_exch_timer_cancel(ep);

        spin_unlock_bh(&ep->ex_lock);

        /*
         * Send LS_ACC.
         */
        fc_seq_ls_acc(fp);
        goto out;

unlock_reject:
        spin_unlock_bh(&ep->ex_lock);
reject:
        fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
out:
        if (ep)
                fc_exch_release(ep);    /* drop hold from fc_exch_find */
}

/**
 * fc_exch_update_stats() - Update a local port's host statistics from its EMs
 * @lport: The local port whose exchange manager statistics are gathered
 */
void fc_exch_update_stats(struct fc_lport *lport)
{
        struct fc_host_statistics *st;
        struct fc_exch_mgr_anchor *ema;
        struct fc_exch_mgr *mp;

        st = &lport->host_stats;

        list_for_each_entry(ema, &lport->ema_list, ema_list) {
                mp = ema->mp;
                st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
                st->fc_no_free_exch_xid +=
                        atomic_read(&mp->stats.no_free_exch_xid);
                st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
                st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
                st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
                st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
        }
}
EXPORT_SYMBOL(fc_exch_update_stats);

/**
 * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
 * @lport: The local port to add the exchange manager to
 * @mp:    The exchange manager to be added to the local port
 * @match: The match routine that indicates when this EM should be used
 */
struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
                                           struct fc_exch_mgr *mp,
                                           bool (*match)(struct fc_frame *))
{
        struct fc_exch_mgr_anchor *ema;

        ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
        if (!ema)
                return ema;

        ema->mp = mp;
        ema->match = match;
        /* add EM anchor to EM anchors list */
        list_add_tail(&ema->ema_list, &lport->ema_list);
        kref_get(&mp->kref);
        return ema;
}
EXPORT_SYMBOL(fc_exch_mgr_add);

/**
 * fc_exch_mgr_destroy() - Destroy an exchange manager
 * @kref: The reference to the EM to be destroyed
 */
static void fc_exch_mgr_destroy(struct kref *kref)
{
        struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);

        mempool_destroy(mp->ep_pool);
        free_percpu(mp->pool);
        kfree(mp);
}

/**
 * fc_exch_mgr_del() - Delete an EM from a local port's list
 * @ema: The exchange manager anchor identifying the EM to be deleted
 */
void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
{
        /* remove EM anchor from EM anchors list */
        list_del(&ema->ema_list);
        kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
        kfree(ema);
}
EXPORT_SYMBOL(fc_exch_mgr_del);

/**
 * fc_exch_mgr_list_clone() - Share all exchange manager objects
 * @src: Source lport to clone exchange managers from
 * @dst: New lport that takes references to all the exchange managers
 */
int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
{
        struct fc_exch_mgr_anchor *ema, *tmp;

        list_for_each_entry(ema, &src->ema_list, ema_list) {
                if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
                        goto err;
        }
        return 0;
err:
        list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
                fc_exch_mgr_del(ema);
        return -ENOMEM;
}
EXPORT_SYMBOL(fc_exch_mgr_list_clone);
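/*
 * Illustrative sketch (hypothetical LLD code, 'my_mp'/'my_lld_match' are
 * made-up names): a low level driver that provides its own exchange
 * manager registers it, together with a match routine deciding which
 * frames that EM should handle, via fc_exch_mgr_add():
 *
 *      static bool my_lld_match(struct fc_frame *fp)
 *      {
 *              return fc_frame_payload_op(fp) == ELS_FLOGI;    // example policy
 *      }
 *
 *      ema = fc_exch_mgr_add(lport, my_mp, my_lld_match);
 *      if (!ema)
 *              ... fall back to the default EM ...
 */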
/**
 * fc_exch_mgr_alloc() - Allocate an exchange manager
 * @lport:   The local port that the new EM will be associated with
 * @class:   The default FC class for new exchanges
 * @min_xid: The minimum XID for exchanges from the new EM
 * @max_xid: The maximum XID for exchanges from the new EM
 * @match:   The match routine for the new EM
 */
struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
                                      enum fc_class class,
                                      u16 min_xid, u16 max_xid,
                                      bool (*match)(struct fc_frame *))
{
        struct fc_exch_mgr *mp;
        u16 pool_exch_range;
        size_t pool_size;
        unsigned int cpu;
        struct fc_exch_pool *pool;

        if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
            (min_xid & fc_cpu_mask) != 0) {
                FC_LPORT_DBG(lport, "Invalid min_xid 0x%x and max_xid 0x%x\n",
                             min_xid, max_xid);
                return NULL;
        }

        /*
         * allocate memory for EM
         */
        mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
        if (!mp)
                return NULL;

        mp->class = class;
        /* adjust em exch xid range for offload */
        mp->min_xid = min_xid;

        /* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
        pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
                sizeof(struct fc_exch *);
        if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
                mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
                        min_xid - 1;
        } else {
                mp->max_xid = max_xid;
                pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
                        (fc_cpu_mask + 1);
        }

        mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
        if (!mp->ep_pool)
                goto free_mp;

        /*
         * Setup per cpu exch pool with entire exchange id range equally
         * divided across all cpus. The exch pointers array memory is
         * allocated for exch range per pool.
         */
        mp->pool_max_index = pool_exch_range - 1;

        /*
         * Allocate and initialize per cpu exch pool
         */
        pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
        mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
        if (!mp->pool)
                goto free_mempool;
        for_each_possible_cpu(cpu) {
                pool = per_cpu_ptr(mp->pool, cpu);
                pool->next_index = 0;
                pool->left = FC_XID_UNKNOWN;
                pool->right = FC_XID_UNKNOWN;
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->ex_list);
        }

        kref_init(&mp->kref);
        if (!fc_exch_mgr_add(lport, mp, match)) {
                free_percpu(mp->pool);
                goto free_mempool;
        }

        /*
         * The kref_init() above set mp->kref to 1 and the call to
         * fc_exch_mgr_add() incremented it again, so drop that extra
         * reference here.
         */
        kref_put(&mp->kref, fc_exch_mgr_destroy);
        return mp;

free_mempool:
        mempool_destroy(mp->ep_pool);
free_mp:
        kfree(mp);
        return NULL;
}
EXPORT_SYMBOL(fc_exch_mgr_alloc);

/**
 * fc_exch_mgr_free() - Free all exchange managers on a local port
 * @lport: The local port whose EMs are to be freed
 */
void fc_exch_mgr_free(struct fc_lport *lport)
{
        struct fc_exch_mgr_anchor *ema, *next;

        flush_workqueue(fc_exch_workqueue);
        list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
                fc_exch_mgr_del(ema);
}
EXPORT_SYMBOL(fc_exch_mgr_free);
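/*
 * Worked example of the sizing done in fc_exch_mgr_alloc() above (the
 * numbers are made up): with fc_cpu_mask == 3 (four per-cpu pools) and an
 * xid range of 0x0010-0x0fff, the range holds 0x0fff - 0x0010 + 1 = 4080
 * exchanges, so each per-cpu pool covers 4080 / 4 = 1020 exchanges and
 * pool_max_index ends up as 1019. Frames are steered to a pool by
 * "xid & fc_cpu_mask", which is why min_xid must have its low
 * fc_cpu_order bits clear.
 */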
/**
 * fc_find_ema() - Find the appropriate Exchange Manager Anchor for an xid
 * @f_ctl: The F_CTL field from the received frame header
 * @lport: The local port the frame was received on
 * @fh:    The received frame header
 */
static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
                                              struct fc_lport *lport,
                                              struct fc_frame_header *fh)
{
        struct fc_exch_mgr_anchor *ema;
        u16 xid;

        if (f_ctl & FC_FC_EX_CTX)
                xid = ntohs(fh->fh_ox_id);
        else {
                xid = ntohs(fh->fh_rx_id);
                if (xid == FC_XID_UNKNOWN)
                        return list_entry(lport->ema_list.prev,
                                          typeof(*ema), ema_list);
        }

        list_for_each_entry(ema, &lport->ema_list, ema_list) {
                if ((xid >= ema->mp->min_xid) &&
                    (xid <= ema->mp->max_xid))
                        return ema;
        }
        return NULL;
}

/**
 * fc_exch_recv() - Handler for received frames
 * @lport: The local port the frame was received on
 * @fp:    The received frame
 */
void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
{
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        struct fc_exch_mgr_anchor *ema;
        u32 f_ctl;

        /* lport lock ? */
        if (!lport || lport->state == LPORT_ST_DISABLED) {
                FC_LPORT_DBG(lport, "Receiving frames for an lport that "
                             "has not been initialized correctly\n");
                fc_frame_free(fp);
                return;
        }

        f_ctl = ntoh24(fh->fh_f_ctl);
        ema = fc_find_ema(f_ctl, lport, fh);
        if (!ema) {
                FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor, "
                             "f_ctl <0x%x>, xid <0x%x>\n",
                             f_ctl,
                             (f_ctl & FC_FC_EX_CTX) ?
                             ntohs(fh->fh_ox_id) :
                             ntohs(fh->fh_rx_id));
                fc_frame_free(fp);
                return;
        }

        /*
         * If frame is marked invalid, just drop it.
         */
        switch (fr_eof(fp)) {
        case FC_EOF_T:
                if (f_ctl & FC_FC_END_SEQ)
                        skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
                /* fall through */
        case FC_EOF_N:
                if (fh->fh_type == FC_TYPE_BLS)
                        fc_exch_recv_bls(ema->mp, fp);
                else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
                         FC_FC_EX_CTX)
                        fc_exch_recv_seq_resp(ema->mp, fp);
                else if (f_ctl & FC_FC_SEQ_CTX)
                        fc_exch_recv_resp(ema->mp, fp);
                else    /* no EX_CTX and no SEQ_CTX */
                        fc_exch_recv_req(lport, ema->mp, fp);
                break;
        default:
                FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
                             fr_eof(fp));
                fc_frame_free(fp);
        }
}
EXPORT_SYMBOL(fc_exch_recv);

/**
 * fc_exch_init() - Initialize the exchange layer for a local port
 * @lport: The local port to initialize the exchange layer for
 */
int fc_exch_init(struct fc_lport *lport)
{
        if (!lport->tt.seq_start_next)
                lport->tt.seq_start_next = fc_seq_start_next;

        if (!lport->tt.seq_set_resp)
                lport->tt.seq_set_resp = fc_seq_set_resp;

        if (!lport->tt.exch_seq_send)
                lport->tt.exch_seq_send = fc_exch_seq_send;

        if (!lport->tt.seq_send)
                lport->tt.seq_send = fc_seq_send;

        if (!lport->tt.seq_els_rsp_send)
                lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send;

        if (!lport->tt.exch_done)
                lport->tt.exch_done = fc_exch_done;

        if (!lport->tt.exch_mgr_reset)
                lport->tt.exch_mgr_reset = fc_exch_mgr_reset;

        if (!lport->tt.seq_exch_abort)
                lport->tt.seq_exch_abort = fc_seq_exch_abort;

        if (!lport->tt.seq_assign)
                lport->tt.seq_assign = fc_seq_assign;

        if (!lport->tt.seq_release)
                lport->tt.seq_release = fc_seq_release;

        return 0;
}
EXPORT_SYMBOL(fc_exch_init);
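/*
 * Illustrative sketch (hypothetical LLD code): fc_exch_init() only fills
 * in function template (lport->tt) entries that are still NULL, so an LLD
 * that wants to override one of them installs its own handler first:
 *
 *      lport->tt.exch_seq_send = my_lld_exch_seq_send; // hypothetical hook
 *      fc_exch_init(lport);    // the remaining entries get libfc defaults
 */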
/**
 * fc_setup_exch_mgr() - Set up the exchange manager slab cache and workqueue
 */
int fc_setup_exch_mgr(void)
{
        fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
                                         0, SLAB_HWCACHE_ALIGN, NULL);
        if (!fc_em_cachep)
                return -ENOMEM;

        /*
         * Initialize fc_cpu_mask and fc_cpu_order. fc_cpu_mask is derived
         * from nr_cpu_ids rounded up to the next power of 2, and the
         * corresponding order is stored in fc_cpu_order; both are later
         * needed to map an exchange id to an exchange array index in a
         * per cpu exchange pool.
         *
         * This rounding is required so that fc_cpu_mask lines up with the
         * exchange id's lower bits: a simple bitwise AND between
         * fc_cpu_mask and the exchange id then delivers all incoming
         * frames of an exchange to the same cpu on which the exchange
         * originated. For example, with nr_cpu_ids == 6 the loop below
         * leaves fc_cpu_order == 3 and fc_cpu_mask == 7.
         */
        fc_cpu_mask = 1;
        fc_cpu_order = 0;
        while (fc_cpu_mask < nr_cpu_ids) {
                fc_cpu_mask <<= 1;
                fc_cpu_order++;
        }
        fc_cpu_mask--;

        fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
        if (!fc_exch_workqueue)
                goto err;
        return 0;
err:
        kmem_cache_destroy(fc_em_cachep);
        return -ENOMEM;
}

/**
 * fc_destroy_exch_mgr() - Destroy the exchange manager workqueue and slab cache
 */
void fc_destroy_exch_mgr(void)
{
        destroy_workqueue(fc_exch_workqueue);
        kmem_cache_destroy(fc_em_cachep);
}
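/*
 * Illustrative sketch (simplified, hypothetical function names): the two
 * helpers above are intended to be called once each, from the module
 * setup and teardown paths, e.g.:
 *
 *      static int __init example_init(void)
 *      {
 *              return fc_setup_exch_mgr();
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              fc_destroy_exch_mgr();
 *      }
 */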