/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * PORT LOCKING NOTES
 *
 * These comments only apply to the 'port code' which consists of the lport,
 * disc and rport blocks.
 *
 * MOTIVATION
 *
 * The lport, disc and rport blocks all have mutexes that are used to protect
 * those objects. The main motivation for these locks is to prevent an lport
 * from being reset just before we send a frame. In that scenario the lport's
 * FID would get set to zero and then we'd send a frame with an invalid SID.
 * We also need to ensure that states don't change unexpectedly while
 * processing another state.
 *
 * HIERARCHY
 *
 * The following hierarchy defines the locking rules. A greater lock
 * may be held before acquiring a lesser lock, but a lesser lock should never
 * be held while attempting to acquire a greater lock. Here is the hierarchy:
 *
 * lport > disc, lport > rport, disc > rport
 *
 * CALLBACKS
 *
 * The callbacks cause complications with this scheme. There is a callback
 * from the rport (to either lport or disc) and a callback from disc
 * (to the lport).
 *
 * As rports exit the rport state machine a callback is made to the owner of
 * the rport to notify success or failure. Since the callback is likely to
 * cause the lport or disc to grab its lock we cannot hold the rport lock
 * while making the callback. To ensure that the rport is not freed while
 * processing the callback the rport callbacks are serialized through a
 * single-threaded workqueue. An rport would never be freed while in a
 * callback handler because no other rport work in this queue can be executed
 * at the same time.
 *
 * When discovery succeeds or fails a callback is made to the lport as
 * notification. Currently, successful discovery causes the lport to take no
 * action. A failure will cause the lport to reset. There is likely a circular
 * locking problem with this implementation.
 */

/*
 * LPORT LOCKING
 *
 * The critical sections protected by the lport's mutex are quite broad and
 * may be improved upon in the future. The lport code and its locking don't
 * influence the I/O path, so excessive locking doesn't penalize I/O
 * performance.
 *
 * The strategy is to lock whenever processing a request or response. Note
 * that every _enter_* function corresponds to a state change. They generally
 * change the lport's state and then send a request out on the wire. We lock
 * before calling any of these functions to protect that state change. This
 * means that the entry points into the lport block manage the locks while
 * the state machine transitions between states (i.e. the _enter_* functions),
 * always staying protected.
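 *
 * For illustration only (a sketch, not a function in this file), that
 * entry-point pattern looks roughly like this:
 *
 *	mutex_lock(&lport->lp_mutex);
 *	fc_lport_enter_flogi(lport);	- change state, then send the frame
 *	mutex_unlock(&lport->lp_mutex);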
 *
 * When handling responses we also hold the lport mutex broadly. When the
 * lport receives the response frame it locks the mutex and then calls the
 * appropriate handler for the particular response. Generally a response will
 * trigger a state change and so the lock must already be held.
 *
 * Retries also have to consider the locking. The retries occur from a work
 * context and the work function will lock the lport and then retry the state
 * (i.e. the _enter_* function).
 */

#include <linux/timer.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <scsi/fc/fc_gs.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
#include <linux/scatterlist.h>

#include "fc_libfc.h"

/* Fabric IDs to use for point-to-point mode, chosen on whims. */
#define FC_LOCAL_PTP_FID_LO	0x010101
#define FC_LOCAL_PTP_FID_HI	0x010102

#define	DNS_DELAY	3	/* Discovery delay after RSCN (in seconds)*/

static void fc_lport_error(struct fc_lport *, struct fc_frame *);

static void fc_lport_enter_reset(struct fc_lport *);
static void fc_lport_enter_flogi(struct fc_lport *);
static void fc_lport_enter_dns(struct fc_lport *);
static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
static void fc_lport_enter_scr(struct fc_lport *);
static void fc_lport_enter_ready(struct fc_lport *);
static void fc_lport_enter_logo(struct fc_lport *);

static const char *fc_lport_state_names[] = {
	[LPORT_ST_DISABLED] = "disabled",
	[LPORT_ST_FLOGI] =    "FLOGI",
	[LPORT_ST_DNS] =      "dNS",
	[LPORT_ST_RNN_ID] =   "RNN_ID",
	[LPORT_ST_RSNN_NN] =  "RSNN_NN",
	[LPORT_ST_RSPN_ID] =  "RSPN_ID",
	[LPORT_ST_RFT_ID] =   "RFT_ID",
	[LPORT_ST_RFF_ID] =   "RFF_ID",
	[LPORT_ST_SCR] =      "SCR",
	[LPORT_ST_READY] =    "Ready",
	[LPORT_ST_LOGO] =     "LOGO",
	[LPORT_ST_RESET] =    "reset",
};

/**
 * struct fc_bsg_info - FC Passthrough management structure
 * @job:      The passthrough job
 * @lport:    The local port to pass through a command
 * @rsp_code: The expected response code
 * @sg:	      job->reply_payload.sg_list
 * @nents:    job->reply_payload.sg_cnt
 * @offset:   The offset into the response data
 */
struct fc_bsg_info {
	struct fc_bsg_job *job;
	struct fc_lport *lport;
	u16 rsp_code;
	struct scatterlist *sg;
	u32 nents;
	size_t offset;
};

/**
 * fc_frame_drop() - Dummy frame handler
 * @lport: The local port the frame was received on
 * @fp:	   The received frame
 */
static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
{
	fc_frame_free(fp);
	return 0;
}

/**
 * fc_lport_rport_callback() - Event handler for rport events
 * @lport: The lport which is receiving the event
 * @rdata: private remote port data
 * @event: The event that occurred
 *
 * Locking Note: The rport lock should not be held when calling
 *		 this function.
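 *
 * A READY event from the directory-server rport moves the lport from the
 * DNS state into name server registration (RNN_ID); a LOGO, FAILED or STOP
 * event clears the lport's dNS rport pointer.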
 */
static void fc_lport_rport_callback(struct fc_lport *lport,
				    struct fc_rport_priv *rdata,
				    enum fc_rport_event event)
{
	FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
		     rdata->ids.port_id);

	mutex_lock(&lport->lp_mutex);
	switch (event) {
	case RPORT_EV_READY:
		if (lport->state == LPORT_ST_DNS) {
			lport->dns_rdata = rdata;
			fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
		} else {
			FC_LPORT_DBG(lport, "Received a READY event "
				     "on port (%6.6x) for the directory "
				     "server, but the lport is not "
				     "in the DNS state, it's in the "
				     "%d state", rdata->ids.port_id,
				     lport->state);
			lport->tt.rport_logoff(rdata);
		}
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		lport->dns_rdata = NULL;
		break;
	case RPORT_EV_NONE:
		break;
	}
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_state() - Return a string which represents the lport's state
 * @lport: The lport whose state is to be converted to a string
 */
static const char *fc_lport_state(struct fc_lport *lport)
{
	const char *cp;

	cp = fc_lport_state_names[lport->state];
	if (!cp)
		cp = "unknown";
	return cp;
}

/**
 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
 * @lport:	 The lport to attach the ptp rport to
 * @remote_fid:	 The FID of the ptp rport
 * @remote_wwpn: The WWPN of the ptp rport
 * @remote_wwnn: The WWNN of the ptp rport
 */
static void fc_lport_ptp_setup(struct fc_lport *lport,
			       u32 remote_fid, u64 remote_wwpn,
			       u64 remote_wwnn)
{
	mutex_lock(&lport->disc.disc_mutex);
	if (lport->ptp_rdata) {
		lport->tt.rport_logoff(lport->ptp_rdata);
		kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
	}
	lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
	kref_get(&lport->ptp_rdata->kref);
	lport->ptp_rdata->ids.port_name = remote_wwpn;
	lport->ptp_rdata->ids.node_name = remote_wwnn;
	mutex_unlock(&lport->disc.disc_mutex);

	lport->tt.rport_login(lport->ptp_rdata);

	fc_lport_enter_ready(lport);
}

/**
 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
 * @shost: The SCSI host whose port state is to be determined
 */
void fc_get_host_port_state(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	mutex_lock(&lport->lp_mutex);
	if (!lport->link_up)
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
	else
		switch (lport->state) {
		case LPORT_ST_READY:
			fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
			break;
		default:
			fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		}
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_get_host_port_state);

/**
 * fc_get_host_speed() - Return the speed of the given Scsi_Host
 * @shost: The SCSI host whose port speed is to be determined
 */
void fc_get_host_speed(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	fc_host_speed(shost) = lport->link_speed;
}
EXPORT_SYMBOL(fc_get_host_speed);

/**
 * fc_get_host_stats() - Return the Scsi_Host's statistics
 * @shost: The SCSI host whose statistics are to be returned
 */
struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
{
	struct fc_host_statistics *fcoe_stats;
	struct fc_lport *lport = shost_priv(shost);
	struct timespec v0, v1;
	unsigned int cpu;
	u64 fcp_in_bytes = 0;
	u64 fcp_out_bytes = 0;

	fcoe_stats = &lport->host_stats;
	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));

	jiffies_to_timespec(jiffies, &v0);
	jiffies_to_timespec(lport->boot_time, &v1);
	fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);

	for_each_possible_cpu(cpu) {
		struct fcoe_dev_stats *stats;

		stats = per_cpu_ptr(lport->dev_stats, cpu);

		fcoe_stats->tx_frames += stats->TxFrames;
		fcoe_stats->tx_words += stats->TxWords;
		fcoe_stats->rx_frames += stats->RxFrames;
		fcoe_stats->rx_words += stats->RxWords;
		fcoe_stats->error_frames += stats->ErrorFrames;
		fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
		fcoe_stats->fcp_input_requests += stats->InputRequests;
		fcoe_stats->fcp_output_requests += stats->OutputRequests;
		fcoe_stats->fcp_control_requests += stats->ControlRequests;
		fcp_in_bytes += stats->InputBytes;
		fcp_out_bytes += stats->OutputBytes;
		fcoe_stats->link_failure_count += stats->LinkFailureCount;
	}
	fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
	fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
	fcoe_stats->lip_count = -1;
	fcoe_stats->nos_count = -1;
	fcoe_stats->loss_of_sync_count = -1;
	fcoe_stats->loss_of_signal_count = -1;
	fcoe_stats->prim_seq_protocol_err_count = -1;
	fcoe_stats->dumped_frames = -1;
	return fcoe_stats;
}
EXPORT_SYMBOL(fc_get_host_stats);

/**
 * fc_lport_flogi_fill() - Fill in FLOGI command for request
 * @lport: The local port the FLOGI is for
 * @flogi: The FLOGI command
 * @op:	   The opcode
 */
static void fc_lport_flogi_fill(struct fc_lport *lport,
				struct fc_els_flogi *flogi,
				unsigned int op)
{
	struct fc_els_csp *sp;
	struct fc_els_cssp *cp;

	memset(flogi, 0, sizeof(*flogi));
	flogi->fl_cmd = (u8) op;
	put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
	put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
	sp = &flogi->fl_csp;
	sp->sp_hi_ver = 0x20;
	sp->sp_lo_ver = 0x20;
	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
	sp->sp_bb_data = htons((u16) lport->mfs);
	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
	if (op != ELS_FLOGI) {
		sp->sp_features = htons(FC_SP_FT_CIRO);
		sp->sp_tot_seq = htons(255);	/* seq. we accept */
		sp->sp_rel_off = htons(0x1f);
		sp->sp_e_d_tov = htonl(lport->e_d_tov);

		cp->cp_rdfs = htons((u16) lport->mfs);
		cp->cp_con_seq = htons(255);
		cp->cp_open_seq = 1;
	}
}

/**
 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
 * @lport: The local port to add a new FC-4 type to
 * @type:  The new FC-4 type
 */
static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
{
	__be32 *mp;

	mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
	*mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
}

/**
 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
 * @lport: Fibre Channel local port receiving the RLIR
 * @fp:	   The RLIR request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
{
	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
		     fc_lport_state(lport));

	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
	fc_frame_free(fp);
}

/**
 * fc_lport_recv_echo_req() - Handle received ECHO request
 * @lport: The local port receiving the ECHO
 * @in_fp: ECHO request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_echo_req(struct fc_lport *lport,
				   struct fc_frame *in_fp)
{
	struct fc_frame *fp;
	unsigned int len;
	void *pp;
	void *dp;

	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
		     fc_lport_state(lport));

	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(in_fp, len);

	if (len < sizeof(__be32))
		len = sizeof(__be32);

	fp = fc_frame_alloc(lport, len);
	if (fp) {
		dp = fc_frame_payload_get(fp, len);
		memcpy(dp, pp, len);
		*((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
		fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
		lport->tt.frame_send(lport, fp);
	}
	fc_frame_free(in_fp);
}

/**
 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
 * @lport: The local port receiving the RNID
 * @in_fp: The RNID request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_rnid_req(struct fc_lport *lport,
				   struct fc_frame *in_fp)
{
	struct fc_frame *fp;
	struct fc_els_rnid *req;
	struct {
		struct fc_els_rnid_resp rnid;
		struct fc_els_rnid_cid	cid;
		struct fc_els_rnid_gen	gen;
	} *rp;
	struct fc_seq_els_data rjt_data;
	u8 fmt;
	size_t len;

	FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
		     fc_lport_state(lport));

	req = fc_frame_payload_get(in_fp, sizeof(*req));
	if (!req) {
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
	} else {
		fmt = req->rnid_fmt;
		len = sizeof(*rp);
		if (fmt != ELS_RNIDF_GEN ||
		    ntohl(lport->rnid_gen.rnid_atype) == 0) {
			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
			len -= sizeof(rp->gen);
		}
		fp = fc_frame_alloc(lport, len);
		if (fp) {
			rp = fc_frame_payload_get(fp, len);
			memset(rp, 0, len);
			rp->rnid.rnid_cmd = ELS_LS_ACC;
			rp->rnid.rnid_fmt = fmt;
			rp->rnid.rnid_cid_len = sizeof(rp->cid);
			rp->cid.rnid_wwpn = htonll(lport->wwpn);
			rp->cid.rnid_wwnn = htonll(lport->wwnn);
			if (fmt == ELS_RNIDF_GEN) {
				rp->rnid.rnid_sid_len = sizeof(rp->gen);
				memcpy(&rp->gen, &lport->rnid_gen,
				       sizeof(rp->gen));
			}
			fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
			lport->tt.frame_send(lport, fp);
		}
	}
	fc_frame_free(in_fp);
}

/**
 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
 * @lport: The local port receiving the LOGO
 * @fp:	   The LOGO request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
	fc_lport_enter_reset(lport);
	fc_frame_free(fp);
}

/**
 * fc_fabric_login() - Start the lport state machine
 * @lport: The local port that should log into the fabric
 *
 * Locking Note: This function should not be called
 *		 with the lport lock held.
 */
int fc_fabric_login(struct fc_lport *lport)
{
	int rc = -1;

	mutex_lock(&lport->lp_mutex);
	if (lport->state == LPORT_ST_DISABLED ||
	    lport->state == LPORT_ST_LOGO) {
		fc_lport_state_enter(lport, LPORT_ST_RESET);
		fc_lport_enter_reset(lport);
		rc = 0;
	}
	mutex_unlock(&lport->lp_mutex);

	return rc;
}
EXPORT_SYMBOL(fc_fabric_login);

/**
 * __fc_linkup() - Handler for transport linkup events
 * @lport: The lport whose link is up
 *
 * Locking: must be called with the lp_mutex held
 */
void __fc_linkup(struct fc_lport *lport)
{
	if (!lport->link_up) {
		lport->link_up = 1;

		if (lport->state == LPORT_ST_RESET)
			fc_lport_enter_flogi(lport);
	}
}

/**
 * fc_linkup() - Handler for transport linkup events
 * @lport: The local port whose link is up
 */
void fc_linkup(struct fc_lport *lport)
{
	printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
	       lport->host->host_no, lport->port_id);

	mutex_lock(&lport->lp_mutex);
	__fc_linkup(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkup);

/**
 * __fc_linkdown() - Handler for transport linkdown events
 * @lport: The lport whose link is down
 *
 * Locking: must be called with the lp_mutex held
 */
void __fc_linkdown(struct fc_lport *lport)
{
	if (lport->link_up) {
		lport->link_up = 0;
		fc_lport_enter_reset(lport);
		lport->tt.fcp_cleanup(lport);
	}
}

/**
 * fc_linkdown() - Handler for transport linkdown events
 * @lport: The local port whose link is down
 */
void fc_linkdown(struct fc_lport *lport)
{
	printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
	       lport->host->host_no, lport->port_id);

	mutex_lock(&lport->lp_mutex);
	__fc_linkdown(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkdown);

/**
 * fc_fabric_logoff() - Logout of the fabric
 * @lport: The local port to logoff the fabric
 *
 * Return value:
 *	0 for success, -1 for failure
 */
int fc_fabric_logoff(struct fc_lport *lport)
{
	lport->tt.disc_stop_final(lport);
	mutex_lock(&lport->lp_mutex);
	if (lport->dns_rdata)
		lport->tt.rport_logoff(lport->dns_rdata);
	mutex_unlock(&lport->lp_mutex);
	lport->tt.rport_flush_queue();
	mutex_lock(&lport->lp_mutex);
	fc_lport_enter_logo(lport);
	mutex_unlock(&lport->lp_mutex);
	cancel_delayed_work_sync(&lport->retry_work);
	return 0;
}
EXPORT_SYMBOL(fc_fabric_logoff);

/**
 * fc_lport_destroy() - Unregister a fc_lport
 * @lport: The local port to unregister
 *
 * Note:
 * exit routine for fc_lport instance
 * clean-up all the allocated memory
 * and free up other system resources.
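 *
 * A minimal teardown sketch (illustrative only; the exact ordering is up
 * to the low-level driver and is not mandated by this file):
 *
 *	fc_fabric_logoff(lport);
 *	fc_lport_destroy(lport);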
 *
 */
int fc_lport_destroy(struct fc_lport *lport)
{
	mutex_lock(&lport->lp_mutex);
	lport->state = LPORT_ST_DISABLED;
	lport->link_up = 0;
	lport->tt.frame_send = fc_frame_drop;
	mutex_unlock(&lport->lp_mutex);

	lport->tt.fcp_abort_io(lport);
	lport->tt.disc_stop_final(lport);
	lport->tt.exch_mgr_reset(lport, 0, 0);
	return 0;
}
EXPORT_SYMBOL(fc_lport_destroy);

/**
 * fc_set_mfs() - Set the maximum frame size for a local port
 * @lport: The local port to set the MFS for
 * @mfs:   The new MFS
 */
int fc_set_mfs(struct fc_lport *lport, u32 mfs)
{
	unsigned int old_mfs;
	int rc = -EINVAL;

	mutex_lock(&lport->lp_mutex);

	old_mfs = lport->mfs;

	if (mfs >= FC_MIN_MAX_FRAME) {
		mfs &= ~3;
		if (mfs > FC_MAX_FRAME)
			mfs = FC_MAX_FRAME;
		mfs -= sizeof(struct fc_frame_header);
		lport->mfs = mfs;
		rc = 0;
	}

	if (!rc && mfs < old_mfs)
		fc_lport_enter_reset(lport);

	mutex_unlock(&lport->lp_mutex);

	return rc;
}
EXPORT_SYMBOL(fc_set_mfs);

/**
 * fc_lport_disc_callback() - Callback for discovery events
 * @lport: The local port receiving the event
 * @event: The discovery event
 */
void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
{
	switch (event) {
	case DISC_EV_SUCCESS:
		FC_LPORT_DBG(lport, "Discovery succeeded\n");
		break;
	case DISC_EV_FAILED:
		printk(KERN_ERR "host%d: libfc: "
		       "Discovery failed for port (%6.6x)\n",
		       lport->host->host_no, lport->port_id);
		mutex_lock(&lport->lp_mutex);
		fc_lport_enter_reset(lport);
		mutex_unlock(&lport->lp_mutex);
		break;
	case DISC_EV_NONE:
		WARN_ON(1);
		break;
	}
}

/**
 * fc_lport_enter_ready() - Enter the ready state and start discovery
 * @lport: The local port that is ready
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ready(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered READY from state %s\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_READY);
	if (lport->vport)
		fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
	fc_vports_linkchange(lport);

	if (!lport->ptp_rdata)
		lport->tt.disc_start(fc_lport_disc_callback, lport);
}

/**
 * fc_lport_set_port_id() - set the local port Port ID
 * @lport:   The local port which will have its Port ID set.
 * @port_id: The new port ID.
 * @fp:	     The frame containing the incoming request, or NULL.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
				 struct fc_frame *fp)
{
	if (port_id)
		printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
		       lport->host->host_no, port_id);

	lport->port_id = port_id;

	/* Update the fc_host */
	fc_host_port_id(lport->host) = port_id;

	if (lport->tt.lport_set_port_id)
		lport->tt.lport_set_port_id(lport, port_id, fp);
}

/**
 * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
 * @lport:   The local port which will have its Port ID set.
 * @port_id: The new port ID.
 *
 * Called by the lower-level driver when transport sets the local port_id.
 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
 * discovery to be skipped.
 */
void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
{
	mutex_lock(&lport->lp_mutex);

	fc_lport_set_port_id(lport, port_id, NULL);

	switch (lport->state) {
	case LPORT_ST_RESET:
	case LPORT_ST_FLOGI:
		if (port_id)
			fc_lport_enter_ready(lport);
		break;
	default:
		break;
	}
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_set_local_id);

/**
 * fc_lport_recv_flogi_req() - Receive a FLOGI request
 * @lport: The local port that received the request
 * @rx_fp: The FLOGI frame
 *
 * A received FLOGI request indicates a point-to-point connection.
 * Accept it with the common service parameters indicating our N port.
 * Set up to do a PLOGI if we have the higher-number WWPN.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_flogi_req(struct fc_lport *lport,
				    struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_els_flogi *flp;
	struct fc_els_flogi *new_flp;
	u64 remote_wwpn;
	u32 remote_fid;
	u32 local_fid;

	FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
		     fc_lport_state(lport));

	remote_fid = fc_frame_sid(rx_fp);
	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
	if (!flp)
		goto out;
	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
	if (remote_wwpn == lport->wwpn) {
		printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
		       "with same WWPN %16.16llx\n",
		       lport->host->host_no, remote_wwpn);
		goto out;
	}
	FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);

	/*
	 * XXX what is the right thing to do for FIDs?
	 * The originator might expect our S_ID to be 0xfffffe.
	 * But if so, both of us could end up with the same FID.
	 */
	local_fid = FC_LOCAL_PTP_FID_LO;
	if (remote_wwpn < lport->wwpn) {
		local_fid = FC_LOCAL_PTP_FID_HI;
		if (!remote_fid || remote_fid == local_fid)
			remote_fid = FC_LOCAL_PTP_FID_LO;
	} else if (!remote_fid) {
		remote_fid = FC_LOCAL_PTP_FID_HI;
	}

	fc_lport_set_port_id(lport, local_fid, rx_fp);

	fp = fc_frame_alloc(lport, sizeof(*flp));
	if (fp) {
		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
		fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
		new_flp->fl_cmd = (u8) ELS_LS_ACC;

		/*
		 * Send the response.  If this fails, the originator should
		 * repeat the sequence.
		 */
		fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
		fh = fc_frame_header_get(fp);
		hton24(fh->fh_s_id, local_fid);
		hton24(fh->fh_d_id, remote_fid);
		lport->tt.frame_send(lport, fp);

	} else {
		fc_lport_error(lport, fp);
	}
	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
			   get_unaligned_be64(&flp->fl_wwnn));
out:
	fc_frame_free(rx_fp);
}

/**
 * fc_lport_recv_req() - The generic lport request handler
 * @lport: The local port that received the request
 * @fp:	   The request frame
 *
 * This function will see if the lport handles the request or
 * if an rport should handle the request.
 *
 * Locking Note: This function should not be called with the lport
 *		 lock held because it will grab the lock.
 */
static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	void (*recv)(struct fc_lport *, struct fc_frame *);

	mutex_lock(&lport->lp_mutex);

	/*
	 * Handle special ELS cases like FLOGI, LOGO, and
	 * RSCN here.  These don't require a session.
	 * Even if we had a session, it might not be ready.
	 */
	if (!lport->link_up)
		fc_frame_free(fp);
	else if (fh->fh_type == FC_TYPE_ELS &&
		 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
		/*
		 * Check opcode.
		 */
		recv = lport->tt.rport_recv_req;
		switch (fc_frame_payload_op(fp)) {
		case ELS_FLOGI:
			if (!lport->point_to_multipoint)
				recv = fc_lport_recv_flogi_req;
			break;
		case ELS_LOGO:
			if (fc_frame_sid(fp) == FC_FID_FLOGI)
				recv = fc_lport_recv_logo_req;
			break;
		case ELS_RSCN:
			recv = lport->tt.disc_recv_req;
			break;
		case ELS_ECHO:
			recv = fc_lport_recv_echo_req;
			break;
		case ELS_RLIR:
			recv = fc_lport_recv_rlir_req;
			break;
		case ELS_RNID:
			recv = fc_lport_recv_rnid_req;
			break;
		}

		recv(lport, fp);
	} else {
		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
			     fr_eof(fp));
		fc_frame_free(fp);
	}
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_reset() - Reset a local port
 * @lport: The local port which should be reset
 *
 * Locking Note: This function should not be called with the
 *		 lport lock held.
 */
int fc_lport_reset(struct fc_lport *lport)
{
	cancel_delayed_work_sync(&lport->retry_work);
	mutex_lock(&lport->lp_mutex);
	fc_lport_enter_reset(lport);
	mutex_unlock(&lport->lp_mutex);
	return 0;
}
EXPORT_SYMBOL(fc_lport_reset);

/**
 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
 * @lport: The local port to be reset
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_reset_locked(struct fc_lport *lport)
{
	if (lport->dns_rdata)
		lport->tt.rport_logoff(lport->dns_rdata);

	if (lport->ptp_rdata) {
		lport->tt.rport_logoff(lport->ptp_rdata);
		kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
		lport->ptp_rdata = NULL;
	}

	lport->tt.disc_stop(lport);

	lport->tt.exch_mgr_reset(lport, 0, 0);
	fc_host_fabric_name(lport->host) = 0;

	if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
		fc_lport_set_port_id(lport, 0, NULL);
}

/**
 * fc_lport_enter_reset() - Reset the local port
 * @lport: The local port to be reset
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
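 *
 * If the link is still up after the reset completes, the lport immediately
 * re-enters the FLOGI state.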
 */
static void fc_lport_enter_reset(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
		     fc_lport_state(lport));

	if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
		return;

	if (lport->vport) {
		if (lport->link_up)
			fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
		else
			fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
	}
	fc_lport_state_enter(lport, LPORT_ST_RESET);
	fc_vports_linkchange(lport);
	fc_lport_reset_locked(lport);
	if (lport->link_up)
		fc_lport_enter_flogi(lport);
}

/**
 * fc_lport_enter_disabled() - Disable the local port
 * @lport: The local port to be reset
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_disabled(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
	fc_vports_linkchange(lport);
	fc_lport_reset_locked(lport);
}

/**
 * fc_lport_error() - Handler for any errors
 * @lport: The local port that the error was on
 * @fp:	   The error code encoded in a frame pointer
 *
 * If the error was caused by a resource allocation failure
 * then wait for half a second and retry, otherwise retry
 * after the e_d_tov time.
 */
static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
{
	unsigned long delay = 0;
	FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
		     PTR_ERR(fp), fc_lport_state(lport),
		     lport->retry_count);

	if (PTR_ERR(fp) == -FC_EX_CLOSED)
		return;

	/*
	 * Memory allocation failure, or the exchange timed out
	 * or we received LS_RJT.
	 * Retry after delay
	 */
	if (lport->retry_count < lport->max_retry_count) {
		lport->retry_count++;
		if (!fp)
			delay = msecs_to_jiffies(500);
		else
			delay = msecs_to_jiffies(lport->e_d_tov);

		schedule_delayed_work(&lport->retry_work, delay);
	} else
		fc_lport_enter_reset(lport);
}

/**
 * fc_lport_ns_resp() - Handle response to a name server
 *			registration exchange
 * @sp:	    current sequence in exchange
 * @fp:	    response frame
 * @lp_arg: Fibre Channel host port instance
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
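 *
 * On each accepted response the lport advances through the name server
 * registration sequence: RNN_ID -> RSNN_NN -> RSPN_ID -> RFT_ID -> RFF_ID,
 * and then sends SCR.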
 */
static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
			     void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_ct_hdr *ct;

	FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
		FC_LPORT_DBG(lport, "Received a name server response, "
			     "but in state %s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	ct = fc_frame_payload_get(fp, sizeof(*ct));

	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
	    ct->ct_fs_type == FC_FST_DIR &&
	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
	    ntohs(ct->ct_cmd) == FC_FS_ACC)
		switch (lport->state) {
		case LPORT_ST_RNN_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
			break;
		case LPORT_ST_RSNN_NN:
			fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
			break;
		case LPORT_ST_RSPN_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
			break;
		case LPORT_ST_RFT_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
			break;
		case LPORT_ST_RFF_ID:
			fc_lport_enter_scr(lport);
			break;
		default:
			/* should have already been caught by state checks */
			break;
		}
	else
		fc_lport_error(lport, fp);
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
 * @sp:	    current sequence in SCR exchange
 * @fp:	    response frame
 * @lp_arg: Fibre Channel lport instance that sent the registration request
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error
 * and then unlock the lport.
 */
static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_SCR) {
		FC_LPORT_DBG(lport, "Received a SCR response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_ready(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_enter_scr() - Send a SCR (State Change Register) request
 * @lport: The local port to register for state changes
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
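 *
 * A successful SCR response (LS_ACC) moves the lport to the READY state;
 * see fc_lport_scr_resp() above.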
 */
static void fc_lport_enter_scr(struct fc_lport *lport)
{
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_SCR);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
				  fc_lport_scr_resp, lport,
				  2 * lport->r_a_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_enter_ns() - register some object with the name server
 * @lport: Fibre Channel local port to register
 * @state: The name server state to enter
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
{
	struct fc_frame *fp;
	enum fc_ns_req cmd;
	int size = sizeof(struct fc_ct_hdr);
	size_t len;

	FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
		     fc_lport_state_names[state],
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, state);

	switch (state) {
	case LPORT_ST_RNN_ID:
		cmd = FC_NS_RNN_ID;
		size += sizeof(struct fc_ns_rn_id);
		break;
	case LPORT_ST_RSNN_NN:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSNN_NN;
		size += sizeof(struct fc_ns_rsnn) + len;
		break;
	case LPORT_ST_RSPN_ID:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSPN_ID;
		size += sizeof(struct fc_ns_rspn) + len;
		break;
	case LPORT_ST_RFT_ID:
		cmd = FC_NS_RFT_ID;
		size += sizeof(struct fc_ns_rft);
		break;
	case LPORT_ST_RFF_ID:
		cmd = FC_NS_RFF_ID;
		size += sizeof(struct fc_ns_rff_id);
		break;
	default:
		fc_lport_error(lport, NULL);
		return;
	}

	fp = fc_frame_alloc(lport, size);
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
				  fc_lport_ns_resp,
				  lport, 3 * lport->r_a_tov))
		fc_lport_error(lport, fp);
}

static struct fc_rport_operations fc_lport_rport_ops = {
	.event_callback = fc_lport_rport_callback,
};

/**
 * fc_lport_enter_dns() - Create a fc_rport for the name server
 * @lport: The local port requesting a remote port for the name server
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
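 *
 * Completion of the dNS rport login is reported back through
 * fc_lport_rport_callback().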
 */
static void fc_lport_enter_dns(struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;

	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DNS);

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
	mutex_unlock(&lport->disc.disc_mutex);
	if (!rdata)
		goto err;

	rdata->ops = &fc_lport_rport_ops;
	lport->tt.rport_login(rdata);
	return;

err:
	fc_lport_error(lport, NULL);
}

/**
 * fc_lport_timeout() - Handler for the retry_work timer
 * @work: The work struct of the local port
 */
static void fc_lport_timeout(struct work_struct *work)
{
	struct fc_lport *lport =
		container_of(work, struct fc_lport,
			     retry_work.work);

	mutex_lock(&lport->lp_mutex);

	switch (lport->state) {
	case LPORT_ST_DISABLED:
		WARN_ON(1);
		break;
	case LPORT_ST_READY:
		WARN_ON(1);
		break;
	case LPORT_ST_RESET:
		break;
	case LPORT_ST_FLOGI:
		fc_lport_enter_flogi(lport);
		break;
	case LPORT_ST_DNS:
		fc_lport_enter_dns(lport);
		break;
	case LPORT_ST_RNN_ID:
	case LPORT_ST_RSNN_NN:
	case LPORT_ST_RSPN_ID:
	case LPORT_ST_RFT_ID:
	case LPORT_ST_RFF_ID:
		fc_lport_enter_ns(lport, lport->state);
		break;
	case LPORT_ST_SCR:
		fc_lport_enter_scr(lport);
		break;
	case LPORT_ST_LOGO:
		fc_lport_enter_logo(lport);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_logo_resp() - Handle response to LOGO request
 * @sp:	    The sequence that the LOGO was on
 * @fp:	    The LOGO frame
 * @lp_arg: The lport that received the LOGO response
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_LOGO) {
		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_disabled(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_logo_resp);

/**
 * fc_lport_enter_logo() - Logout of the fabric
 * @lport: The local port to be logged out
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_logo(struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_els_logo *logo;

	FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_LOGO);
	fc_vports_linkchange(lport);

	fp = fc_frame_alloc(lport, sizeof(*logo));
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
				  fc_lport_logo_resp, lport,
				  2 * lport->r_a_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_flogi_resp() - Handle response to FLOGI request
 * @sp:	    The sequence that the FLOGI was on
 * @fp:	    The FLOGI response frame
 * @lp_arg: The lport that received the FLOGI response
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
			 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_els_flogi *flp;
	u32 did;
	u16 csp_flags;
	unsigned int r_a_tov;
	unsigned int e_d_tov;
	u16 mfs;

	FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_FLOGI) {
		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	did = fc_frame_did(fp);
	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
		flp = fc_frame_payload_get(fp, sizeof(*flp));
		if (flp) {
			mfs = ntohs(flp->fl_csp.sp_bb_data) &
				FC_SP_BB_DATA_MASK;
			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
			    mfs < lport->mfs)
				lport->mfs = mfs;
			csp_flags = ntohs(flp->fl_csp.sp_features);
			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
			if (csp_flags & FC_SP_FT_EDTR)
				e_d_tov /= 1000000;

			lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);

			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
				if (e_d_tov > lport->e_d_tov)
					lport->e_d_tov = e_d_tov;
				lport->r_a_tov = 2 * e_d_tov;
				fc_lport_set_port_id(lport, did, fp);
				printk(KERN_INFO "host%d: libfc: "
				       "Port (%6.6x) entered "
				       "point-to-point mode\n",
				       lport->host->host_no, did);
				fc_lport_ptp_setup(lport, fc_frame_sid(fp),
						   get_unaligned_be64(
							   &flp->fl_wwpn),
						   get_unaligned_be64(
							   &flp->fl_wwnn));
			} else {
				lport->e_d_tov = e_d_tov;
				lport->r_a_tov = r_a_tov;
				fc_host_fabric_name(lport->host) =
					get_unaligned_be64(&flp->fl_wwnn);
				fc_lport_set_port_id(lport, did, fp);
				fc_lport_enter_dns(lport);
			}
		}
	} else {
		FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
		fc_lport_error(lport, fp);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_flogi_resp);

/**
 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
 * @lport: Fibre Channel local port to be logged in to the fabric
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
void fc_lport_enter_flogi(struct fc_lport *lport)
{
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_FLOGI);

	if (lport->point_to_multipoint) {
		if (lport->port_id)
			fc_lport_enter_ready(lport);
		return;
	}

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp)
		return fc_lport_error(lport, fp);

	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
				  lport->vport ? ELS_FDISC : ELS_FLOGI,
				  fc_lport_flogi_resp, lport,
				  lport->vport ? 2 * lport->r_a_tov :
				  lport->e_d_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_config() - Configure a fc_lport
 * @lport: The local port to be configured
 */
int fc_lport_config(struct fc_lport *lport)
{
	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
	mutex_init(&lport->lp_mutex);

	fc_lport_state_enter(lport, LPORT_ST_DISABLED);

	fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
	fc_lport_add_fc4_type(lport, FC_TYPE_CT);

	return 0;
}
EXPORT_SYMBOL(fc_lport_config);

/**
 * fc_lport_init() - Initialize the lport layer for a local port
 * @lport: The local port to initialize the exchange layer for
 */
int fc_lport_init(struct fc_lport *lport)
{
	if (!lport->tt.lport_recv)
		lport->tt.lport_recv = fc_lport_recv_req;

	if (!lport->tt.lport_reset)
		lport->tt.lport_reset = fc_lport_reset;

	fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
	fc_host_node_name(lport->host) = lport->wwnn;
	fc_host_port_name(lport->host) = lport->wwpn;
	fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
	memset(fc_host_supported_fc4s(lport->host), 0,
	       sizeof(fc_host_supported_fc4s(lport->host)));
	fc_host_supported_fc4s(lport->host)[2] = 1;
	fc_host_supported_fc4s(lport->host)[7] = 1;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(lport->host), 0,
	       sizeof(fc_host_active_fc4s(lport->host)));
	fc_host_active_fc4s(lport->host)[2] = 1;
	fc_host_active_fc4s(lport->host)[7] = 1;
	fc_host_maxframe_size(lport->host) = lport->mfs;
	fc_host_supported_speeds(lport->host) = 0;
	if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;

	return 0;
}
EXPORT_SYMBOL(fc_lport_init);

/**
 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
 * @sp:	      The sequence for the FC Passthrough response
 * @fp:	      The response frame
 * @info_arg: The BSG info that the response is for
 */
static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *info_arg)
{
	struct fc_bsg_info *info = info_arg;
	struct fc_bsg_job *job = info->job;
	struct fc_lport *lport = info->lport;
	struct fc_frame_header *fh;
	size_t len;
	void *buf;

	if (IS_ERR(fp)) {
		job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
			-ECONNABORTED : -ETIMEDOUT;
		job->reply_len = sizeof(uint32_t);
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
		return;
	}

	mutex_lock(&lport->lp_mutex);
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
		/* Get the response code from the first frame payload */
		unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
			ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
			(unsigned short)fc_frame_payload_op(fp);

		/* Save the reply status of the job */
		job->reply->reply_data.ctels_reply.status =
			(cmd == info->rsp_code) ?
			FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
	}

	job->reply->reply_payload_rcv_len +=
		fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
					 &info->offset, KM_BIO_SRC_IRQ, NULL);

	if (fr_eof(fp) == FC_EOF_T &&
	    (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		if (job->reply->reply_payload_rcv_len >
		    job->reply_payload.payload_len)
			job->reply->reply_payload_rcv_len =
				job->reply_payload.payload_len;
		job->reply->result = 0;
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
	}
	fc_frame_free(fp);
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_els_request() - Send ELS passthrough request
 * @job:   The BSG Passthrough job
 * @lport: The local port sending the request
 * @did:   The destination port id
 * @tov:   The timeout period to wait for the response
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static int fc_lport_els_request(struct fc_bsg_job *job,
				struct fc_lport *lport,
				u32 did, u32 tov)
{
	struct fc_bsg_info *info;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	char *pp;
	int len;

	fp = fc_frame_alloc(lport, job->request_payload.payload_len);
	if (!fp)
		return -ENOMEM;

	len = job->request_payload.payload_len;
	pp = fc_frame_payload_get(fp, len);

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pp, len);

	fh = fc_frame_header_get(fp);
	fh->fh_r_ctl = FC_RCTL_ELS_REQ;
	hton24(fh->fh_d_id, did);
	hton24(fh->fh_s_id, lport->port_id);
	fh->fh_type = FC_TYPE_ELS;
	hton24(fh->fh_f_ctl, FC_FCTL_REQ);
	fh->fh_cs_ctl = 0;
	fh->fh_df_ctl = 0;
	fh->fh_parm_offset = 0;

	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
	if (!info) {
		fc_frame_free(fp);
		return -ENOMEM;
	}

	info->job = job;
	info->lport = lport;
	info->rsp_code = ELS_LS_ACC;
	info->nents = job->reply_payload.sg_cnt;
	info->sg = job->reply_payload.sg_list;

	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
				     NULL, info, tov)) {
		kfree(info);
		return -ECOMM;
	}
	return 0;
}

/**
 * fc_lport_ct_request() - Send CT Passthrough request
 * @job:   The BSG Passthrough job
 * @lport: The local port sending the request
 * @did:   The destination FC-ID
 * @tov:   The timeout period to wait for the response
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static int fc_lport_ct_request(struct fc_bsg_job *job,
			       struct fc_lport *lport, u32 did, u32 tov)
{
	struct fc_bsg_info *info;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_ct_req *ct;
	size_t len;

	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
			    job->request_payload.payload_len);
	if (!fp)
		return -ENOMEM;

	len = job->request_payload.payload_len;
	ct = fc_frame_payload_get(fp, len);

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ct, len);

	fh = fc_frame_header_get(fp);
	fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
	hton24(fh->fh_d_id, did);
	hton24(fh->fh_s_id, lport->port_id);
	fh->fh_type = FC_TYPE_CT;
	hton24(fh->fh_f_ctl, FC_FCTL_REQ);
	fh->fh_cs_ctl = 0;
	fh->fh_df_ctl = 0;
	fh->fh_parm_offset = 0;

	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
	if (!info) {
		fc_frame_free(fp);
		return -ENOMEM;
	}

	info->job = job;
	info->lport = lport;
	info->rsp_code = FC_FS_ACC;
	info->nents = job->reply_payload.sg_cnt;
	info->sg = job->reply_payload.sg_list;

	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
				     NULL, info, tov)) {
		kfree(info);
		return -ECOMM;
	}
	return 0;
}

/**
 * fc_lport_bsg_request() - The common entry point for sending
 *			    FC Passthrough requests
 * @job: The BSG passthrough job
 */
int fc_lport_bsg_request(struct fc_bsg_job *job)
{
	struct request *rsp = job->req->next_rq;
	struct Scsi_Host *shost = job->shost;
	struct fc_lport *lport = shost_priv(shost);
	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	int rc = -EINVAL;
	u32 did;

	job->reply->reply_payload_rcv_len = 0;
	if (rsp)
		rsp->resid_len = job->reply_payload.payload_len;

	mutex_lock(&lport->lp_mutex);

	switch (job->request->msgcode) {
	case FC_BSG_RPT_ELS:
		rport = job->rport;
		if (!rport)
			break;

		rdata = rport->dd_data;
		rc = fc_lport_els_request(job, lport, rport->port_id,
					  rdata->e_d_tov);
		break;

	case FC_BSG_RPT_CT:
		rport = job->rport;
		if (!rport)
			break;

		rdata = rport->dd_data;
		rc = fc_lport_ct_request(job, lport, rport->port_id,
					 rdata->e_d_tov);
		break;

	case FC_BSG_HST_CT:
		did = ntoh24(job->request->rqst_data.h_ct.port_id);
		if (did == FC_FID_DIR_SERV)
			rdata = lport->dns_rdata;
		else
			rdata = lport->tt.rport_lookup(lport, did);

		if (!rdata)
			break;

		rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
		break;

	case FC_BSG_HST_ELS_NOLOGIN:
		did = ntoh24(job->request->rqst_data.h_els.port_id);
		rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
	return rc;
}
EXPORT_SYMBOL(fc_lport_bsg_request);
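
/*
 * Illustrative only (a sketch, not code in this file): a minimal lport
 * bring-up sequence as a low-level driver might perform it, assuming
 * lport->host and the libfc function templates have already been set up
 * elsewhere, and where 'mfs' is a driver-chosen maximum frame size.
 *
 *	fc_lport_config(lport);
 *	fc_lport_init(lport);
 *	fc_set_mfs(lport, mfs);
 *	fc_fabric_login(lport);
 *	fc_linkup(lport);
 */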