/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * PORT LOCKING NOTES
 *
 * These comments only apply to the 'port code' which consists of the lport,
 * disc and rport blocks.
 *
 * MOTIVATION
 *
 * The lport, disc and rport blocks all have mutexes that are used to protect
 * those objects. The main motivation for these locks is to prevent from
 * having an lport reset just before we send a frame. In that scenario the
 * lport's FID would get set to zero and then we'd send a frame with an
 * invalid SID. We also need to ensure that states don't change unexpectedly
 * while processing another state.
 *
 * HIERARCHY
 *
 * The following hierarchy defines the locking rules. A greater lock
 * may be held before acquiring a lesser lock, but a lesser lock should never
 * be held while attempting to acquire a greater lock. Here is the hierarchy -
 *
 * lport > disc, lport > rport, disc > rport
 *
 * CALLBACKS
 *
 * The callbacks cause complications with this scheme. There is a callback
 * from the rport (to either lport or disc) and a callback from disc
 * (to the lport).
 *
 * As rports exit the rport state machine a callback is made to the owner of
 * the rport to notify success or failure. Since the callback is likely to
 * cause the lport or disc to grab its lock we cannot hold the rport lock
 * while making the callback. To ensure that the rport is not free'd while
 * processing the callback the rport callbacks are serialized through a
 * single-threaded workqueue. An rport would never be free'd while in a
 * callback handler because no other rport work in this queue can be executed
 * at the same time.
 *
 * When discovery succeeds or fails a callback is made to the lport as
 * notification. Currently, successful discovery causes the lport to take no
 * action. A failure will cause the lport to reset. There is likely a circular
 * locking problem with this implementation.
 */

/*
 * LPORT LOCKING
 *
 * The critical sections protected by the lport's mutex are quite broad and
 * may be improved upon in the future. The lport code and its locking doesn't
 * influence the I/O path, so excessive locking doesn't penalize I/O
 * performance.
 *
 * The strategy is to lock whenever processing a request or response. Note
 * that every _enter_* function corresponds to a state change. They generally
 * change the lport's state and then send a request out on the wire. We lock
 * before calling any of these functions to protect that state change. This
 * means that the entry points into the lport block manage the locks while
 * the state machine can transition between states (i.e. _enter_* functions)
 * while always staying protected.
 *
 * When handling responses we also hold the lport mutex broadly. When the
 * lport receives the response frame it locks the mutex and then calls the
 * appropriate handler for the particular response. Generally a response will
 * trigger a state change and so the lock must already be held.
 *
 * Retries also have to consider the locking. The retries occur from a work
 * context and the work function will lock the lport and then retry the state
 * (i.e. _enter_* function).
 */

#include <linux/timer.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <scsi/fc/fc_gs.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
#include <linux/scatterlist.h>

#include "fc_libfc.h"

/* Fabric IDs to use for point-to-point mode, chosen on whims. */
#define FC_LOCAL_PTP_FID_LO   0x010101
#define FC_LOCAL_PTP_FID_HI   0x010102

#define	DNS_DELAY	      3 /* Discovery delay after RSCN (in seconds)*/

static void fc_lport_error(struct fc_lport *, struct fc_frame *);

static void fc_lport_enter_reset(struct fc_lport *);
static void fc_lport_enter_flogi(struct fc_lport *);
static void fc_lport_enter_dns(struct fc_lport *);
static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
static void fc_lport_enter_scr(struct fc_lport *);
static void fc_lport_enter_ready(struct fc_lport *);
static void fc_lport_enter_logo(struct fc_lport *);

/* Printable names for each lport state, indexed by enum fc_lport_state. */
static const char *fc_lport_state_names[] = {
	[LPORT_ST_DISABLED] = "disabled",
	[LPORT_ST_FLOGI] =    "FLOGI",
	[LPORT_ST_DNS] =      "dNS",
	[LPORT_ST_RNN_ID] =   "RNN_ID",
	[LPORT_ST_RSNN_NN] =  "RSNN_NN",
	[LPORT_ST_RSPN_ID] =  "RSPN_ID",
	[LPORT_ST_RFT_ID] =   "RFT_ID",
	[LPORT_ST_RFF_ID] =   "RFF_ID",
	[LPORT_ST_SCR] =      "SCR",
	[LPORT_ST_READY] =    "Ready",
	[LPORT_ST_LOGO] =     "LOGO",
	[LPORT_ST_RESET] =    "reset",
};

/**
 * struct fc_bsg_info - FC Passthrough management structure
 * @job:      The passthrough job
 * @lport:    The local port to pass through a command
 * @rsp_code: The expected response code
 * @sg:       job->reply_payload.sg_list
 * @nents:    job->reply_payload.sg_cnt
 * @offset:   The offset into the response data
 */
struct fc_bsg_info {
	struct fc_bsg_job *job;
	struct fc_lport *lport;
	u16 rsp_code;
	struct scatterlist *sg;
	u32 nents;
	size_t offset;
};

/**
 * fc_frame_drop() - Dummy frame handler that just frees the frame
 * @lport: The local port the frame was received on (unused)
 * @fp:    The received frame
 *
 * Installed as the frame_send handler while the lport is being torn
 * down so that late frames are silently discarded.
 */
static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
{
	fc_frame_free(fp);
	return 0;
}

/**
 * fc_lport_rport_callback() - Event handler for rport events
 * @lport: The lport which is receiving the event
 * @rdata: private remote port data
 * @event: The event that occurred
 *
 * Locking Note: The rport lock should not be held when calling
 *		 this function.
 */
static void fc_lport_rport_callback(struct fc_lport *lport,
				    struct fc_rport_priv *rdata,
				    enum fc_rport_event event)
{
	FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
		     rdata->ids.port_id);

	mutex_lock(&lport->lp_mutex);
	switch (event) {
	case RPORT_EV_READY:
		/* Only the directory server rport reports here; it is only
		 * expected while the lport is logging into the dNS. */
		if (lport->state == LPORT_ST_DNS) {
			lport->dns_rdata = rdata;
			fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
		} else {
			FC_LPORT_DBG(lport, "Received an READY event "
				     "on port (%6.6x) for the directory "
				     "server, but the lport is not "
				     "in the DNS state, it's in the "
				     "%d state", rdata->ids.port_id,
				     lport->state);
			lport->tt.rport_logoff(rdata);
		}
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		lport->dns_rdata = NULL;
		break;
	case RPORT_EV_NONE:
		break;
	}
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_state() - Return a string which represents the lport's state
 * @lport: The lport whose state is to converted to a string
 */
static const char *fc_lport_state(struct fc_lport *lport)
{
	const char *cp;

	cp = fc_lport_state_names[lport->state];
	if (!cp)
		cp = "unknown";
	return cp;
}

/**
 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
 * @lport:       The lport to attach the ptp rport to
 * @remote_fid:  The FID of the ptp rport
 * @remote_wwpn: The WWPN of the ptp rport
 * @remote_wwnn: The WWNN of the ptp rport
 */
static void fc_lport_ptp_setup(struct fc_lport *lport,
			       u32 remote_fid, u64 remote_wwpn,
			       u64 remote_wwnn)
{
	mutex_lock(&lport->disc.disc_mutex);
	/* Drop any previous point-to-point rport before creating the new one. */
	if (lport->ptp_rdata) {
		lport->tt.rport_logoff(lport->ptp_rdata);
		kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
	}
	lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
	kref_get(&lport->ptp_rdata->kref);
	lport->ptp_rdata->ids.port_name = remote_wwpn;
	lport->ptp_rdata->ids.node_name = remote_wwnn;
	mutex_unlock(&lport->disc.disc_mutex);

	lport->tt.rport_login(lport->ptp_rdata);

	fc_lport_enter_ready(lport);
}

/**
 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
 * @shost:  The SCSI host whose port state is to be determined
 */
void fc_get_host_port_state(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	mutex_lock(&lport->lp_mutex);
	if (!lport->link_up)
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
	else
		switch (lport->state) {
		case LPORT_ST_READY:
			fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
			break;
		default:
			fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		}
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_get_host_port_state);

/**
 * fc_get_host_speed() - Return the speed of the given Scsi_Host
 * @shost: The SCSI host whose port speed is to be determined
 */
void fc_get_host_speed(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	fc_host_speed(shost) = lport->link_speed;
}
EXPORT_SYMBOL(fc_get_host_speed);

/**
 * fc_get_host_stats() - Return the Scsi_Host's statistics
 * @shost: The SCSI host whose statistics are to be returned
 */
struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
{
	struct fc_host_statistics *fcoe_stats;
	struct fc_lport *lport = shost_priv(shost);
	struct timespec v0, v1;
	unsigned int cpu;
	u64 fcp_in_bytes = 0;
	u64 fcp_out_bytes = 0;

	fcoe_stats = &lport->host_stats;
	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));

	jiffies_to_timespec(jiffies, &v0);
	jiffies_to_timespec(lport->boot_time, &v1);
	fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);

	/* Sum the per-CPU counters kept by the fast path. */
	for_each_possible_cpu(cpu) {
		struct fcoe_dev_stats *stats;

		stats = per_cpu_ptr(lport->dev_stats, cpu);

		fcoe_stats->tx_frames += stats->TxFrames;
		fcoe_stats->tx_words += stats->TxWords;
		fcoe_stats->rx_frames += stats->RxFrames;
		fcoe_stats->rx_words += stats->RxWords;
		fcoe_stats->error_frames += stats->ErrorFrames;
		fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
		fcoe_stats->fcp_input_requests += stats->InputRequests;
		fcoe_stats->fcp_output_requests += stats->OutputRequests;
		fcoe_stats->fcp_control_requests += stats->ControlRequests;
		fcp_in_bytes += stats->InputBytes;
		fcp_out_bytes += stats->OutputBytes;
		fcoe_stats->link_failure_count += stats->LinkFailureCount;
	}
	fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
	fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
	/* Counters we do not collect are reported as -1 (unsupported). */
	fcoe_stats->lip_count = -1;
	fcoe_stats->nos_count = -1;
	fcoe_stats->loss_of_sync_count = -1;
	fcoe_stats->loss_of_signal_count = -1;
	fcoe_stats->prim_seq_protocol_err_count = -1;
	fcoe_stats->dumped_frames = -1;
	return fcoe_stats;
}
EXPORT_SYMBOL(fc_get_host_stats);

/**
 * fc_lport_flogi_fill() - Fill in FLOGI command for request
 * @lport: The local port the FLOGI is for
 * @flogi: The FLOGI command
 * @op:    The opcode
 */
static void fc_lport_flogi_fill(struct fc_lport *lport,
				struct fc_els_flogi *flogi,
				unsigned int op)
{
	struct fc_els_csp *sp;
	struct fc_els_cssp *cp;

	memset(flogi, 0, sizeof(*flogi));
	flogi->fl_cmd = (u8) op;
	put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
	put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
	sp = &flogi->fl_csp;
	sp->sp_hi_ver = 0x20;
	sp->sp_lo_ver = 0x20;
	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
	sp->sp_bb_data = htons((u16) lport->mfs);
	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
	if (op != ELS_FLOGI) {
		/* The extra service parameters only apply to PLOGI. */
		sp->sp_features = htons(FC_SP_FT_CIRO);
		sp->sp_tot_seq = htons(255);	/* seq. we accept */
		sp->sp_rel_off = htons(0x1f);
		sp->sp_e_d_tov = htonl(lport->e_d_tov);

		cp->cp_rdfs = htons((u16) lport->mfs);
		cp->cp_con_seq = htons(255);
		cp->cp_open_seq = 1;
	}
}

/**
 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
 * @lport: The local port to add a new FC-4 type to
 * @type:  The new FC-4 type
 *
 * Sets the bit for @type in the lport's FC-4 type map (FC_NS_BPW bits
 * per big-endian word).
 */
static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
{
	__be32 *mp;

	mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
	*mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
}

/**
 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
 * @lport: Fibre Channel local port receiving the RLIR
 * @fp:    The RLIR request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
{
	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
		     fc_lport_state(lport));

	/* Simply accept the report; no incident processing is done. */
	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
	fc_frame_free(fp);
}

/**
 * fc_lport_recv_echo_req() - Handle received ECHO request
 * @lport: The local port receiving the ECHO
 * @in_fp: ECHO request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_echo_req(struct fc_lport *lport,
				   struct fc_frame *in_fp)
{
	struct fc_frame *fp;
	unsigned int len;
	void *pp;
	void *dp;

	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
		     fc_lport_state(lport));

	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(in_fp, len);

	/* The reply must at least hold the LS_ACC opcode word. */
	if (len < sizeof(__be32))
		len = sizeof(__be32);

	fp = fc_frame_alloc(lport, len);
	if (fp) {
		dp = fc_frame_payload_get(fp, len);
		memcpy(dp, pp, len);
		*((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
		fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
		lport->tt.frame_send(lport, fp);
	}
	fc_frame_free(in_fp);
}

/**
 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
 * @lport: The local port receiving the RNID
 * @in_fp: The RNID request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_rnid_req(struct fc_lport *lport,
				   struct fc_frame *in_fp)
{
	struct fc_frame *fp;
	struct fc_els_rnid *req;
	struct {
		struct fc_els_rnid_resp rnid;
		struct fc_els_rnid_cid	cid;
		struct fc_els_rnid_gen	gen;
	} *rp;
	struct fc_seq_els_data rjt_data;
	u8 fmt;
	size_t len;

	FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
		     fc_lport_state(lport));

	req = fc_frame_payload_get(in_fp, sizeof(*req));
	if (!req) {
		/* Truncated request: reject it. */
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
	} else {
		fmt = req->rnid_fmt;
		len = sizeof(*rp);
		if (fmt != ELS_RNIDF_GEN ||
		    ntohl(lport->rnid_gen.rnid_atype) == 0) {
			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
			len -= sizeof(rp->gen);
		}
		fp = fc_frame_alloc(lport, len);
		if (fp) {
			rp = fc_frame_payload_get(fp, len);
			memset(rp, 0, len);
			rp->rnid.rnid_cmd = ELS_LS_ACC;
			rp->rnid.rnid_fmt = fmt;
			rp->rnid.rnid_cid_len = sizeof(rp->cid);
			rp->cid.rnid_wwpn = htonll(lport->wwpn);
			rp->cid.rnid_wwnn = htonll(lport->wwnn);
			if (fmt == ELS_RNIDF_GEN) {
				rp->rnid.rnid_sid_len = sizeof(rp->gen);
				memcpy(&rp->gen, &lport->rnid_gen,
				       sizeof(rp->gen));
			}
			fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
			lport->tt.frame_send(lport, fp);
		}
	}
	fc_frame_free(in_fp);
}

/**
 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
 * @lport: The local port receiving the LOGO
 * @fp:    The LOGO request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
	fc_lport_enter_reset(lport);
	fc_frame_free(fp);
}

/**
 * fc_fabric_login() - Start the lport state machine
 * @lport: The local port that should log into the fabric
 *
 * Locking Note: This function should not be called
 *		 with the lport lock held.
 */
int fc_fabric_login(struct fc_lport *lport)
{
	int rc = -1;

	mutex_lock(&lport->lp_mutex);
	if (lport->state == LPORT_ST_DISABLED ||
	    lport->state == LPORT_ST_LOGO) {
		fc_lport_state_enter(lport, LPORT_ST_RESET);
		fc_lport_enter_reset(lport);
		rc = 0;
	}
	mutex_unlock(&lport->lp_mutex);

	return rc;
}
EXPORT_SYMBOL(fc_fabric_login);

/**
 * __fc_linkup() - Handler for transport linkup events
 * @lport: The lport whose link is up
 *
 * Locking: must be called with the lp_mutex held
 */
void __fc_linkup(struct fc_lport *lport)
{
	if (!lport->link_up) {
		lport->link_up = 1;

		if (lport->state == LPORT_ST_RESET)
			fc_lport_enter_flogi(lport);
	}
}

/**
 * fc_linkup() - Handler for transport linkup events
 * @lport: The local port whose link is up
 */
void fc_linkup(struct fc_lport *lport)
{
	printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
	       lport->host->host_no, lport->port_id);

	mutex_lock(&lport->lp_mutex);
	__fc_linkup(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkup);

/**
 * __fc_linkdown() - Handler for transport linkdown events
 * @lport: The lport whose link is down
 *
 * Locking: must be called with the lp_mutex held
 */
void __fc_linkdown(struct fc_lport *lport)
{
	if (lport->link_up) {
		lport->link_up = 0;
		fc_lport_enter_reset(lport);
		lport->tt.fcp_cleanup(lport);
	}
}

/**
 * fc_linkdown() - Handler for transport linkdown events
 * @lport: The local port whose link is down
 */
void fc_linkdown(struct fc_lport *lport)
{
	printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
	       lport->host->host_no, lport->port_id);

	mutex_lock(&lport->lp_mutex);
	__fc_linkdown(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkdown);

/**
 * fc_fabric_logoff() - Logout of the fabric
 * @lport: The local port to logoff the fabric
 *
 * Return value:
 *	0 for success, -1 for failure
 */
int fc_fabric_logoff(struct fc_lport *lport)
{
	lport->tt.disc_stop_final(lport);
	mutex_lock(&lport->lp_mutex);
	if (lport->dns_rdata)
		lport->tt.rport_logoff(lport->dns_rdata);
	mutex_unlock(&lport->lp_mutex);
	/* Let the directory-server logoff complete before sending LOGO. */
	lport->tt.rport_flush_queue();
	mutex_lock(&lport->lp_mutex);
	fc_lport_enter_logo(lport);
	mutex_unlock(&lport->lp_mutex);
	cancel_delayed_work_sync(&lport->retry_work);
	return 0;
}
EXPORT_SYMBOL(fc_fabric_logoff);

/**
 * fc_lport_destroy() - Unregister a fc_lport
 * @lport: The local port to unregister
 *
 * Note:
 * exit routine for fc_lport instance
 * clean-up all the allocated memory
 * and free up other system resources.
 *
 */
int fc_lport_destroy(struct fc_lport *lport)
{
	mutex_lock(&lport->lp_mutex);
	lport->state = LPORT_ST_DISABLED;
	lport->link_up = 0;
	/* Silently drop any frames sent from this point on. */
	lport->tt.frame_send = fc_frame_drop;
	mutex_unlock(&lport->lp_mutex);

	lport->tt.fcp_abort_io(lport);
	lport->tt.disc_stop_final(lport);
	lport->tt.exch_mgr_reset(lport, 0, 0);
	fc_fc4_del_lport(lport);
	return 0;
}
EXPORT_SYMBOL(fc_lport_destroy);

/**
 * fc_set_mfs() - Set the maximum frame size for a local port
 * @lport: The local port to set the MFS for
 * @mfs:   The new MFS
 */
int fc_set_mfs(struct fc_lport *lport, u32 mfs)
{
	unsigned int old_mfs;
	int rc = -EINVAL;

	mutex_lock(&lport->lp_mutex);

	old_mfs = lport->mfs;

	if (mfs >= FC_MIN_MAX_FRAME) {
		mfs &= ~3;	/* frame size is rounded down to a word multiple */
		if (mfs > FC_MAX_FRAME)
			mfs = FC_MAX_FRAME;
		mfs -= sizeof(struct fc_frame_header);
		lport->mfs = mfs;
		rc = 0;
	}

	/* Shrinking the MFS requires a reset so it can be renegotiated. */
	if (!rc && mfs < old_mfs)
		fc_lport_enter_reset(lport);

	mutex_unlock(&lport->lp_mutex);

	return rc;
}
EXPORT_SYMBOL(fc_set_mfs);

/**
 * fc_lport_disc_callback() - Callback for discovery events
 * @lport: The local port receiving the event
 * @event: The discovery event
 */
void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
{
	switch (event) {
	case DISC_EV_SUCCESS:
		FC_LPORT_DBG(lport, "Discovery succeeded\n");
		break;
	case DISC_EV_FAILED:
		printk(KERN_ERR "host%d: libfc: "
		       "Discovery failed for port (%6.6x)\n",
		       lport->host->host_no, lport->port_id);
		mutex_lock(&lport->lp_mutex);
		fc_lport_enter_reset(lport);
		mutex_unlock(&lport->lp_mutex);
		break;
	case DISC_EV_NONE:
		WARN_ON(1);
		break;
	}
}

/**
 * fc_lport_enter_ready() - Enter the ready state and start discovery
 * @lport: The local port that is ready
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ready(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered READY from state %s\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_READY);
	if (lport->vport)
		fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
	fc_vports_linkchange(lport);

	/* In point-to-point mode there is no fabric to discover. */
	if (!lport->ptp_rdata)
		lport->tt.disc_start(fc_lport_disc_callback, lport);
}

/**
 * fc_lport_set_port_id() - set the local port Port ID
 * @lport:   The local port which will have its Port ID set.
 * @port_id: The new port ID.
 * @fp:      The frame containing the incoming request, or NULL.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
				 struct fc_frame *fp)
{
	if (port_id)
		printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
		       lport->host->host_no, port_id);

	lport->port_id = port_id;

	/* Update the fc_host */
	fc_host_port_id(lport->host) = port_id;

	if (lport->tt.lport_set_port_id)
		lport->tt.lport_set_port_id(lport, port_id, fp);
}

/**
 * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
 * @lport:   The local port which will have its Port ID set.
 * @port_id: The new port ID.
 *
 * Called by the lower-level driver when transport sets the local port_id.
 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
 * discovery to be skipped.
 */
void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
{
	mutex_lock(&lport->lp_mutex);

	fc_lport_set_port_id(lport, port_id, NULL);

	switch (lport->state) {
	case LPORT_ST_RESET:
	case LPORT_ST_FLOGI:
		if (port_id)
			fc_lport_enter_ready(lport);
		break;
	default:
		break;
	}
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_set_local_id);

/**
 * fc_lport_recv_flogi_req() - Receive a FLOGI request
 * @lport: The local port that received the request
 * @rx_fp: The FLOGI frame
 *
 * A received FLOGI request indicates a point-to-point connection.
 * Accept it with the common service parameters indicating our N port.
 * Set up to do a PLOGI if we have the higher-number WWPN.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_flogi_req(struct fc_lport *lport,
				    struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_els_flogi *flp;
	struct fc_els_flogi *new_flp;
	u64 remote_wwpn;
	u32 remote_fid;
	u32 local_fid;

	FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
		     fc_lport_state(lport));

	remote_fid = fc_frame_sid(rx_fp);
	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
	if (!flp)
		goto out;
	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
	if (remote_wwpn == lport->wwpn) {
		printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
		       "with same WWPN %16.16llx\n",
		       lport->host->host_no, remote_wwpn);
		goto out;
	}
	FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);

	/*
	 * XXX what is the right thing to do for FIDs?
	 * The originator might expect our S_ID to be 0xfffffe.
	 * But if so, both of us could end up with the same FID.
815 */ 816 local_fid = FC_LOCAL_PTP_FID_LO; 817 if (remote_wwpn < lport->wwpn) { 818 local_fid = FC_LOCAL_PTP_FID_HI; 819 if (!remote_fid || remote_fid == local_fid) 820 remote_fid = FC_LOCAL_PTP_FID_LO; 821 } else if (!remote_fid) { 822 remote_fid = FC_LOCAL_PTP_FID_HI; 823 } 824 825 fc_lport_set_port_id(lport, local_fid, rx_fp); 826 827 fp = fc_frame_alloc(lport, sizeof(*flp)); 828 if (fp) { 829 new_flp = fc_frame_payload_get(fp, sizeof(*flp)); 830 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI); 831 new_flp->fl_cmd = (u8) ELS_LS_ACC; 832 833 /* 834 * Send the response. If this fails, the originator should 835 * repeat the sequence. 836 */ 837 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); 838 fh = fc_frame_header_get(fp); 839 hton24(fh->fh_s_id, local_fid); 840 hton24(fh->fh_d_id, remote_fid); 841 lport->tt.frame_send(lport, fp); 842 843 } else { 844 fc_lport_error(lport, fp); 845 } 846 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn, 847 get_unaligned_be64(&flp->fl_wwnn)); 848 out: 849 fc_frame_free(rx_fp); 850 } 851 852 /** 853 * fc_lport_recv_els_req() - The generic lport ELS request handler 854 * @lport: The local port that received the request 855 * @fp: The request frame 856 * 857 * This function will see if the lport handles the request or 858 * if an rport should handle the request. 859 * 860 * Locking Note: This function should not be called with the lport 861 * lock held becuase it will grab the lock. 862 */ 863 static void fc_lport_recv_els_req(struct fc_lport *lport, 864 struct fc_frame *fp) 865 { 866 void (*recv)(struct fc_lport *, struct fc_frame *); 867 868 mutex_lock(&lport->lp_mutex); 869 870 /* 871 * Handle special ELS cases like FLOGI, LOGO, and 872 * RSCN here. These don't require a session. 873 * Even if we had a session, it might not be ready. 874 */ 875 if (!lport->link_up) 876 fc_frame_free(fp); 877 else { 878 /* 879 * Check opcode. 
880 */ 881 recv = lport->tt.rport_recv_req; 882 switch (fc_frame_payload_op(fp)) { 883 case ELS_FLOGI: 884 if (!lport->point_to_multipoint) 885 recv = fc_lport_recv_flogi_req; 886 break; 887 case ELS_LOGO: 888 if (fc_frame_sid(fp) == FC_FID_FLOGI) 889 recv = fc_lport_recv_logo_req; 890 break; 891 case ELS_RSCN: 892 recv = lport->tt.disc_recv_req; 893 break; 894 case ELS_ECHO: 895 recv = fc_lport_recv_echo_req; 896 break; 897 case ELS_RLIR: 898 recv = fc_lport_recv_rlir_req; 899 break; 900 case ELS_RNID: 901 recv = fc_lport_recv_rnid_req; 902 break; 903 } 904 905 recv(lport, fp); 906 } 907 mutex_unlock(&lport->lp_mutex); 908 } 909 910 static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len, 911 const struct fc_els_spp *spp_in, 912 struct fc_els_spp *spp_out) 913 { 914 return FC_SPP_RESP_INVL; 915 } 916 917 struct fc4_prov fc_lport_els_prov = { 918 .prli = fc_lport_els_prli, 919 .recv = fc_lport_recv_els_req, 920 }; 921 922 /** 923 * fc_lport_recv_req() - The generic lport request handler 924 * @lport: The lport that received the request 925 * @fp: The frame the request is in 926 * 927 * Locking Note: This function should not be called with the lport 928 * lock held becuase it may grab the lock. 929 */ 930 static void fc_lport_recv_req(struct fc_lport *lport, 931 struct fc_frame *fp) 932 { 933 struct fc_frame_header *fh = fc_frame_header_get(fp); 934 struct fc_seq *sp = fr_seq(fp); 935 struct fc4_prov *prov; 936 937 /* 938 * Use RCU read lock and module_lock to be sure module doesn't 939 * deregister and get unloaded while we're calling it. 940 * try_module_get() is inlined and accepts a NULL parameter. 941 * Only ELSes and FCP target ops should come through here. 942 * The locking is unfortunate, and a better scheme is being sought. 
943 */ 944 945 rcu_read_lock(); 946 if (fh->fh_type >= FC_FC4_PROV_SIZE) 947 goto drop; 948 prov = rcu_dereference(fc_passive_prov[fh->fh_type]); 949 if (!prov || !try_module_get(prov->module)) 950 goto drop; 951 rcu_read_unlock(); 952 prov->recv(lport, fp); 953 module_put(prov->module); 954 return; 955 drop: 956 rcu_read_unlock(); 957 FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type); 958 fc_frame_free(fp); 959 lport->tt.exch_done(sp); 960 } 961 962 /** 963 * fc_lport_reset() - Reset a local port 964 * @lport: The local port which should be reset 965 * 966 * Locking Note: This functions should not be called with the 967 * lport lock held. 968 */ 969 int fc_lport_reset(struct fc_lport *lport) 970 { 971 cancel_delayed_work_sync(&lport->retry_work); 972 mutex_lock(&lport->lp_mutex); 973 fc_lport_enter_reset(lport); 974 mutex_unlock(&lport->lp_mutex); 975 return 0; 976 } 977 EXPORT_SYMBOL(fc_lport_reset); 978 979 /** 980 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held 981 * @lport: The local port to be reset 982 * 983 * Locking Note: The lport lock is expected to be held before calling 984 * this routine. 985 */ 986 static void fc_lport_reset_locked(struct fc_lport *lport) 987 { 988 if (lport->dns_rdata) 989 lport->tt.rport_logoff(lport->dns_rdata); 990 991 if (lport->ptp_rdata) { 992 lport->tt.rport_logoff(lport->ptp_rdata); 993 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy); 994 lport->ptp_rdata = NULL; 995 } 996 997 lport->tt.disc_stop(lport); 998 999 lport->tt.exch_mgr_reset(lport, 0, 0); 1000 fc_host_fabric_name(lport->host) = 0; 1001 1002 if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up)) 1003 fc_lport_set_port_id(lport, 0, NULL); 1004 } 1005 1006 /** 1007 * fc_lport_enter_reset() - Reset the local port 1008 * @lport: The local port to be reset 1009 * 1010 * Locking Note: The lport lock is expected to be held before calling 1011 * this routine. 
1012 */ 1013 static void fc_lport_enter_reset(struct fc_lport *lport) 1014 { 1015 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", 1016 fc_lport_state(lport)); 1017 1018 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO) 1019 return; 1020 1021 if (lport->vport) { 1022 if (lport->link_up) 1023 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING); 1024 else 1025 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN); 1026 } 1027 fc_lport_state_enter(lport, LPORT_ST_RESET); 1028 fc_vports_linkchange(lport); 1029 fc_lport_reset_locked(lport); 1030 if (lport->link_up) 1031 fc_lport_enter_flogi(lport); 1032 } 1033 1034 /** 1035 * fc_lport_enter_disabled() - Disable the local port 1036 * @lport: The local port to be reset 1037 * 1038 * Locking Note: The lport lock is expected to be held before calling 1039 * this routine. 1040 */ 1041 static void fc_lport_enter_disabled(struct fc_lport *lport) 1042 { 1043 FC_LPORT_DBG(lport, "Entered disabled state from %s state\n", 1044 fc_lport_state(lport)); 1045 1046 fc_lport_state_enter(lport, LPORT_ST_DISABLED); 1047 fc_vports_linkchange(lport); 1048 fc_lport_reset_locked(lport); 1049 } 1050 1051 /** 1052 * fc_lport_error() - Handler for any errors 1053 * @lport: The local port that the error was on 1054 * @fp: The error code encoded in a frame pointer 1055 * 1056 * If the error was caused by a resource allocation failure 1057 * then wait for half a second and retry, otherwise retry 1058 * after the e_d_tov time. 1059 */ 1060 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) 1061 { 1062 unsigned long delay = 0; 1063 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n", 1064 PTR_ERR(fp), fc_lport_state(lport), 1065 lport->retry_count); 1066 1067 if (PTR_ERR(fp) == -FC_EX_CLOSED) 1068 return; 1069 1070 /* 1071 * Memory allocation failure, or the exchange timed out 1072 * or we received LS_RJT. 
	 * Retry after delay
	 */
	if (lport->retry_count < lport->max_retry_count) {
		lport->retry_count++;
		/* A NULL fp means allocation failure: retry quickly. */
		if (!fp)
			delay = msecs_to_jiffies(500);
		else
			delay = msecs_to_jiffies(lport->e_d_tov);

		schedule_delayed_work(&lport->retry_work, delay);
	} else
		/* Out of retries: reset the local port. */
		fc_lport_enter_reset(lport);
}

/**
 * fc_lport_ns_resp() - Handle response to a name server
 *                      registration exchange
 * @sp:     current sequence in exchange
 * @fp:     response frame
 * @lp_arg: Fibre Channel host port instance
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
			     void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_ct_hdr *ct;

	FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));

	/* The exchange was closed; don't touch the lport. */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	/* All name-server registration states lie in [RNN_ID, RFF_ID]. */
	if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
		FC_LPORT_DBG(lport, "Received a name server response, "
			     "but in state %s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	ct = fc_frame_payload_get(fp, sizeof(*ct));

	/*
	 * On an accepted CT directory-service reply, advance to the next
	 * registration state; anything else is treated as an error.
	 */
	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
	    ct->ct_fs_type == FC_FST_DIR &&
	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
	    ntohs(ct->ct_cmd) == FC_FS_ACC)
		switch (lport->state) {
		case LPORT_ST_RNN_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
			break;
		case LPORT_ST_RSNN_NN:
			fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
			break;
		case LPORT_ST_RSPN_ID:
			fc_lport_enter_ns(lport,
					  LPORT_ST_RFT_ID);
			break;
		case LPORT_ST_RFT_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
			break;
		case LPORT_ST_RFF_ID:
			/* Registration done; register for state changes. */
			fc_lport_enter_scr(lport);
			break;
		default:
			/* should have already been caught by state checks */
			break;
		}
	else
		fc_lport_error(lport, fp);
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
 * @sp:     current sequence in SCR exchange
 * @fp:     response frame
 * @lp_arg: Fibre Channel lport port instance that sent the registration request
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error
 * and then unlock the lport.
 */
static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));

	/* The exchange was closed; nothing more to do. */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_SCR) {
		FC_LPORT_DBG(lport, "Received a SCR response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	/* On LS_ACC the lport is fully up; otherwise retry via error path. */
	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_ready(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_enter_scr() - Send a SCR (State Change Register) request
 * @lport: The local port to register for state changes
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_scr(struct fc_lport *lport)
{
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_SCR);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
	if (!fp) {
		/* fp is NULL here, so fc_lport_error() schedules a retry. */
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
				  fc_lport_scr_resp, lport,
				  2 * lport->r_a_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_enter_ns() - register some object with the name server
 * @lport: Fibre Channel local port to register
 * @state: Name-server registration state to enter (selects the CT request)
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
{
	struct fc_frame *fp;
	enum fc_ns_req cmd;
	int size = sizeof(struct fc_ct_hdr);
	size_t len;

	FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
		     fc_lport_state_names[state],
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, state);

	/* Map the target state to a CT request and its payload size. */
	switch (state) {
	case LPORT_ST_RNN_ID:
		cmd = FC_NS_RNN_ID;
		size += sizeof(struct fc_ns_rn_id);
		break;
	case LPORT_ST_RSNN_NN:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSNN_NN;
		size += sizeof(struct fc_ns_rsnn) + len;
		break;
	case LPORT_ST_RSPN_ID:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSPN_ID;
		size += sizeof(struct fc_ns_rspn) + len;
		break;
	case LPORT_ST_RFT_ID:
		cmd = FC_NS_RFT_ID;
		size += sizeof(struct fc_ns_rft);
		break;
	case LPORT_ST_RFF_ID:
		cmd = FC_NS_RFF_ID;
		size += sizeof(struct fc_ns_rff_id);
		break;
	default:
		/* Not a name-server registration state. */
		fc_lport_error(lport, NULL);
		return;
	}

	fp = fc_frame_alloc(lport, size);
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
				  fc_lport_ns_resp,
				  lport, 3 * lport->r_a_tov))
		fc_lport_error(lport, fp);
}

static struct fc_rport_operations fc_lport_rport_ops = {
	.event_callback = fc_lport_rport_callback,
};

/**
 * fc_lport_enter_dns() - Create a fc_rport for the name server
 * @lport: The local port requesting a remote port for the name server
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_dns(struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;

	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DNS);

	/* Hold the disc mutex while creating the name-server rport. */
	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
	mutex_unlock(&lport->disc.disc_mutex);
	if (!rdata)
		goto err;

	rdata->ops = &fc_lport_rport_ops;
	lport->tt.rport_login(rdata);
	return;

err:
	fc_lport_error(lport, NULL);
}

/**
 * fc_lport_timeout() - Handler for the retry_work timer
 * @work: The work struct of the local port
 */
static void fc_lport_timeout(struct work_struct *work)
{
	struct fc_lport *lport =
		container_of(work, struct fc_lport,
			     retry_work.work);

	mutex_lock(&lport->lp_mutex);

	/* Re-issue the request for whichever state timed out. */
	switch (lport->state) {
	case LPORT_ST_DISABLED:
		WARN_ON(1);
		break;
	case LPORT_ST_READY:
		WARN_ON(1);
		break;
	case LPORT_ST_RESET:
		break;
	case LPORT_ST_FLOGI:
		fc_lport_enter_flogi(lport);
		break;
	case LPORT_ST_DNS:
		fc_lport_enter_dns(lport);
		break;
	case LPORT_ST_RNN_ID:
	case LPORT_ST_RSNN_NN:
	case LPORT_ST_RSPN_ID:
	case LPORT_ST_RFT_ID:
	case LPORT_ST_RFF_ID:
		fc_lport_enter_ns(lport, lport->state);
		break;
	case LPORT_ST_SCR:
		fc_lport_enter_scr(lport);
		break;
	case LPORT_ST_LOGO:
		fc_lport_enter_logo(lport);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_logo_resp() - Handle response to LOGO request
 * @sp:     The sequence that the LOGO was on
 * @fp:     The LOGO frame
 * @lp_arg: The lport port that received the LOGO request
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));

	/* The exchange was closed; nothing more to do. */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_LOGO) {
		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	/* A successful logout leaves the lport disabled. */
	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_disabled(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_logo_resp);

/**
 * fc_lport_enter_logo() - Logout of the fabric
 * @lport: The local port to be logged out
 *
 * Locking Note: The lport lock is expected to be held before calling
 *
this routine. 1436 */ 1437 static void fc_lport_enter_logo(struct fc_lport *lport) 1438 { 1439 struct fc_frame *fp; 1440 struct fc_els_logo *logo; 1441 1442 FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n", 1443 fc_lport_state(lport)); 1444 1445 fc_lport_state_enter(lport, LPORT_ST_LOGO); 1446 fc_vports_linkchange(lport); 1447 1448 fp = fc_frame_alloc(lport, sizeof(*logo)); 1449 if (!fp) { 1450 fc_lport_error(lport, fp); 1451 return; 1452 } 1453 1454 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO, 1455 fc_lport_logo_resp, lport, 1456 2 * lport->r_a_tov)) 1457 fc_lport_error(lport, NULL); 1458 } 1459 1460 /** 1461 * fc_lport_flogi_resp() - Handle response to FLOGI request 1462 * @sp: The sequence that the FLOGI was on 1463 * @fp: The FLOGI response frame 1464 * @lp_arg: The lport port that received the FLOGI response 1465 * 1466 * Locking Note: This function will be called without the lport lock 1467 * held, but it will lock, call an _enter_* function or fc_lport_error() 1468 * and then unlock the lport. 
 */
void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
			 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_els_flogi *flp;
	u32 did;
	u16 csp_flags;
	unsigned int r_a_tov;
	unsigned int e_d_tov;
	u16 mfs;

	FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));

	/* The exchange was closed; nothing more to do. */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_FLOGI) {
		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	did = fc_frame_did(fp);
	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
		flp = fc_frame_payload_get(fp, sizeof(*flp));
		if (flp) {
			/* Shrink our max frame size to the peer's limit. */
			mfs = ntohs(flp->fl_csp.sp_bb_data) &
				FC_SP_BB_DATA_MASK;
			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
			    mfs < lport->mfs)
				lport->mfs = mfs;
			csp_flags = ntohs(flp->fl_csp.sp_features);
			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
			/*
			 * With the EDTR feature bit set the E_D_TOV value
			 * appears to be in nanoseconds; scale it down.
			 */
			if (csp_flags & FC_SP_FT_EDTR)
				e_d_tov /= 1000000;

			lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);

			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
				/* No F_Port bit: point-to-point peer. */
				if (e_d_tov > lport->e_d_tov)
					lport->e_d_tov = e_d_tov;
				lport->r_a_tov = 2 * e_d_tov;
				fc_lport_set_port_id(lport, did, fp);
				printk(KERN_INFO "host%d: libfc: "
				       "Port (%6.6x) entered "
				       "point-to-point mode\n",
				       lport->host->host_no, did);
				fc_lport_ptp_setup(lport, fc_frame_sid(fp),
						   get_unaligned_be64(
							   &flp->fl_wwpn),
						   get_unaligned_be64(
							   &flp->fl_wwnn));
			} else {
				/* Fabric login: adopt the fabric timeouts. */
				lport->e_d_tov = e_d_tov;
				lport->r_a_tov = r_a_tov;
				fc_host_fabric_name(lport->host) =
					get_unaligned_be64(&flp->fl_wwnn);
				fc_lport_set_port_id(lport, did, fp);
				fc_lport_enter_dns(lport);
			}
		}
	} else {
		FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
		fc_lport_error(lport, fp);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_flogi_resp);

/**
 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
 * @lport: Fibre Channel local port to be logged in to the fabric
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
void fc_lport_enter_flogi(struct fc_lport *lport)
{
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_FLOGI);

	/* No fabric login in point-to-multipoint mode. */
	if (lport->point_to_multipoint) {
		if (lport->port_id)
			fc_lport_enter_ready(lport);
		return;
	}

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp)
		return fc_lport_error(lport, fp);

	/* NPIV vports send FDISC instead of FLOGI. */
	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
				  lport->vport ? ELS_FDISC : ELS_FLOGI,
				  fc_lport_flogi_resp, lport,
				  lport->vport ?
				  2 * lport->r_a_tov :
				  lport->e_d_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_config() - Configure a fc_lport
 * @lport: The local port to be configured
 */
int fc_lport_config(struct fc_lport *lport)
{
	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
	mutex_init(&lport->lp_mutex);

	fc_lport_state_enter(lport, LPORT_ST_DISABLED);

	fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
	fc_lport_add_fc4_type(lport, FC_TYPE_CT);

	return 0;
}
EXPORT_SYMBOL(fc_lport_config);

/**
 * fc_lport_init() - Initialize the lport layer for a local port
 * @lport: The local port to initialize the exchange layer for
 */
int fc_lport_init(struct fc_lport *lport)
{
	/* Install default handlers only if the LLD did not override them. */
	if (!lport->tt.lport_recv)
		lport->tt.lport_recv = fc_lport_recv_req;

	if (!lport->tt.lport_reset)
		lport->tt.lport_reset = fc_lport_reset;

	fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
	fc_host_node_name(lport->host) = lport->wwnn;
	fc_host_port_name(lport->host) = lport->wwpn;
	fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
	memset(fc_host_supported_fc4s(lport->host), 0,
	       sizeof(fc_host_supported_fc4s(lport->host)));
	fc_host_supported_fc4s(lport->host)[2] = 1;
	fc_host_supported_fc4s(lport->host)[7] = 1;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(lport->host), 0,
	       sizeof(fc_host_active_fc4s(lport->host)));
	fc_host_active_fc4s(lport->host)[2] = 1;
	fc_host_active_fc4s(lport->host)[7] = 1;
	fc_host_maxframe_size(lport->host) = lport->mfs;
	fc_host_supported_speeds(lport->host) = 0;
	if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
	fc_fc4_add_lport(lport);

	return 0;
}
EXPORT_SYMBOL(fc_lport_init);

/**
 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
 * @sp:       The sequence for the FC Passthrough response
 * @fp:       The response frame
 * @info_arg: The BSG info that the response is for
 */
static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *info_arg)
{
	struct fc_bsg_info *info = info_arg;
	struct fc_bsg_job *job = info->job;
	struct fc_lport *lport = info->lport;
	struct fc_frame_header *fh;
	size_t len;
	void *buf;

	if (IS_ERR(fp)) {
		/* Fail the job; no lport lock needed on the error path. */
		job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
			-ECONNABORTED : -ETIMEDOUT;
		job->reply_len = sizeof(uint32_t);
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
		return;
	}

	mutex_lock(&lport->lp_mutex);
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
		/* Get the response code from the first frame payload */
		unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
			ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
			(unsigned short)fc_frame_payload_op(fp);

		/* Save the reply status of the job */
		job->reply->reply_data.ctels_reply.status =
			(cmd == info->rsp_code) ?
			FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
	}

	/* Accumulate this frame's payload into the job's reply scatterlist. */
	job->reply->reply_payload_rcv_len +=
		fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
					 &info->offset, KM_BIO_SRC_IRQ, NULL);

	/* Complete the job on the last frame of the last sequence. */
	if (fr_eof(fp) == FC_EOF_T &&
	    (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		if (job->reply->reply_payload_rcv_len >
		    job->reply_payload.payload_len)
			job->reply->reply_payload_rcv_len =
				job->reply_payload.payload_len;
		job->reply->result = 0;
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
	}
	fc_frame_free(fp);
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_els_request() - Send ELS passthrough request
 * @job:   The BSG Passthrough job
 * @lport: The local port sending the request
 * @did:   The destination port id
 * @tov:   The timeout period to wait for the response
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static int fc_lport_els_request(struct fc_bsg_job *job,
				struct fc_lport *lport,
				u32 did, u32 tov)
{
	struct fc_bsg_info *info;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	char *pp;
	int len;

	fp = fc_frame_alloc(lport, job->request_payload.payload_len);
	if (!fp)
		return -ENOMEM;

	/* Copy the request payload from the BSG scatterlist into the frame. */
	len = job->request_payload.payload_len;
	pp = fc_frame_payload_get(fp, len);

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pp, len);

	/* Build the ELS request frame header. */
	fh = fc_frame_header_get(fp);
	fh->fh_r_ctl = FC_RCTL_ELS_REQ;
	hton24(fh->fh_d_id, did);
	hton24(fh->fh_s_id, lport->port_id);
	fh->fh_type = FC_TYPE_ELS;
	hton24(fh->fh_f_ctl, FC_FCTL_REQ);
	fh->fh_cs_ctl = 0;
	fh->fh_df_ctl = 0;
	fh->fh_parm_offset = 0;

	/* Response context; freed by fc_lport_bsg_resp() on completion. */
	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
	if (!info) {
		fc_frame_free(fp);
		return -ENOMEM;
	}

	info->job = job;
	info->lport = lport;
	info->rsp_code = ELS_LS_ACC;
	info->nents = job->reply_payload.sg_cnt;
	info->sg = job->reply_payload.sg_list;

	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
				     NULL, info, tov)) {
		kfree(info);
		return -ECOMM;
	}
	return 0;
}

/**
 * fc_lport_ct_request() - Send CT Passthrough request
 * @job:   The BSG Passthrough job
 * @lport: The local port sending the request
 * @did:   The destination FC-ID
 * @tov:   The timeout period to wait for the response
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static int fc_lport_ct_request(struct fc_bsg_job *job,
			       struct fc_lport *lport, u32 did, u32 tov)
{
	struct fc_bsg_info *info;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_ct_req *ct;
	size_t len;

	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
			    job->request_payload.payload_len);
	if (!fp)
		return -ENOMEM;

	/* Copy the CT request payload from the BSG scatterlist. */
	len = job->request_payload.payload_len;
	ct = fc_frame_payload_get(fp, len);

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ct, len);

	/* Build the unsolicited-control frame header for the CT request. */
	fh = fc_frame_header_get(fp);
	fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
	hton24(fh->fh_d_id, did);
	hton24(fh->fh_s_id, lport->port_id);
	fh->fh_type = FC_TYPE_CT;
	hton24(fh->fh_f_ctl, FC_FCTL_REQ);
	fh->fh_cs_ctl = 0;
	fh->fh_df_ctl = 0;
	fh->fh_parm_offset = 0;

	/* Response context; freed by fc_lport_bsg_resp() on completion. */
	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
	if (!info) {
		fc_frame_free(fp);
		return -ENOMEM;
	}

	info->job = job;
	info->lport = lport;
	info->rsp_code = FC_FS_ACC;
	info->nents = job->reply_payload.sg_cnt;
	info->sg = job->reply_payload.sg_list;

	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
				     NULL, info, tov)) {
		kfree(info);
		return -ECOMM;
	}
	return 0;
}

/**
 * fc_lport_bsg_request() - The common entry point for sending
 *                          FC Passthrough requests
 * @job: The BSG passthrough job
 */
int fc_lport_bsg_request(struct fc_bsg_job *job)
{
	struct request *rsp = job->req->next_rq;
	struct Scsi_Host *shost = job->shost;
	struct fc_lport *lport = shost_priv(shost);
	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	int rc = -EINVAL;
	u32 did;

	job->reply->reply_payload_rcv_len = 0;
	if (rsp)
		rsp->resid_len = job->reply_payload.payload_len;

	mutex_lock(&lport->lp_mutex);

	/* Dispatch on the BSG message type. */
	switch
		(job->request->msgcode) {
	case FC_BSG_RPT_ELS:
		rport = job->rport;
		if (!rport)
			break;

		rdata = rport->dd_data;
		rc = fc_lport_els_request(job, lport, rport->port_id,
					  rdata->e_d_tov);
		break;

	case FC_BSG_RPT_CT:
		rport = job->rport;
		if (!rport)
			break;

		rdata = rport->dd_data;
		rc = fc_lport_ct_request(job, lport, rport->port_id,
					 rdata->e_d_tov);
		break;

	case FC_BSG_HST_CT:
		/* CT to a host-specified FC-ID; the directory server is
		 * already known, anything else needs an rport lookup. */
		did = ntoh24(job->request->rqst_data.h_ct.port_id);
		if (did == FC_FID_DIR_SERV)
			rdata = lport->dns_rdata;
		else
			rdata = lport->tt.rport_lookup(lport, did);

		if (!rdata)
			break;

		rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
		break;

	case FC_BSG_HST_ELS_NOLOGIN:
		did = ntoh24(job->request->rqst_data.h_els.port_id);
		rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
	return rc;
}
EXPORT_SYMBOL(fc_lport_bsg_request);