/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * PORT LOCKING NOTES
 *
 * These comments only apply to the 'port code' which consists of the lport,
 * disc and rport blocks.
 *
 * MOTIVATION
 *
 * The lport, disc and rport blocks all have mutexes that are used to protect
 * those objects. The main motivation for these locks is to prevent from
 * having an lport reset just before we send a frame. In that scenario the
 * lport's FID would get set to zero and then we'd send a frame with an
 * invalid SID. We also need to ensure that states don't change unexpectedly
 * while processing another state.
 *
 * HIERARCHY
 *
 * The following hierarchy defines the locking rules. A greater lock
 * may be held before acquiring a lesser lock, but a lesser lock should never
 * be held while attempting to acquire a greater lock. Here is the hierarchy:
 *
 * lport > disc, lport > rport, disc > rport
 *
 * CALLBACKS
 *
 * The callbacks cause complications with this scheme. There is a callback
 * from the rport (to either lport or disc) and a callback from disc
 * (to the lport).
 *
 * As rports exit the rport state machine a callback is made to the owner of
 * the rport to notify success or failure. Since the callback is likely to
 * cause the lport or disc to grab its lock we cannot hold the rport lock
 * while making the callback. To ensure that the rport is not free'd while
 * processing the callback the rport callbacks are serialized through a
 * single-threaded workqueue. An rport would never be free'd while in a
 * callback handler because no other rport work in this queue can be executed
 * at the same time.
 *
 * When discovery succeeds or fails a callback is made to the lport as
 * notification. Currently, successful discovery causes the lport to take no
 * action. A failure will cause the lport to reset. There is likely a circular
 * locking problem with this implementation.
 */

/*
 * LPORT LOCKING
 *
 * The critical sections protected by the lport's mutex are quite broad and
 * may be improved upon in the future. The lport code and its locking doesn't
 * influence the I/O path, so excessive locking doesn't penalize I/O
 * performance.
 *
 * The strategy is to lock whenever processing a request or response. Note
 * that every _enter_* function corresponds to a state change. They generally
 * change the lport's state and then send a request out on the wire. We lock
 * before calling any of these functions to protect that state change. This
 * means that the entry points into the lport block manage the locks while
 * the state machine can transition between states (i.e. _enter_* functions)
 * while always staying protected.
 *
 * When handling responses we also hold the lport mutex broadly. When the
 * lport receives the response frame it locks the mutex and then calls the
 * appropriate handler for the particular response. Generally a response will
 * trigger a state change and so the lock must already be held.
 *
 * Retries also have to consider the locking. The retries occur from a work
 * context and the work function will lock the lport and then retry the state
 * (i.e. _enter_* function).
 */
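/*
 * Illustration only (a sketch, not part of the driver): a typical entry
 * point into the port code takes the lport mutex first and only then calls
 * an _enter_* helper, which may change state and send a frame while the
 * lock is held, e.g.:
 *
 *	mutex_lock(&lport->lp_mutex);
 *	fc_lport_enter_flogi(lport);	<- state change + frame send
 *	mutex_unlock(&lport->lp_mutex);
 *
 * The same ordering applies across objects: take the lport mutex before the
 * disc mutex, and either of those before an rport mutex, never the reverse.
 */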
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <scsi/fc/fc_gs.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
#include <linux/scatterlist.h>

#include "fc_libfc.h"

/* Fabric IDs to use for point-to-point mode, chosen on whims. */
#define FC_LOCAL_PTP_FID_LO	0x010101
#define FC_LOCAL_PTP_FID_HI	0x010102

#define DNS_DELAY	3	/* Discovery delay after RSCN (in seconds) */

static void fc_lport_error(struct fc_lport *, struct fc_frame *);

static void fc_lport_enter_reset(struct fc_lport *);
static void fc_lport_enter_flogi(struct fc_lport *);
static void fc_lport_enter_dns(struct fc_lport *);
static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
static void fc_lport_enter_scr(struct fc_lport *);
static void fc_lport_enter_ready(struct fc_lport *);
static void fc_lport_enter_logo(struct fc_lport *);
static void fc_lport_enter_fdmi(struct fc_lport *lport);
static void fc_lport_enter_ms(struct fc_lport *, enum fc_lport_state);

static const char *fc_lport_state_names[] = {
	[LPORT_ST_DISABLED] = "disabled",
	[LPORT_ST_FLOGI] =    "FLOGI",
	[LPORT_ST_DNS] =      "dNS",
	[LPORT_ST_RNN_ID] =   "RNN_ID",
	[LPORT_ST_RSNN_NN] =  "RSNN_NN",
	[LPORT_ST_RSPN_ID] =  "RSPN_ID",
	[LPORT_ST_RFT_ID] =   "RFT_ID",
	[LPORT_ST_RFF_ID] =   "RFF_ID",
	[LPORT_ST_FDMI] =     "FDMI",
	[LPORT_ST_RHBA] =     "RHBA",
	[LPORT_ST_RPA] =      "RPA",
	[LPORT_ST_DHBA] =     "DHBA",
	[LPORT_ST_DPRT] =     "DPRT",
	[LPORT_ST_SCR] =      "SCR",
	[LPORT_ST_READY] =    "Ready",
	[LPORT_ST_LOGO] =     "LOGO",
	[LPORT_ST_RESET] =    "reset",
};

/**
 * struct fc_bsg_info - FC Passthrough management structure
 * @job: The passthrough job
 * @lport: The local port to pass through a command
 * @rsp_code: The expected response code
 * @sg: job->reply_payload.sg_list
 * @nents: job->reply_payload.sg_cnt
 * @offset: The offset into the response data
 */
struct fc_bsg_info {
	struct fc_bsg_job *job;
	struct fc_lport *lport;
	u16 rsp_code;
	struct scatterlist *sg;
	u32 nents;
	size_t offset;
};

/**
 * fc_frame_drop() - Dummy frame handler
 * @lport: The local port the frame was received on
 * @fp: The received frame
 */
static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
{
	fc_frame_free(fp);
	return 0;
}

/**
 * fc_lport_rport_callback() - Event handler for rport events
 * @lport: The lport which is receiving the event
 * @rdata: private remote port data
 * @event: The event that occurred
 *
 * Locking Note: The rport lock should not be held when calling
 * this function.
 */
static void fc_lport_rport_callback(struct fc_lport *lport,
				    struct fc_rport_priv *rdata,
				    enum fc_rport_event event)
{
	FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
		     rdata->ids.port_id);

	mutex_lock(&lport->lp_mutex);
	switch (event) {
	case RPORT_EV_READY:
		if (lport->state == LPORT_ST_DNS) {
			lport->dns_rdata = rdata;
			fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
		} else if (lport->state == LPORT_ST_FDMI) {
			lport->ms_rdata = rdata;
			fc_lport_enter_ms(lport, LPORT_ST_DHBA);
		} else {
			FC_LPORT_DBG(lport, "Received a READY event "
				     "on port (%6.6x) for the directory "
				     "server, but the lport is not "
				     "in the DNS or FDMI state, it's in the "
				     "%d state", rdata->ids.port_id,
				     lport->state);
			lport->tt.rport_logoff(rdata);
		}
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		if (rdata->ids.port_id == FC_FID_DIR_SERV)
			lport->dns_rdata = NULL;
		else if (rdata->ids.port_id == FC_FID_MGMT_SERV)
			lport->ms_rdata = NULL;
		break;
	case RPORT_EV_NONE:
		break;
	}
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_state() - Return a string which represents the lport's state
 * @lport: The lport whose state is to be converted to a string
 */
static const char *fc_lport_state(struct fc_lport *lport)
{
	const char *cp;

	cp = fc_lport_state_names[lport->state];
	if (!cp)
		cp = "unknown";
	return cp;
}

/**
 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
 * @lport: The lport to attach the ptp rport to
 * @remote_fid: The FID of the ptp rport
 * @remote_wwpn: The WWPN of the ptp rport
 * @remote_wwnn: The WWNN of the ptp rport
 */
static void fc_lport_ptp_setup(struct fc_lport *lport,
			       u32 remote_fid, u64 remote_wwpn,
			       u64 remote_wwnn)
{
	mutex_lock(&lport->disc.disc_mutex);
	if (lport->ptp_rdata) {
		lport->tt.rport_logoff(lport->ptp_rdata);
		kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
	}
	lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
	kref_get(&lport->ptp_rdata->kref);
	lport->ptp_rdata->ids.port_name = remote_wwpn;
	lport->ptp_rdata->ids.node_name = remote_wwnn;
	mutex_unlock(&lport->disc.disc_mutex);

	lport->tt.rport_login(lport->ptp_rdata);

	fc_lport_enter_ready(lport);
}

/**
 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
 * @shost: The SCSI host whose port state is to be determined
 */
void fc_get_host_port_state(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	mutex_lock(&lport->lp_mutex);
	if (!lport->link_up)
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
	else
		switch (lport->state) {
		case LPORT_ST_READY:
			fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
			break;
		default:
			fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		}
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_get_host_port_state);

/**
 * fc_get_host_speed() - Return the speed of the given Scsi_Host
 * @shost: The SCSI host whose port speed is to be determined
 */
void fc_get_host_speed(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	fc_host_speed(shost) = lport->link_speed;
}
EXPORT_SYMBOL(fc_get_host_speed);
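/*
 * Illustration only (a sketch of how a low-level driver is assumed to wire
 * these helpers up; the field names come from scsi_transport_fc and the
 * template itself belongs to the LLD, not to libfc):
 *
 *	static struct fc_function_template example_transport_template = {
 *		.show_host_port_state	= 1,
 *		.get_host_port_state	= fc_get_host_port_state,
 *		.show_host_speed	= 1,
 *		.get_host_speed		= fc_get_host_speed,
 *		.get_fc_host_stats	= fc_get_host_stats,
 *	};
 */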
/**
 * fc_get_host_stats() - Return the Scsi_Host's statistics
 * @shost: The SCSI host whose statistics are to be returned
 */
struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
{
	struct fc_host_statistics *fc_stats;
	struct fc_lport *lport = shost_priv(shost);
	unsigned int cpu;
	u64 fcp_in_bytes = 0;
	u64 fcp_out_bytes = 0;

	fc_stats = &lport->host_stats;
	memset(fc_stats, 0, sizeof(struct fc_host_statistics));

	fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ;

	for_each_possible_cpu(cpu) {
		struct fc_stats *stats;

		stats = per_cpu_ptr(lport->stats, cpu);

		fc_stats->tx_frames += stats->TxFrames;
		fc_stats->tx_words += stats->TxWords;
		fc_stats->rx_frames += stats->RxFrames;
		fc_stats->rx_words += stats->RxWords;
		fc_stats->error_frames += stats->ErrorFrames;
		fc_stats->invalid_crc_count += stats->InvalidCRCCount;
		fc_stats->fcp_input_requests += stats->InputRequests;
		fc_stats->fcp_output_requests += stats->OutputRequests;
		fc_stats->fcp_control_requests += stats->ControlRequests;
		fcp_in_bytes += stats->InputBytes;
		fcp_out_bytes += stats->OutputBytes;
		fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails;
		fc_stats->fcp_packet_aborts += stats->FcpPktAborts;
		fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails;
		fc_stats->link_failure_count += stats->LinkFailureCount;
	}
	fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
	fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
	fc_stats->lip_count = -1;
	fc_stats->nos_count = -1;
	fc_stats->loss_of_sync_count = -1;
	fc_stats->loss_of_signal_count = -1;
	fc_stats->prim_seq_protocol_err_count = -1;
	fc_stats->dumped_frames = -1;

	/* update exches stats */
	fc_exch_update_stats(lport);

	return fc_stats;
}
EXPORT_SYMBOL(fc_get_host_stats);
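/*
 * Illustration only: the per-CPU counters summed above are assumed to be
 * updated lock-free on the fast path, roughly as the libfc transmit/receive
 * code does, e.g.:
 *
 *	struct fc_stats *stats;
 *
 *	stats = per_cpu_ptr(lport->stats, get_cpu());
 *	stats->TxFrames++;
 *	stats->TxWords += wlen;
 *	put_cpu();
 *
 * Summing over all possible CPUs in fc_get_host_stats() then needs no lock.
 */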
/**
 * fc_lport_flogi_fill() - Fill in FLOGI command for request
 * @lport: The local port the FLOGI is for
 * @flogi: The FLOGI command
 * @op: The opcode
 */
static void fc_lport_flogi_fill(struct fc_lport *lport,
				struct fc_els_flogi *flogi,
				unsigned int op)
{
	struct fc_els_csp *sp;
	struct fc_els_cssp *cp;

	memset(flogi, 0, sizeof(*flogi));
	flogi->fl_cmd = (u8) op;
	put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
	put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
	sp = &flogi->fl_csp;
	sp->sp_hi_ver = 0x20;
	sp->sp_lo_ver = 0x20;
	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
	sp->sp_bb_data = htons((u16) lport->mfs);
	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
	if (op != ELS_FLOGI) {
		sp->sp_features = htons(FC_SP_FT_CIRO);
		sp->sp_tot_seq = htons(255);	/* seq. we accept */
		sp->sp_rel_off = htons(0x1f);
		sp->sp_e_d_tov = htonl(lport->e_d_tov);

		cp->cp_rdfs = htons((u16) lport->mfs);
		cp->cp_con_seq = htons(255);
		cp->cp_open_seq = 1;
	}
}

/**
 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
 * @lport: The local port to add a new FC-4 type to
 * @type: The new FC-4 type
 */
static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
{
	__be32 *mp;

	mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
	*mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
}

/**
 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
 * @lport: Fibre Channel local port receiving the RLIR
 * @fp: The RLIR request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
{
	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
		     fc_lport_state(lport));

	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
	fc_frame_free(fp);
}

/**
 * fc_lport_recv_echo_req() - Handle received ECHO request
 * @lport: The local port receiving the ECHO
 * @in_fp: ECHO request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_echo_req(struct fc_lport *lport,
				   struct fc_frame *in_fp)
{
	struct fc_frame *fp;
	unsigned int len;
	void *pp;
	void *dp;

	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
		     fc_lport_state(lport));

	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(in_fp, len);

	if (len < sizeof(__be32))
		len = sizeof(__be32);

	fp = fc_frame_alloc(lport, len);
	if (fp) {
		dp = fc_frame_payload_get(fp, len);
		memcpy(dp, pp, len);
		*((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
		fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
		lport->tt.frame_send(lport, fp);
	}
	fc_frame_free(in_fp);
}

/**
 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
 * @lport: The local port receiving the RNID
 * @in_fp: The RNID request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_rnid_req(struct fc_lport *lport,
				   struct fc_frame *in_fp)
{
	struct fc_frame *fp;
	struct fc_els_rnid *req;
	struct {
		struct fc_els_rnid_resp rnid;
		struct fc_els_rnid_cid cid;
		struct fc_els_rnid_gen gen;
	} *rp;
	struct fc_seq_els_data rjt_data;
	u8 fmt;
	size_t len;

	FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
		     fc_lport_state(lport));

	req = fc_frame_payload_get(in_fp, sizeof(*req));
	if (!req) {
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
	} else {
		fmt = req->rnid_fmt;
		len = sizeof(*rp);
		if (fmt != ELS_RNIDF_GEN ||
		    ntohl(lport->rnid_gen.rnid_atype) == 0) {
			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
			len -= sizeof(rp->gen);
		}
		fp = fc_frame_alloc(lport, len);
		if (fp) {
			rp = fc_frame_payload_get(fp, len);
			memset(rp, 0, len);
			rp->rnid.rnid_cmd = ELS_LS_ACC;
			rp->rnid.rnid_fmt = fmt;
			rp->rnid.rnid_cid_len = sizeof(rp->cid);
			rp->cid.rnid_wwpn = htonll(lport->wwpn);
			rp->cid.rnid_wwnn = htonll(lport->wwnn);
			if (fmt == ELS_RNIDF_GEN) {
				rp->rnid.rnid_sid_len = sizeof(rp->gen);
				memcpy(&rp->gen, &lport->rnid_gen,
				       sizeof(rp->gen));
			}
			fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
			lport->tt.frame_send(lport, fp);
		}
	}
	fc_frame_free(in_fp);
}

/**
 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
 * @lport: The local port receiving the LOGO
 * @fp: The LOGO request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
	fc_lport_enter_reset(lport);
	fc_frame_free(fp);
}

/**
 * fc_fabric_login() - Start the lport state machine
 * @lport: The local port that should log into the fabric
 *
 * Locking Note: This function should not be called
 * with the lport lock held.
 */
int fc_fabric_login(struct fc_lport *lport)
{
	int rc = -1;

	mutex_lock(&lport->lp_mutex);
	if (lport->state == LPORT_ST_DISABLED ||
	    lport->state == LPORT_ST_LOGO) {
		fc_lport_state_enter(lport, LPORT_ST_RESET);
		fc_lport_enter_reset(lport);
		rc = 0;
	}
	mutex_unlock(&lport->lp_mutex);

	return rc;
}
EXPORT_SYMBOL(fc_fabric_login);

/**
 * __fc_linkup() - Handler for transport linkup events
 * @lport: The lport whose link is up
 *
 * Locking: must be called with the lp_mutex held
 */
void __fc_linkup(struct fc_lport *lport)
{
	if (!lport->link_up) {
		lport->link_up = 1;

		if (lport->state == LPORT_ST_RESET)
			fc_lport_enter_flogi(lport);
	}
}

/**
 * fc_linkup() - Handler for transport linkup events
 * @lport: The local port whose link is up
 */
void fc_linkup(struct fc_lport *lport)
{
	printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
	       lport->host->host_no, lport->port_id);

	mutex_lock(&lport->lp_mutex);
	__fc_linkup(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkup);

/**
 * __fc_linkdown() - Handler for transport linkdown events
 * @lport: The lport whose link is down
 *
 * Locking: must be called with the lp_mutex held
 */
void __fc_linkdown(struct fc_lport *lport)
{
	if (lport->link_up) {
		lport->link_up = 0;
		fc_lport_enter_reset(lport);
		lport->tt.fcp_cleanup(lport);
	}
}

/**
 * fc_linkdown() - Handler for transport linkdown events
 * @lport: The local port whose link is down
 */
void fc_linkdown(struct fc_lport *lport)
{
	printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
	       lport->host->host_no, lport->port_id);

	mutex_lock(&lport->lp_mutex);
	__fc_linkdown(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkdown);

/**
 * fc_fabric_logoff() - Logout of the fabric
 * @lport: The local port to logoff the fabric
 *
 * Return value:
 *	0 for success, -1 for failure
 */
int fc_fabric_logoff(struct fc_lport *lport)
{
	lport->tt.disc_stop_final(lport);
	mutex_lock(&lport->lp_mutex);
	if (lport->dns_rdata)
		lport->tt.rport_logoff(lport->dns_rdata);
	mutex_unlock(&lport->lp_mutex);
	lport->tt.rport_flush_queue();
	mutex_lock(&lport->lp_mutex);
	fc_lport_enter_logo(lport);
	mutex_unlock(&lport->lp_mutex);
	cancel_delayed_work_sync(&lport->retry_work);
	return 0;
}
EXPORT_SYMBOL(fc_fabric_logoff);
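/*
 * Illustration only: a low-level driver is assumed to drive the exported
 * entry points above roughly in this order (error handling omitted):
 *
 *	fc_fabric_login(lport);		once the lport has been configured
 *	fc_linkup(lport);		from the LLD's link-up notification;
 *					kicks off FLOGI via the state machine
 *	...
 *	fc_linkdown(lport);		from the LLD's link-down notification
 *	fc_fabric_logoff(lport);	at teardown, before fc_lport_destroy()
 */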
/**
 * fc_lport_destroy() - Unregister a fc_lport
 * @lport: The local port to unregister
 *
 * Note:
 * Exit routine for an fc_lport instance. It cleans up all
 * allocated memory and frees up other system resources.
 */
int fc_lport_destroy(struct fc_lport *lport)
{
	mutex_lock(&lport->lp_mutex);
	lport->state = LPORT_ST_DISABLED;
	lport->link_up = 0;
	lport->tt.frame_send = fc_frame_drop;
	mutex_unlock(&lport->lp_mutex);

	lport->tt.fcp_abort_io(lport);
	lport->tt.disc_stop_final(lport);
	lport->tt.exch_mgr_reset(lport, 0, 0);
	cancel_delayed_work_sync(&lport->retry_work);
	fc_fc4_del_lport(lport);
	return 0;
}
EXPORT_SYMBOL(fc_lport_destroy);

/**
 * fc_set_mfs() - Set the maximum frame size for a local port
 * @lport: The local port to set the MFS for
 * @mfs: The new MFS
 */
int fc_set_mfs(struct fc_lport *lport, u32 mfs)
{
	unsigned int old_mfs;
	int rc = -EINVAL;

	mutex_lock(&lport->lp_mutex);

	old_mfs = lport->mfs;

	if (mfs >= FC_MIN_MAX_FRAME) {
		mfs &= ~3;
		if (mfs > FC_MAX_FRAME)
			mfs = FC_MAX_FRAME;
		mfs -= sizeof(struct fc_frame_header);
		lport->mfs = mfs;
		rc = 0;
	}

	if (!rc && mfs < old_mfs)
		fc_lport_enter_reset(lport);

	mutex_unlock(&lport->lp_mutex);

	return rc;
}
EXPORT_SYMBOL(fc_set_mfs);
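/*
 * Illustration only: an FCoE-style LLD is assumed to derive the frame size
 * from its netdev MTU before calling fc_set_mfs(), e.g.:
 *
 *	u32 mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
 *				 sizeof(struct fcoe_crc_eof));
 *	fc_set_mfs(lport, mfs);
 *
 * The value passed in covers the FC header plus payload; fc_set_mfs() rounds
 * it down to a multiple of four, caps it at FC_MAX_FRAME and strips the
 * frame header before storing the payload size in lport->mfs.
 */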
/**
 * fc_lport_disc_callback() - Callback for discovery events
 * @lport: The local port receiving the event
 * @event: The discovery event
 */
static void fc_lport_disc_callback(struct fc_lport *lport,
				   enum fc_disc_event event)
{
	switch (event) {
	case DISC_EV_SUCCESS:
		FC_LPORT_DBG(lport, "Discovery succeeded\n");
		break;
	case DISC_EV_FAILED:
		printk(KERN_ERR "host%d: libfc: "
		       "Discovery failed for port (%6.6x)\n",
		       lport->host->host_no, lport->port_id);
		mutex_lock(&lport->lp_mutex);
		fc_lport_enter_reset(lport);
		mutex_unlock(&lport->lp_mutex);
		break;
	case DISC_EV_NONE:
		WARN_ON(1);
		break;
	}
}

/**
 * fc_lport_enter_ready() - Enter the ready state and start discovery
 * @lport: The local port that is ready
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ready(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered READY from state %s\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_READY);
	if (lport->vport)
		fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
	fc_vports_linkchange(lport);

	if (!lport->ptp_rdata)
		lport->tt.disc_start(fc_lport_disc_callback, lport);
}

/**
 * fc_lport_set_port_id() - set the local port Port ID
 * @lport: The local port which will have its Port ID set.
 * @port_id: The new port ID.
 * @fp: The frame containing the incoming request, or NULL.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
				 struct fc_frame *fp)
{
	if (port_id)
		printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
		       lport->host->host_no, port_id);

	lport->port_id = port_id;

	/* Update the fc_host */
	fc_host_port_id(lport->host) = port_id;

	if (lport->tt.lport_set_port_id)
		lport->tt.lport_set_port_id(lport, port_id, fp);
}

/**
 * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
 * @lport: The local port which will have its Port ID set.
 * @port_id: The new port ID.
 *
 * Called by the lower-level driver when transport sets the local port_id.
 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
 * discovery to be skipped.
 */
void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
{
	mutex_lock(&lport->lp_mutex);

	fc_lport_set_port_id(lport, port_id, NULL);

	switch (lport->state) {
	case LPORT_ST_RESET:
	case LPORT_ST_FLOGI:
		if (port_id)
			fc_lport_enter_ready(lport);
		break;
	default:
		break;
	}
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_set_local_id);

/**
 * fc_lport_recv_flogi_req() - Receive a FLOGI request
 * @lport: The local port that received the request
 * @rx_fp: The FLOGI frame
 *
 * A received FLOGI request indicates a point-to-point connection.
 * Accept it with the common service parameters indicating our N port.
 * Set up to do a PLOGI if we have the higher-number WWPN.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_flogi_req(struct fc_lport *lport,
				    struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_els_flogi *flp;
	struct fc_els_flogi *new_flp;
	u64 remote_wwpn;
	u32 remote_fid;
	u32 local_fid;

	FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
		     fc_lport_state(lport));

	remote_fid = fc_frame_sid(rx_fp);
	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
	if (!flp)
		goto out;
	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
	if (remote_wwpn == lport->wwpn) {
		printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
		       "with same WWPN %16.16llx\n",
		       lport->host->host_no, remote_wwpn);
		goto out;
	}
	FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);

	/*
	 * XXX what is the right thing to do for FIDs?
	 * The originator might expect our S_ID to be 0xfffffe.
	 * But if so, both of us could end up with the same FID.
	 */
	local_fid = FC_LOCAL_PTP_FID_LO;
	if (remote_wwpn < lport->wwpn) {
		local_fid = FC_LOCAL_PTP_FID_HI;
		if (!remote_fid || remote_fid == local_fid)
			remote_fid = FC_LOCAL_PTP_FID_LO;
	} else if (!remote_fid) {
		remote_fid = FC_LOCAL_PTP_FID_HI;
	}

	fc_lport_set_port_id(lport, local_fid, rx_fp);

	fp = fc_frame_alloc(lport, sizeof(*flp));
	if (fp) {
		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
		fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
		new_flp->fl_cmd = (u8) ELS_LS_ACC;

		/*
		 * Send the response. If this fails, the originator should
		 * repeat the sequence.
		 */
		fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
		fh = fc_frame_header_get(fp);
		hton24(fh->fh_s_id, local_fid);
		hton24(fh->fh_d_id, remote_fid);
		lport->tt.frame_send(lport, fp);

	} else {
		fc_lport_error(lport, fp);
	}
	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
			   get_unaligned_be64(&flp->fl_wwnn));
out:
	fc_frame_free(rx_fp);
}

/**
 * fc_lport_recv_els_req() - The generic lport ELS request handler
 * @lport: The local port that received the request
 * @fp: The request frame
 *
 * This function will see if the lport handles the request or
 * if an rport should handle the request.
 *
 * Locking Note: This function should not be called with the lport
 * lock held because it will grab the lock.
 */
static void fc_lport_recv_els_req(struct fc_lport *lport,
				  struct fc_frame *fp)
{
	void (*recv)(struct fc_lport *, struct fc_frame *);

	mutex_lock(&lport->lp_mutex);

	/*
	 * Handle special ELS cases like FLOGI, LOGO, and
	 * RSCN here. These don't require a session.
	 * Even if we had a session, it might not be ready.
	 */
	if (!lport->link_up)
		fc_frame_free(fp);
	else {
		/*
		 * Check opcode.
		 */
		recv = lport->tt.rport_recv_req;
		switch (fc_frame_payload_op(fp)) {
		case ELS_FLOGI:
			if (!lport->point_to_multipoint)
				recv = fc_lport_recv_flogi_req;
			break;
		case ELS_LOGO:
			if (fc_frame_sid(fp) == FC_FID_FLOGI)
				recv = fc_lport_recv_logo_req;
			break;
		case ELS_RSCN:
			recv = lport->tt.disc_recv_req;
			break;
		case ELS_ECHO:
			recv = fc_lport_recv_echo_req;
			break;
		case ELS_RLIR:
			recv = fc_lport_recv_rlir_req;
			break;
		case ELS_RNID:
			recv = fc_lport_recv_rnid_req;
			break;
		}

		recv(lport, fp);
	}
	mutex_unlock(&lport->lp_mutex);
}

static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
			     const struct fc_els_spp *spp_in,
			     struct fc_els_spp *spp_out)
{
	return FC_SPP_RESP_INVL;
}

struct fc4_prov fc_lport_els_prov = {
	.prli = fc_lport_els_prli,
	.recv = fc_lport_recv_els_req,
};

/**
 * fc_lport_recv_req() - The generic lport request handler
 * @lport: The lport that received the request
 * @fp: The frame the request is in
 *
 * Locking Note: This function should not be called with the lport
 * lock held because it may grab the lock.
 */
static void fc_lport_recv_req(struct fc_lport *lport,
			      struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp = fr_seq(fp);
	struct fc4_prov *prov;

	/*
	 * Use RCU read lock and module_lock to be sure module doesn't
	 * deregister and get unloaded while we're calling it.
	 * try_module_get() is inlined and accepts a NULL parameter.
	 * Only ELSes and FCP target ops should come through here.
	 * The locking is unfortunate, and a better scheme is being sought.
	 */

	rcu_read_lock();
	if (fh->fh_type >= FC_FC4_PROV_SIZE)
		goto drop;
	prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
	if (!prov || !try_module_get(prov->module))
		goto drop;
	rcu_read_unlock();
	prov->recv(lport, fp);
	module_put(prov->module);
	return;
drop:
	rcu_read_unlock();
	FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
	fc_frame_free(fp);
	if (sp)
		lport->tt.exch_done(sp);
}
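/*
 * Illustration only: a passive FC-4 provider such as an FCP target module is
 * assumed to plug into the dispatch above by registering a struct fc4_prov
 * for its FC-4 type, roughly like this (the example handler name is
 * hypothetical):
 *
 *	static struct fc4_prov example_prov = {
 *		.module	= THIS_MODULE,
 *		.recv	= example_recv_fcp_req,
 *	};
 *
 *	fc_fc4_register_provider(FC_TYPE_FCP, &example_prov);
 *
 * fc_lport_recv_req() then finds the provider via fc_passive_prov[] under
 * rcu_read_lock() and pins its module across the ->recv() call.
 */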
/**
 * fc_lport_reset() - Reset a local port
 * @lport: The local port which should be reset
 *
 * Locking Note: This function should not be called with the
 * lport lock held.
 */
int fc_lport_reset(struct fc_lport *lport)
{
	cancel_delayed_work_sync(&lport->retry_work);
	mutex_lock(&lport->lp_mutex);
	fc_lport_enter_reset(lport);
	mutex_unlock(&lport->lp_mutex);
	return 0;
}
EXPORT_SYMBOL(fc_lport_reset);

/**
 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
 * @lport: The local port to be reset
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_reset_locked(struct fc_lport *lport)
{
	if (lport->dns_rdata)
		lport->tt.rport_logoff(lport->dns_rdata);

	if (lport->ptp_rdata) {
		lport->tt.rport_logoff(lport->ptp_rdata);
		kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
		lport->ptp_rdata = NULL;
	}

	lport->tt.disc_stop(lport);

	lport->tt.exch_mgr_reset(lport, 0, 0);
	fc_host_fabric_name(lport->host) = 0;

	if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
		fc_lport_set_port_id(lport, 0, NULL);
}

/**
 * fc_lport_enter_reset() - Reset the local port
 * @lport: The local port to be reset
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_reset(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
		     fc_lport_state(lport));

	if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
		return;

	if (lport->vport) {
		if (lport->link_up)
			fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
		else
			fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
	}
	fc_lport_state_enter(lport, LPORT_ST_RESET);
	fc_host_post_event(lport->host, fc_get_event_number(),
			   FCH_EVT_LIPRESET, 0);
	fc_vports_linkchange(lport);
	fc_lport_reset_locked(lport);
	if (lport->link_up)
		fc_lport_enter_flogi(lport);
}

/**
 * fc_lport_enter_disabled() - Disable the local port
 * @lport: The local port to be reset
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_disabled(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
	fc_vports_linkchange(lport);
	fc_lport_reset_locked(lport);
}

/**
 * fc_lport_error() - Handler for any errors
 * @lport: The local port that the error was on
 * @fp: The error code encoded in a frame pointer
 *
 * If the error was caused by a resource allocation failure
 * then wait for half a second and retry, otherwise retry
 * after the e_d_tov time.
 */
static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
{
	unsigned long delay = 0;
	FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
		     IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport),
		     lport->retry_count);

	if (PTR_ERR(fp) == -FC_EX_CLOSED)
		return;

	/*
	 * Memory allocation failure, or the exchange timed out
	 * or we received LS_RJT.
	 * Retry after delay
	 */
	if (lport->retry_count < lport->max_retry_count) {
		lport->retry_count++;
		if (!fp)
			delay = msecs_to_jiffies(500);
		else
			delay = msecs_to_jiffies(lport->e_d_tov);

		schedule_delayed_work(&lport->retry_work, delay);
	} else
		fc_lport_enter_reset(lport);
}
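/*
 * Worked example (illustrative numbers): with max_retry_count = 3 and
 * e_d_tov = 2000 ms, a failed FLOGI exchange is retried by fc_lport_timeout()
 * 2 s after each error response or timeout, or 500 ms after a frame
 * allocation failure (where fp is NULL); after the third failed retry
 * fc_lport_error() gives up and resets the lport via fc_lport_enter_reset().
 */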
/**
 * fc_lport_ns_resp() - Handle response to a name server
 *			registration exchange
 * @sp: current sequence in exchange
 * @fp: response frame
 * @lp_arg: Fibre Channel host port instance
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
			     void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_ct_hdr *ct;

	FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
		FC_LPORT_DBG(lport, "Received a name server response, "
			     "but in state %s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	ct = fc_frame_payload_get(fp, sizeof(*ct));

	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
	    ct->ct_fs_type == FC_FST_DIR &&
	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
	    ntohs(ct->ct_cmd) == FC_FS_ACC)
		switch (lport->state) {
		case LPORT_ST_RNN_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
			break;
		case LPORT_ST_RSNN_NN:
			fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
			break;
		case LPORT_ST_RSPN_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
			break;
		case LPORT_ST_RFT_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
			break;
		case LPORT_ST_RFF_ID:
			if (lport->fdmi_enabled)
				fc_lport_enter_fdmi(lport);
			else
				fc_lport_enter_scr(lport);
			break;
		default:
			/* should have already been caught by state checks */
			break;
		}
	else
		fc_lport_error(lport, fp);
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_ms_resp() - Handle response to a management server
 *			exchange
 * @sp: current sequence in exchange
 * @fp: response frame
 * @lp_arg: Fibre Channel host port instance
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
			     void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_ct_hdr *ct;

	FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state < LPORT_ST_RHBA || lport->state > LPORT_ST_DPRT) {
		FC_LPORT_DBG(lport, "Received a management server response, "
			     "but in state %s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	ct = fc_frame_payload_get(fp, sizeof(*ct));

	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
	    ct->ct_fs_type == FC_FST_MGMT &&
	    ct->ct_fs_subtype == FC_FDMI_SUBTYPE) {
		FC_LPORT_DBG(lport, "Received a management server response, "
			     "reason=%d explain=%d\n",
			     ct->ct_reason,
			     ct->ct_explan);

		switch (lport->state) {
		case LPORT_ST_RHBA:
			if (ntohs(ct->ct_cmd) == FC_FS_ACC)
				fc_lport_enter_ms(lport, LPORT_ST_RPA);
			else /* Error Skip RPA */
				fc_lport_enter_scr(lport);
			break;
		case LPORT_ST_RPA:
			fc_lport_enter_scr(lport);
			break;
		case LPORT_ST_DPRT:
			fc_lport_enter_ms(lport, LPORT_ST_RHBA);
			break;
		case LPORT_ST_DHBA:
			fc_lport_enter_ms(lport, LPORT_ST_DPRT);
			break;
		default:
			/* should have already been caught by state checks */
			break;
		}
	} else {
		/* Invalid Frame? */
		fc_lport_error(lport, fp);
	}
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
 * @sp: current sequence in SCR exchange
 * @fp: response frame
 * @lp_arg: Fibre Channel lport instance that sent the registration request
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error
 * and then unlock the lport.
 */
static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_SCR) {
		FC_LPORT_DBG(lport, "Received a SCR response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_ready(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_enter_scr() - Send a SCR (State Change Register) request
 * @lport: The local port to register for state changes
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_scr(struct fc_lport *lport)
{
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_SCR);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
				  fc_lport_scr_resp, lport,
				  2 * lport->r_a_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_enter_ns() - register some object with the name server
 * @lport: Fibre Channel local port to register
 * @state: The name server registration state to enter
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
{
	struct fc_frame *fp;
	enum fc_ns_req cmd;
	int size = sizeof(struct fc_ct_hdr);
	size_t len;

	FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
		     fc_lport_state_names[state],
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, state);

	switch (state) {
	case LPORT_ST_RNN_ID:
		cmd = FC_NS_RNN_ID;
		size += sizeof(struct fc_ns_rn_id);
		break;
	case LPORT_ST_RSNN_NN:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSNN_NN;
		size += sizeof(struct fc_ns_rsnn) + len;
		break;
	case LPORT_ST_RSPN_ID:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSPN_ID;
		size += sizeof(struct fc_ns_rspn) + len;
		break;
	case LPORT_ST_RFT_ID:
		cmd = FC_NS_RFT_ID;
		size += sizeof(struct fc_ns_rft);
		break;
	case LPORT_ST_RFF_ID:
		cmd = FC_NS_RFF_ID;
		size += sizeof(struct fc_ns_rff_id);
		break;
	default:
		fc_lport_error(lport, NULL);
		return;
	}

	fp = fc_frame_alloc(lport, size);
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
				  fc_lport_ns_resp,
				  lport, 3 * lport->r_a_tov))
		fc_lport_error(lport, fp);
}

static struct fc_rport_operations fc_lport_rport_ops = {
	.event_callback = fc_lport_rport_callback,
};

/**
 * fc_lport_enter_dns() - Create a fc_rport for the name server
 * @lport: The local port requesting a remote port for the name server
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_dns(struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;

	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DNS);

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
	mutex_unlock(&lport->disc.disc_mutex);
	if (!rdata)
		goto err;

	rdata->ops = &fc_lport_rport_ops;
	lport->tt.rport_login(rdata);
	return;

err:
	fc_lport_error(lport, NULL);
}

/**
 * fc_lport_enter_ms() - management server commands
 * @lport: Fibre Channel local port to register
 * @state: The management server state to enter
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
{
	struct fc_frame *fp;
	enum fc_fdmi_req cmd;
	int size = sizeof(struct fc_ct_hdr);
	size_t len;
	int numattrs;

	FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
		     fc_lport_state_names[state],
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, state);

	switch (state) {
	case LPORT_ST_RHBA:
		cmd = FC_FDMI_RHBA;
		/* Number of HBA Attributes */
		numattrs = 10;
		len = sizeof(struct fc_fdmi_rhba);
		len -= sizeof(struct fc_fdmi_attr_entry);
		len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
		len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
		len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
		len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
		len += FC_FDMI_HBA_ATTR_MODEL_LEN;
		len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
		len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
		len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
		len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
		len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
		len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;

		size += len;
		break;
	case LPORT_ST_RPA:
		cmd = FC_FDMI_RPA;
		/* Number of Port Attributes */
		numattrs = 6;
		len = sizeof(struct fc_fdmi_rpa);
		len -= sizeof(struct fc_fdmi_attr_entry);
		len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
		len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
		len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
		len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
		len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
		len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
		len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;

		size += len;
		break;
	case LPORT_ST_DPRT:
		cmd = FC_FDMI_DPRT;
		len = sizeof(struct fc_fdmi_dprt);
		size += len;
		break;
	case LPORT_ST_DHBA:
		cmd = FC_FDMI_DHBA;
		len = sizeof(struct fc_fdmi_dhba);
		size += len;
		break;
	default:
		fc_lport_error(lport, NULL);
		return;
	}

	FC_LPORT_DBG(lport, "Cmd=0x%x Len %d size %d\n",
		     cmd, (int)len, size);
	fp = fc_frame_alloc(lport, size);
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_MGMT_SERV, fp, cmd,
				  fc_lport_ms_resp,
				  lport, 3 * lport->r_a_tov))
		fc_lport_error(lport, fp);
}

/**
 * fc_lport_enter_fdmi() - Create a fc_rport for the management server
 * @lport: The local port requesting a remote port for the management server
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_fdmi(struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;

	FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_FDMI);

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
	mutex_unlock(&lport->disc.disc_mutex);
	if (!rdata)
		goto err;

	rdata->ops = &fc_lport_rport_ops;
	lport->tt.rport_login(rdata);
	return;

err:
	fc_lport_error(lport, NULL);
}

/**
 * fc_lport_timeout() - Handler for the retry_work timer
 * @work: The work struct of the local port
 */
static void fc_lport_timeout(struct work_struct *work)
{
	struct fc_lport *lport =
		container_of(work, struct fc_lport,
			     retry_work.work);

	mutex_lock(&lport->lp_mutex);

	switch (lport->state) {
	case LPORT_ST_DISABLED:
		break;
	case LPORT_ST_READY:
		break;
	case LPORT_ST_RESET:
		break;
	case LPORT_ST_FLOGI:
		fc_lport_enter_flogi(lport);
		break;
	case LPORT_ST_DNS:
		fc_lport_enter_dns(lport);
		break;
	case LPORT_ST_RNN_ID:
	case LPORT_ST_RSNN_NN:
	case LPORT_ST_RSPN_ID:
	case LPORT_ST_RFT_ID:
	case LPORT_ST_RFF_ID:
		fc_lport_enter_ns(lport, lport->state);
		break;
	case LPORT_ST_FDMI:
		fc_lport_enter_fdmi(lport);
		break;
	case LPORT_ST_RHBA:
	case LPORT_ST_RPA:
	case LPORT_ST_DHBA:
	case LPORT_ST_DPRT:
		FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n",
			     fc_lport_state(lport));
		/* fall thru */
	case LPORT_ST_SCR:
		fc_lport_enter_scr(lport);
		break;
	case LPORT_ST_LOGO:
		fc_lport_enter_logo(lport);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_logo_resp() - Handle response to LOGO request
 * @sp: The sequence that the LOGO was on
 * @fp: The LOGO frame
 * @lp_arg: The lport that sent the LOGO request
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_LOGO) {
		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_disabled(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_logo_resp);

/**
 * fc_lport_enter_logo() - Logout of the fabric
 * @lport: The local port to be logged out
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_logo(struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_els_logo *logo;

	FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_LOGO);
	fc_vports_linkchange(lport);

	fp = fc_frame_alloc(lport, sizeof(*logo));
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
				  fc_lport_logo_resp, lport,
				  2 * lport->r_a_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_flogi_resp() - Handle response to FLOGI request
 * @sp: The sequence that the FLOGI was on
 * @fp: The FLOGI response frame
 * @lp_arg: The lport that received the FLOGI response
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
			 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_els_flogi *flp;
	u32 did;
	u16 csp_flags;
	unsigned int r_a_tov;
	unsigned int e_d_tov;
	u16 mfs;

	FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_FLOGI) {
		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	did = fc_frame_did(fp);
	if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 ||
	    fc_frame_payload_op(fp) != ELS_LS_ACC) {
		FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
		fc_lport_error(lport, fp);
		goto err;
	}

	flp = fc_frame_payload_get(fp, sizeof(*flp));
	if (!flp) {
		FC_LPORT_DBG(lport, "FLOGI bad response\n");
		fc_lport_error(lport, fp);
		goto err;
	}

	mfs = ntohs(flp->fl_csp.sp_bb_data) &
		FC_SP_BB_DATA_MASK;

	if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
		FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
			     "lport->mfs:%hu\n", mfs, lport->mfs);
		fc_lport_error(lport, fp);
		goto err;
	}

	if (mfs <= lport->mfs) {
		lport->mfs = mfs;
		fc_host_maxframe_size(lport->host) = mfs;
	}

	csp_flags = ntohs(flp->fl_csp.sp_features);
	r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
	e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
	if (csp_flags & FC_SP_FT_EDTR)
		e_d_tov /= 1000000;

	lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);

	if ((csp_flags & FC_SP_FT_FPORT) == 0) {
		if (e_d_tov > lport->e_d_tov)
			lport->e_d_tov = e_d_tov;
		lport->r_a_tov = 2 * e_d_tov;
		fc_lport_set_port_id(lport, did, fp);
		printk(KERN_INFO "host%d: libfc: "
		       "Port (%6.6x) entered "
		       "point-to-point mode\n",
		       lport->host->host_no, did);
		fc_lport_ptp_setup(lport, fc_frame_sid(fp),
				   get_unaligned_be64(&flp->fl_wwpn),
				   get_unaligned_be64(&flp->fl_wwnn));
	} else {
		lport->e_d_tov = e_d_tov;
		lport->r_a_tov = r_a_tov;
		fc_host_fabric_name(lport->host) =
			get_unaligned_be64(&flp->fl_wwnn);
		fc_lport_set_port_id(lport, did, fp);
		fc_lport_enter_dns(lport);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_flogi_resp);

/**
 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
 * @lport: Fibre Channel local port to be logged in to the fabric
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_flogi(struct fc_lport *lport)
{
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_FLOGI);

	if (lport->point_to_multipoint) {
		if (lport->port_id)
			fc_lport_enter_ready(lport);
		return;
	}

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp)
		return fc_lport_error(lport, fp);

	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
				  lport->vport ? ELS_FDISC : ELS_FLOGI,
				  fc_lport_flogi_resp, lport,
				  lport->vport ? 2 * lport->r_a_tov :
				  lport->e_d_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_config() - Configure a fc_lport
 * @lport: The local port to be configured
 */
int fc_lport_config(struct fc_lport *lport)
{
	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
	mutex_init(&lport->lp_mutex);

	fc_lport_state_enter(lport, LPORT_ST_DISABLED);

	fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
	fc_lport_add_fc4_type(lport, FC_TYPE_CT);
	fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);

	return 0;
}
EXPORT_SYMBOL(fc_lport_config);

/**
 * fc_lport_init() - Initialize the lport layer for a local port
 * @lport: The local port whose lport layer is to be initialized
 */
int fc_lport_init(struct fc_lport *lport)
{
	if (!lport->tt.lport_recv)
		lport->tt.lport_recv = fc_lport_recv_req;

	if (!lport->tt.lport_reset)
		lport->tt.lport_reset = fc_lport_reset;

	fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
	fc_host_node_name(lport->host) = lport->wwnn;
	fc_host_port_name(lport->host) = lport->wwpn;
	fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
	memset(fc_host_supported_fc4s(lport->host), 0,
	       sizeof(fc_host_supported_fc4s(lport->host)));
	fc_host_supported_fc4s(lport->host)[2] = 1;
	fc_host_supported_fc4s(lport->host)[7] = 1;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(lport->host), 0,
	       sizeof(fc_host_active_fc4s(lport->host)));
	fc_host_active_fc4s(lport->host)[2] = 1;
	fc_host_active_fc4s(lport->host)[7] = 1;
	fc_host_maxframe_size(lport->host) = lport->mfs;
	fc_host_supported_speeds(lport->host) = 0;
	if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
	fc_fc4_add_lport(lport);

	return 0;
}
EXPORT_SYMBOL(fc_lport_init);
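/*
 * Illustration only: a low-level driver is assumed to bring an lport up in
 * roughly this order before the first fc_fabric_login()/fc_linkup() call
 * (the other *_init() helpers live elsewhere in libfc; error handling and
 * LLD-specific template setup are omitted):
 *
 *	fc_lport_config(lport);		lport defaults, lp_mutex, retry work
 *	fc_set_mfs(lport, mfs);
 *	fc_exch_init(lport);
 *	fc_elsct_init(lport);
 *	fc_rport_init(lport);
 *	fc_disc_init(lport);
 *	fc_lport_init(lport);		fills lport_recv/lport_reset and fc_host
 */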
/**
 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
 * @sp:	      The sequence for the FC Passthrough response
 * @fp:	      The response frame
 * @info_arg: The BSG info that the response is for
 */
static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *info_arg)
{
	struct fc_bsg_info *info = info_arg;
	struct fc_bsg_job *job = info->job;
	struct fc_lport *lport = info->lport;
	struct fc_frame_header *fh;
	size_t len;
	void *buf;

	if (IS_ERR(fp)) {
		job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
			-ECONNABORTED : -ETIMEDOUT;
		job->reply_len = sizeof(uint32_t);
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
		return;
	}

	mutex_lock(&lport->lp_mutex);
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
		/* Get the response code from the first frame payload */
		unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
			ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
			(unsigned short)fc_frame_payload_op(fp);

		/* Save the reply status of the job */
		job->reply->reply_data.ctels_reply.status =
			(cmd == info->rsp_code) ?
			FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
	}

	job->reply->reply_payload_rcv_len +=
		fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
					 &info->offset, NULL);

	if (fr_eof(fp) == FC_EOF_T &&
	    (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		if (job->reply->reply_payload_rcv_len >
		    job->reply_payload.payload_len)
			job->reply->reply_payload_rcv_len =
				job->reply_payload.payload_len;
		job->reply->result = 0;
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
	}
	fc_frame_free(fp);
	mutex_unlock(&lport->lp_mutex);
}
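/*
 * Note: a passthrough response may span several frames of one exchange.
 * fc_lport_bsg_resp() above runs once per received frame; info->sg,
 * info->nents and info->offset act as a cursor into the job's reply
 * scatterlist, and the job is completed only on the frame carrying EOF_T
 * with both FC_FC_LAST_SEQ and FC_FC_END_SEQ set. The request builders
 * below allocate that struct fc_bsg_info and pass it to the exchange
 * layer as the response handler argument.
 */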
/**
 * fc_lport_els_request() - Send ELS passthrough request
 * @job:   The BSG Passthrough job
 * @lport: The local port sending the request
 * @did:   The destination port ID
 * @tov:   The timeout period to wait for the response
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static int fc_lport_els_request(struct fc_bsg_job *job,
				struct fc_lport *lport,
				u32 did, u32 tov)
{
	struct fc_bsg_info *info;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	char *pp;
	int len;

	fp = fc_frame_alloc(lport, job->request_payload.payload_len);
	if (!fp)
		return -ENOMEM;

	len = job->request_payload.payload_len;
	pp = fc_frame_payload_get(fp, len);

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pp, len);

	fh = fc_frame_header_get(fp);
	fh->fh_r_ctl = FC_RCTL_ELS_REQ;
	hton24(fh->fh_d_id, did);
	hton24(fh->fh_s_id, lport->port_id);
	fh->fh_type = FC_TYPE_ELS;
	hton24(fh->fh_f_ctl, FC_FCTL_REQ);
	fh->fh_cs_ctl = 0;
	fh->fh_df_ctl = 0;
	fh->fh_parm_offset = 0;

	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
	if (!info) {
		fc_frame_free(fp);
		return -ENOMEM;
	}

	info->job = job;
	info->lport = lport;
	info->rsp_code = ELS_LS_ACC;
	info->nents = job->reply_payload.sg_cnt;
	info->sg = job->reply_payload.sg_list;

	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
				     NULL, info, tov)) {
		kfree(info);
		return -ECOMM;
	}
	return 0;
}

/**
 * fc_lport_ct_request() - Send CT Passthrough request
 * @job:   The BSG Passthrough job
 * @lport: The local port sending the request
 * @did:   The destination FC-ID
 * @tov:   The timeout period to wait for the response
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static int fc_lport_ct_request(struct fc_bsg_job *job,
			       struct fc_lport *lport, u32 did, u32 tov)
{
	struct fc_bsg_info *info;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_ct_req *ct;
	size_t len;

	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
			    job->request_payload.payload_len);
	if (!fp)
		return -ENOMEM;

	len = job->request_payload.payload_len;
	ct = fc_frame_payload_get(fp, len);

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ct, len);

	fh = fc_frame_header_get(fp);
	fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
	hton24(fh->fh_d_id, did);
	hton24(fh->fh_s_id, lport->port_id);
	fh->fh_type = FC_TYPE_CT;
	hton24(fh->fh_f_ctl, FC_FCTL_REQ);
	fh->fh_cs_ctl = 0;
	fh->fh_df_ctl = 0;
	fh->fh_parm_offset = 0;

	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
	if (!info) {
		fc_frame_free(fp);
		return -ENOMEM;
	}

	info->job = job;
	info->lport = lport;
	info->rsp_code = FC_FS_ACC;
	info->nents = job->reply_payload.sg_cnt;
	info->sg = job->reply_payload.sg_list;

	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
				     NULL, info, tov)) {
		kfree(info);
		return -ECOMM;
	}
	return 0;
}
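/*
 * fc_lport_els_request() and fc_lport_ct_request() differ only in the
 * frame's R_CTL/type and in the accept code the response handler expects.
 * Purely as an illustration (this helper is not part of libfc), the shared
 * header setup could be factored out like this:
 *
 *	static void fc_lport_bsg_fill_fh(struct fc_frame_header *fh,
 *					 u32 did, u32 sid,
 *					 enum fc_fh_type type, u8 r_ctl)
 *	{
 *		fh->fh_r_ctl = r_ctl;
 *		hton24(fh->fh_d_id, did);
 *		hton24(fh->fh_s_id, sid);
 *		fh->fh_type = type;
 *		hton24(fh->fh_f_ctl, FC_FCTL_REQ);
 *		fh->fh_cs_ctl = 0;
 *		fh->fh_df_ctl = 0;
 *		fh->fh_parm_offset = 0;
 *	}
 */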
/**
 * fc_lport_bsg_request() - The common entry point for sending
 *			    FC Passthrough requests
 * @job: The BSG passthrough job
 */
int fc_lport_bsg_request(struct fc_bsg_job *job)
{
	struct request *rsp = job->req->next_rq;
	struct Scsi_Host *shost = job->shost;
	struct fc_lport *lport = shost_priv(shost);
	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	int rc = -EINVAL;
	u32 did, tov;

	job->reply->reply_payload_rcv_len = 0;
	if (rsp)
		rsp->resid_len = job->reply_payload.payload_len;

	mutex_lock(&lport->lp_mutex);

	switch (job->request->msgcode) {
	case FC_BSG_RPT_ELS:
		rport = job->rport;
		if (!rport)
			break;

		rdata = rport->dd_data;
		rc = fc_lport_els_request(job, lport, rport->port_id,
					  rdata->e_d_tov);
		break;

	case FC_BSG_RPT_CT:
		rport = job->rport;
		if (!rport)
			break;

		rdata = rport->dd_data;
		rc = fc_lport_ct_request(job, lport, rport->port_id,
					 rdata->e_d_tov);
		break;

	case FC_BSG_HST_CT:
		did = ntoh24(job->request->rqst_data.h_ct.port_id);
		if (did == FC_FID_DIR_SERV) {
			rdata = lport->dns_rdata;
			if (!rdata)
				break;
			tov = rdata->e_d_tov;
		} else {
			rdata = lport->tt.rport_lookup(lport, did);
			if (!rdata)
				break;
			tov = rdata->e_d_tov;
			kref_put(&rdata->kref, lport->tt.rport_destroy);
		}

		rc = fc_lport_ct_request(job, lport, did, tov);
		break;

	case FC_BSG_HST_ELS_NOLOGIN:
		did = ntoh24(job->request->rqst_data.h_els.port_id);
		rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
	return rc;
}
EXPORT_SYMBOL(fc_lport_bsg_request);
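/*
 * An LLD exposes this passthrough entry point by wiring it into the FC
 * transport template it registers with scsi_transport_fc. A minimal sketch
 * (field name from scsi_transport_fc.h; the template name is illustrative):
 *
 *	static struct fc_function_template foo_fc_functions = {
 *		...
 *		.bsg_request = fc_lport_bsg_request,
 *	};
 */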