1 /* 2 * This file is part of the Chelsio FCoE driver for Linux. 3 * 4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include <linux/kernel.h> 36 #include <linux/delay.h> 37 #include <linux/slab.h> 38 #include <linux/utsname.h> 39 #include <scsi/scsi_device.h> 40 #include <scsi/scsi_transport_fc.h> 41 #include <asm/unaligned.h> 42 #include <scsi/fc/fc_els.h> 43 #include <scsi/fc/fc_fs.h> 44 #include <scsi/fc/fc_gs.h> 45 #include <scsi/fc/fc_ms.h> 46 47 #include "csio_hw.h" 48 #include "csio_mb.h" 49 #include "csio_lnode.h" 50 #include "csio_rnode.h" 51 52 int csio_fcoe_rnodes = 1024; 53 int csio_fdmi_enable = 1; 54 55 #define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1) 56 57 /* Lnode SM declarations */ 58 static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev); 59 static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev); 60 static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev); 61 static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev); 62 63 static int csio_ln_mgmt_submit_req(struct csio_ioreq *, 64 void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), 65 enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t); 66 67 /* LN event mapping */ 68 static enum csio_ln_ev fwevt_to_lnevt[] = { 69 CSIO_LNE_NONE, /* None */ 70 CSIO_LNE_NONE, /* PLOGI_ACC_RCVD */ 71 CSIO_LNE_NONE, /* PLOGI_RJT_RCVD */ 72 CSIO_LNE_NONE, /* PLOGI_RCVD */ 73 CSIO_LNE_NONE, /* PLOGO_RCVD */ 74 CSIO_LNE_NONE, /* PRLI_ACC_RCVD */ 75 CSIO_LNE_NONE, /* PRLI_RJT_RCVD */ 76 CSIO_LNE_NONE, /* PRLI_RCVD */ 77 CSIO_LNE_NONE, /* PRLO_RCVD */ 78 CSIO_LNE_NONE, /* NPORT_ID_CHGD */ 79 CSIO_LNE_LOGO, /* FLOGO_RCVD */ 80 CSIO_LNE_LOGO, /* CLR_VIRT_LNK_RCVD */ 81 CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD */ 82 CSIO_LNE_NONE, /* FLOGI_RJT_RCVD */ 83 CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD */ 84 CSIO_LNE_NONE, /* FDISC_RJT_RCVD */ 85 CSIO_LNE_NONE, /* FLOGI_TMO_MAX_RETRY */ 86 CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_ACC */ 87 CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_RJT */ 88 CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_CNFLT */ 89 CSIO_LNE_NONE, /* PRLI_TMO */ 90 CSIO_LNE_NONE, /* ADISC_TMO */ 91 CSIO_LNE_NONE, /* RSCN_DEV_LOST */ 92 CSIO_LNE_NONE, /* 
SCR_ACC_RCVD */ 93 CSIO_LNE_NONE, /* ADISC_RJT_RCVD */ 94 CSIO_LNE_NONE, /* LOGO_SNT */ 95 CSIO_LNE_NONE, /* PROTO_ERR_IMPL_LOGO */ 96 }; 97 98 #define CSIO_FWE_TO_LNE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \ 99 CSIO_LNE_NONE : \ 100 fwevt_to_lnevt[_evt]) 101 102 #define csio_ct_rsp(cp) (((struct fc_ct_hdr *)cp)->ct_cmd) 103 #define csio_ct_reason(cp) (((struct fc_ct_hdr *)cp)->ct_reason) 104 #define csio_ct_expl(cp) (((struct fc_ct_hdr *)cp)->ct_explan) 105 #define csio_ct_get_pld(cp) ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN)) 106 107 /* 108 * csio_ln_match_by_portid - lookup lnode using given portid. 109 * @hw: HW module 110 * @portid: port-id. 111 * 112 * If found, returns lnode matching given portid otherwise returns NULL. 113 */ 114 static struct csio_lnode * 115 csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid) 116 { 117 struct csio_lnode *ln; 118 struct list_head *tmp; 119 120 /* Match siblings lnode with portid */ 121 list_for_each(tmp, &hw->sln_head) { 122 ln = (struct csio_lnode *) tmp; 123 if (ln->portid == portid) 124 return ln; 125 } 126 127 return NULL; 128 } 129 130 /* 131 * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id. 132 * @hw - HW module 133 * @vnpi - vnp index. 134 * Returns - If found, returns lnode matching given vnp id 135 * otherwise returns NULL. 136 */ 137 static struct csio_lnode * 138 csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id) 139 { 140 struct list_head *tmp1, *tmp2; 141 struct csio_lnode *sln = NULL, *cln = NULL; 142 143 if (list_empty(&hw->sln_head)) { 144 CSIO_INC_STATS(hw, n_lnlkup_miss); 145 return NULL; 146 } 147 /* Traverse sibling lnodes */ 148 list_for_each(tmp1, &hw->sln_head) { 149 sln = (struct csio_lnode *) tmp1; 150 151 /* Match sibling lnode */ 152 if (sln->vnp_flowid == vnp_id) 153 return sln; 154 155 if (list_empty(&sln->cln_head)) 156 continue; 157 158 /* Traverse children lnodes */ 159 list_for_each(tmp2, &sln->cln_head) { 160 cln = (struct csio_lnode *) tmp2; 161 162 if (cln->vnp_flowid == vnp_id) 163 return cln; 164 } 165 } 166 CSIO_INC_STATS(hw, n_lnlkup_miss); 167 return NULL; 168 } 169 170 /** 171 * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn. 172 * @hw: HW module. 173 * @wwpn: WWPN. 174 * 175 * If found, returns lnode matching given wwpn, returns NULL otherwise. 
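 * Note: the lookup covers the sibling lnodes on this HW module and each
 * sibling's children (NPIV) lnodes; it does not recurse any deeper.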
176 */ 177 struct csio_lnode * 178 csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn) 179 { 180 struct list_head *tmp1, *tmp2; 181 struct csio_lnode *sln = NULL, *cln = NULL; 182 183 if (list_empty(&hw->sln_head)) { 184 CSIO_INC_STATS(hw, n_lnlkup_miss); 185 return NULL; 186 } 187 /* Traverse sibling lnodes */ 188 list_for_each(tmp1, &hw->sln_head) { 189 sln = (struct csio_lnode *) tmp1; 190 191 /* Match sibling lnode */ 192 if (!memcmp(csio_ln_wwpn(sln), wwpn, 8)) 193 return sln; 194 195 if (list_empty(&sln->cln_head)) 196 continue; 197 198 /* Traverse children lnodes */ 199 list_for_each(tmp2, &sln->cln_head) { 200 cln = (struct csio_lnode *) tmp2; 201 202 if (!memcmp(csio_ln_wwpn(cln), wwpn, 8)) 203 return cln; 204 } 205 } 206 return NULL; 207 } 208 209 /* FDMI */ 210 static void 211 csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op) 212 { 213 struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf; 214 cmd->ct_rev = FC_CT_REV; 215 cmd->ct_fs_type = type; 216 cmd->ct_fs_subtype = sub_type; 217 cmd->ct_cmd = htons(op); 218 } 219 220 static int 221 csio_hostname(uint8_t *buf, size_t buf_len) 222 { 223 if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0) 224 return 0; 225 return -1; 226 } 227 228 static int 229 csio_osname(uint8_t *buf, size_t buf_len) 230 { 231 if (snprintf(buf, buf_len, "%s %s %s", 232 init_utsname()->sysname, 233 init_utsname()->release, 234 init_utsname()->version) > 0) 235 return 0; 236 237 return -1; 238 } 239 240 static inline void 241 csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len) 242 { 243 uint16_t len; 244 struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr; 245 246 if (WARN_ON(val_len > U16_MAX)) 247 return; 248 249 len = val_len; 250 251 ae->type = htons(type); 252 len += 4; /* includes attribute type and length */ 253 len = (len + 3) & ~3; /* should be multiple of 4 bytes */ 254 ae->len = htons(len); 255 memcpy(ae->value, val, val_len); 256 if (len > val_len) 257 memset(ae->value + val_len, 0, len - val_len); 258 *ptr += len; 259 } 260 261 /* 262 * csio_ln_fdmi_done - FDMI registeration completion 263 * @hw: HW context 264 * @fdmi_req: fdmi request 265 */ 266 static void 267 csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req) 268 { 269 void *cmd; 270 struct csio_lnode *ln = fdmi_req->lnode; 271 272 if (fdmi_req->wr_status != FW_SUCCESS) { 273 csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n", 274 fdmi_req->wr_status); 275 CSIO_INC_STATS(ln, n_fdmi_err); 276 } 277 278 cmd = fdmi_req->dma_buf.vaddr; 279 if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { 280 csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n", 281 csio_ct_reason(cmd), csio_ct_expl(cmd)); 282 } 283 } 284 285 /* 286 * csio_ln_fdmi_rhba_cbfn - RHBA completion 287 * @hw: HW context 288 * @fdmi_req: fdmi request 289 */ 290 static void 291 csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) 292 { 293 void *cmd; 294 uint8_t *pld; 295 uint32_t len = 0; 296 __be32 val; 297 __be16 mfs; 298 uint32_t numattrs = 0; 299 struct csio_lnode *ln = fdmi_req->lnode; 300 struct fs_fdmi_attrs *attrib_blk; 301 struct fc_fdmi_port_name *port_name; 302 uint8_t buf[64]; 303 uint8_t *fc4_type; 304 305 if (fdmi_req->wr_status != FW_SUCCESS) { 306 csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n", 307 fdmi_req->wr_status); 308 CSIO_INC_STATS(ln, n_fdmi_err); 309 } 310 311 cmd = fdmi_req->dma_buf.vaddr; 312 if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { 313 csio_ln_dbg(ln, "fdmi rhba cmd rejected 
reason %x expl %x\n", 314 csio_ct_reason(cmd), csio_ct_expl(cmd)); 315 } 316 317 if (!csio_is_rnode_ready(fdmi_req->rnode)) { 318 CSIO_INC_STATS(ln, n_fdmi_err); 319 return; 320 } 321 322 /* Prepare CT hdr for RPA cmd */ 323 memset(cmd, 0, FC_CT_HDR_LEN); 324 csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA); 325 326 /* Prepare RPA payload */ 327 pld = (uint8_t *)csio_ct_get_pld(cmd); 328 port_name = (struct fc_fdmi_port_name *)pld; 329 memcpy(&port_name->portname, csio_ln_wwpn(ln), 8); 330 pld += sizeof(*port_name); 331 332 /* Start appending Port attributes */ 333 attrib_blk = (struct fs_fdmi_attrs *)pld; 334 attrib_blk->numattrs = 0; 335 len += sizeof(attrib_blk->numattrs); 336 pld += sizeof(attrib_blk->numattrs); 337 338 fc4_type = &buf[0]; 339 memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN); 340 fc4_type[2] = 1; 341 fc4_type[7] = 1; 342 csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES, 343 fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN); 344 numattrs++; 345 val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); 346 csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED, 347 &val, 348 FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN); 349 numattrs++; 350 351 if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G) 352 val = htonl(FC_PORTSPEED_1GBIT); 353 else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G) 354 val = htonl(FC_PORTSPEED_10GBIT); 355 else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_25G) 356 val = htonl(FC_PORTSPEED_25GBIT); 357 else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_40G) 358 val = htonl(FC_PORTSPEED_40GBIT); 359 else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_50G) 360 val = htonl(FC_PORTSPEED_50GBIT); 361 else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_100G) 362 val = htonl(FC_PORTSPEED_100GBIT); 363 else 364 val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN); 365 csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED, 366 &val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN); 367 numattrs++; 368 369 mfs = ln->ln_sparm.csp.sp_bb_data; 370 csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE, 371 &mfs, sizeof(mfs)); 372 numattrs++; 373 374 strcpy(buf, "csiostor"); 375 csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf, 376 strlen(buf)); 377 numattrs++; 378 379 if (!csio_hostname(buf, sizeof(buf))) { 380 csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME, 381 buf, strlen(buf)); 382 numattrs++; 383 } 384 attrib_blk->numattrs = htonl(numattrs); 385 len = (uint32_t)(pld - (uint8_t *)cmd); 386 387 /* Submit FDMI RPA request */ 388 spin_lock_irq(&hw->lock); 389 if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done, 390 FCOE_CT, &fdmi_req->dma_buf, len)) { 391 CSIO_INC_STATS(ln, n_fdmi_err); 392 csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n"); 393 } 394 spin_unlock_irq(&hw->lock); 395 } 396 397 /* 398 * csio_ln_fdmi_dprt_cbfn - DPRT completion 399 * @hw: HW context 400 * @fdmi_req: fdmi request 401 */ 402 static void 403 csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) 404 { 405 void *cmd; 406 uint8_t *pld; 407 uint32_t len = 0; 408 uint32_t numattrs = 0; 409 __be32 maxpayload = htonl(65536); 410 struct fc_fdmi_hba_identifier *hbaid; 411 struct csio_lnode *ln = fdmi_req->lnode; 412 struct fc_fdmi_rpl *reg_pl; 413 struct fs_fdmi_attrs *attrib_blk; 414 uint8_t buf[64]; 415 416 if (fdmi_req->wr_status != FW_SUCCESS) { 417 csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n", 418 fdmi_req->wr_status); 419 CSIO_INC_STATS(ln, n_fdmi_err); 420 } 421 422 
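	/*
	 * Note: the FDMI exchange is chained through these completions:
	 * DHBA -> DPRT -> RHBA -> RPA.  A rejected DPRT response (e.g. when
	 * nothing was registered previously) is only logged below; the RHBA
	 * registration request is still built and submitted.
	 */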
	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}
	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	/* Prepare CT hdr for RHBA cmd */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA);
	len = FC_CT_HDR_LEN;

	/* Prepare RHBA payload */
	pld = (uint8_t *)csio_ct_get_pld(cmd);
	hbaid = (struct fc_fdmi_hba_identifier *)pld;
	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */
	pld += sizeof(*hbaid);

	/* Register one port per hba */
	reg_pl = (struct fc_fdmi_rpl *)pld;
	reg_pl->numport = htonl(1);
	memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
	pld += sizeof(*reg_pl);

	/* Start appending HBA attributes */
	attrib_blk = (struct fs_fdmi_attrs *)pld;
	attrib_blk->numattrs = 0;
	len += sizeof(attrib_blk->numattrs);
	pld += sizeof(attrib_blk->numattrs);

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
			   FC_FDMI_HBA_ATTR_NODENAME_LEN);
	numattrs++;

	memset(buf, 0, sizeof(buf));

	strcpy(buf, "Chelsio Communications");
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
			   strlen(buf));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
			   hw->vpd.sn, sizeof(hw->vpd.sn));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
			   sizeof(hw->vpd.id));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
			   hw->model_desc, strlen(hw->model_desc));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
			   hw->hw_ver, sizeof(hw->hw_ver));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
			   hw->fwrev_str, strlen(hw->fwrev_str));
	numattrs++;

	if (!csio_osname(buf, sizeof(buf))) {
		csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
				   buf, strlen(buf));
		numattrs++;
	}

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
			   &maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
	len = (uint32_t)(pld - (uint8_t *)cmd);
	numattrs++;
	attrib_blk->numattrs = htonl(numattrs);

	/* Submit FDMI RHBA request */
	spin_lock_irq(&hw->lock);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
				FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
	}
	spin_unlock_irq(&hw->lock);
}

/*
 * csio_ln_fdmi_dhba_cbfn - DHBA completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	struct csio_lnode *ln = fdmi_req->lnode;
	void *cmd;
	struct fc_fdmi_port_name *port_name;
	uint32_t len;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}
	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	/* Send FDMI cmd to de-register any Port
attributes if registered 533 * before 534 */ 535 536 /* Prepare FDMI DPRT cmd */ 537 memset(cmd, 0, FC_CT_HDR_LEN); 538 csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT); 539 len = FC_CT_HDR_LEN; 540 port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd); 541 memcpy(&port_name->portname, csio_ln_wwpn(ln), 8); 542 len += sizeof(*port_name); 543 544 /* Submit FDMI request */ 545 spin_lock_irq(&hw->lock); 546 if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn, 547 FCOE_CT, &fdmi_req->dma_buf, len)) { 548 CSIO_INC_STATS(ln, n_fdmi_err); 549 csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n"); 550 } 551 spin_unlock_irq(&hw->lock); 552 } 553 554 /** 555 * csio_ln_fdmi_start - Start an FDMI request. 556 * @ln: lnode 557 * @context: session context 558 * 559 * Issued with lock held. 560 */ 561 int 562 csio_ln_fdmi_start(struct csio_lnode *ln, void *context) 563 { 564 struct csio_ioreq *fdmi_req; 565 struct csio_rnode *fdmi_rn = (struct csio_rnode *)context; 566 void *cmd; 567 struct fc_fdmi_hba_identifier *hbaid; 568 uint32_t len; 569 570 if (!(ln->flags & CSIO_LNF_FDMI_ENABLE)) 571 return -EPROTONOSUPPORT; 572 573 if (!csio_is_rnode_ready(fdmi_rn)) 574 CSIO_INC_STATS(ln, n_fdmi_err); 575 576 /* Send FDMI cmd to de-register any HBA attributes if registered 577 * before 578 */ 579 580 fdmi_req = ln->mgmt_req; 581 fdmi_req->lnode = ln; 582 fdmi_req->rnode = fdmi_rn; 583 584 /* Prepare FDMI DHBA cmd */ 585 cmd = fdmi_req->dma_buf.vaddr; 586 memset(cmd, 0, FC_CT_HDR_LEN); 587 csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA); 588 len = FC_CT_HDR_LEN; 589 590 hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd); 591 memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); 592 len += sizeof(*hbaid); 593 594 /* Submit FDMI request */ 595 if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn, 596 FCOE_CT, &fdmi_req->dma_buf, len)) { 597 CSIO_INC_STATS(ln, n_fdmi_err); 598 csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n"); 599 } 600 601 return 0; 602 } 603 604 /* 605 * csio_ln_vnp_read_cbfn - vnp read completion handler. 606 * @hw: HW lnode 607 * @cbfn: Completion handler. 608 * 609 * Reads vnp response and updates ln parameters. 610 */ 611 static void 612 csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp) 613 { 614 struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv); 615 struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); 616 struct fc_els_csp *csp; 617 struct fc_els_cssp *clsp; 618 enum fw_retval retval; 619 __be32 nport_id; 620 621 retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)); 622 if (retval != FW_SUCCESS) { 623 csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval); 624 mempool_free(mbp, hw->mb_mempool); 625 return; 626 } 627 628 spin_lock_irq(&hw->lock); 629 630 memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac)); 631 memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3); 632 ln->nport_id = ntohl(nport_id); 633 ln->nport_id = ln->nport_id >> 8; 634 635 /* Update WWNs */ 636 /* 637 * This may look like a duplication of what csio_fcoe_enable_link() 638 * does, but is absolutely necessary if the vnpi changes between 639 * a FCOE LINK UP and FCOE LINK DOWN. 
640 */ 641 memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8); 642 memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8); 643 644 /* Copy common sparam */ 645 csp = (struct fc_els_csp *)rsp->cmn_srv_parms; 646 ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver; 647 ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver; 648 ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred; 649 ln->ln_sparm.csp.sp_features = csp->sp_features; 650 ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data; 651 ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov; 652 ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov; 653 654 /* Copy word 0 & word 1 of class sparam */ 655 clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1; 656 ln->ln_sparm.clsp[2].cp_class = clsp->cp_class; 657 ln->ln_sparm.clsp[2].cp_init = clsp->cp_init; 658 ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip; 659 ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs; 660 661 spin_unlock_irq(&hw->lock); 662 663 mempool_free(mbp, hw->mb_mempool); 664 665 /* Send an event to update local attribs */ 666 csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE); 667 } 668 669 /* 670 * csio_ln_vnp_read - Read vnp params. 671 * @ln: lnode 672 * @cbfn: Completion handler. 673 * 674 * Issued with lock held. 675 */ 676 static int 677 csio_ln_vnp_read(struct csio_lnode *ln, 678 void (*cbfn) (struct csio_hw *, struct csio_mb *)) 679 { 680 struct csio_hw *hw = ln->hwp; 681 struct csio_mb *mbp; 682 683 /* Allocate Mbox request */ 684 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 685 if (!mbp) { 686 CSIO_INC_STATS(hw, n_err_nomem); 687 return -ENOMEM; 688 } 689 690 /* Prepare VNP Command */ 691 csio_fcoe_vnp_read_init_mb(ln, mbp, 692 CSIO_MB_DEFAULT_TMO, 693 ln->fcf_flowid, 694 ln->vnp_flowid, 695 cbfn); 696 697 /* Issue MBOX cmd */ 698 if (csio_mb_issue(hw, mbp)) { 699 csio_err(hw, "Failed to issue mbox FCoE VNP command\n"); 700 mempool_free(mbp, hw->mb_mempool); 701 return -EINVAL; 702 } 703 704 return 0; 705 } 706 707 /* 708 * csio_fcoe_enable_link - Enable fcoe link. 709 * @ln: lnode 710 * @enable: enable/disable 711 * Issued with lock held. 712 * Issues mbox cmd to bring up FCOE link on port associated with given ln. 713 */ 714 static int 715 csio_fcoe_enable_link(struct csio_lnode *ln, bool enable) 716 { 717 struct csio_hw *hw = ln->hwp; 718 struct csio_mb *mbp; 719 enum fw_retval retval; 720 uint8_t portid; 721 uint8_t sub_op; 722 struct fw_fcoe_link_cmd *lcmd; 723 int i; 724 725 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 726 if (!mbp) { 727 CSIO_INC_STATS(hw, n_err_nomem); 728 return -ENOMEM; 729 } 730 731 portid = ln->portid; 732 sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN; 733 734 csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n", 735 sub_op ? "UP" : "DOWN", portid); 736 737 csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, 738 portid, sub_op, 0, 0, 0, NULL); 739 740 if (csio_mb_issue(hw, mbp)) { 741 csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n", 742 portid); 743 mempool_free(mbp, hw->mb_mempool); 744 return -EINVAL; 745 } 746 747 retval = csio_mb_fw_retval(mbp); 748 if (retval != FW_SUCCESS) { 749 csio_err(hw, 750 "FCOE LINK %s cmd on port[%d] failed with " 751 "ret:x%x\n", sub_op ? 
"UP" : "DOWN", portid, retval); 752 mempool_free(mbp, hw->mb_mempool); 753 return -EINVAL; 754 } 755 756 if (!enable) 757 goto out; 758 759 lcmd = (struct fw_fcoe_link_cmd *)mbp->mb; 760 761 memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8); 762 memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8); 763 764 for (i = 0; i < CSIO_MAX_PPORTS; i++) 765 if (hw->pport[i].portid == portid) 766 memcpy(hw->pport[i].mac, lcmd->phy_mac, 6); 767 768 out: 769 mempool_free(mbp, hw->mb_mempool); 770 return 0; 771 } 772 773 /* 774 * csio_ln_read_fcf_cbfn - Read fcf parameters 775 * @ln: lnode 776 * 777 * read fcf response and Update ln fcf information. 778 */ 779 static void 780 csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp) 781 { 782 struct csio_lnode *ln = (struct csio_lnode *)mbp->priv; 783 struct csio_fcf_info *fcf_info; 784 struct fw_fcoe_fcf_cmd *rsp = 785 (struct fw_fcoe_fcf_cmd *)(mbp->mb); 786 enum fw_retval retval; 787 788 retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); 789 if (retval != FW_SUCCESS) { 790 csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n", 791 retval); 792 mempool_free(mbp, hw->mb_mempool); 793 return; 794 } 795 796 spin_lock_irq(&hw->lock); 797 fcf_info = ln->fcfinfo; 798 fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET( 799 ntohs(rsp->priority_pkd)); 800 fcf_info->vf_id = ntohs(rsp->vf_id); 801 fcf_info->vlan_id = rsp->vlan_id; 802 fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size); 803 fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv); 804 fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi)); 805 fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid); 806 fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid); 807 fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid); 808 fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid); 809 memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map)); 810 memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac)); 811 memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id)); 812 memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric)); 813 memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac)); 814 815 spin_unlock_irq(&hw->lock); 816 817 mempool_free(mbp, hw->mb_mempool); 818 } 819 820 /* 821 * csio_ln_read_fcf_entry - Read fcf entry. 822 * @ln: lnode 823 * @cbfn: Completion handler. 824 * 825 * Issued with lock held. 826 */ 827 static int 828 csio_ln_read_fcf_entry(struct csio_lnode *ln, 829 void (*cbfn) (struct csio_hw *, struct csio_mb *)) 830 { 831 struct csio_hw *hw = ln->hwp; 832 struct csio_mb *mbp; 833 834 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 835 if (!mbp) { 836 CSIO_INC_STATS(hw, n_err_nomem); 837 return -ENOMEM; 838 } 839 840 /* Get FCoE FCF information */ 841 csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, 842 ln->portid, ln->fcf_flowid, cbfn); 843 844 if (csio_mb_issue(hw, mbp)) { 845 csio_err(hw, "failed to issue FCOE FCF cmd\n"); 846 mempool_free(mbp, hw->mb_mempool); 847 return -EINVAL; 848 } 849 850 return 0; 851 } 852 853 /* 854 * csio_handle_link_up - Logical Linkup event. 855 * @hw - HW module. 856 * @portid - Physical port number 857 * @fcfi - FCF index. 858 * @vnpi - VNP index. 859 * Returns - none. 860 * 861 * This event is received from FW, when virtual link is established between 862 * Physical port[ENode] and FCF. If its new vnpi, then local node object is 863 * created on this FCF and set to [ONLINE] state. 
 * Lnode waits for FW_RDEV_CMD event to be received indicating that
 * Fabric login is completed and lnode moves to [READY] state.
 *
 * This is called with hw lock held.
 */
static void
csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
		    uint32_t vnpi)
{
	struct csio_lnode *ln = NULL;

	/* Lookup lnode based on vnpi */
	ln = csio_ln_lookup_by_vnpi(hw, vnpi);
	if (!ln) {
		/* Pick lnode based on portid */
		ln = csio_ln_lookup_by_portid(hw, portid);
		if (!ln) {
			csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
				 portid);
			CSIO_DB_ASSERT(0);
			return;
		}

		/* Check if lnode has valid vnp flowid */
		if (ln->vnp_flowid != CSIO_INVALID_IDX) {
			/* New VN-Port */
			spin_unlock_irq(&hw->lock);
			ln = csio_lnode_alloc(hw);
			spin_lock_irq(&hw->lock);
			if (!ln) {
				csio_err(hw,
					 "failed to allocate fcoe lnode "
					 "for port:%d vnpi:x%x\n",
					 portid, vnpi);
				CSIO_DB_ASSERT(0);
				return;
			}
			ln->portid = portid;
		}
		ln->vnp_flowid = vnpi;
		ln->dev_num &= ~0xFFFF;
		ln->dev_num |= vnpi;
	}

	/* Initialize fcfi */
	ln->fcf_flowid = fcfi;

	csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);

	CSIO_INC_STATS(ln, n_link_up);

	/* Send LINKUP event to SM */
	csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
}

/*
 * csio_post_event_rns
 * @ln - FCOE lnode
 * @evt - Given rnode event
 * Returns - none
 *
 * Posts given rnode event to all FCOE rnodes connected with given Lnode.
 * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
 * event.
 *
 * This is called with hw lock held.
 */
static void
csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp, *next;
	struct csio_rnode *rn;

	list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		csio_post_event(&rn->sm, evt);
	}
}

/*
 * csio_cleanup_rns
 * @ln - FCOE lnode
 * Returns - none
 *
 * Frees all FCOE rnodes connected with given Lnode.
 *
 * This is called with hw lock held.
 */
static void
csio_cleanup_rns(struct csio_lnode *ln)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp, *next_rn;
	struct csio_rnode *rn;

	list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		csio_put_rnode(ln, rn);
	}
}

/*
 * csio_post_event_lns
 * @ln - FCOE lnode
 * @evt - Given lnode event
 * Returns - none
 *
 * Posts given lnode event to all FCOE lnodes connected with given Lnode.
 * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
 * event.
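 * For an NPIV lnode the event is posted only to that lnode itself.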
976 * 977 * This called with hw lock held 978 */ 979 static void 980 csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt) 981 { 982 struct list_head *tmp; 983 struct csio_lnode *cln, *sln; 984 985 /* If NPIV lnode, send evt only to that and return */ 986 if (csio_is_npiv_ln(ln)) { 987 csio_post_event(&ln->sm, evt); 988 return; 989 } 990 991 sln = ln; 992 /* Traverse children lnodes list and send evt */ 993 list_for_each(tmp, &sln->cln_head) { 994 cln = (struct csio_lnode *) tmp; 995 csio_post_event(&cln->sm, evt); 996 } 997 998 /* Send evt to parent lnode */ 999 csio_post_event(&ln->sm, evt); 1000 } 1001 1002 /* 1003 * csio_ln_down - Lcoal nport is down 1004 * @ln - FCOE Lnode 1005 * Returns - none 1006 * 1007 * Sends LINK_DOWN events to Lnode and its associated NPIVs lnodes. 1008 * 1009 * This called with hw lock held 1010 */ 1011 static void 1012 csio_ln_down(struct csio_lnode *ln) 1013 { 1014 csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN); 1015 } 1016 1017 /* 1018 * csio_handle_link_down - Logical Linkdown event. 1019 * @hw - HW module. 1020 * @portid - Physical port number 1021 * @fcfi - FCF index. 1022 * @vnpi - VNP index. 1023 * Returns - none 1024 * 1025 * This event is received from FW, when virtual link goes down between 1026 * Physical port[ENode] and FCF. Lnode and its associated NPIVs lnode hosted on 1027 * this vnpi[VN-Port] will be de-instantiated. 1028 * 1029 * This called with hw lock held 1030 */ 1031 static void 1032 csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi, 1033 uint32_t vnpi) 1034 { 1035 struct csio_fcf_info *fp; 1036 struct csio_lnode *ln; 1037 1038 /* Lookup lnode based on vnpi */ 1039 ln = csio_ln_lookup_by_vnpi(hw, vnpi); 1040 if (ln) { 1041 fp = ln->fcfinfo; 1042 CSIO_INC_STATS(ln, n_link_down); 1043 1044 /*Warn if linkdown received if lnode is not in ready state */ 1045 if (!csio_is_lnode_ready(ln)) { 1046 csio_ln_warn(ln, 1047 "warn: FCOE link is already in offline " 1048 "Ignoring Fcoe linkdown event on portid %d\n", 1049 portid); 1050 CSIO_INC_STATS(ln, n_evt_drop); 1051 return; 1052 } 1053 1054 /* Verify portid */ 1055 if (fp->portid != portid) { 1056 csio_ln_warn(ln, 1057 "warn: FCOE linkdown recv with " 1058 "invalid port %d\n", portid); 1059 CSIO_INC_STATS(ln, n_evt_drop); 1060 return; 1061 } 1062 1063 /* verify fcfi */ 1064 if (ln->fcf_flowid != fcfi) { 1065 csio_ln_warn(ln, 1066 "warn: FCOE linkdown recv with " 1067 "invalid fcfi x%x\n", fcfi); 1068 CSIO_INC_STATS(ln, n_evt_drop); 1069 return; 1070 } 1071 1072 csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid); 1073 1074 /* Send LINK_DOWN event to lnode s/m */ 1075 csio_ln_down(ln); 1076 1077 return; 1078 } else { 1079 csio_warn(hw, 1080 "warn: FCOE linkdown recv with invalid vnpi x%x\n", 1081 vnpi); 1082 CSIO_INC_STATS(hw, n_evt_drop); 1083 } 1084 } 1085 1086 /* 1087 * csio_is_lnode_ready - Checks FCOE lnode is in ready state. 1088 * @ln: Lnode module 1089 * 1090 * Returns True if FCOE lnode is in ready state. 1091 */ 1092 int 1093 csio_is_lnode_ready(struct csio_lnode *ln) 1094 { 1095 return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)); 1096 } 1097 1098 /*****************************************************************************/ 1099 /* START: Lnode SM */ 1100 /*****************************************************************************/ 1101 /* 1102 * csio_lns_uninit - The request in uninit state. 1103 * @ln - FCOE lnode. 1104 * @evt - Event to be processed. 1105 * 1106 * Process the given lnode event which is currently in "uninit" state. 
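 * On CSIO_LNE_LINKUP the lnode moves to [ONLINE]; a physical lnode first
 * reads its FCF entry and links it onto the root lnode's FCF list, and the
 * VNP parameters are then read asynchronously for the lnode.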
1107 * Invoked with HW lock held. 1108 * Return - none. 1109 */ 1110 static void 1111 csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt) 1112 { 1113 struct csio_hw *hw = csio_lnode_to_hw(ln); 1114 struct csio_lnode *rln = hw->rln; 1115 int rv; 1116 1117 CSIO_INC_STATS(ln, n_evt_sm[evt]); 1118 switch (evt) { 1119 case CSIO_LNE_LINKUP: 1120 csio_set_state(&ln->sm, csio_lns_online); 1121 /* Read FCF only for physical lnode */ 1122 if (csio_is_phys_ln(ln)) { 1123 rv = csio_ln_read_fcf_entry(ln, 1124 csio_ln_read_fcf_cbfn); 1125 if (rv != 0) { 1126 /* TODO: Send HW RESET event */ 1127 CSIO_INC_STATS(ln, n_err); 1128 break; 1129 } 1130 1131 /* Add FCF record */ 1132 list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead); 1133 } 1134 1135 rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn); 1136 if (rv != 0) { 1137 /* TODO: Send HW RESET event */ 1138 CSIO_INC_STATS(ln, n_err); 1139 } 1140 break; 1141 1142 case CSIO_LNE_DOWN_LINK: 1143 break; 1144 1145 default: 1146 csio_ln_dbg(ln, 1147 "unexp ln event %d recv from did:x%x in " 1148 "ln state[uninit].\n", evt, ln->nport_id); 1149 CSIO_INC_STATS(ln, n_evt_unexp); 1150 break; 1151 } /* switch event */ 1152 } 1153 1154 /* 1155 * csio_lns_online - The request in online state. 1156 * @ln - FCOE lnode. 1157 * @evt - Event to be processed. 1158 * 1159 * Process the given lnode event which is currently in "online" state. 1160 * Invoked with HW lock held. 1161 * Return - none. 1162 */ 1163 static void 1164 csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt) 1165 { 1166 struct csio_hw *hw = csio_lnode_to_hw(ln); 1167 1168 CSIO_INC_STATS(ln, n_evt_sm[evt]); 1169 switch (evt) { 1170 case CSIO_LNE_LINKUP: 1171 csio_ln_warn(ln, 1172 "warn: FCOE link is up already " 1173 "Ignoring linkup on port:%d\n", ln->portid); 1174 CSIO_INC_STATS(ln, n_evt_drop); 1175 break; 1176 1177 case CSIO_LNE_FAB_INIT_DONE: 1178 csio_set_state(&ln->sm, csio_lns_ready); 1179 1180 spin_unlock_irq(&hw->lock); 1181 csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP); 1182 spin_lock_irq(&hw->lock); 1183 1184 break; 1185 1186 case CSIO_LNE_LINK_DOWN: 1187 /* Fall through */ 1188 case CSIO_LNE_DOWN_LINK: 1189 csio_set_state(&ln->sm, csio_lns_uninit); 1190 if (csio_is_phys_ln(ln)) { 1191 /* Remove FCF entry */ 1192 list_del_init(&ln->fcfinfo->list); 1193 } 1194 break; 1195 1196 default: 1197 csio_ln_dbg(ln, 1198 "unexp ln event %d recv from did:x%x in " 1199 "ln state[uninit].\n", evt, ln->nport_id); 1200 CSIO_INC_STATS(ln, n_evt_unexp); 1201 1202 break; 1203 } /* switch event */ 1204 } 1205 1206 /* 1207 * csio_lns_ready - The request in ready state. 1208 * @ln - FCOE lnode. 1209 * @evt - Event to be processed. 1210 * 1211 * Process the given lnode event which is currently in "ready" state. 1212 * Invoked with HW lock held. 1213 * Return - none. 
 */
static void
csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_FAB_INIT_DONE:
		csio_ln_dbg(ln,
			    "ignoring event %d recv from did x%x "
			    "in ln state[ready].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_LINK_DOWN:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);

		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
		spin_lock_irq(&hw->lock);

		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	case CSIO_LNE_DOWN_LINK:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);

		/* Host needs to issue aborts in case FW has not returned
		 * the WRs with status "ABORTED".
		 */
		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
		spin_lock_irq(&hw->lock);

		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	case CSIO_LNE_CLOSE:
		csio_set_state(&ln->sm, csio_lns_uninit);
		csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
		break;

	case CSIO_LNE_LOGO:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[ready].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		CSIO_DB_ASSERT(0);
		break;
	} /* switch event */
}

/*
 * csio_lns_offline - The request in offline state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "offline" state.
 * Invoked with HW lock held.
 * Return - none.
1288 */ 1289 static void 1290 csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt) 1291 { 1292 struct csio_hw *hw = csio_lnode_to_hw(ln); 1293 struct csio_lnode *rln = hw->rln; 1294 int rv; 1295 1296 CSIO_INC_STATS(ln, n_evt_sm[evt]); 1297 switch (evt) { 1298 case CSIO_LNE_LINKUP: 1299 csio_set_state(&ln->sm, csio_lns_online); 1300 /* Read FCF only for physical lnode */ 1301 if (csio_is_phys_ln(ln)) { 1302 rv = csio_ln_read_fcf_entry(ln, 1303 csio_ln_read_fcf_cbfn); 1304 if (rv != 0) { 1305 /* TODO: Send HW RESET event */ 1306 CSIO_INC_STATS(ln, n_err); 1307 break; 1308 } 1309 1310 /* Add FCF record */ 1311 list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead); 1312 } 1313 1314 rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn); 1315 if (rv != 0) { 1316 /* TODO: Send HW RESET event */ 1317 CSIO_INC_STATS(ln, n_err); 1318 } 1319 break; 1320 1321 case CSIO_LNE_LINK_DOWN: 1322 case CSIO_LNE_DOWN_LINK: 1323 case CSIO_LNE_LOGO: 1324 csio_ln_dbg(ln, 1325 "ignoring event %d recv from did x%x" 1326 "in ln state[offline].\n", evt, ln->nport_id); 1327 CSIO_INC_STATS(ln, n_evt_drop); 1328 break; 1329 1330 case CSIO_LNE_CLOSE: 1331 csio_set_state(&ln->sm, csio_lns_uninit); 1332 csio_post_event_rns(ln, CSIO_RNFE_CLOSE); 1333 break; 1334 1335 default: 1336 csio_ln_dbg(ln, 1337 "unexp ln event %d recv from did:x%x in " 1338 "ln state[offline]\n", evt, ln->nport_id); 1339 CSIO_INC_STATS(ln, n_evt_unexp); 1340 CSIO_DB_ASSERT(0); 1341 break; 1342 } /* switch event */ 1343 } 1344 1345 /*****************************************************************************/ 1346 /* END: Lnode SM */ 1347 /*****************************************************************************/ 1348 1349 static void 1350 csio_free_fcfinfo(struct kref *kref) 1351 { 1352 struct csio_fcf_info *fcfinfo = container_of(kref, 1353 struct csio_fcf_info, kref); 1354 kfree(fcfinfo); 1355 } 1356 1357 /* Helper routines for attributes */ 1358 /* 1359 * csio_lnode_state_to_str - Get current state of FCOE lnode. 1360 * @ln - lnode 1361 * @str - state of lnode. 
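 * The destination buffer is assumed to be at least 8 bytes, enough for the
 * longest state string ("OFFLINE"/"UNKNOWN") plus the terminating NUL.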
1362 * 1363 */ 1364 void 1365 csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str) 1366 { 1367 if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) { 1368 strcpy(str, "UNINIT"); 1369 return; 1370 } 1371 if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) { 1372 strcpy(str, "READY"); 1373 return; 1374 } 1375 if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) { 1376 strcpy(str, "OFFLINE"); 1377 return; 1378 } 1379 strcpy(str, "UNKNOWN"); 1380 } /* csio_lnode_state_to_str */ 1381 1382 1383 int 1384 csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid, 1385 struct fw_fcoe_port_stats *port_stats) 1386 { 1387 struct csio_mb *mbp; 1388 struct fw_fcoe_port_cmd_params portparams; 1389 enum fw_retval retval; 1390 int idx; 1391 1392 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1393 if (!mbp) { 1394 csio_err(hw, "FCoE FCF PARAMS command out of memory!\n"); 1395 return -EINVAL; 1396 } 1397 portparams.portid = portid; 1398 1399 for (idx = 1; idx <= 3; idx++) { 1400 portparams.idx = (idx-1)*6 + 1; 1401 portparams.nstats = 6; 1402 if (idx == 3) 1403 portparams.nstats = 4; 1404 csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, 1405 &portparams, NULL); 1406 if (csio_mb_issue(hw, mbp)) { 1407 csio_err(hw, "Issue of FCoE port params failed!\n"); 1408 mempool_free(mbp, hw->mb_mempool); 1409 return -EINVAL; 1410 } 1411 csio_mb_process_portparams_rsp(hw, mbp, &retval, 1412 &portparams, port_stats); 1413 } 1414 1415 mempool_free(mbp, hw->mb_mempool); 1416 return 0; 1417 } 1418 1419 /* 1420 * csio_ln_mgmt_wr_handler -Mgmt Work Request handler. 1421 * @wr - WR. 1422 * @len - WR len. 1423 * This handler is invoked when an outstanding mgmt WR is completed. 1424 * Its invoked in the context of FW event worker thread for every 1425 * mgmt event received. 1426 * Return - none. 1427 */ 1428 1429 static void 1430 csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len) 1431 { 1432 struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); 1433 struct csio_ioreq *io_req = NULL; 1434 struct fw_fcoe_els_ct_wr *wr_cmd; 1435 1436 1437 wr_cmd = (struct fw_fcoe_els_ct_wr *) wr; 1438 1439 if (len < sizeof(struct fw_fcoe_els_ct_wr)) { 1440 csio_err(mgmtm->hw, 1441 "Invalid ELS CT WR length recvd, len:%x\n", len); 1442 mgmtm->stats.n_err++; 1443 return; 1444 } 1445 1446 io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie); 1447 io_req->wr_status = csio_wr_status(wr_cmd); 1448 1449 /* lookup ioreq exists in our active Q */ 1450 spin_lock_irq(&hw->lock); 1451 if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) { 1452 csio_err(mgmtm->hw, 1453 "Error- Invalid IO handle recv in WR. handle: %p\n", 1454 io_req); 1455 mgmtm->stats.n_err++; 1456 spin_unlock_irq(&hw->lock); 1457 return; 1458 } 1459 1460 mgmtm = csio_hw_to_mgmtm(hw); 1461 1462 /* Dequeue from active queue */ 1463 list_del_init(&io_req->sm.sm_list); 1464 mgmtm->stats.n_active--; 1465 spin_unlock_irq(&hw->lock); 1466 1467 /* io_req will be freed by completion handler */ 1468 if (io_req->io_cbfn) 1469 io_req->io_cbfn(hw, io_req); 1470 } 1471 1472 /** 1473 * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events. 1474 * @hw: HW module 1475 * @cpl_op: CPL opcode 1476 * @cmd: FW cmd/WR. 1477 * 1478 * Process received FCoE cmd/WR event from FW. 
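 * Three classes of events are handled: FW_FCOE_LINK_CMD messages (logical
 * link up/down), FW_RDEV_WR payloads (fabric/remote-device events that are
 * translated into lnode and rnode SM events), and FW_FCOE_ELS_CT_WR
 * completions for outstanding management requests.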
1479 */ 1480 void 1481 csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd) 1482 { 1483 struct csio_lnode *ln; 1484 struct csio_rnode *rn; 1485 uint8_t portid, opcode = *(uint8_t *)cmd; 1486 struct fw_fcoe_link_cmd *lcmd; 1487 struct fw_wr_hdr *wr; 1488 struct fw_rdev_wr *rdev_wr; 1489 enum fw_fcoe_link_status lstatus; 1490 uint32_t fcfi, rdev_flowid, vnpi; 1491 enum csio_ln_ev evt; 1492 1493 if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) { 1494 1495 lcmd = (struct fw_fcoe_link_cmd *)cmd; 1496 lstatus = lcmd->lstatus; 1497 portid = FW_FCOE_LINK_CMD_PORTID_GET( 1498 ntohl(lcmd->op_to_portid)); 1499 fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi)); 1500 vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd)); 1501 1502 if (lstatus == FCOE_LINKUP) { 1503 1504 /* HW lock here */ 1505 spin_lock_irq(&hw->lock); 1506 csio_handle_link_up(hw, portid, fcfi, vnpi); 1507 spin_unlock_irq(&hw->lock); 1508 /* HW un lock here */ 1509 1510 } else if (lstatus == FCOE_LINKDOWN) { 1511 1512 /* HW lock here */ 1513 spin_lock_irq(&hw->lock); 1514 csio_handle_link_down(hw, portid, fcfi, vnpi); 1515 spin_unlock_irq(&hw->lock); 1516 /* HW un lock here */ 1517 } else { 1518 csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n", 1519 lcmd->lstatus); 1520 CSIO_INC_STATS(hw, n_cpl_unexp); 1521 } 1522 } else if (cpl_op == CPL_FW6_PLD) { 1523 wr = (struct fw_wr_hdr *) (cmd + 4); 1524 if (FW_WR_OP_G(be32_to_cpu(wr->hi)) 1525 == FW_RDEV_WR) { 1526 1527 rdev_wr = (struct fw_rdev_wr *) (cmd + 4); 1528 1529 rdev_flowid = FW_RDEV_WR_FLOWID_GET( 1530 ntohl(rdev_wr->alloc_to_len16)); 1531 vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET( 1532 ntohl(rdev_wr->flags_to_assoc_flowid)); 1533 1534 csio_dbg(hw, 1535 "FW_RDEV_WR: flowid:x%x ev_cause:x%x " 1536 "vnpi:0x%x\n", rdev_flowid, 1537 rdev_wr->event_cause, vnpi); 1538 1539 if (rdev_wr->protocol != PROT_FCOE) { 1540 csio_err(hw, 1541 "FW_RDEV_WR: invalid proto:x%x " 1542 "received with flowid:x%x\n", 1543 rdev_wr->protocol, 1544 rdev_flowid); 1545 CSIO_INC_STATS(hw, n_evt_drop); 1546 return; 1547 } 1548 1549 /* HW lock here */ 1550 spin_lock_irq(&hw->lock); 1551 ln = csio_ln_lookup_by_vnpi(hw, vnpi); 1552 if (!ln) { 1553 csio_err(hw, 1554 "FW_DEV_WR: invalid vnpi:x%x received " 1555 "with flowid:x%x\n", vnpi, rdev_flowid); 1556 CSIO_INC_STATS(hw, n_evt_drop); 1557 goto out_pld; 1558 } 1559 1560 rn = csio_confirm_rnode(ln, rdev_flowid, 1561 &rdev_wr->u.fcoe_rdev); 1562 if (!rn) { 1563 csio_ln_dbg(ln, 1564 "Failed to confirm rnode " 1565 "for flowid:x%x\n", rdev_flowid); 1566 CSIO_INC_STATS(hw, n_evt_drop); 1567 goto out_pld; 1568 } 1569 1570 /* save previous event for debugging */ 1571 ln->prev_evt = ln->cur_evt; 1572 ln->cur_evt = rdev_wr->event_cause; 1573 CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]); 1574 1575 /* Translate all the fabric events to lnode SM events */ 1576 evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause); 1577 if (evt) { 1578 csio_ln_dbg(ln, 1579 "Posting event to lnode event:%d " 1580 "cause:%d flowid:x%x\n", evt, 1581 rdev_wr->event_cause, rdev_flowid); 1582 csio_post_event(&ln->sm, evt); 1583 } 1584 1585 /* Handover event to rn SM here. 
*/ 1586 csio_rnode_fwevt_handler(rn, rdev_wr->event_cause); 1587 out_pld: 1588 spin_unlock_irq(&hw->lock); 1589 return; 1590 } else { 1591 csio_warn(hw, "unexpected WR op(0x%x) recv\n", 1592 FW_WR_OP_G(be32_to_cpu((wr->hi)))); 1593 CSIO_INC_STATS(hw, n_cpl_unexp); 1594 } 1595 } else if (cpl_op == CPL_FW6_MSG) { 1596 wr = (struct fw_wr_hdr *) (cmd); 1597 if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) { 1598 csio_ln_mgmt_wr_handler(hw, wr, 1599 sizeof(struct fw_fcoe_els_ct_wr)); 1600 } else { 1601 csio_warn(hw, "unexpected WR op(0x%x) recv\n", 1602 FW_WR_OP_G(be32_to_cpu((wr->hi)))); 1603 CSIO_INC_STATS(hw, n_cpl_unexp); 1604 } 1605 } else { 1606 csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode); 1607 CSIO_INC_STATS(hw, n_cpl_unexp); 1608 } 1609 } 1610 1611 /** 1612 * csio_lnode_start - Kickstart lnode discovery. 1613 * @ln: lnode 1614 * 1615 * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command. 1616 */ 1617 int 1618 csio_lnode_start(struct csio_lnode *ln) 1619 { 1620 int rv = 0; 1621 if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) { 1622 rv = csio_fcoe_enable_link(ln, 1); 1623 ln->flags |= CSIO_LNF_LINK_ENABLE; 1624 } 1625 1626 return rv; 1627 } 1628 1629 /** 1630 * csio_lnode_stop - Stop the lnode. 1631 * @ln: lnode 1632 * 1633 * This routine is invoked by HW module to stop lnode and its associated NPIV 1634 * lnodes. 1635 */ 1636 void 1637 csio_lnode_stop(struct csio_lnode *ln) 1638 { 1639 csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK); 1640 if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) { 1641 csio_fcoe_enable_link(ln, 0); 1642 ln->flags &= ~CSIO_LNF_LINK_ENABLE; 1643 } 1644 csio_ln_dbg(ln, "stopping ln :%p\n", ln); 1645 } 1646 1647 /** 1648 * csio_lnode_close - Close an lnode. 1649 * @ln: lnode 1650 * 1651 * This routine is invoked by HW module to close an lnode and its 1652 * associated NPIV lnodes. Lnode and its associated NPIV lnodes are 1653 * set to uninitialized state. 1654 */ 1655 void 1656 csio_lnode_close(struct csio_lnode *ln) 1657 { 1658 csio_post_event_lns(ln, CSIO_LNE_CLOSE); 1659 if (csio_is_phys_ln(ln)) 1660 ln->vnp_flowid = CSIO_INVALID_IDX; 1661 1662 csio_ln_dbg(ln, "closed ln :%p\n", ln); 1663 } 1664 1665 /* 1666 * csio_ln_prep_ecwr - Prepare ELS/CT WR. 1667 * @io_req - IO request. 1668 * @wr_len - WR len 1669 * @immd_len - WR immediate data 1670 * @sub_op - Sub opcode 1671 * @sid - source portid. 1672 * @did - destination portid 1673 * @flow_id - flowid 1674 * @fw_wr - ELS/CT WR to be prepared. 
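 *
 * The S_ID/D_ID fields carry 24-bit FC port ids: each id is converted to
 * big-endian and PORT_ID_PTR() skips the leading zero byte, so only the
 * three significant bytes are copied into the WR.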
1675 * Returns: 0 - on success 1676 */ 1677 static int 1678 csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len, 1679 uint32_t immd_len, uint8_t sub_op, uint32_t sid, 1680 uint32_t did, uint32_t flow_id, uint8_t *fw_wr) 1681 { 1682 struct fw_fcoe_els_ct_wr *wr; 1683 __be32 port_id; 1684 1685 wr = (struct fw_fcoe_els_ct_wr *)fw_wr; 1686 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_FCOE_ELS_CT_WR) | 1687 FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len)); 1688 1689 wr_len = DIV_ROUND_UP(wr_len, 16); 1690 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(flow_id) | 1691 FW_WR_LEN16_V(wr_len)); 1692 wr->els_ct_type = sub_op; 1693 wr->ctl_pri = 0; 1694 wr->cp_en_class = 0; 1695 wr->cookie = io_req->fw_handle; 1696 wr->iqid = cpu_to_be16(csio_q_physiqid( 1697 io_req->lnode->hwp, io_req->iq_idx)); 1698 wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1); 1699 wr->tmo_val = (uint8_t) io_req->tmo; 1700 port_id = htonl(sid); 1701 memcpy(wr->l_id, PORT_ID_PTR(port_id), 3); 1702 port_id = htonl(did); 1703 memcpy(wr->r_id, PORT_ID_PTR(port_id), 3); 1704 1705 /* Prepare RSP SGL */ 1706 wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len); 1707 wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr); 1708 return 0; 1709 } 1710 1711 /* 1712 * csio_ln_mgmt_submit_wr - Post elsct work request. 1713 * @mgmtm - mgmtm 1714 * @io_req - io request. 1715 * @sub_op - ELS or CT request type 1716 * @pld - Dma Payload buffer 1717 * @pld_len - Payload len 1718 * Prepares ELSCT Work request and sents it to FW. 1719 * Returns: 0 - on success 1720 */ 1721 static int 1722 csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req, 1723 uint8_t sub_op, struct csio_dma_buf *pld, 1724 uint32_t pld_len) 1725 { 1726 struct csio_wr_pair wrp; 1727 struct csio_lnode *ln = io_req->lnode; 1728 struct csio_rnode *rn = io_req->rnode; 1729 struct csio_hw *hw = mgmtm->hw; 1730 uint8_t fw_wr[64]; 1731 struct ulptx_sgl dsgl; 1732 uint32_t wr_size = 0; 1733 uint8_t im_len = 0; 1734 uint32_t wr_off = 0; 1735 1736 int ret = 0; 1737 1738 /* Calculate WR Size for this ELS REQ */ 1739 wr_size = sizeof(struct fw_fcoe_els_ct_wr); 1740 1741 /* Send as immediate data if pld < 256 */ 1742 if (pld_len < 256) { 1743 wr_size += ALIGN(pld_len, 8); 1744 im_len = (uint8_t)pld_len; 1745 } else 1746 wr_size += sizeof(struct ulptx_sgl); 1747 1748 /* Roundup WR size in units of 16 bytes */ 1749 wr_size = ALIGN(wr_size, 16); 1750 1751 /* Get WR to send ELS REQ */ 1752 ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp); 1753 if (ret != 0) { 1754 csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n", 1755 io_req, ret); 1756 return ret; 1757 } 1758 1759 /* Prepare Generic WR used by all ELS/CT cmd */ 1760 csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op, 1761 ln->nport_id, rn->nport_id, 1762 csio_rn_flowid(rn), 1763 &fw_wr[0]); 1764 1765 /* Copy ELS/CT WR CMD */ 1766 csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off, 1767 sizeof(struct fw_fcoe_els_ct_wr)); 1768 wr_off += sizeof(struct fw_fcoe_els_ct_wr); 1769 1770 /* Copy payload to Immediate section of WR */ 1771 if (im_len) 1772 csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len); 1773 else { 1774 /* Program DSGL to dma payload */ 1775 dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | 1776 ULPTX_MORE_F | ULPTX_NSGE_V(1)); 1777 dsgl.len0 = cpu_to_be32(pld_len); 1778 dsgl.addr0 = cpu_to_be64(pld->paddr); 1779 csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8), 1780 sizeof(struct ulptx_sgl)); 1781 } 1782 1783 /* Issue work request to xmit ELS/CT req to FW */ 1784 csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false); 1785 return ret; 
1786 } 1787 1788 /* 1789 * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request. 1790 * @io_req - IO Request 1791 * @io_cbfn - Completion handler. 1792 * @req_type - ELS or CT request type 1793 * @pld - Dma Payload buffer 1794 * @pld_len - Payload len 1795 * 1796 * 1797 * This API used submit managment ELS/CT request. 1798 * This called with hw lock held 1799 * Returns: 0 - on success 1800 * -ENOMEM - on error. 1801 */ 1802 static int 1803 csio_ln_mgmt_submit_req(struct csio_ioreq *io_req, 1804 void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), 1805 enum fcoe_cmn_type req_type, struct csio_dma_buf *pld, 1806 uint32_t pld_len) 1807 { 1808 struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode); 1809 struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); 1810 int rv; 1811 1812 BUG_ON(pld_len > pld->len); 1813 1814 io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */ 1815 io_req->fw_handle = (uintptr_t) (io_req); 1816 io_req->eq_idx = mgmtm->eq_idx; 1817 io_req->iq_idx = mgmtm->iq_idx; 1818 1819 rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len); 1820 if (rv == 0) { 1821 list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q); 1822 mgmtm->stats.n_active++; 1823 } 1824 return rv; 1825 } 1826 1827 /* 1828 * csio_ln_fdmi_init - FDMI Init entry point. 1829 * @ln: lnode 1830 */ 1831 static int 1832 csio_ln_fdmi_init(struct csio_lnode *ln) 1833 { 1834 struct csio_hw *hw = csio_lnode_to_hw(ln); 1835 struct csio_dma_buf *dma_buf; 1836 1837 /* Allocate MGMT request required for FDMI */ 1838 ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL); 1839 if (!ln->mgmt_req) { 1840 csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n"); 1841 CSIO_INC_STATS(hw, n_err_nomem); 1842 return -ENOMEM; 1843 } 1844 1845 /* Allocate Dma buffers for FDMI response Payload */ 1846 dma_buf = &ln->mgmt_req->dma_buf; 1847 dma_buf->len = 2048; 1848 dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len, 1849 &dma_buf->paddr, GFP_KERNEL); 1850 if (!dma_buf->vaddr) { 1851 csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n"); 1852 kfree(ln->mgmt_req); 1853 ln->mgmt_req = NULL; 1854 return -ENOMEM; 1855 } 1856 1857 ln->flags |= CSIO_LNF_FDMI_ENABLE; 1858 return 0; 1859 } 1860 1861 /* 1862 * csio_ln_fdmi_exit - FDMI exit entry point. 1863 * @ln: lnode 1864 */ 1865 static int 1866 csio_ln_fdmi_exit(struct csio_lnode *ln) 1867 { 1868 struct csio_dma_buf *dma_buf; 1869 struct csio_hw *hw = csio_lnode_to_hw(ln); 1870 1871 if (!ln->mgmt_req) 1872 return 0; 1873 1874 dma_buf = &ln->mgmt_req->dma_buf; 1875 if (dma_buf->vaddr) 1876 dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr, 1877 dma_buf->paddr); 1878 1879 kfree(ln->mgmt_req); 1880 return 0; 1881 } 1882 1883 int 1884 csio_scan_done(struct csio_lnode *ln, unsigned long ticks, 1885 unsigned long time, unsigned long max_scan_ticks, 1886 unsigned long delta_scan_ticks) 1887 { 1888 int rv = 0; 1889 1890 if (time >= max_scan_ticks) 1891 return 1; 1892 1893 if (!ln->tgt_scan_tick) 1894 ln->tgt_scan_tick = ticks; 1895 1896 if (((ticks - ln->tgt_scan_tick) >= delta_scan_ticks)) { 1897 if (!ln->last_scan_ntgts) 1898 ln->last_scan_ntgts = ln->n_scsi_tgts; 1899 else { 1900 if (ln->last_scan_ntgts == ln->n_scsi_tgts) 1901 return 1; 1902 1903 ln->last_scan_ntgts = ln->n_scsi_tgts; 1904 } 1905 ln->tgt_scan_tick = ticks; 1906 } 1907 return rv; 1908 } 1909 1910 /* 1911 * csio_notify_lnodes: 1912 * @hw: HW module 1913 * @note: Notification 1914 * 1915 * Called from the HW SM to fan out notifications to the 1916 * Lnode SM. 
Since the HW SM is entered with lock held, 1917 * there is no need to hold locks here. 1918 * 1919 */ 1920 void 1921 csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note) 1922 { 1923 struct list_head *tmp; 1924 struct csio_lnode *ln; 1925 1926 csio_dbg(hw, "Notifying all nodes of event %d\n", note); 1927 1928 /* Traverse children lnodes list and send evt */ 1929 list_for_each(tmp, &hw->sln_head) { 1930 ln = (struct csio_lnode *) tmp; 1931 1932 switch (note) { 1933 case CSIO_LN_NOTIFY_HWREADY: 1934 csio_lnode_start(ln); 1935 break; 1936 1937 case CSIO_LN_NOTIFY_HWRESET: 1938 case CSIO_LN_NOTIFY_HWREMOVE: 1939 csio_lnode_close(ln); 1940 break; 1941 1942 case CSIO_LN_NOTIFY_HWSTOP: 1943 csio_lnode_stop(ln); 1944 break; 1945 1946 default: 1947 break; 1948 1949 } 1950 } 1951 } 1952 1953 /* 1954 * csio_disable_lnodes: 1955 * @hw: HW module 1956 * @portid:port id 1957 * @disable: disable/enable flag. 1958 * If disable=1, disables all lnode hosted on given physical port. 1959 * otherwise enables all the lnodes on given phsysical port. 1960 * This routine need to called with hw lock held. 1961 */ 1962 void 1963 csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable) 1964 { 1965 struct list_head *tmp; 1966 struct csio_lnode *ln; 1967 1968 csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid); 1969 1970 /* Traverse sibling lnodes list and send evt */ 1971 list_for_each(tmp, &hw->sln_head) { 1972 ln = (struct csio_lnode *) tmp; 1973 if (ln->portid != portid) 1974 continue; 1975 1976 if (disable) 1977 csio_lnode_stop(ln); 1978 else 1979 csio_lnode_start(ln); 1980 } 1981 } 1982 1983 /* 1984 * csio_ln_init - Initialize an lnode. 1985 * @ln: lnode 1986 * 1987 */ 1988 static int 1989 csio_ln_init(struct csio_lnode *ln) 1990 { 1991 int rv = -EINVAL; 1992 struct csio_lnode *rln, *pln; 1993 struct csio_hw *hw = csio_lnode_to_hw(ln); 1994 1995 csio_init_state(&ln->sm, csio_lns_uninit); 1996 ln->vnp_flowid = CSIO_INVALID_IDX; 1997 ln->fcf_flowid = CSIO_INVALID_IDX; 1998 1999 if (csio_is_root_ln(ln)) { 2000 2001 /* This is the lnode used during initialization */ 2002 2003 ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL); 2004 if (!ln->fcfinfo) { 2005 csio_ln_err(ln, "Failed to alloc FCF record\n"); 2006 CSIO_INC_STATS(hw, n_err_nomem); 2007 goto err; 2008 } 2009 2010 INIT_LIST_HEAD(&ln->fcf_lsthead); 2011 kref_init(&ln->fcfinfo->kref); 2012 2013 if (csio_fdmi_enable && csio_ln_fdmi_init(ln)) 2014 goto err; 2015 2016 } else { /* Either a non-root physical or a virtual lnode */ 2017 2018 /* 2019 * THe rest is common for non-root physical and NPIV lnodes. 
2020 * Just get references to all other modules 2021 */ 2022 rln = csio_root_lnode(ln); 2023 2024 if (csio_is_npiv_ln(ln)) { 2025 /* NPIV */ 2026 pln = csio_parent_lnode(ln); 2027 kref_get(&pln->fcfinfo->kref); 2028 ln->fcfinfo = pln->fcfinfo; 2029 } else { 2030 /* Another non-root physical lnode (FCF) */ 2031 ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), 2032 GFP_KERNEL); 2033 if (!ln->fcfinfo) { 2034 csio_ln_err(ln, "Failed to alloc FCF info\n"); 2035 CSIO_INC_STATS(hw, n_err_nomem); 2036 goto err; 2037 } 2038 2039 kref_init(&ln->fcfinfo->kref); 2040 2041 if (csio_fdmi_enable && csio_ln_fdmi_init(ln)) 2042 goto err; 2043 } 2044 2045 } /* if (!csio_is_root_ln(ln)) */ 2046 2047 return 0; 2048 err: 2049 return rv; 2050 } 2051 2052 static void 2053 csio_ln_exit(struct csio_lnode *ln) 2054 { 2055 struct csio_lnode *pln; 2056 2057 csio_cleanup_rns(ln); 2058 if (csio_is_npiv_ln(ln)) { 2059 pln = csio_parent_lnode(ln); 2060 kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo); 2061 } else { 2062 kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo); 2063 if (csio_fdmi_enable) 2064 csio_ln_fdmi_exit(ln); 2065 } 2066 ln->fcfinfo = NULL; 2067 } 2068 2069 /** 2070 * csio_lnode_init - Initialize the members of an lnode. 2071 * @ln: lnode 2072 * 2073 */ 2074 int 2075 csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw, 2076 struct csio_lnode *pln) 2077 { 2078 int rv = -EINVAL; 2079 2080 /* Link this lnode to hw */ 2081 csio_lnode_to_hw(ln) = hw; 2082 2083 /* Link child to parent if child lnode */ 2084 if (pln) 2085 ln->pln = pln; 2086 else 2087 ln->pln = NULL; 2088 2089 /* Initialize scsi_tgt and timers to zero */ 2090 ln->n_scsi_tgts = 0; 2091 ln->last_scan_ntgts = 0; 2092 ln->tgt_scan_tick = 0; 2093 2094 /* Initialize rnode list */ 2095 INIT_LIST_HEAD(&ln->rnhead); 2096 INIT_LIST_HEAD(&ln->cln_head); 2097 2098 /* Initialize log level for debug */ 2099 ln->params.log_level = hw->params.log_level; 2100 2101 if (csio_ln_init(ln)) 2102 goto err; 2103 2104 /* Add lnode to list of sibling or children lnodes */ 2105 spin_lock_irq(&hw->lock); 2106 list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head); 2107 if (pln) 2108 pln->num_vports++; 2109 spin_unlock_irq(&hw->lock); 2110 2111 hw->num_lns++; 2112 2113 return 0; 2114 err: 2115 csio_lnode_to_hw(ln) = NULL; 2116 return rv; 2117 } 2118 2119 /** 2120 * csio_lnode_exit - De-instantiate an lnode. 2121 * @ln: lnode 2122 * 2123 */ 2124 void 2125 csio_lnode_exit(struct csio_lnode *ln) 2126 { 2127 struct csio_hw *hw = csio_lnode_to_hw(ln); 2128 2129 csio_ln_exit(ln); 2130 2131 /* Remove this lnode from hw->sln_head */ 2132 spin_lock_irq(&hw->lock); 2133 2134 list_del_init(&ln->sm.sm_list); 2135 2136 /* If it is children lnode, decrement the 2137 * counter in its parent lnode 2138 */ 2139 if (ln->pln) 2140 ln->pln->num_vports--; 2141 2142 /* Update root lnode pointer */ 2143 if (list_empty(&hw->sln_head)) 2144 hw->rln = NULL; 2145 else 2146 hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head); 2147 2148 spin_unlock_irq(&hw->lock); 2149 2150 csio_lnode_to_hw(ln) = NULL; 2151 hw->num_lns--; 2152 } 2153