1 /* 2 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms and conditions of the GNU General Public License, 6 * version 2, as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * more details. 12 * 13 * You should have received a copy of the GNU General Public License along with 14 * this program; if not, write to the Free Software Foundation, Inc., 15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 * 17 * Maintained at www.Open-FCoE.org 18 */ 19 20 #include <linux/module.h> 21 #include <linux/version.h> 22 #include <linux/spinlock.h> 23 #include <linux/netdevice.h> 24 #include <linux/etherdevice.h> 25 #include <linux/ethtool.h> 26 #include <linux/if_ether.h> 27 #include <linux/if_vlan.h> 28 #include <linux/crc32.h> 29 #include <linux/slab.h> 30 #include <linux/cpu.h> 31 #include <linux/fs.h> 32 #include <linux/sysfs.h> 33 #include <linux/ctype.h> 34 #include <linux/workqueue.h> 35 #include <scsi/scsi_tcq.h> 36 #include <scsi/scsicam.h> 37 #include <scsi/scsi_transport.h> 38 #include <scsi/scsi_transport_fc.h> 39 #include <net/rtnetlink.h> 40 41 #include <scsi/fc/fc_encaps.h> 42 #include <scsi/fc/fc_fip.h> 43 44 #include <scsi/libfc.h> 45 #include <scsi/fc_frame.h> 46 #include <scsi/libfcoe.h> 47 48 #include "fcoe.h" 49 50 MODULE_AUTHOR("Open-FCoE.org"); 51 MODULE_DESCRIPTION("FCoE"); 52 MODULE_LICENSE("GPL v2"); 53 54 /* Performance tuning parameters for fcoe */ 55 static unsigned int fcoe_ddp_min; 56 module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR); 57 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \ 58 "Direct Data Placement (DDP)."); 59 60 DEFINE_MUTEX(fcoe_config_mutex); 61 62 static struct workqueue_struct *fcoe_wq; 63 64 /* fcoe_percpu_clean completion. 
Waiter protected by fcoe_config_mutex */ 65 static DECLARE_COMPLETION(fcoe_flush_completion); 66 67 /* fcoe host list */ 68 /* must only be accessed under the RTNL mutex */ 69 LIST_HEAD(fcoe_hostlist); 70 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); 71 72 /* Function Prototypes */ 73 static int fcoe_reset(struct Scsi_Host *); 74 static int fcoe_xmit(struct fc_lport *, struct fc_frame *); 75 static int fcoe_rcv(struct sk_buff *, struct net_device *, 76 struct packet_type *, struct net_device *); 77 static int fcoe_percpu_receive_thread(void *); 78 static void fcoe_percpu_clean(struct fc_lport *); 79 static int fcoe_link_speed_update(struct fc_lport *); 80 static int fcoe_link_ok(struct fc_lport *); 81 82 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); 83 static int fcoe_hostlist_add(const struct fc_lport *); 84 85 static int fcoe_device_notification(struct notifier_block *, ulong, void *); 86 static void fcoe_dev_setup(void); 87 static void fcoe_dev_cleanup(void); 88 static struct fcoe_interface 89 *fcoe_hostlist_lookup_port(const struct net_device *); 90 91 static int fcoe_fip_recv(struct sk_buff *, struct net_device *, 92 struct packet_type *, struct net_device *); 93 94 static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *); 95 static void fcoe_update_src_mac(struct fc_lport *, u8 *); 96 static u8 *fcoe_get_src_mac(struct fc_lport *); 97 static void fcoe_destroy_work(struct work_struct *); 98 99 static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *, 100 unsigned int); 101 static int fcoe_ddp_done(struct fc_lport *, u16); 102 static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *, 103 unsigned int); 104 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *); 105 106 static bool fcoe_match(struct net_device *netdev); 107 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode); 108 static int fcoe_destroy(struct net_device *netdev); 109 static int fcoe_enable(struct net_device *netdev); 110 static int fcoe_disable(struct net_device *netdev); 111 112 static struct fc_seq *fcoe_elsct_send(struct fc_lport *, 113 u32 did, struct fc_frame *, 114 unsigned int op, 115 void (*resp)(struct fc_seq *, 116 struct fc_frame *, 117 void *), 118 void *, u32 timeout); 119 static void fcoe_recv_frame(struct sk_buff *skb); 120 121 static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *); 122 123 /* notification function for packets from net device */ 124 static struct notifier_block fcoe_notifier = { 125 .notifier_call = fcoe_device_notification, 126 }; 127 128 /* notification function for CPU hotplug events */ 129 static struct notifier_block fcoe_cpu_notifier = { 130 .notifier_call = fcoe_cpu_callback, 131 }; 132 133 static struct scsi_transport_template *fcoe_nport_scsi_transport; 134 static struct scsi_transport_template *fcoe_vport_scsi_transport; 135 136 static int fcoe_vport_destroy(struct fc_vport *); 137 static int fcoe_vport_create(struct fc_vport *, bool disabled); 138 static int fcoe_vport_disable(struct fc_vport *, bool disable); 139 static void fcoe_set_vport_symbolic_name(struct fc_vport *); 140 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); 141 static int fcoe_validate_vport_create(struct fc_vport *); 142 143 static struct libfc_function_template fcoe_libfc_fcn_templ = { 144 .frame_send = fcoe_xmit, 145 .ddp_setup = fcoe_ddp_setup, 146 .ddp_done = fcoe_ddp_done, 147 .ddp_target = fcoe_ddp_target, 148 .elsct_send = fcoe_elsct_send, 149 .get_lesb
= fcoe_get_lesb, 150 .lport_set_port_id = fcoe_set_port_id, 151 }; 152 153 struct fc_function_template fcoe_nport_fc_functions = { 154 .show_host_node_name = 1, 155 .show_host_port_name = 1, 156 .show_host_supported_classes = 1, 157 .show_host_supported_fc4s = 1, 158 .show_host_active_fc4s = 1, 159 .show_host_maxframe_size = 1, 160 161 .show_host_port_id = 1, 162 .show_host_supported_speeds = 1, 163 .get_host_speed = fc_get_host_speed, 164 .show_host_speed = 1, 165 .show_host_port_type = 1, 166 .get_host_port_state = fc_get_host_port_state, 167 .show_host_port_state = 1, 168 .show_host_symbolic_name = 1, 169 170 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), 171 .show_rport_maxframe_size = 1, 172 .show_rport_supported_classes = 1, 173 174 .show_host_fabric_name = 1, 175 .show_starget_node_name = 1, 176 .show_starget_port_name = 1, 177 .show_starget_port_id = 1, 178 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, 179 .show_rport_dev_loss_tmo = 1, 180 .get_fc_host_stats = fc_get_host_stats, 181 .issue_fc_host_lip = fcoe_reset, 182 183 .terminate_rport_io = fc_rport_terminate_io, 184 185 .vport_create = fcoe_vport_create, 186 .vport_delete = fcoe_vport_destroy, 187 .vport_disable = fcoe_vport_disable, 188 .set_vport_symbolic_name = fcoe_set_vport_symbolic_name, 189 190 .bsg_request = fc_lport_bsg_request, 191 }; 192 193 struct fc_function_template fcoe_vport_fc_functions = { 194 .show_host_node_name = 1, 195 .show_host_port_name = 1, 196 .show_host_supported_classes = 1, 197 .show_host_supported_fc4s = 1, 198 .show_host_active_fc4s = 1, 199 .show_host_maxframe_size = 1, 200 201 .show_host_port_id = 1, 202 .show_host_supported_speeds = 1, 203 .get_host_speed = fc_get_host_speed, 204 .show_host_speed = 1, 205 .show_host_port_type = 1, 206 .get_host_port_state = fc_get_host_port_state, 207 .show_host_port_state = 1, 208 .show_host_symbolic_name = 1, 209 210 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), 211 .show_rport_maxframe_size = 1, 212 .show_rport_supported_classes = 1, 213 214 .show_host_fabric_name = 1, 215 .show_starget_node_name = 1, 216 .show_starget_port_name = 1, 217 .show_starget_port_id = 1, 218 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, 219 .show_rport_dev_loss_tmo = 1, 220 .get_fc_host_stats = fc_get_host_stats, 221 .issue_fc_host_lip = fcoe_reset, 222 223 .terminate_rport_io = fc_rport_terminate_io, 224 225 .bsg_request = fc_lport_bsg_request, 226 }; 227 228 static struct scsi_host_template fcoe_shost_template = { 229 .module = THIS_MODULE, 230 .name = "FCoE Driver", 231 .proc_name = FCOE_NAME, 232 .queuecommand = fc_queuecommand, 233 .eh_abort_handler = fc_eh_abort, 234 .eh_device_reset_handler = fc_eh_device_reset, 235 .eh_host_reset_handler = fc_eh_host_reset, 236 .slave_alloc = fc_slave_alloc, 237 .change_queue_depth = fc_change_queue_depth, 238 .change_queue_type = fc_change_queue_type, 239 .this_id = -1, 240 .cmd_per_lun = 3, 241 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, 242 .use_clustering = ENABLE_CLUSTERING, 243 .sg_tablesize = SG_ALL, 244 .max_sectors = 0xffff, 245 }; 246 247 /** 248 * fcoe_interface_setup() - Setup a FCoE interface 249 * @fcoe: The new FCoE interface 250 * @netdev: The net device that the fcoe interface is on 251 * 252 * Returns : 0 for success 253 * Locking: must be called with the RTNL mutex held 254 */ 255 static int fcoe_interface_setup(struct fcoe_interface *fcoe, 256 struct net_device *netdev) 257 { 258 struct fcoe_ctlr *fip = &fcoe->ctlr; 259 struct netdev_hw_addr *ha; 260 struct net_device *real_dev; 261 u8 
flogi_maddr[ETH_ALEN]; 262 const struct net_device_ops *ops; 263 264 fcoe->netdev = netdev; 265 266 /* Let LLD initialize for FCoE */ 267 ops = netdev->netdev_ops; 268 if (ops->ndo_fcoe_enable) { 269 if (ops->ndo_fcoe_enable(netdev)) 270 FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE" 271 " specific feature for LLD.\n"); 272 } 273 274 /* Do not support for bonding device */ 275 if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) { 276 FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n"); 277 return -EOPNOTSUPP; 278 } 279 280 /* look for SAN MAC address, if multiple SAN MACs exist, only 281 * use the first one for SPMA */ 282 real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ? 283 vlan_dev_real_dev(netdev) : netdev; 284 rcu_read_lock(); 285 for_each_dev_addr(real_dev, ha) { 286 if ((ha->type == NETDEV_HW_ADDR_T_SAN) && 287 (is_valid_ether_addr(ha->addr))) { 288 memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN); 289 fip->spma = 1; 290 break; 291 } 292 } 293 rcu_read_unlock(); 294 295 /* setup Source Mac Address */ 296 if (!fip->spma) 297 memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len); 298 299 /* 300 * Add FCoE MAC address as second unicast MAC address 301 * or enter promiscuous mode if not capable of listening 302 * for multiple unicast MACs. 303 */ 304 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); 305 dev_uc_add(netdev, flogi_maddr); 306 if (fip->spma) 307 dev_uc_add(netdev, fip->ctl_src_addr); 308 if (fip->mode == FIP_MODE_VN2VN) { 309 dev_mc_add(netdev, FIP_ALL_VN2VN_MACS); 310 dev_mc_add(netdev, FIP_ALL_P2P_MACS); 311 } else 312 dev_mc_add(netdev, FIP_ALL_ENODE_MACS); 313 314 /* 315 * setup the receive function from ethernet driver 316 * on the ethertype for the given device 317 */ 318 fcoe->fcoe_packet_type.func = fcoe_rcv; 319 fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); 320 fcoe->fcoe_packet_type.dev = netdev; 321 dev_add_pack(&fcoe->fcoe_packet_type); 322 323 fcoe->fip_packet_type.func = fcoe_fip_recv; 324 fcoe->fip_packet_type.type = htons(ETH_P_FIP); 325 fcoe->fip_packet_type.dev = netdev; 326 dev_add_pack(&fcoe->fip_packet_type); 327 328 return 0; 329 } 330 331 /** 332 * fcoe_interface_create() - Create a FCoE interface on a net device 333 * @netdev: The net device to create the FCoE interface on 334 * @fip_mode: The mode to use for FIP 335 * 336 * Returns: pointer to a struct fcoe_interface or NULL on error 337 */ 338 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, 339 enum fip_state fip_mode) 340 { 341 struct fcoe_interface *fcoe; 342 int err; 343 344 if (!try_module_get(THIS_MODULE)) { 345 FCOE_NETDEV_DBG(netdev, 346 "Could not get a reference to the module\n"); 347 fcoe = ERR_PTR(-EBUSY); 348 goto out; 349 } 350 351 fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); 352 if (!fcoe) { 353 FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n"); 354 fcoe = ERR_PTR(-ENOMEM); 355 goto out_nomod; 356 } 357 358 dev_hold(netdev); 359 kref_init(&fcoe->kref); 360 361 /* 362 * Initialize FIP. 
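The fip_mode handed in by the creating transport selects how the controller finds its peers: in VN2VN mode fcoe_interface_setup() joins the FIP_ALL_VN2VN_MACS and FIP_ALL_P2P_MACS multicast groups instead of FIP_ALL_ENODE_MACS, while fabric mode relies on FCF discovery and FLOGI through an FCoE forwarder.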
363 */ 364 fcoe_ctlr_init(&fcoe->ctlr, fip_mode); 365 fcoe->ctlr.send = fcoe_fip_send; 366 fcoe->ctlr.update_mac = fcoe_update_src_mac; 367 fcoe->ctlr.get_src_addr = fcoe_get_src_mac; 368 369 err = fcoe_interface_setup(fcoe, netdev); 370 if (err) { 371 fcoe_ctlr_destroy(&fcoe->ctlr); 372 kfree(fcoe); 373 dev_put(netdev); 374 fcoe = ERR_PTR(err); 375 goto out_nomod; 376 } 377 378 goto out; 379 380 out_nomod: 381 module_put(THIS_MODULE); 382 out: 383 return fcoe; 384 } 385 386 /** 387 * fcoe_interface_release() - fcoe_interface kref release function 388 * @kref: Embedded reference count in an fcoe_interface struct 389 */ 390 static void fcoe_interface_release(struct kref *kref) 391 { 392 struct fcoe_interface *fcoe; 393 struct net_device *netdev; 394 395 fcoe = container_of(kref, struct fcoe_interface, kref); 396 netdev = fcoe->netdev; 397 /* tear-down the FCoE controller */ 398 fcoe_ctlr_destroy(&fcoe->ctlr); 399 kfree(fcoe); 400 dev_put(netdev); 401 module_put(THIS_MODULE); 402 } 403 404 /** 405 * fcoe_interface_get() - Get a reference to a FCoE interface 406 * @fcoe: The FCoE interface to be held 407 */ 408 static inline void fcoe_interface_get(struct fcoe_interface *fcoe) 409 { 410 kref_get(&fcoe->kref); 411 } 412 413 /** 414 * fcoe_interface_put() - Put a reference to a FCoE interface 415 * @fcoe: The FCoE interface to be released 416 */ 417 static inline void fcoe_interface_put(struct fcoe_interface *fcoe) 418 { 419 kref_put(&fcoe->kref, fcoe_interface_release); 420 } 421 422 /** 423 * fcoe_interface_cleanup() - Clean up a FCoE interface 424 * @fcoe: The FCoE interface to be cleaned up 425 * 426 * Caller must be holding the RTNL mutex 427 */ 428 void fcoe_interface_cleanup(struct fcoe_interface *fcoe) 429 { 430 struct net_device *netdev = fcoe->netdev; 431 struct fcoe_ctlr *fip = &fcoe->ctlr; 432 u8 flogi_maddr[ETH_ALEN]; 433 const struct net_device_ops *ops; 434 435 rtnl_lock(); 436 437 /* 438 * Don't listen for Ethernet packets anymore. 439 * synchronize_net() ensures that the packet handlers are not running 440 * on another CPU. dev_remove_pack() would do that; this calls the 441 * unsynchronized version __dev_remove_pack() to avoid multiple delays. 442 */ 443 __dev_remove_pack(&fcoe->fcoe_packet_type); 444 __dev_remove_pack(&fcoe->fip_packet_type); 445 synchronize_net(); 446 447 /* Delete secondary MAC addresses */ 448 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); 449 dev_uc_del(netdev, flogi_maddr); 450 if (fip->spma) 451 dev_uc_del(netdev, fip->ctl_src_addr); 452 if (fip->mode == FIP_MODE_VN2VN) { 453 dev_mc_del(netdev, FIP_ALL_VN2VN_MACS); 454 dev_mc_del(netdev, FIP_ALL_P2P_MACS); 455 } else 456 dev_mc_del(netdev, FIP_ALL_ENODE_MACS); 457 458 /* Tell the LLD we are done w/ FCoE */ 459 ops = netdev->netdev_ops; 460 if (ops->ndo_fcoe_disable) { 461 if (ops->ndo_fcoe_disable(netdev)) 462 FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE" 463 " specific feature for LLD.\n"); 464 } 465 466 rtnl_unlock(); 467 468 /* Release the self-reference taken during fcoe_interface_create() */ 469 fcoe_interface_put(fcoe); 470 } 471 472 /** 473 * fcoe_fip_recv() - Handler for received FIP frames 474 * @skb: The receive skb 475 * @netdev: The associated net device 476 * @ptype: The packet_type structure which was used to register this handler 477 * @orig_dev: The original net_device that the skb was received on.
478 * (in case dev is a bond) 479 * 480 * Returns: 0 for success 481 */ 482 static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev, 483 struct packet_type *ptype, 484 struct net_device *orig_dev) 485 { 486 struct fcoe_interface *fcoe; 487 488 fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type); 489 fcoe_ctlr_recv(&fcoe->ctlr, skb); 490 return 0; 491 } 492 493 /** 494 * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame 495 * @port: The FCoE port 496 * @skb: The FIP/FCoE packet to be sent 497 */ 498 static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb) 499 { 500 if (port->fcoe_pending_queue.qlen) 501 fcoe_check_wait_queue(port->lport, skb); 502 else if (fcoe_start_io(skb)) 503 fcoe_check_wait_queue(port->lport, skb); 504 } 505 506 /** 507 * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame 508 * @fip: The FCoE controller 509 * @skb: The FIP packet to be sent 510 */ 511 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) 512 { 513 skb->dev = fcoe_from_ctlr(fip)->netdev; 514 fcoe_port_send(lport_priv(fip->lp), skb); 515 } 516 517 /** 518 * fcoe_update_src_mac() - Update the Ethernet MAC filters 519 * @lport: The local port to update the source MAC on 520 * @addr: Unicast MAC address to add 521 * 522 * Remove any previously-set unicast MAC filter. 523 * Add secondary FCoE MAC address filter for our OUI. 524 */ 525 static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr) 526 { 527 struct fcoe_port *port = lport_priv(lport); 528 struct fcoe_interface *fcoe = port->priv; 529 530 rtnl_lock(); 531 if (!is_zero_ether_addr(port->data_src_addr)) 532 dev_uc_del(fcoe->netdev, port->data_src_addr); 533 if (!is_zero_ether_addr(addr)) 534 dev_uc_add(fcoe->netdev, addr); 535 memcpy(port->data_src_addr, addr, ETH_ALEN); 536 rtnl_unlock(); 537 } 538 539 /** 540 * fcoe_get_src_mac() - return the Ethernet source address for an lport 541 * @lport: libfc lport 542 */ 543 static u8 *fcoe_get_src_mac(struct fc_lport *lport) 544 { 545 struct fcoe_port *port = lport_priv(lport); 546 547 return port->data_src_addr; 548 } 549 550 /** 551 * fcoe_lport_config() - Set up a local port 552 * @lport: The local port to be setup 553 * 554 * Returns: 0 for success 555 */ 556 static int fcoe_lport_config(struct fc_lport *lport) 557 { 558 lport->link_up = 0; 559 lport->qfull = 0; 560 lport->max_retry_count = 3; 561 lport->max_rport_retry_count = 3; 562 lport->e_d_tov = 2 * 1000; /* FC-FS default */ 563 lport->r_a_tov = 2 * 2 * 1000; 564 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 565 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); 566 lport->does_npiv = 1; 567 568 fc_lport_init_stats(lport); 569 570 /* lport fc_lport related configuration */ 571 fc_lport_config(lport); 572 573 /* offload related configuration */ 574 lport->crc_offload = 0; 575 lport->seq_offload = 0; 576 lport->lro_enabled = 0; 577 lport->lro_xid = 0; 578 lport->lso_max = 0; 579 580 return 0; 581 } 582 583 /** 584 * fcoe_get_wwn() - Get the world wide name from LLD if it supports it 585 * @netdev: the associated net device 586 * @wwn: the output WWN 587 * @type: the type of WWN (WWPN or WWNN) 588 * 589 * Returns: 0 for success 590 */ 591 static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) 592 { 593 const struct net_device_ops *ops = netdev->netdev_ops; 594 595 if (ops->ndo_fcoe_get_wwn) 596 return ops->ndo_fcoe_get_wwn(netdev, wwn, type); 597 return -EINVAL; 598 } 599 600 /** 601 * fcoe_netdev_features_change - Updates the lport's 
offload flags based 602 * on the LLD netdev's FCoE feature flags 603 */ 604 static void fcoe_netdev_features_change(struct fc_lport *lport, 605 struct net_device *netdev) 606 { 607 mutex_lock(&lport->lp_mutex); 608 609 if (netdev->features & NETIF_F_SG) 610 lport->sg_supp = 1; 611 else 612 lport->sg_supp = 0; 613 614 if (netdev->features & NETIF_F_FCOE_CRC) { 615 lport->crc_offload = 1; 616 FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n"); 617 } else { 618 lport->crc_offload = 0; 619 } 620 621 if (netdev->features & NETIF_F_FSO) { 622 lport->seq_offload = 1; 623 lport->lso_max = netdev->gso_max_size; 624 FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n", 625 lport->lso_max); 626 } else { 627 lport->seq_offload = 0; 628 lport->lso_max = 0; 629 } 630 631 if (netdev->fcoe_ddp_xid) { 632 lport->lro_enabled = 1; 633 lport->lro_xid = netdev->fcoe_ddp_xid; 634 FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n", 635 lport->lro_xid); 636 } else { 637 lport->lro_enabled = 0; 638 lport->lro_xid = 0; 639 } 640 641 mutex_unlock(&lport->lp_mutex); 642 } 643 644 /** 645 * fcoe_netdev_config() - Set up net device for SW FCoE 646 * @lport: The local port that is associated with the net device 647 * @netdev: The associated net device 648 * 649 * Must be called after fcoe_lport_config() as it will use the local port mutex 650 * 651 * Returns: 0 for success 652 */ 653 static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) 654 { 655 u32 mfs; 656 u64 wwnn, wwpn; 657 struct fcoe_interface *fcoe; 658 struct fcoe_port *port; 659 660 /* Setup lport private data to point to fcoe softc */ 661 port = lport_priv(lport); 662 fcoe = port->priv; 663 664 /* 665 * Determine max frame size based on underlying device and optional 666 * user-configured limit. If the MFS is too low, fcoe_link_ok() 667 * will return 0, so do this first.
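As a rough worked example (header and trailer sizes assumed here from the fc_fcoe.h definitions, a 14-byte FCoE header plus an 8-byte CRC/EOF trailer): with an FCOE_MTU of 2158 bytes the subtraction below would yield 2158 - (14 + 8) = 2136, i.e. the 24-byte FC header plus the 2112-byte maximum FC payload.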
668 */ 669 mfs = netdev->mtu; 670 if (netdev->features & NETIF_F_FCOE_MTU) { 671 mfs = FCOE_MTU; 672 FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs); 673 } 674 mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof)); 675 if (fc_set_mfs(lport, mfs)) 676 return -EINVAL; 677 678 /* offload features support */ 679 fcoe_netdev_features_change(lport, netdev); 680 681 skb_queue_head_init(&port->fcoe_pending_queue); 682 port->fcoe_pending_queue_active = 0; 683 setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport); 684 685 fcoe_link_speed_update(lport); 686 687 if (!lport->vport) { 688 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) 689 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0); 690 fc_set_wwnn(lport, wwnn); 691 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) 692 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 693 2, 0); 694 fc_set_wwpn(lport, wwpn); 695 } 696 697 return 0; 698 } 699 700 /** 701 * fcoe_shost_config() - Set up the SCSI host associated with a local port 702 * @lport: The local port 703 * @dev: The device associated with the SCSI host 704 * 705 * Must be called after fcoe_lport_config() and fcoe_netdev_config() 706 * 707 * Returns: 0 for success 708 */ 709 static int fcoe_shost_config(struct fc_lport *lport, struct device *dev) 710 { 711 int rc = 0; 712 713 /* lport scsi host config */ 714 lport->host->max_lun = FCOE_MAX_LUN; 715 lport->host->max_id = FCOE_MAX_FCP_TARGET; 716 lport->host->max_channel = 0; 717 lport->host->max_cmd_len = FCOE_MAX_CMD_LEN; 718 719 if (lport->vport) 720 lport->host->transportt = fcoe_vport_scsi_transport; 721 else 722 lport->host->transportt = fcoe_nport_scsi_transport; 723 724 /* add the new host to the SCSI-ml */ 725 rc = scsi_add_host(lport->host, dev); 726 if (rc) { 727 FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: " 728 "error on scsi_add_host\n"); 729 return rc; 730 } 731 732 if (!lport->vport) 733 fc_host_max_npiv_vports(lport->host) = USHRT_MAX; 734 735 snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, 736 "%s v%s over %s", FCOE_NAME, FCOE_VERSION, 737 fcoe_netdev(lport)->name); 738 739 return 0; 740 } 741 742 /** 743 * fcoe_oem_match() - The match routine for the offloaded exchange manager 744 * @fp: The I/O frame 745 * 746 * This routine will be associated with an exchange manager (EM). When 747 * the libfc exchange handling code is looking for an EM to use it will 748 * call this routine and pass it the frame that it wishes to send. This 749 * routine will return True if the associated EM is to be used and False 750 * if the exchange code should continue looking for an EM. 751 * 752 * The offload EM that this routine is associated with will handle any 753 * packets that are for SCSI read requests. 754 * 755 * This has been enhanced to work when the FCoE stack is operating in target 756 * mode. 757 * 758 * Returns: True for read type I/O, otherwise returns false.
759 */ 760 bool fcoe_oem_match(struct fc_frame *fp) 761 { 762 struct fc_frame_header *fh = fc_frame_header_get(fp); 763 struct fcp_cmnd *fcp; 764 765 if (fc_fcp_is_read(fr_fsp(fp)) && 766 (fr_fsp(fp)->data_len > fcoe_ddp_min)) 767 return true; 768 else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) { 769 fcp = fc_frame_payload_get(fp, sizeof(*fcp)); 770 if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN && 771 fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) && 772 (fcp->fc_flags & FCP_CFL_WRDATA)) 773 return true; 774 } 775 return false; 776 } 777 778 /** 779 * fcoe_em_config() - Allocate and configure an exchange manager 780 * @lport: The local port that the new EM will be associated with 781 * 782 * Returns: 0 on success 783 */ 784 static inline int fcoe_em_config(struct fc_lport *lport) 785 { 786 struct fcoe_port *port = lport_priv(lport); 787 struct fcoe_interface *fcoe = port->priv; 788 struct fcoe_interface *oldfcoe = NULL; 789 struct net_device *old_real_dev, *cur_real_dev; 790 u16 min_xid = FCOE_MIN_XID; 791 u16 max_xid = FCOE_MAX_XID; 792 793 /* 794 * Check if need to allocate an em instance for 795 * offload exchange ids to be shared across all VN_PORTs/lport. 796 */ 797 if (!lport->lro_enabled || !lport->lro_xid || 798 (lport->lro_xid >= max_xid)) { 799 lport->lro_xid = 0; 800 goto skip_oem; 801 } 802 803 /* 804 * Reuse existing offload em instance in case 805 * it is already allocated on real eth device 806 */ 807 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN) 808 cur_real_dev = vlan_dev_real_dev(fcoe->netdev); 809 else 810 cur_real_dev = fcoe->netdev; 811 812 list_for_each_entry(oldfcoe, &fcoe_hostlist, list) { 813 if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN) 814 old_real_dev = vlan_dev_real_dev(oldfcoe->netdev); 815 else 816 old_real_dev = oldfcoe->netdev; 817 818 if (cur_real_dev == old_real_dev) { 819 fcoe->oem = oldfcoe->oem; 820 break; 821 } 822 } 823 824 if (fcoe->oem) { 825 if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) { 826 printk(KERN_ERR "fcoe_em_config: failed to add " 827 "offload em:%p on interface:%s\n", 828 fcoe->oem, fcoe->netdev->name); 829 return -ENOMEM; 830 } 831 } else { 832 fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3, 833 FCOE_MIN_XID, lport->lro_xid, 834 fcoe_oem_match); 835 if (!fcoe->oem) { 836 printk(KERN_ERR "fcoe_em_config: failed to allocate " 837 "em for offload exches on interface:%s\n", 838 fcoe->netdev->name); 839 return -ENOMEM; 840 } 841 } 842 843 /* 844 * Exclude offload EM xid range from next EM xid range. 
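For illustration only (the real bounds come from FCOE_MIN_XID/FCOE_MAX_XID in fcoe.h and the lro_xid reported by the LLD): if FCOE_MIN_XID were 0x0000 and lro_xid were 0x0FFF, the offload EM would own XIDs 0x0000 through 0x0FFF and the adjustment below would start the regular EM at 0x1000.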
845 */ 846 min_xid += lport->lro_xid + 1; 847 848 skip_oem: 849 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) { 850 printk(KERN_ERR "fcoe_em_config: failed to " 851 "allocate em on interface %s\n", fcoe->netdev->name); 852 return -ENOMEM; 853 } 854 855 return 0; 856 } 857 858 /** 859 * fcoe_if_destroy() - Tear down a SW FCoE instance 860 * @lport: The local port to be destroyed 861 * 862 */ 863 static void fcoe_if_destroy(struct fc_lport *lport) 864 { 865 struct fcoe_port *port = lport_priv(lport); 866 struct fcoe_interface *fcoe = port->priv; 867 struct net_device *netdev = fcoe->netdev; 868 869 FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); 870 871 /* Logout of the fabric */ 872 fc_fabric_logoff(lport); 873 874 /* Cleanup the fc_lport */ 875 fc_lport_destroy(lport); 876 877 /* Stop the transmit retry timer */ 878 del_timer_sync(&port->timer); 879 880 /* Free existing transmit skbs */ 881 fcoe_clean_pending_queue(lport); 882 883 rtnl_lock(); 884 if (!is_zero_ether_addr(port->data_src_addr)) 885 dev_uc_del(netdev, port->data_src_addr); 886 rtnl_unlock(); 887 888 /* Release reference held in fcoe_if_create() */ 889 fcoe_interface_put(fcoe); 890 891 /* Free queued packets for the per-CPU receive threads */ 892 fcoe_percpu_clean(lport); 893 894 /* Detach from the scsi-ml */ 895 fc_remove_host(lport->host); 896 scsi_remove_host(lport->host); 897 898 /* Destroy lport scsi_priv */ 899 fc_fcp_destroy(lport); 900 901 /* There are no more rports or I/O, free the EM */ 902 fc_exch_mgr_free(lport); 903 904 /* Free memory used by statistical counters */ 905 fc_lport_free_stats(lport); 906 907 /* Release the Scsi_Host */ 908 scsi_host_put(lport->host); 909 } 910 911 /** 912 * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device 913 * @lport: The local port to setup DDP for 914 * @xid: The exchange ID for this DDP transfer 915 * @sgl: The scatterlist describing this transfer 916 * @sgc: The number of sg items 917 * 918 * Returns: 0 if the DDP context was not configured 919 */ 920 static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid, 921 struct scatterlist *sgl, unsigned int sgc) 922 { 923 struct net_device *netdev = fcoe_netdev(lport); 924 925 if (netdev->netdev_ops->ndo_fcoe_ddp_setup) 926 return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev, 927 xid, sgl, 928 sgc); 929 930 return 0; 931 } 932 933 /** 934 * fcoe_ddp_target() - Call a LLD's ddp_target through the net device 935 * @lport: The local port to setup DDP for 936 * @xid: The exchange ID for this DDP transfer 937 * @sgl: The scatterlist describing this transfer 938 * @sgc: The number of sg items 939 * 940 * Returns: 0 if the DDP context was not configured 941 */ 942 static int fcoe_ddp_target(struct fc_lport *lport, u16 xid, 943 struct scatterlist *sgl, unsigned int sgc) 944 { 945 struct net_device *netdev = fcoe_netdev(lport); 946 947 if (netdev->netdev_ops->ndo_fcoe_ddp_target) 948 return netdev->netdev_ops->ndo_fcoe_ddp_target(netdev, xid, 949 sgl, sgc); 950 951 return 0; 952 } 953 954 955 /** 956 * fcoe_ddp_done() - Call a LLD's ddp_done through the net device 957 * @lport: The local port to complete DDP on 958 * @xid: The exchange ID for this DDP transfer 959 * 960 * Returns: the length of data that have been completed by DDP 961 */ 962 static int fcoe_ddp_done(struct fc_lport *lport, u16 xid) 963 { 964 struct net_device *netdev = fcoe_netdev(lport); 965 966 if (netdev->netdev_ops->ndo_fcoe_ddp_done) 967 return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid); 968 return 0; 969 } 970 971 /** 972 
* fcoe_if_create() - Create a FCoE instance on an interface 973 * @fcoe: The FCoE interface to create a local port on 974 * @parent: The device pointer to be the parent in sysfs for the SCSI host 975 * @npiv: Indicates if the port is a vport or not 976 * 977 * Creates a fc_lport instance and a Scsi_Host instance and configures them. 978 * 979 * Returns: The allocated fc_lport or an error pointer 980 */ 981 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, 982 struct device *parent, int npiv) 983 { 984 struct net_device *netdev = fcoe->netdev; 985 struct fc_lport *lport, *n_port; 986 struct fcoe_port *port; 987 struct Scsi_Host *shost; 988 int rc; 989 /* 990 * parent is only a vport if npiv is 1, 991 * but we'll only use vport in that case so go ahead and set it 992 */ 993 struct fc_vport *vport = dev_to_vport(parent); 994 995 FCOE_NETDEV_DBG(netdev, "Create Interface\n"); 996 997 if (!npiv) 998 lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port)); 999 else 1000 lport = libfc_vport_create(vport, sizeof(*port)); 1001 1002 if (!lport) { 1003 FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); 1004 rc = -ENOMEM; 1005 goto out; 1006 } 1007 port = lport_priv(lport); 1008 port->lport = lport; 1009 port->priv = fcoe; 1010 port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH; 1011 port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH; 1012 INIT_WORK(&port->destroy_work, fcoe_destroy_work); 1013 1014 /* configure a fc_lport including the exchange manager */ 1015 rc = fcoe_lport_config(lport); 1016 if (rc) { 1017 FCOE_NETDEV_DBG(netdev, "Could not configure lport for the " 1018 "interface\n"); 1019 goto out_host_put; 1020 } 1021 1022 if (npiv) { 1023 FCOE_NETDEV_DBG(netdev, "Setting vport names, " 1024 "%16.16llx %16.16llx\n", 1025 vport->node_name, vport->port_name); 1026 fc_set_wwnn(lport, vport->node_name); 1027 fc_set_wwpn(lport, vport->port_name); 1028 } 1029 1030 /* configure lport network properties */ 1031 rc = fcoe_netdev_config(lport, netdev); 1032 if (rc) { 1033 FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the " 1034 "interface\n"); 1035 goto out_lp_destroy; 1036 } 1037 1038 /* configure lport scsi host properties */ 1039 rc = fcoe_shost_config(lport, parent); 1040 if (rc) { 1041 FCOE_NETDEV_DBG(netdev, "Could not configure shost for the " 1042 "interface\n"); 1043 goto out_lp_destroy; 1044 } 1045 1046 /* Initialize the library */ 1047 rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1); 1048 if (rc) { 1049 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the " 1050 "interface\n"); 1051 goto out_lp_destroy; 1052 } 1053 1054 /* 1055 * fcoe_em_config() and fcoe_hostlist_add() both 1056 * need to be atomic with respect to other changes to the 1057 * hostlist since fcoe_em_config() looks for an existing EM 1058 * instance on the host list updated by fcoe_hostlist_add(). 1059 * 1060 * This is currently handled through the fcoe_config_mutex 1061 * being held.
1062 */ 1063 if (!npiv) 1064 /* lport exch manager allocation */ 1065 rc = fcoe_em_config(lport); 1066 else { 1067 shost = vport_to_shost(vport); 1068 n_port = shost_priv(shost); 1069 rc = fc_exch_mgr_list_clone(n_port, lport); 1070 } 1071 1072 if (rc) { 1073 FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n"); 1074 goto out_lp_destroy; 1075 } 1076 1077 fcoe_interface_get(fcoe); 1078 return lport; 1079 1080 out_lp_destroy: 1081 fc_exch_mgr_free(lport); 1082 out_host_put: 1083 scsi_host_put(lport->host); 1084 out: 1085 return ERR_PTR(rc); 1086 } 1087 1088 /** 1089 * fcoe_if_init() - Initialization routine for fcoe.ko 1090 * 1091 * Attaches the SW FCoE transport to the FC transport 1092 * 1093 * Returns: 0 on success 1094 */ 1095 static int __init fcoe_if_init(void) 1096 { 1097 /* attach to scsi transport */ 1098 fcoe_nport_scsi_transport = 1099 fc_attach_transport(&fcoe_nport_fc_functions); 1100 fcoe_vport_scsi_transport = 1101 fc_attach_transport(&fcoe_vport_fc_functions); 1102 1103 if (!fcoe_nport_scsi_transport) { 1104 printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); 1105 return -ENODEV; 1106 } 1107 1108 return 0; 1109 } 1110 1111 /** 1112 * fcoe_if_exit() - Tear down fcoe.ko 1113 * 1114 * Detaches the SW FCoE transport from the FC transport 1115 * 1116 * Returns: 0 on success 1117 */ 1118 int __exit fcoe_if_exit(void) 1119 { 1120 fc_release_transport(fcoe_nport_scsi_transport); 1121 fc_release_transport(fcoe_vport_scsi_transport); 1122 fcoe_nport_scsi_transport = NULL; 1123 fcoe_vport_scsi_transport = NULL; 1124 return 0; 1125 } 1126 1127 /** 1128 * fcoe_percpu_thread_create() - Create a receive thread for an online CPU 1129 * @cpu: The CPU index of the CPU to create a receive thread for 1130 */ 1131 static void fcoe_percpu_thread_create(unsigned int cpu) 1132 { 1133 struct fcoe_percpu_s *p; 1134 struct task_struct *thread; 1135 1136 p = &per_cpu(fcoe_percpu, cpu); 1137 1138 thread = kthread_create(fcoe_percpu_receive_thread, 1139 (void *)p, "fcoethread/%d", cpu); 1140 1141 if (likely(!IS_ERR(thread))) { 1142 kthread_bind(thread, cpu); 1143 wake_up_process(thread); 1144 1145 spin_lock_bh(&p->fcoe_rx_list.lock); 1146 p->thread = thread; 1147 spin_unlock_bh(&p->fcoe_rx_list.lock); 1148 } 1149 } 1150 1151 /** 1152 * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU 1153 * @cpu: The CPU index of the CPU whose receive thread is to be destroyed 1154 * 1155 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the 1156 * current CPU's Rx thread. If the thread being destroyed is bound to 1157 * the CPU processing this context the skbs will be freed. 1158 */ 1159 static void fcoe_percpu_thread_destroy(unsigned int cpu) 1160 { 1161 struct fcoe_percpu_s *p; 1162 struct task_struct *thread; 1163 struct page *crc_eof; 1164 struct sk_buff *skb; 1165 #ifdef CONFIG_SMP 1166 struct fcoe_percpu_s *p0; 1167 unsigned targ_cpu = get_cpu(); 1168 #endif /* CONFIG_SMP */ 1169 1170 FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); 1171 1172 /* Prevent any new skbs from being queued for this CPU. */ 1173 p = &per_cpu(fcoe_percpu, cpu); 1174 spin_lock_bh(&p->fcoe_rx_list.lock); 1175 thread = p->thread; 1176 p->thread = NULL; 1177 crc_eof = p->crc_eof_page; 1178 p->crc_eof_page = NULL; 1179 p->crc_eof_offset = 0; 1180 spin_unlock_bh(&p->fcoe_rx_list.lock); 1181 1182 #ifdef CONFIG_SMP 1183 /* 1184 * Don't bother moving the skb's if this context is running 1185 * on the same CPU that is having its thread destroyed. 
This 1186 * can easily happen when the module is removed. 1187 */ 1188 if (cpu != targ_cpu) { 1189 p0 = &per_cpu(fcoe_percpu, targ_cpu); 1190 spin_lock_bh(&p0->fcoe_rx_list.lock); 1191 if (p0->thread) { 1192 FCOE_DBG("Moving frames from CPU %d to CPU %d\n", 1193 cpu, targ_cpu); 1194 1195 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) 1196 __skb_queue_tail(&p0->fcoe_rx_list, skb); 1197 spin_unlock_bh(&p0->fcoe_rx_list.lock); 1198 } else { 1199 /* 1200 * The targeted CPU is not initialized and cannot accept 1201 * new skbs. Unlock the targeted CPU and drop the skbs 1202 * on the CPU that is going offline. 1203 */ 1204 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) 1205 kfree_skb(skb); 1206 spin_unlock_bh(&p0->fcoe_rx_list.lock); 1207 } 1208 } else { 1209 /* 1210 * This scenario occurs when the module is being removed 1211 * and all threads are being destroyed. skbs will continue 1212 * to be shifted from the CPU thread that is being removed 1213 * to the CPU thread associated with the CPU that is processing 1214 * the module removal. Once there is only one CPU Rx thread it 1215 * will reach this case and we will drop all skbs and later 1216 * stop the thread. 1217 */ 1218 spin_lock_bh(&p->fcoe_rx_list.lock); 1219 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) 1220 kfree_skb(skb); 1221 spin_unlock_bh(&p->fcoe_rx_list.lock); 1222 } 1223 put_cpu(); 1224 #else 1225 /* 1226 * This a non-SMP scenario where the singular Rx thread is 1227 * being removed. Free all skbs and stop the thread. 1228 */ 1229 spin_lock_bh(&p->fcoe_rx_list.lock); 1230 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) 1231 kfree_skb(skb); 1232 spin_unlock_bh(&p->fcoe_rx_list.lock); 1233 #endif 1234 1235 if (thread) 1236 kthread_stop(thread); 1237 1238 if (crc_eof) 1239 put_page(crc_eof); 1240 } 1241 1242 /** 1243 * fcoe_cpu_callback() - Handler for CPU hotplug events 1244 * @nfb: The callback data block 1245 * @action: The event triggering the callback 1246 * @hcpu: The index of the CPU that the event is for 1247 * 1248 * This creates or destroys per-CPU data for fcoe 1249 * 1250 * Returns NOTIFY_OK always. 1251 */ 1252 static int fcoe_cpu_callback(struct notifier_block *nfb, 1253 unsigned long action, void *hcpu) 1254 { 1255 unsigned cpu = (unsigned long)hcpu; 1256 1257 switch (action) { 1258 case CPU_ONLINE: 1259 case CPU_ONLINE_FROZEN: 1260 FCOE_DBG("CPU %x online: Create Rx thread\n", cpu); 1261 fcoe_percpu_thread_create(cpu); 1262 break; 1263 case CPU_DEAD: 1264 case CPU_DEAD_FROZEN: 1265 FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu); 1266 fcoe_percpu_thread_destroy(cpu); 1267 break; 1268 default: 1269 break; 1270 } 1271 return NOTIFY_OK; 1272 } 1273 1274 /** 1275 * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming 1276 * command. 1277 * 1278 * This routine selects next CPU based on cpumask to distribute 1279 * incoming requests in round robin. 
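The cursor used below is a plain static variable with no locking, so with several CPUs receiving frames concurrently the distribution is only approximately round robin; that is acceptable here because the result is merely a load-spreading hint for frames that cannot yet be steered by an exchange ID.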
1280 * 1281 * Returns: int CPU number 1282 */ 1283 static inline unsigned int fcoe_select_cpu(void) 1284 { 1285 static unsigned int selected_cpu; 1286 1287 selected_cpu = cpumask_next(selected_cpu, cpu_online_mask); 1288 if (selected_cpu >= nr_cpu_ids) 1289 selected_cpu = cpumask_first(cpu_online_mask); 1290 1291 return selected_cpu; 1292 } 1293 1294 /** 1295 * fcoe_rcv() - Receive packets from a net device 1296 * @skb: The received packet 1297 * @netdev: The net device that the packet was received on 1298 * @ptype: The packet type context 1299 * @olddev: The last device net device 1300 * 1301 * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds a 1302 * FC frame and passes the frame to libfc. 1303 * 1304 * Returns: 0 for success 1305 */ 1306 int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, 1307 struct packet_type *ptype, struct net_device *olddev) 1308 { 1309 struct fc_lport *lport; 1310 struct fcoe_rcv_info *fr; 1311 struct fcoe_interface *fcoe; 1312 struct fc_frame_header *fh; 1313 struct fcoe_percpu_s *fps; 1314 struct ethhdr *eh; 1315 unsigned int cpu; 1316 1317 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type); 1318 lport = fcoe->ctlr.lp; 1319 if (unlikely(!lport)) { 1320 FCOE_NETDEV_DBG(netdev, "Cannot find hba structure"); 1321 goto err2; 1322 } 1323 if (!lport->link_up) 1324 goto err2; 1325 1326 FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p " 1327 "data:%p tail:%p end:%p sum:%d dev:%s", 1328 skb->len, skb->data_len, skb->head, skb->data, 1329 skb_tail_pointer(skb), skb_end_pointer(skb), 1330 skb->csum, skb->dev ? skb->dev->name : "<NULL>"); 1331 1332 eh = eth_hdr(skb); 1333 1334 if (is_fip_mode(&fcoe->ctlr) && 1335 compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) { 1336 FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n", 1337 eh->h_source); 1338 goto err; 1339 } 1340 1341 /* 1342 * Check for minimum frame length, and make sure required FCoE 1343 * and FC headers are pulled into the linear data area. 1344 */ 1345 if (unlikely((skb->len < FCOE_MIN_FRAME) || 1346 !pskb_may_pull(skb, FCOE_HEADER_LEN))) 1347 goto err; 1348 1349 skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); 1350 fh = (struct fc_frame_header *) skb_transport_header(skb); 1351 1352 if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) { 1353 FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n", 1354 eh->h_dest); 1355 goto err; 1356 } 1357 1358 fr = fcoe_dev_from_skb(skb); 1359 fr->fr_dev = lport; 1360 1361 /* 1362 * In case the incoming frame's exchange is originated from 1363 * the initiator, then received frame's exchange id is ANDed 1364 * with fc_cpu_mask bits to get the same cpu on which exchange 1365 * was originated, otherwise select cpu using rx exchange id 1366 * or fcoe_select_cpu(). 1367 */ 1368 if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX) 1369 cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask; 1370 else { 1371 if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN) 1372 cpu = fcoe_select_cpu(); 1373 else 1374 cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask; 1375 } 1376 1377 if (cpu >= nr_cpu_ids) 1378 goto err; 1379 1380 fps = &per_cpu(fcoe_percpu, cpu); 1381 spin_lock_bh(&fps->fcoe_rx_list.lock); 1382 if (unlikely(!fps->thread)) { 1383 /* 1384 * The targeted CPU is not ready, let's target 1385 * the first CPU now. For non-SMP systems this 1386 * will check the same CPU twice. 
1387 */ 1388 FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread " 1389 "ready for incoming skb - using first online " 1390 "CPU.\n"); 1391 1392 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1393 cpu = cpumask_first(cpu_online_mask); 1394 fps = &per_cpu(fcoe_percpu, cpu); 1395 spin_lock_bh(&fps->fcoe_rx_list.lock); 1396 if (!fps->thread) { 1397 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1398 goto err; 1399 } 1400 } 1401 1402 /* 1403 * We now have a valid CPU that we're targeting for 1404 * this skb. We also have this receive thread locked, 1405 * so we're free to queue skbs into its queue. 1406 */ 1407 1408 /* If this is a SCSI-FCP frame, and this is already executing on the 1409 * correct CPU, and the queue for this CPU is empty, then go ahead 1410 * and process the frame directly in the softirq context. 1411 * This lets us process completions without context switching from the 1412 * NET_RX softirq, to our receive processing thread, and then back to 1413 * BLOCK softirq context. 1414 */ 1415 if (fh->fh_type == FC_TYPE_FCP && 1416 cpu == smp_processor_id() && 1417 skb_queue_empty(&fps->fcoe_rx_list)) { 1418 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1419 fcoe_recv_frame(skb); 1420 } else { 1421 __skb_queue_tail(&fps->fcoe_rx_list, skb); 1422 if (fps->fcoe_rx_list.qlen == 1) 1423 wake_up_process(fps->thread); 1424 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1425 } 1426 1427 return 0; 1428 err: 1429 per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++; 1430 put_cpu(); 1431 err2: 1432 kfree_skb(skb); 1433 return -1; 1434 } 1435 1436 /** 1437 * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC 1438 * @skb: The packet to be transmitted 1439 * @tlen: The total length of the trailer 1440 * 1441 * Returns: 0 for success 1442 */ 1443 static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) 1444 { 1445 struct fcoe_percpu_s *fps; 1446 int rc; 1447 1448 fps = &get_cpu_var(fcoe_percpu); 1449 rc = fcoe_get_paged_crc_eof(skb, tlen, fps); 1450 put_cpu_var(fcoe_percpu); 1451 1452 return rc; 1453 } 1454 1455 /** 1456 * fcoe_xmit() - Transmit a FCoE frame 1457 * @lport: The local port that the frame is to be transmitted for 1458 * @fp: The frame to be transmitted 1459 * 1460 * Return: 0 for success 1461 */ 1462 int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) 1463 { 1464 int wlen; 1465 u32 crc; 1466 struct ethhdr *eh; 1467 struct fcoe_crc_eof *cp; 1468 struct sk_buff *skb; 1469 struct fcoe_dev_stats *stats; 1470 struct fc_frame_header *fh; 1471 unsigned int hlen; /* header length implies the version */ 1472 unsigned int tlen; /* trailer length */ 1473 unsigned int elen; /* eth header, may include vlan */ 1474 struct fcoe_port *port = lport_priv(lport); 1475 struct fcoe_interface *fcoe = port->priv; 1476 u8 sof, eof; 1477 struct fcoe_hdr *hp; 1478 1479 WARN_ON((fr_len(fp) % sizeof(u32)) != 0); 1480 1481 fh = fc_frame_header_get(fp); 1482 skb = fp_skb(fp); 1483 wlen = skb->len / FCOE_WORD_TO_BYTE; 1484 1485 if (!lport->link_up) { 1486 kfree_skb(skb); 1487 return 0; 1488 } 1489 1490 if (unlikely(fh->fh_type == FC_TYPE_ELS) && 1491 fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb)) 1492 return 0; 1493 1494 sof = fr_sof(fp); 1495 eof = fr_eof(fp); 1496 1497 elen = sizeof(struct ethhdr); 1498 hlen = sizeof(struct fcoe_hdr); 1499 tlen = sizeof(struct fcoe_crc_eof); 1500 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; 1501 1502 /* crc offload */ 1503 if (likely(lport->crc_offload)) { 1504 skb->ip_summed = CHECKSUM_PARTIAL; 1505 skb->csum_start =
skb_headroom(skb); 1506 skb->csum_offset = skb->len; 1507 crc = 0; 1508 } else { 1509 skb->ip_summed = CHECKSUM_NONE; 1510 crc = fcoe_fc_crc(fp); 1511 } 1512 1513 /* copy port crc and eof to the skb buff */ 1514 if (skb_is_nonlinear(skb)) { 1515 skb_frag_t *frag; 1516 if (fcoe_alloc_paged_crc_eof(skb, tlen)) { 1517 kfree_skb(skb); 1518 return -ENOMEM; 1519 } 1520 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; 1521 cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ) 1522 + frag->page_offset; 1523 } else { 1524 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); 1525 } 1526 1527 memset(cp, 0, sizeof(*cp)); 1528 cp->fcoe_eof = eof; 1529 cp->fcoe_crc32 = cpu_to_le32(~crc); 1530 1531 if (skb_is_nonlinear(skb)) { 1532 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ); 1533 cp = NULL; 1534 } 1535 1536 /* adjust skb network/transport offsets to match mac/fcoe/port */ 1537 skb_push(skb, elen + hlen); 1538 skb_reset_mac_header(skb); 1539 skb_reset_network_header(skb); 1540 skb->mac_len = elen; 1541 skb->protocol = htons(ETH_P_FCOE); 1542 skb->dev = fcoe->netdev; 1543 1544 /* fill up mac and fcoe headers */ 1545 eh = eth_hdr(skb); 1546 eh->h_proto = htons(ETH_P_FCOE); 1547 memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN); 1548 if (fcoe->ctlr.map_dest) 1549 memcpy(eh->h_dest + 3, fh->fh_d_id, 3); 1550 1551 if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 1552 memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN); 1553 else 1554 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); 1555 1556 hp = (struct fcoe_hdr *)(eh + 1); 1557 memset(hp, 0, sizeof(*hp)); 1558 if (FC_FCOE_VER) 1559 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); 1560 hp->fcoe_sof = sof; 1561 1562 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ 1563 if (lport->seq_offload && fr_max_payload(fp)) { 1564 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; 1565 skb_shinfo(skb)->gso_size = fr_max_payload(fp); 1566 } else { 1567 skb_shinfo(skb)->gso_type = 0; 1568 skb_shinfo(skb)->gso_size = 0; 1569 } 1570 /* update tx stats: regardless of whether the LLD fails */ 1571 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 1572 stats->TxFrames++; 1573 stats->TxWords += wlen; 1574 put_cpu(); 1575 1576 /* send down to lld */ 1577 fr_dev(fp) = lport; 1578 fcoe_port_send(port, skb); 1579 return 0; 1580 } 1581 1582 /** 1583 * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion 1584 * @skb: The completed skb (argument required by destructor) 1585 */ 1586 static void fcoe_percpu_flush_done(struct sk_buff *skb) 1587 { 1588 complete(&fcoe_flush_completion); 1589 } 1590 1591 /** 1592 * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC 1593 * @lport: The local port the frame was received on 1594 * @fp: The received frame 1595 * 1596 * Return: 0 on passing filtering checks 1597 */ 1598 static inline int fcoe_filter_frames(struct fc_lport *lport, 1599 struct fc_frame *fp) 1600 { 1601 struct fcoe_interface *fcoe; 1602 struct fc_frame_header *fh; 1603 struct sk_buff *skb = (struct sk_buff *)fp; 1604 struct fcoe_dev_stats *stats; 1605 1606 /* 1607 * We only check the CRC here if it was not already verified by the 1608 * receive CRC offload; even then, solicited FCP data is not checked 1609 * here, since the FCP layer verifies the CRC during the copy.
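When the software check further down does run, it recomputes crc32 over the remaining frame bytes and compares its complement with the value carried in the FCoE trailer (fcoe_xmit() above stores ~crc there), clearing FCPHF_CRC_UNCHECKED on a match.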
1610 */ 1611 if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY) 1612 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; 1613 else 1614 fr_flags(fp) |= FCPHF_CRC_UNCHECKED; 1615 1616 fh = (struct fc_frame_header *) skb_transport_header(skb); 1617 fh = fc_frame_header_get(fp); 1618 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) 1619 return 0; 1620 1621 fcoe = ((struct fcoe_port *)lport_priv(lport))->priv; 1622 if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO && 1623 ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { 1624 FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n"); 1625 return -EINVAL; 1626 } 1627 1628 if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) || 1629 le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) { 1630 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; 1631 return 0; 1632 } 1633 1634 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 1635 stats->InvalidCRCCount++; 1636 if (stats->InvalidCRCCount < 5) 1637 printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); 1638 return -EINVAL; 1639 } 1640 1641 /** 1642 * fcoe_recv_frame() - process a single received frame 1643 * @skb: frame to process 1644 */ 1645 static void fcoe_recv_frame(struct sk_buff *skb) 1646 { 1647 u32 fr_len; 1648 struct fc_lport *lport; 1649 struct fcoe_rcv_info *fr; 1650 struct fcoe_dev_stats *stats; 1651 struct fcoe_crc_eof crc_eof; 1652 struct fc_frame *fp; 1653 struct fcoe_port *port; 1654 struct fcoe_hdr *hp; 1655 1656 fr = fcoe_dev_from_skb(skb); 1657 lport = fr->fr_dev; 1658 if (unlikely(!lport)) { 1659 if (skb->destructor != fcoe_percpu_flush_done) 1660 FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb"); 1661 kfree_skb(skb); 1662 return; 1663 } 1664 1665 FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " 1666 "head:%p data:%p tail:%p end:%p sum:%d dev:%s", 1667 skb->len, skb->data_len, 1668 skb->head, skb->data, skb_tail_pointer(skb), 1669 skb_end_pointer(skb), skb->csum, 1670 skb->dev ? skb->dev->name : "<NULL>"); 1671 1672 port = lport_priv(lport); 1673 if (skb_is_nonlinear(skb)) 1674 skb_linearize(skb); /* not ideal */ 1675 1676 /* 1677 * Frame length checks and setting up the header pointers 1678 * was done in fcoe_rcv already. 
1679 */ 1680 hp = (struct fcoe_hdr *) skb_network_header(skb); 1681 1682 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 1683 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { 1684 if (stats->ErrorFrames < 5) 1685 printk(KERN_WARNING "fcoe: FCoE version " 1686 "mismatch: The frame has " 1687 "version %x, but the " 1688 "initiator supports version " 1689 "%x\n", FC_FCOE_DECAPS_VER(hp), 1690 FC_FCOE_VER); 1691 goto drop; 1692 } 1693 1694 skb_pull(skb, sizeof(struct fcoe_hdr)); 1695 fr_len = skb->len - sizeof(struct fcoe_crc_eof); 1696 1697 stats->RxFrames++; 1698 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; 1699 1700 fp = (struct fc_frame *)skb; 1701 fc_frame_init(fp); 1702 fr_dev(fp) = lport; 1703 fr_sof(fp) = hp->fcoe_sof; 1704 1705 /* Copy out the CRC and EOF trailer for access */ 1706 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) 1707 goto drop; 1708 fr_eof(fp) = crc_eof.fcoe_eof; 1709 fr_crc(fp) = crc_eof.fcoe_crc32; 1710 if (pskb_trim(skb, fr_len)) 1711 goto drop; 1712 1713 if (!fcoe_filter_frames(lport, fp)) { 1714 put_cpu(); 1715 fc_exch_recv(lport, fp); 1716 return; 1717 } 1718 drop: 1719 stats->ErrorFrames++; 1720 put_cpu(); 1721 kfree_skb(skb); 1722 } 1723 1724 /** 1725 * fcoe_percpu_receive_thread() - The per-CPU packet receive thread 1726 * @arg: The per-CPU context 1727 * 1728 * Return: 0 for success 1729 */ 1730 int fcoe_percpu_receive_thread(void *arg) 1731 { 1732 struct fcoe_percpu_s *p = arg; 1733 struct sk_buff *skb; 1734 1735 set_user_nice(current, -20); 1736 1737 while (!kthread_should_stop()) { 1738 1739 spin_lock_bh(&p->fcoe_rx_list.lock); 1740 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) { 1741 set_current_state(TASK_INTERRUPTIBLE); 1742 spin_unlock_bh(&p->fcoe_rx_list.lock); 1743 schedule(); 1744 set_current_state(TASK_RUNNING); 1745 if (kthread_should_stop()) 1746 return 0; 1747 spin_lock_bh(&p->fcoe_rx_list.lock); 1748 } 1749 spin_unlock_bh(&p->fcoe_rx_list.lock); 1750 fcoe_recv_frame(skb); 1751 } 1752 return 0; 1753 } 1754 1755 /** 1756 * fcoe_dev_setup() - Setup the link change notification interface 1757 */ 1758 static void fcoe_dev_setup(void) 1759 { 1760 register_netdevice_notifier(&fcoe_notifier); 1761 } 1762 1763 /** 1764 * fcoe_dev_cleanup() - Cleanup the link change notification interface 1765 */ 1766 static void fcoe_dev_cleanup(void) 1767 { 1768 unregister_netdevice_notifier(&fcoe_notifier); 1769 } 1770 1771 /** 1772 * fcoe_device_notification() - Handler for net device events 1773 * @notifier: The context of the notification 1774 * @event: The type of event 1775 * @ptr: The net device that the event was on 1776 * 1777 * This function is called by the Ethernet driver in case of link change event. 
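Besides link up/down it also reacts to NETDEV_CHANGEMTU (recomputing the max frame size unless the device advertises NETIF_F_FCOE_MTU), NETDEV_UNREGISTER (scheduling destruction of the port) and NETDEV_FEAT_CHANGE (refreshing the offload flags), as handled by the switch statement below.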
1778 * 1779 * Returns: 0 for success 1780 */ 1781 static int fcoe_device_notification(struct notifier_block *notifier, 1782 ulong event, void *ptr) 1783 { 1784 struct fc_lport *lport = NULL; 1785 struct net_device *netdev = ptr; 1786 struct fcoe_interface *fcoe; 1787 struct fcoe_port *port; 1788 struct fcoe_dev_stats *stats; 1789 u32 link_possible = 1; 1790 u32 mfs; 1791 int rc = NOTIFY_OK; 1792 1793 list_for_each_entry(fcoe, &fcoe_hostlist, list) { 1794 if (fcoe->netdev == netdev) { 1795 lport = fcoe->ctlr.lp; 1796 break; 1797 } 1798 } 1799 if (!lport) { 1800 rc = NOTIFY_DONE; 1801 goto out; 1802 } 1803 1804 switch (event) { 1805 case NETDEV_DOWN: 1806 case NETDEV_GOING_DOWN: 1807 link_possible = 0; 1808 break; 1809 case NETDEV_UP: 1810 case NETDEV_CHANGE: 1811 break; 1812 case NETDEV_CHANGEMTU: 1813 if (netdev->features & NETIF_F_FCOE_MTU) 1814 break; 1815 mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + 1816 sizeof(struct fcoe_crc_eof)); 1817 if (mfs >= FC_MIN_MAX_FRAME) 1818 fc_set_mfs(lport, mfs); 1819 break; 1820 case NETDEV_REGISTER: 1821 break; 1822 case NETDEV_UNREGISTER: 1823 list_del(&fcoe->list); 1824 port = lport_priv(fcoe->ctlr.lp); 1825 queue_work(fcoe_wq, &port->destroy_work); 1826 goto out; 1827 break; 1828 case NETDEV_FEAT_CHANGE: 1829 fcoe_netdev_features_change(lport, netdev); 1830 break; 1831 default: 1832 FCOE_NETDEV_DBG(netdev, "Unknown event %ld " 1833 "from netdev netlink\n", event); 1834 } 1835 1836 fcoe_link_speed_update(lport); 1837 1838 if (link_possible && !fcoe_link_ok(lport)) 1839 fcoe_ctlr_link_up(&fcoe->ctlr); 1840 else if (fcoe_ctlr_link_down(&fcoe->ctlr)) { 1841 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 1842 stats->LinkFailureCount++; 1843 put_cpu(); 1844 fcoe_clean_pending_queue(lport); 1845 } 1846 out: 1847 return rc; 1848 } 1849 1850 /** 1851 * fcoe_disable() - Disables a FCoE interface 1852 * @netdev : The net_device object the Ethernet interface to create on 1853 * 1854 * Called from fcoe transport. 1855 * 1856 * Returns: 0 for success 1857 */ 1858 static int fcoe_disable(struct net_device *netdev) 1859 { 1860 struct fcoe_interface *fcoe; 1861 int rc = 0; 1862 1863 mutex_lock(&fcoe_config_mutex); 1864 1865 rtnl_lock(); 1866 fcoe = fcoe_hostlist_lookup_port(netdev); 1867 rtnl_unlock(); 1868 1869 if (fcoe) { 1870 fcoe_ctlr_link_down(&fcoe->ctlr); 1871 fcoe_clean_pending_queue(fcoe->ctlr.lp); 1872 } else 1873 rc = -ENODEV; 1874 1875 mutex_unlock(&fcoe_config_mutex); 1876 return rc; 1877 } 1878 1879 /** 1880 * fcoe_enable() - Enables a FCoE interface 1881 * @netdev : The net_device object the Ethernet interface to create on 1882 * 1883 * Called from fcoe transport. 
1884 * 1885 * Returns: 0 for success 1886 */ 1887 static int fcoe_enable(struct net_device *netdev) 1888 { 1889 struct fcoe_interface *fcoe; 1890 int rc = 0; 1891 1892 mutex_lock(&fcoe_config_mutex); 1893 rtnl_lock(); 1894 fcoe = fcoe_hostlist_lookup_port(netdev); 1895 rtnl_unlock(); 1896 1897 if (!fcoe) 1898 rc = -ENODEV; 1899 else if (!fcoe_link_ok(fcoe->ctlr.lp)) 1900 fcoe_ctlr_link_up(&fcoe->ctlr); 1901 1902 mutex_unlock(&fcoe_config_mutex); 1903 return rc; 1904 } 1905 1906 /** 1907 * fcoe_destroy() - Destroy a FCoE interface 1908 * @netdev : The net_device object the Ethernet interface to create on 1909 * 1910 * Called from fcoe transport 1911 * 1912 * Returns: 0 for success 1913 */ 1914 static int fcoe_destroy(struct net_device *netdev) 1915 { 1916 struct fcoe_interface *fcoe; 1917 struct fc_lport *lport; 1918 struct fcoe_port *port; 1919 int rc = 0; 1920 1921 mutex_lock(&fcoe_config_mutex); 1922 rtnl_lock(); 1923 fcoe = fcoe_hostlist_lookup_port(netdev); 1924 if (!fcoe) { 1925 rc = -ENODEV; 1926 goto out_nodev; 1927 } 1928 lport = fcoe->ctlr.lp; 1929 port = lport_priv(lport); 1930 list_del(&fcoe->list); 1931 queue_work(fcoe_wq, &port->destroy_work); 1932 out_nodev: 1933 rtnl_unlock(); 1934 mutex_unlock(&fcoe_config_mutex); 1935 return rc; 1936 } 1937 1938 /** 1939 * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context 1940 * @work: Handle to the FCoE port to be destroyed 1941 */ 1942 static void fcoe_destroy_work(struct work_struct *work) 1943 { 1944 struct fcoe_port *port; 1945 struct fcoe_interface *fcoe; 1946 int npiv = 0; 1947 1948 port = container_of(work, struct fcoe_port, destroy_work); 1949 mutex_lock(&fcoe_config_mutex); 1950 1951 /* set if this is an NPIV port */ 1952 npiv = port->lport->vport ? 1 : 0; 1953 1954 fcoe = port->priv; 1955 fcoe_if_destroy(port->lport); 1956 1957 /* Do not tear down the fcoe interface for NPIV port */ 1958 if (!npiv) 1959 fcoe_interface_cleanup(fcoe); 1960 1961 mutex_unlock(&fcoe_config_mutex); 1962 } 1963 1964 /** 1965 * fcoe_match() - Check if the FCoE is supported on the given netdevice 1966 * @netdev : The net_device object the Ethernet interface to create on 1967 * 1968 * Called from fcoe transport. 1969 * 1970 * Returns: always returns true as this is the default FCoE transport, 1971 * i.e., support all netdevs. 
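 * Other fcoe transports attached through libfcoe may provide more restrictive match routines for the hardware they drive; this software-only default simply accepts any Ethernet device.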
1972 */ 1973 static bool fcoe_match(struct net_device *netdev) 1974 { 1975 return true; 1976 } 1977 1978 /** 1979 * fcoe_create() - Create a fcoe interface 1980 * @netdev : The net_device object the Ethernet interface to create on 1981 * @fip_mode: The FIP mode for this creation 1982 * 1983 * Called from fcoe transport 1984 * 1985 * Returns: 0 for success 1986 */ 1987 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) 1988 { 1989 int rc = 0; 1990 struct fcoe_interface *fcoe; 1991 struct fc_lport *lport; 1992 1993 mutex_lock(&fcoe_config_mutex); 1994 rtnl_lock(); 1995 1996 /* look for existing lport */ 1997 if (fcoe_hostlist_lookup(netdev)) { 1998 rc = -EEXIST; 1999 goto out_nodev; 2000 } 2001 2002 fcoe = fcoe_interface_create(netdev, fip_mode); 2003 if (IS_ERR(fcoe)) { 2004 rc = PTR_ERR(fcoe); 2005 goto out_nodev; 2006 } 2007 2008 lport = fcoe_if_create(fcoe, &netdev->dev, 0); 2009 if (IS_ERR(lport)) { 2010 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", 2011 netdev->name); 2012 rc = -EIO; 2013 rtnl_unlock(); 2014 fcoe_interface_cleanup(fcoe); 2015 goto out_nortnl; 2016 } 2017 2018 /* Make this the "master" N_Port */ 2019 fcoe->ctlr.lp = lport; 2020 2021 /* add to lports list */ 2022 fcoe_hostlist_add(lport); 2023 2024 /* start FIP Discovery and FLOGI */ 2025 lport->boot_time = jiffies; 2026 fc_fabric_login(lport); 2027 if (!fcoe_link_ok(lport)) 2028 fcoe_ctlr_link_up(&fcoe->ctlr); 2029 2030 out_nodev: 2031 rtnl_unlock(); 2032 out_nortnl: 2033 mutex_unlock(&fcoe_config_mutex); 2034 return rc; 2035 } 2036 2037 /** 2038 * fcoe_link_speed_update() - Update the supported and actual link speeds 2039 * @lport: The local port to update speeds for 2040 * 2041 * Returns: 0 if the ethtool query was successful 2042 * -1 if the ethtool query failed 2043 */ 2044 int fcoe_link_speed_update(struct fc_lport *lport) 2045 { 2046 struct net_device *netdev = fcoe_netdev(lport); 2047 struct ethtool_cmd ecmd; 2048 2049 if (!dev_ethtool_get_settings(netdev, &ecmd)) { 2050 lport->link_supported_speeds &= 2051 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); 2052 if (ecmd.supported & (SUPPORTED_1000baseT_Half | 2053 SUPPORTED_1000baseT_Full)) 2054 lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; 2055 if (ecmd.supported & SUPPORTED_10000baseT_Full) 2056 lport->link_supported_speeds |= 2057 FC_PORTSPEED_10GBIT; 2058 switch (ethtool_cmd_speed(&ecmd)) { 2059 case SPEED_1000: 2060 lport->link_speed = FC_PORTSPEED_1GBIT; 2061 break; 2062 case SPEED_10000: 2063 lport->link_speed = FC_PORTSPEED_10GBIT; 2064 break; 2065 } 2066 return 0; 2067 } 2068 return -1; 2069 } 2070 2071 /** 2072 * fcoe_link_ok() - Check if the link is OK for a local port 2073 * @lport: The local port to check link on 2074 * 2075 * Returns: 0 if link is UP and OK, -1 if not 2076 * 2077 */ 2078 int fcoe_link_ok(struct fc_lport *lport) 2079 { 2080 struct net_device *netdev = fcoe_netdev(lport); 2081 2082 if (netif_oper_up(netdev)) 2083 return 0; 2084 return -1; 2085 } 2086 2087 /** 2088 * fcoe_percpu_clean() - Clear all pending skbs for a local port 2089 * @lport: The local port whose skbs are to be cleared 2090 * 2091 * Must be called with fcoe_create_mutex held to single-thread completion. 2092 * 2093 * This flushes the pending skbs by adding a new skb to each queue and 2094 * waiting until they are all freed. This assures us that not only are 2095 * there no packets that will be handled by the lport, but also that any 2096 * threads already handling packets have returned.
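 * The flush is implemented below by queueing a zero-length skb whose destructor is fcoe_percpu_flush_done() (defined earlier in this file); when the per-CPU receive thread frees that skb, the destructor completes fcoe_flush_completion and the waiter moves on to the next CPU.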
2097 */ 2098 void fcoe_percpu_clean(struct fc_lport *lport) 2099 { 2100 struct fcoe_percpu_s *pp; 2101 struct fcoe_rcv_info *fr; 2102 struct sk_buff_head *list; 2103 struct sk_buff *skb, *next; 2104 struct sk_buff *head; 2105 unsigned int cpu; 2106 2107 for_each_possible_cpu(cpu) { 2108 pp = &per_cpu(fcoe_percpu, cpu); 2109 spin_lock_bh(&pp->fcoe_rx_list.lock); 2110 list = &pp->fcoe_rx_list; 2111 head = list->next; 2112 for (skb = head; skb != (struct sk_buff *)list; 2113 skb = next) { 2114 next = skb->next; 2115 fr = fcoe_dev_from_skb(skb); 2116 if (fr->fr_dev == lport) { 2117 __skb_unlink(skb, list); 2118 kfree_skb(skb); 2119 } 2120 } 2121 2122 if (!pp->thread || !cpu_online(cpu)) { 2123 spin_unlock_bh(&pp->fcoe_rx_list.lock); 2124 continue; 2125 } 2126 2127 skb = dev_alloc_skb(0); 2128 if (!skb) { 2129 spin_unlock_bh(&pp->fcoe_rx_list.lock); 2130 continue; 2131 } 2132 skb->destructor = fcoe_percpu_flush_done; 2133 2134 __skb_queue_tail(&pp->fcoe_rx_list, skb); 2135 if (pp->fcoe_rx_list.qlen == 1) 2136 wake_up_process(pp->thread); 2137 spin_unlock_bh(&pp->fcoe_rx_list.lock); 2138 2139 wait_for_completion(&fcoe_flush_completion); 2140 } 2141 } 2142 2143 /** 2144 * fcoe_reset() - Reset a local port 2145 * @shost: The SCSI host associated with the local port to be reset 2146 * 2147 * Returns: Always 0 (return value required by FC transport template) 2148 */ 2149 int fcoe_reset(struct Scsi_Host *shost) 2150 { 2151 struct fc_lport *lport = shost_priv(shost); 2152 struct fcoe_port *port = lport_priv(lport); 2153 struct fcoe_interface *fcoe = port->priv; 2154 2155 fcoe_ctlr_link_down(&fcoe->ctlr); 2156 fcoe_clean_pending_queue(fcoe->ctlr.lp); 2157 if (!fcoe_link_ok(fcoe->ctlr.lp)) 2158 fcoe_ctlr_link_up(&fcoe->ctlr); 2159 return 0; 2160 } 2161 2162 /** 2163 * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device 2164 * @netdev: The net device used as a key 2165 * 2166 * Locking: Must be called with the RTNL mutex held. 2167 * 2168 * Returns: NULL or the FCoE interface 2169 */ 2170 static struct fcoe_interface * 2171 fcoe_hostlist_lookup_port(const struct net_device *netdev) 2172 { 2173 struct fcoe_interface *fcoe; 2174 2175 list_for_each_entry(fcoe, &fcoe_hostlist, list) { 2176 if (fcoe->netdev == netdev) 2177 return fcoe; 2178 } 2179 return NULL; 2180 } 2181 2182 /** 2183 * fcoe_hostlist_lookup() - Find the local port associated with a 2184 * given net device 2185 * @netdev: The netdevice used as a key 2186 * 2187 * Locking: Must be called with the RTNL mutex held 2188 * 2189 * Returns: NULL or the local port 2190 */ 2191 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) 2192 { 2193 struct fcoe_interface *fcoe; 2194 2195 fcoe = fcoe_hostlist_lookup_port(netdev); 2196 return (fcoe) ? fcoe->ctlr.lp : NULL; 2197 }
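/* Illustrative usage note (not part of the original file): both lookup helpers above assume the caller already holds the RTNL mutex, mirroring what fcoe_enable()/fcoe_disable() do, e.g.: rtnl_lock(); fcoe = fcoe_hostlist_lookup_port(netdev); rtnl_unlock(); */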
2198 2199 /** 2200 * fcoe_hostlist_add() - Add the FCoE interface identified by a local 2201 * port to the hostlist 2202 * @lport: The local port that identifies the FCoE interface to be added 2203 * 2204 * Locking: must be called with the RTNL mutex held 2205 * 2206 * Returns: 0 for success 2207 */ 2208 static int fcoe_hostlist_add(const struct fc_lport *lport) 2209 { 2210 struct fcoe_interface *fcoe; 2211 struct fcoe_port *port; 2212 2213 fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport)); 2214 if (!fcoe) { 2215 port = lport_priv(lport); 2216 fcoe = port->priv; 2217 list_add_tail(&fcoe->list, &fcoe_hostlist); 2218 } 2219 return 0; 2220 } 2221 2222 2223 static struct fcoe_transport fcoe_sw_transport = { 2224 .name = {FCOE_TRANSPORT_DEFAULT}, 2225 .attached = false, 2226 .list = LIST_HEAD_INIT(fcoe_sw_transport.list), 2227 .match = fcoe_match, 2228 .create = fcoe_create, 2229 .destroy = fcoe_destroy, 2230 .enable = fcoe_enable, 2231 .disable = fcoe_disable, 2232 }; 2233 2234 /** 2235 * fcoe_init() - Initialize fcoe.ko 2236 * 2237 * Returns: 0 on success, or a negative value on failure 2238 */ 2239 static int __init fcoe_init(void) 2240 { 2241 struct fcoe_percpu_s *p; 2242 unsigned int cpu; 2243 int rc = 0; 2244 2245 fcoe_wq = alloc_workqueue("fcoe", 0, 0); 2246 if (!fcoe_wq) 2247 return -ENOMEM; 2248 2249 /* register as a fcoe transport */ 2250 rc = fcoe_transport_attach(&fcoe_sw_transport); 2251 if (rc) { 2252 printk(KERN_ERR "failed to register an fcoe transport, check " 2253 "if libfcoe is loaded\n"); /* do not leak the workqueue allocated above */ destroy_workqueue(fcoe_wq); 2254 return rc; 2255 } 2256 2257 mutex_lock(&fcoe_config_mutex); 2258 2259 for_each_possible_cpu(cpu) { 2260 p = &per_cpu(fcoe_percpu, cpu); 2261 skb_queue_head_init(&p->fcoe_rx_list); 2262 } 2263 2264 for_each_online_cpu(cpu) 2265 fcoe_percpu_thread_create(cpu); 2266 2267 /* Initialize per CPU interrupt thread */ 2268 rc = register_hotcpu_notifier(&fcoe_cpu_notifier); 2269 if (rc) 2270 goto out_free; 2271 2272 /* Setup link change notification */ 2273 fcoe_dev_setup(); 2274 2275 rc = fcoe_if_init(); 2276 if (rc) 2277 goto out_free; 2278 2279 mutex_unlock(&fcoe_config_mutex); 2280 return 0; 2281 2282 out_free: 2283 for_each_online_cpu(cpu) { 2284 fcoe_percpu_thread_destroy(cpu); 2285 } 2286 mutex_unlock(&fcoe_config_mutex); 2287 destroy_workqueue(fcoe_wq); 2288 return rc; 2289 } 2290 module_init(fcoe_init); 2291 2292 /** 2293 * fcoe_exit() - Clean up fcoe.ko 2294 * 2295 * Called on module unload; queues destruction of all remaining FCoE interfaces. 2296 */ 2297 static void __exit fcoe_exit(void) 2298 { 2299 struct fcoe_interface *fcoe, *tmp; 2300 struct fcoe_port *port; 2301 unsigned int cpu; 2302 2303 mutex_lock(&fcoe_config_mutex); 2304 2305 fcoe_dev_cleanup(); 2306 2307 /* releases the associated fcoe hosts */ 2308 rtnl_lock(); 2309 list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) { 2310 list_del(&fcoe->list); 2311 port = lport_priv(fcoe->ctlr.lp); 2312 queue_work(fcoe_wq, &port->destroy_work); 2313 } 2314 rtnl_unlock(); 2315 2316 unregister_hotcpu_notifier(&fcoe_cpu_notifier); 2317 2318 for_each_online_cpu(cpu) 2319 fcoe_percpu_thread_destroy(cpu); 2320 2321 mutex_unlock(&fcoe_config_mutex); 2322 2323 /* 2324 * Queued destroy_work items may be chained but destroy_workqueue() 2325 * can take care of them. Just kill the fcoe_wq. 2326 */ 2327 destroy_workqueue(fcoe_wq); 2328 2329 /* 2330 * Detaching from the scsi transport must happen after all 2331 * destroys are done on the fcoe_wq. destroy_workqueue will 2332 * ensure the fcoe_wq is flushed.
2333 */ 2334 fcoe_if_exit(); 2335 2336 /* detach from fcoe transport */ 2337 fcoe_transport_detach(&fcoe_sw_transport); 2338 } 2339 module_exit(fcoe_exit); 2340 2341 /** 2342 * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler 2343 * @seq: active sequence in the FLOGI or FDISC exchange 2344 * @fp: response frame, or error encoded in a pointer (timeout) 2345 * @arg: pointer to the fcoe_ctlr structure 2346 * 2347 * This handles MAC address management for FCoE, then passes control on to 2348 * the libfc FLOGI response handler. 2349 */ 2350 static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 2351 { 2352 struct fcoe_ctlr *fip = arg; 2353 struct fc_exch *exch = fc_seq_exch(seq); 2354 struct fc_lport *lport = exch->lp; 2355 u8 *mac; 2356 2357 if (IS_ERR(fp)) 2358 goto done; 2359 2360 mac = fr_cb(fp)->granted_mac; 2361 if (is_zero_ether_addr(mac)) { 2362 /* pre-FIP */ 2363 if (fcoe_ctlr_recv_flogi(fip, lport, fp)) { 2364 fc_frame_free(fp); 2365 return; 2366 } 2367 } 2368 fcoe_update_src_mac(lport, mac); 2369 done: 2370 fc_lport_flogi_resp(seq, fp, lport); 2371 } 2372 2373 /** 2374 * fcoe_logo_resp() - FCoE specific LOGO response handler 2375 * @seq: active sequence in the LOGO exchange 2376 * @fp: response frame, or error encoded in a pointer (timeout) 2377 * @arg: pointer to the fc_lport 2378 * 2379 * This handles MAC address management for FCoE, then passes control on to 2380 * the libfc LOGO response handler. 2381 */ 2382 static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 2383 { 2384 struct fc_lport *lport = arg; 2385 static u8 zero_mac[ETH_ALEN] = { 0 }; 2386 2387 if (!IS_ERR(fp)) 2388 fcoe_update_src_mac(lport, zero_mac); 2389 fc_lport_logo_resp(seq, fp, lport); 2390 } 2391 2392 /** 2393 * fcoe_elsct_send() - FCoE specific ELS handler 2394 * 2395 * This does special case handling of FIP encapsulated ELS exchanges for FCoE, 2396 * using FCoE specific response handlers and passing the FIP controller as 2397 * the argument (the lport is still available from the exchange). 2398 * 2399 * Most of the work here is just handed off to the libfc routine. 2400 */ 2401 static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did, 2402 struct fc_frame *fp, unsigned int op, 2403 void (*resp)(struct fc_seq *, 2404 struct fc_frame *, 2405 void *), 2406 void *arg, u32 timeout) 2407 { 2408 struct fcoe_port *port = lport_priv(lport); 2409 struct fcoe_interface *fcoe = port->priv; 2410 struct fcoe_ctlr *fip = &fcoe->ctlr; 2411 struct fc_frame_header *fh = fc_frame_header_get(fp); 2412 2413 switch (op) { 2414 case ELS_FLOGI: 2415 case ELS_FDISC: 2416 if (lport->point_to_multipoint) 2417 break; 2418 return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp, 2419 fip, timeout); 2420 case ELS_LOGO: 2421 /* only hook onto fabric logouts, not port logouts */ 2422 if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) 2423 break; 2424 return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp, 2425 lport, timeout); 2426 } 2427 return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); 2428 } 2429 2430 /** 2431 * fcoe_vport_create() - create an fc_host/scsi_host for a vport 2432 * @vport: fc_vport object to create a new fc_host for 2433 * @disabled: start the new fc_host in a disabled state by default?
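 * When @disabled is true the vport is left in FC_VPORT_DISABLED and no fabric login is attempted; it can be brought online later through fcoe_vport_disable(vport, false).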
2434 * 2435 * Returns: 0 for success 2436 */ 2437 static int fcoe_vport_create(struct fc_vport *vport, bool disabled) 2438 { 2439 struct Scsi_Host *shost = vport_to_shost(vport); 2440 struct fc_lport *n_port = shost_priv(shost); 2441 struct fcoe_port *port = lport_priv(n_port); 2442 struct fcoe_interface *fcoe = port->priv; 2443 struct net_device *netdev = fcoe->netdev; 2444 struct fc_lport *vn_port; 2445 int rc; 2446 char buf[32]; 2447 2448 rc = fcoe_validate_vport_create(vport); 2449 if (rc) { 2450 wwn_to_str(vport->port_name, buf, sizeof(buf)); 2451 printk(KERN_ERR "fcoe: Failed to create vport, " 2452 "WWPN (0x%s) already exists\n", 2453 buf); 2454 return rc; 2455 } 2456 2457 mutex_lock(&fcoe_config_mutex); 2458 vn_port = fcoe_if_create(fcoe, &vport->dev, 1); 2459 mutex_unlock(&fcoe_config_mutex); 2460 2461 if (IS_ERR(vn_port)) { 2462 printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n", 2463 netdev->name); 2464 return -EIO; 2465 } 2466 2467 if (disabled) { 2468 fc_vport_set_state(vport, FC_VPORT_DISABLED); 2469 } else { 2470 vn_port->boot_time = jiffies; 2471 fc_fabric_login(vn_port); 2472 fc_vport_setlink(vn_port); 2473 } 2474 return 0; 2475 } 2476 2477 /** 2478 * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport 2479 * @vport: fc_vport object that is being destroyed 2480 * 2481 * Returns: 0 for success 2482 */ 2483 static int fcoe_vport_destroy(struct fc_vport *vport) 2484 { 2485 struct Scsi_Host *shost = vport_to_shost(vport); 2486 struct fc_lport *n_port = shost_priv(shost); 2487 struct fc_lport *vn_port = vport->dd_data; 2488 struct fcoe_port *port = lport_priv(vn_port); 2489 2490 mutex_lock(&n_port->lp_mutex); 2491 list_del(&vn_port->list); 2492 mutex_unlock(&n_port->lp_mutex); 2493 queue_work(fcoe_wq, &port->destroy_work); 2494 return 0; 2495 } 2496 2497 /** 2498 * fcoe_vport_disable() - change vport state 2499 * @vport: vport to bring online/offline 2500 * @disable: should the vport be disabled? 2501 */ 2502 static int fcoe_vport_disable(struct fc_vport *vport, bool disable) 2503 { 2504 struct fc_lport *lport = vport->dd_data; 2505 2506 if (disable) { 2507 fc_vport_set_state(vport, FC_VPORT_DISABLED); 2508 fc_fabric_logoff(lport); 2509 } else { 2510 lport->boot_time = jiffies; 2511 fc_fabric_login(lport); 2512 fc_vport_setlink(lport); 2513 } 2514 2515 return 0; 2516 } 2517 2518 /** 2519 * fcoe_set_vport_symbolic_name() - append vport string to symbolic name 2520 * @vport: fc_vport with a new symbolic name string 2521 * 2522 * After generating a new symbolic name string, a new RSPN_ID request is 2523 * sent to the name server. There is no response handler, so if it fails 2524 * for some reason it will not be retried.
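 * The resulting string has the form "<FCOE_NAME> v<FCOE_VERSION> over <netdev> : <vport symbolic name>", for example (illustrative values only) "fcoe v0.1 over eth2 : testvport".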
2525 */ 2526 static void fcoe_set_vport_symbolic_name(struct fc_vport *vport) 2527 { 2528 struct fc_lport *lport = vport->dd_data; 2529 struct fc_frame *fp; 2530 size_t len; 2531 2532 snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, 2533 "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION, 2534 fcoe_netdev(lport)->name, vport->symbolic_name); 2535 2536 if (lport->state != LPORT_ST_READY) 2537 return; 2538 2539 len = strnlen(fc_host_symbolic_name(lport->host), 255); 2540 fp = fc_frame_alloc(lport, 2541 sizeof(struct fc_ct_hdr) + 2542 sizeof(struct fc_ns_rspn) + len); 2543 if (!fp) 2544 return; 2545 lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID, 2546 NULL, NULL, 3 * lport->r_a_tov); 2547 } 2548 2549 /** 2550 * fcoe_get_lesb() - Fill the FCoE Link Error Status Block 2551 * @lport: the local port 2552 * @fc_lesb: the link error status block 2553 */ 2554 static void fcoe_get_lesb(struct fc_lport *lport, 2555 struct fc_els_lesb *fc_lesb) 2556 { 2557 unsigned int cpu; 2558 u32 lfc, vlfc, mdac; 2559 struct fcoe_dev_stats *devst; 2560 struct fcoe_fc_els_lesb *lesb; 2561 struct rtnl_link_stats64 temp; 2562 struct net_device *netdev = fcoe_netdev(lport); 2563 2564 lfc = 0; 2565 vlfc = 0; 2566 mdac = 0; 2567 lesb = (struct fcoe_fc_els_lesb *)fc_lesb; 2568 memset(lesb, 0, sizeof(*lesb)); 2569 for_each_possible_cpu(cpu) { 2570 devst = per_cpu_ptr(lport->dev_stats, cpu); 2571 lfc += devst->LinkFailureCount; 2572 vlfc += devst->VLinkFailureCount; 2573 mdac += devst->MissDiscAdvCount; 2574 } 2575 lesb->lesb_link_fail = htonl(lfc); 2576 lesb->lesb_vlink_fail = htonl(vlfc); 2577 lesb->lesb_miss_fka = htonl(mdac); 2578 lesb->lesb_fcs_error = htonl(dev_get_stats(netdev, &temp)->rx_crc_errors); 2579 } 2580 2581 /** 2582 * fcoe_set_port_id() - Callback from libfc when Port_ID is set. 2583 * @lport: the local port 2584 * @port_id: the port ID 2585 * @fp: the received frame, if any, that caused the port_id to be set. 2586 * 2587 * This routine handles the case where we received a FLOGI and are 2588 * entering point-to-point mode. We need to call fcoe_ctlr_recv_flogi() 2589 * so it can set the non-mapped mode and gateway address. 2590 * 2591 * The FLOGI LS_ACC is handled by fcoe_flogi_resp(). 2592 */ 2593 static void fcoe_set_port_id(struct fc_lport *lport, 2594 u32 port_id, struct fc_frame *fp) 2595 { 2596 struct fcoe_port *port = lport_priv(lport); 2597 struct fcoe_interface *fcoe = port->priv; 2598 2599 if (fp && fc_frame_payload_op(fp) == ELS_FLOGI) 2600 fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp); 2601 } 2602 2603 /** 2604 * fcoe_validate_vport_create() - Validate a vport before creating it 2605 * @vport: NPIV port to be created 2606 * 2607 * This routine is meant to add validation for a vport before creating it 2608 * via fcoe_vport_create(). 
2609 * Current validations are: 2610 * - The supplied WWPN is unique for the given lport 2611 * 2612 * 2613 */ 2614 static int fcoe_validate_vport_create(struct fc_vport *vport) 2615 { 2616 struct Scsi_Host *shost = vport_to_shost(vport); 2617 struct fc_lport *n_port = shost_priv(shost); 2618 struct fc_lport *vn_port; 2619 int rc = 0; 2620 char buf[32]; 2621 2622 mutex_lock(&n_port->lp_mutex); 2623 2624 wwn_to_str(vport->port_name, buf, sizeof(buf)); 2625 /* Check that the wwpn is not the same as that of the lport */ 2626 if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) { 2627 FCOE_DBG("vport WWPN 0x%s is the same as that of the " 2628 "base port WWPN\n", buf); 2629 rc = -EINVAL; 2630 goto out; 2631 } 2632 2633 /* Check if there is any existing vport with the same wwpn */ 2634 list_for_each_entry(vn_port, &n_port->vports, list) { 2635 if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) { 2636 FCOE_DBG("vport with given WWPN 0x%s already " 2637 "exists\n", buf); 2638 rc = -EINVAL; 2639 break; 2640 } 2641 } 2642 2643 out: 2644 mutex_unlock(&n_port->lp_mutex); 2645 2646 return rc; 2647 } 2648
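/* Illustrative note (not from the original source): fcoe_vport_create() above is reached through the FC transport class when userspace requests an NPIV port, typically by writing "<wwpn>:<wwnn>" to the fc_host vport_create sysfs attribute (the exact path and format belong to scsi_transport_fc, not this file); fcoe_validate_vport_create() then rejects a WWPN that duplicates the base port or an existing vport. */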