/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <net/dcbnl.h>
#include <net/dcbevent.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <net/rtnetlink.h>

#include <scsi/fc/fc_encaps.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_fcoe.h>

#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include <scsi/libfcoe.h>

#include "fcoe.h"

MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FCoE");
MODULE_LICENSE("GPL v2");

/* Performance tuning parameters for fcoe */
static unsigned int fcoe_ddp_min = 4096;
module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for "	\
		 "Direct Data Placement (DDP).");

unsigned int fcoe_debug_logging;
module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
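/*
 * Editor's illustration (not part of the original source): both parameters
 * can be set at module load time or, because of S_IWUSR, adjusted at
 * runtime through the standard module parameter sysfs entries, e.g.
 *
 *   modprobe fcoe ddp_min=8192 debug_logging=1
 *   echo 1 > /sys/module/fcoe/parameters/debug_logging
 *
 * The sysfs path assumes the usual module_param layout.
 */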
static DEFINE_MUTEX(fcoe_config_mutex);

static struct workqueue_struct *fcoe_wq;

/* fcoe_percpu_clean completion.  Waiter protected by fcoe_create_mutex */
static DECLARE_COMPLETION(fcoe_flush_completion);

/* fcoe host list */
/* must only be accessed under the RTNL mutex */
static LIST_HEAD(fcoe_hostlist);
static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);

/* Function Prototypes */
static int fcoe_reset(struct Scsi_Host *);
static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
static int fcoe_rcv(struct sk_buff *, struct net_device *,
		    struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *);
static void fcoe_percpu_clean(struct fc_lport *);
static int fcoe_link_speed_update(struct fc_lport *);
static int fcoe_link_ok(struct fc_lport *);

static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);

static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);
static struct fcoe_interface
*fcoe_hostlist_lookup_port(const struct net_device *);

static int fcoe_fip_recv(struct sk_buff *, struct net_device *,
			 struct packet_type *, struct net_device *);

static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *);
static void fcoe_update_src_mac(struct fc_lport *, u8 *);
static u8 *fcoe_get_src_mac(struct fc_lport *);
static void fcoe_destroy_work(struct work_struct *);

static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
			  unsigned int);
static int fcoe_ddp_done(struct fc_lport *, u16);
static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
			   unsigned int);
static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
static int fcoe_dcb_app_notification(struct notifier_block *notifier,
				     ulong event, void *ptr);

static bool fcoe_match(struct net_device *netdev);
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
static int fcoe_destroy(struct net_device *netdev);
static int fcoe_enable(struct net_device *netdev);
static int fcoe_disable(struct net_device *netdev);

static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
				      u32 did, struct fc_frame *,
				      unsigned int op,
				      void (*resp)(struct fc_seq *,
						   struct fc_frame *,
						   void *),
				      void *, u32 timeout);
static void fcoe_recv_frame(struct sk_buff *skb);

static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);

/* notification function for packets from net device */
static struct notifier_block fcoe_notifier = {
	.notifier_call = fcoe_device_notification,
};

/* notification function for CPU hotplug events */
static struct notifier_block fcoe_cpu_notifier = {
	.notifier_call = fcoe_cpu_callback,
};

/* notification function for DCB events */
static struct notifier_block dcb_notifier = {
	.notifier_call = fcoe_dcb_app_notification,
};

static struct scsi_transport_template *fcoe_nport_scsi_transport;
static struct scsi_transport_template *fcoe_vport_scsi_transport;

static int fcoe_vport_destroy(struct fc_vport *);
static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);

static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
	.get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
	.get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
	.get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
	.get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
	.get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
	.get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
	.get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,

	.get_fcoe_fcf_selected = fcoe_fcf_get_selected,
	.get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
};

static struct libfc_function_template fcoe_libfc_fcn_templ = {
	.frame_send = fcoe_xmit,
	.ddp_setup = fcoe_ddp_setup,
	.ddp_done = fcoe_ddp_done,
	.ddp_target = fcoe_ddp_target,
	.elsct_send = fcoe_elsct_send,
	.get_lesb = fcoe_get_lesb,
	.lport_set_port_id = fcoe_set_port_id,
};

static struct fc_function_template fcoe_nport_fc_functions = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,
	.show_host_serial_number = 1,
	.show_host_manufacturer = 1,
	.show_host_model = 1,
	.show_host_model_description = 1,
	.show_host_hardware_version = 1,
	.show_host_driver_version = 1,
	.show_host_firmware_version = 1,
	.show_host_optionrom_version = 1,

	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fc_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,

	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,
	.get_fc_host_stats = fc_get_host_stats,
	.issue_fc_host_lip = fcoe_reset,

	.terminate_rport_io = fc_rport_terminate_io,

	.vport_create = fcoe_vport_create,
	.vport_delete = fcoe_vport_destroy,
	.vport_disable = fcoe_vport_disable,
	.set_vport_symbolic_name = fcoe_set_vport_symbolic_name,

	.bsg_request = fc_lport_bsg_request,
};

static struct fc_function_template fcoe_vport_fc_functions = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,
	.show_host_serial_number = 1,
	.show_host_manufacturer = 1,
	.show_host_model = 1,
	.show_host_model_description = 1,
	.show_host_hardware_version = 1,
	.show_host_driver_version = 1,
	.show_host_firmware_version = 1,
	.show_host_optionrom_version = 1,

	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fc_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,

	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,
	.get_fc_host_stats = fc_get_host_stats,
	.issue_fc_host_lip = fcoe_reset,

	.terminate_rport_io = fc_rport_terminate_io,

	.bsg_request = fc_lport_bsg_request,
};

static struct scsi_host_template fcoe_shost_template = {
	.module = THIS_MODULE,
	.name = "FCoE Driver",
	.proc_name = FCOE_NAME,
	.queuecommand = fc_queuecommand,
	.eh_abort_handler = fc_eh_abort,
	.eh_device_reset_handler = fc_eh_device_reset,
	.eh_host_reset_handler = fc_eh_host_reset,
	.slave_alloc = fc_slave_alloc,
	.change_queue_depth = fc_change_queue_depth,
	.change_queue_type = fc_change_queue_type,
	.this_id = -1,
	.cmd_per_lun = 3,
	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SG_ALL,
	.max_sectors = 0xffff,
};

/**
 * fcoe_interface_setup() - Set up a FCoE interface
 * @fcoe:   The new FCoE interface
 * @netdev: The net device that the fcoe interface is on
 *
 * Returns: 0 for success
 * Locking: must be called with the RTNL mutex held
 */
static int fcoe_interface_setup(struct fcoe_interface *fcoe,
				struct net_device *netdev)
{
	struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
	struct netdev_hw_addr *ha;
	struct net_device *real_dev;
	u8 flogi_maddr[ETH_ALEN];
	const struct net_device_ops *ops;

	fcoe->netdev = netdev;

	/* Let LLD initialize for FCoE */
	ops = netdev->netdev_ops;
	if (ops->ndo_fcoe_enable) {
		if (ops->ndo_fcoe_enable(netdev))
			FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE"
					" specific feature for LLD.\n");
	}

	/* Bonding devices are not supported */
	if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) {
		FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
		return -EOPNOTSUPP;
	}

	/* look for SAN MAC address, if multiple SAN MACs exist, only
	 * use the first one for SPMA */
	real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
		vlan_dev_real_dev(netdev) : netdev;
	fcoe->realdev = real_dev;
	rcu_read_lock();
	for_each_dev_addr(real_dev, ha) {
		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
		    (is_valid_ether_addr(ha->addr))) {
			memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
			fip->spma = 1;
			break;
		}
	}
	rcu_read_unlock();

	/* setup Source Mac Address */
	if (!fip->spma)
		memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);
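	/*
	 * Editor's note (illustrative, not part of the original source): the
	 * SAN-MAC scan above decides between the two FCoE addressing modes.
	 * If the underlying LLD exposes a dedicated SAN MAC
	 * (NETDEV_HW_ADDR_T_SAN), that address is used as the control source
	 * address and Server-Provided MAC Addressing (SPMA) is selected;
	 * otherwise the netdev's own MAC is used and the fabric assigns the
	 * data-plane addresses (FPMA).
	 */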
	/*
	 * Add FCoE MAC address as second unicast MAC address
	 * or enter promiscuous mode if not capable of listening
	 * for multiple unicast MACs.
	 */
	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
	dev_uc_add(netdev, flogi_maddr);
	if (fip->spma)
		dev_uc_add(netdev, fip->ctl_src_addr);
	if (fip->mode == FIP_MODE_VN2VN) {
		dev_mc_add(netdev, FIP_ALL_VN2VN_MACS);
		dev_mc_add(netdev, FIP_ALL_P2P_MACS);
	} else
		dev_mc_add(netdev, FIP_ALL_ENODE_MACS);

	/*
	 * setup the receive function from ethernet driver
	 * on the ethertype for the given device
	 */
	fcoe->fcoe_packet_type.func = fcoe_rcv;
	fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
	fcoe->fcoe_packet_type.dev = netdev;
	dev_add_pack(&fcoe->fcoe_packet_type);

	fcoe->fip_packet_type.func = fcoe_fip_recv;
	fcoe->fip_packet_type.type = htons(ETH_P_FIP);
	fcoe->fip_packet_type.dev = netdev;
	dev_add_pack(&fcoe->fip_packet_type);

	return 0;
}

/**
 * fcoe_interface_create() - Create a FCoE interface on a net device
 * @netdev:   The net device to create the FCoE interface on
 * @fip_mode: The mode to use for FIP
 *
 * Returns: pointer to a struct fcoe_interface or an ERR_PTR on error
 */
static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
						    enum fip_state fip_mode)
{
	struct fcoe_ctlr_device *ctlr_dev;
	struct fcoe_ctlr *ctlr;
	struct fcoe_interface *fcoe;
	int size;
	int err;

	if (!try_module_get(THIS_MODULE)) {
		FCOE_NETDEV_DBG(netdev,
				"Could not get a reference to the module\n");
		fcoe = ERR_PTR(-EBUSY);
		goto out;
	}

	size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
	ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
					size);
	if (!ctlr_dev) {
		FCOE_DBG("Failed to add fcoe_ctlr_device\n");
		fcoe = ERR_PTR(-ENOMEM);
		goto out_putmod;
	}

	ctlr = fcoe_ctlr_device_priv(ctlr_dev);
	fcoe = fcoe_ctlr_priv(ctlr);

	dev_hold(netdev);

	/*
	 * Initialize FIP.
	 */
	fcoe_ctlr_init(ctlr, fip_mode);
	ctlr->send = fcoe_fip_send;
	ctlr->update_mac = fcoe_update_src_mac;
	ctlr->get_src_addr = fcoe_get_src_mac;

	err = fcoe_interface_setup(fcoe, netdev);
	if (err) {
		fcoe_ctlr_destroy(ctlr);
		fcoe_ctlr_device_delete(ctlr_dev);
		dev_put(netdev);
		fcoe = ERR_PTR(err);
		goto out_putmod;
	}

	goto out;

out_putmod:
	module_put(THIS_MODULE);
out:
	return fcoe;
}

/**
 * fcoe_interface_remove() - remove FCoE interface from netdev
 * @fcoe: The FCoE interface to be cleaned up
 *
 * Caller must be holding the RTNL mutex
 */
static void fcoe_interface_remove(struct fcoe_interface *fcoe)
{
	struct net_device *netdev = fcoe->netdev;
	struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
	u8 flogi_maddr[ETH_ALEN];
	const struct net_device_ops *ops;

	/*
	 * Don't listen for Ethernet packets anymore.
	 * synchronize_net() ensures that the packet handlers are not running
	 * on another CPU.  dev_remove_pack() would do that; this calls the
	 * unsynchronized version __dev_remove_pack() to avoid multiple delays.
	 */
	__dev_remove_pack(&fcoe->fcoe_packet_type);
	__dev_remove_pack(&fcoe->fip_packet_type);
	synchronize_net();

	/* Delete secondary MAC addresses */
	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
	dev_uc_del(netdev, flogi_maddr);
	if (fip->spma)
		dev_uc_del(netdev, fip->ctl_src_addr);
	if (fip->mode == FIP_MODE_VN2VN) {
		dev_mc_del(netdev, FIP_ALL_VN2VN_MACS);
		dev_mc_del(netdev, FIP_ALL_P2P_MACS);
	} else
		dev_mc_del(netdev, FIP_ALL_ENODE_MACS);

	/* Tell the LLD we are done w/ FCoE */
	ops = netdev->netdev_ops;
	if (ops->ndo_fcoe_disable) {
		if (ops->ndo_fcoe_disable(netdev))
			FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
					" specific feature for LLD.\n");
	}
	fcoe->removed = 1;
}


/**
 * fcoe_interface_cleanup() - Clean up a FCoE interface
 * @fcoe: The FCoE interface to be cleaned up
 */
static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
	struct net_device *netdev = fcoe->netdev;
	struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);

	rtnl_lock();
	if (!fcoe->removed)
		fcoe_interface_remove(fcoe);
	rtnl_unlock();

	/* Release the self-reference taken during fcoe_interface_create() */
	/* tear-down the FCoE controller */
	fcoe_ctlr_destroy(fip);
	scsi_host_put(fip->lp->host);
	fcoe_ctlr_device_delete(ctlr_dev);
	dev_put(netdev);
	module_put(THIS_MODULE);
}

/**
 * fcoe_fip_recv() - Handler for received FIP frames
 * @skb:      The receive skb
 * @netdev:   The associated net device
 * @ptype:    The packet_type structure which was used to register this handler
 * @orig_dev: The original net_device the skb was received on
 *	      (in case dev is a bond)
 *
 * Returns: 0 for success
 */
static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
			 struct packet_type *ptype,
			 struct net_device *orig_dev)
{
	struct fcoe_interface *fcoe;
	struct fcoe_ctlr *ctlr;

	fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
	ctlr = fcoe_to_ctlr(fcoe);
	fcoe_ctlr_recv(ctlr, skb);
	return 0;
}

/**
 * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame
 * @port: The FCoE port
 * @skb:  The FIP/FCoE packet to be sent
 */
static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
{
	if (port->fcoe_pending_queue.qlen)
		fcoe_check_wait_queue(port->lport, skb);
	else if (fcoe_start_io(skb))
		fcoe_check_wait_queue(port->lport, skb);
}

/**
 * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
 * @fip: The FCoE controller
 * @skb: The FIP packet to be sent
 */
static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	skb->dev = fcoe_from_ctlr(fip)->netdev;
	fcoe_port_send(lport_priv(fip->lp), skb);
}

/**
 * fcoe_update_src_mac() - Update the Ethernet MAC filters
 * @lport: The local port to update the source MAC on
 * @addr:  Unicast MAC address to add
 *
 * Remove any previously-set unicast MAC filter.
 * Add secondary FCoE MAC address filter for our OUI.
 */
static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
{
	struct fcoe_port *port = lport_priv(lport);
	struct fcoe_interface *fcoe = port->priv;

	if (!is_zero_ether_addr(port->data_src_addr))
		dev_uc_del(fcoe->netdev, port->data_src_addr);
	if (!is_zero_ether_addr(addr))
		dev_uc_add(fcoe->netdev, addr);
	memcpy(port->data_src_addr, addr, ETH_ALEN);
}

/**
 * fcoe_get_src_mac() - return the Ethernet source address for an lport
 * @lport: libfc lport
 */
static u8 *fcoe_get_src_mac(struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);

	return port->data_src_addr;
}

/**
 * fcoe_lport_config() - Set up a local port
 * @lport: The local port to be setup
 *
 * Returns: 0 for success
 */
static int fcoe_lport_config(struct fc_lport *lport)
{
	lport->link_up = 0;
	lport->qfull = 0;
	lport->max_retry_count = 3;
	lport->max_rport_retry_count = 3;
	lport->e_d_tov = 2 * 1000;	/* FC-FS default */
	lport->r_a_tov = 2 * 2 * 1000;
	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
				 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
	lport->does_npiv = 1;

	fc_lport_init_stats(lport);

	/* lport fc_lport related configuration */
	fc_lport_config(lport);

	/* offload related configuration */
	lport->crc_offload = 0;
	lport->seq_offload = 0;
	lport->lro_enabled = 0;
	lport->lro_xid = 0;
	lport->lso_max = 0;

	return 0;
}

/**
 * fcoe_netdev_features_change() - Update the lport's offload flags based
 *				   on the LLD netdev's FCoE feature flags
 */
static void fcoe_netdev_features_change(struct fc_lport *lport,
					struct net_device *netdev)
{
	mutex_lock(&lport->lp_mutex);

	if (netdev->features & NETIF_F_SG)
		lport->sg_supp = 1;
	else
		lport->sg_supp = 0;

	if (netdev->features & NETIF_F_FCOE_CRC) {
		lport->crc_offload = 1;
		FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
	} else {
		lport->crc_offload = 0;
	}

	if (netdev->features & NETIF_F_FSO) {
		lport->seq_offload = 1;
		lport->lso_max = netdev->gso_max_size;
		FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
				lport->lso_max);
	} else {
		lport->seq_offload = 0;
		lport->lso_max = 0;
	}

	if (netdev->fcoe_ddp_xid) {
		lport->lro_enabled = 1;
		lport->lro_xid = netdev->fcoe_ddp_xid;
		FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
				lport->lro_xid);
	} else {
		lport->lro_enabled = 0;
		lport->lro_xid = 0;
	}

	mutex_unlock(&lport->lp_mutex);
}

/**
 * fcoe_netdev_config() - Set up a net device for SW FCoE
 * @lport:  The local port that is associated with the net device
 * @netdev: The associated net device
 *
 * Must be called after fcoe_lport_config() as it will use local port mutex
 *
 * Returns: 0 for success
 */
static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
{
	u32 mfs;
	u64 wwnn, wwpn;
	struct fcoe_interface *fcoe;
	struct fcoe_ctlr *ctlr;
	struct fcoe_port *port;

	/* Setup lport private data to point to fcoe softc */
	port = lport_priv(lport);
	fcoe = port->priv;
	ctlr = fcoe_to_ctlr(fcoe);

	/*
	 * Determine max frame size based on underlying device and optional
	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
	 * will return 0, so do this first.
	 */
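	/*
	 * Worked example (editor's illustration, not in the original source):
	 * if the netdev advertises NETIF_F_FCOE_MTU, mfs is derived from
	 * FCOE_MTU (2158 bytes).  Assuming the usual 14-byte FCoE
	 * encapsulation header and 8-byte CRC/EOF trailer, that gives
	 * 2158 - 14 - 8 = 2136 bytes, i.e. a full FC frame (24-byte header
	 * plus 2112 bytes of payload).
	 */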
	mfs = netdev->mtu;
	if (netdev->features & NETIF_F_FCOE_MTU) {
		mfs = FCOE_MTU;
		FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs);
	}
	mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
	if (fc_set_mfs(lport, mfs))
		return -EINVAL;

	/* offload features support */
	fcoe_netdev_features_change(lport, netdev);

	skb_queue_head_init(&port->fcoe_pending_queue);
	port->fcoe_pending_queue_active = 0;
	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);

	fcoe_link_speed_update(lport);

	if (!lport->vport) {
		if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
			wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
		fc_set_wwnn(lport, wwnn);
		if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
			wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
						 2, 0);
		fc_set_wwpn(lport, wwpn);
	}

	return 0;
}

/**
 * fcoe_shost_config() - Set up the SCSI host associated with a local port
 * @lport: The local port
 * @dev:   The device associated with the SCSI host
 *
 * Must be called after fcoe_lport_config() and fcoe_netdev_config()
 *
 * Returns: 0 for success
 */
static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
{
	int rc = 0;

	/* lport scsi host config */
	lport->host->max_lun = FCOE_MAX_LUN;
	lport->host->max_id = FCOE_MAX_FCP_TARGET;
	lport->host->max_channel = 0;
	lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;

	if (lport->vport)
		lport->host->transportt = fcoe_vport_scsi_transport;
	else
		lport->host->transportt = fcoe_nport_scsi_transport;

	/* add the new host to the SCSI-ml */
	rc = scsi_add_host(lport->host, dev);
	if (rc) {
		FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: "
				"error on scsi_add_host\n");
		return rc;
	}

	if (!lport->vport)
		fc_host_max_npiv_vports(lport->host) = USHRT_MAX;

	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
		 "%s v%s over %s", FCOE_NAME, FCOE_VERSION,
		 fcoe_netdev(lport)->name);

	return 0;
}


/**
 * fcoe_fdmi_info() - Get FDMI-related info from the net device for SW FCoE
 * @lport:  The local port that is associated with the net device
 * @netdev: The associated net device
 *
 * Must be called after fcoe_shost_config() as it will use local port mutex
 *
 */
static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev)
{
	struct fcoe_interface *fcoe;
	struct fcoe_port *port;
	struct net_device *realdev;
	int rc;
	struct netdev_fcoe_hbainfo fdmi;

	port = lport_priv(lport);
	fcoe = port->priv;
	realdev = fcoe->realdev;

	if (!realdev)
		return;

	/* No FDMI state machine for NPIV ports */
	if (lport->vport)
		return;

	if (realdev->netdev_ops->ndo_fcoe_get_hbainfo) {
		memset(&fdmi, 0, sizeof(fdmi));
		rc = realdev->netdev_ops->ndo_fcoe_get_hbainfo(realdev,
							       &fdmi);
		if (rc) {
			printk(KERN_INFO "fcoe: Failed to retrieve FDMI "
			       "information from netdev.\n");
			return;
		}

		snprintf(fc_host_serial_number(lport->host),
			 FC_SERIAL_NUMBER_SIZE,
			 "%s",
			 fdmi.serial_number);
		snprintf(fc_host_manufacturer(lport->host),
			 FC_SERIAL_NUMBER_SIZE,
			 "%s",
			 fdmi.manufacturer);
		snprintf(fc_host_model(lport->host),
			 FC_SYMBOLIC_NAME_SIZE,
			 "%s",
			 fdmi.model);
		snprintf(fc_host_model_description(lport->host),
			 FC_SYMBOLIC_NAME_SIZE,
			 "%s",
			 fdmi.model_description);
		snprintf(fc_host_hardware_version(lport->host),
			 FC_VERSION_STRING_SIZE,
			 "%s",
			 fdmi.hardware_version);
		snprintf(fc_host_driver_version(lport->host),
			 FC_VERSION_STRING_SIZE,
			 "%s",
			 fdmi.driver_version);
		snprintf(fc_host_optionrom_version(lport->host),
			 FC_VERSION_STRING_SIZE,
			 "%s",
			 fdmi.optionrom_version);
		snprintf(fc_host_firmware_version(lport->host),
			 FC_VERSION_STRING_SIZE,
			 "%s",
			 fdmi.firmware_version);

		/* Enable FDMI lport states */
		lport->fdmi_enabled = 1;
	} else {
		lport->fdmi_enabled = 0;
		printk(KERN_INFO "fcoe: No FDMI support.\n");
	}
}

/**
 * fcoe_oem_match() - The match routine for the offloaded exchange manager
 * @fp: The I/O frame
 *
 * This routine will be associated with an exchange manager (EM).  When
 * the libfc exchange handling code is looking for an EM to use it will
 * call this routine and pass it the frame that it wishes to send.  This
 * routine will return True if the associated EM is to be used and False
 * if the exchange code should continue looking for an EM.
 *
 * The offload EM that this routine is associated with will handle any
 * packets that are for SCSI read requests.
 *
 * This has been enhanced to work when the FCoE stack is operating in target
 * mode.
 *
 * Returns: True for read type I/O, otherwise returns false.
 */
static bool fcoe_oem_match(struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fcp_cmnd *fcp;

	if (fc_fcp_is_read(fr_fsp(fp)) &&
	    (fr_fsp(fp)->data_len > fcoe_ddp_min))
		return true;
	else if ((fr_fsp(fp) == NULL) &&
		 (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) &&
		 (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
		fcp = fc_frame_payload_get(fp, sizeof(*fcp));
		if ((fcp->fc_flags & FCP_CFL_WRDATA) &&
		    (ntohl(fcp->fc_dl) > fcoe_ddp_min))
			return true;
	}
	return false;
}
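/*
 * Editor's illustration (not in the original source): with the default
 * ddp_min of 4096 bytes, a 64 KiB SCSI READ is steered to the offload EM
 * by the match routine above, while a 512-byte READ (or any non-FCP frame)
 * falls through so a regular exchange manager handles it.  In target mode
 * the same threshold is applied to incoming unsolicited write commands via
 * their FCP_DL field.
 */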
/**
 * fcoe_em_config() - Allocate and configure an exchange manager
 * @lport: The local port that the new EM will be associated with
 *
 * Returns: 0 on success
 */
static inline int fcoe_em_config(struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);
	struct fcoe_interface *fcoe = port->priv;
	struct fcoe_interface *oldfcoe = NULL;
	struct net_device *old_real_dev, *cur_real_dev;
	u16 min_xid = FCOE_MIN_XID;
	u16 max_xid = FCOE_MAX_XID;

	/*
	 * Check if we need to allocate an EM instance for
	 * offload exchange IDs to be shared across all VN_PORTs/lport.
	 */
	if (!lport->lro_enabled || !lport->lro_xid ||
	    (lport->lro_xid >= max_xid)) {
		lport->lro_xid = 0;
		goto skip_oem;
	}

	/*
	 * Reuse existing offload em instance in case
	 * it is already allocated on real eth device
	 */
	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
		cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
	else
		cur_real_dev = fcoe->netdev;

	list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
		if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
			old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
		else
			old_real_dev = oldfcoe->netdev;

		if (cur_real_dev == old_real_dev) {
			fcoe->oem = oldfcoe->oem;
			break;
		}
	}

	if (fcoe->oem) {
		if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) {
			printk(KERN_ERR "fcoe_em_config: failed to add "
			       "offload em:%p on interface:%s\n",
			       fcoe->oem, fcoe->netdev->name);
			return -ENOMEM;
		}
	} else {
		fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3,
					      FCOE_MIN_XID, lport->lro_xid,
					      fcoe_oem_match);
		if (!fcoe->oem) {
			printk(KERN_ERR "fcoe_em_config: failed to allocate "
			       "em for offload exches on interface:%s\n",
			       fcoe->netdev->name);
			return -ENOMEM;
		}
	}

	/*
	 * Exclude offload EM xid range from next EM xid range.
	 */
	min_xid += lport->lro_xid + 1;

skip_oem:
	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) {
		printk(KERN_ERR "fcoe_em_config: failed to "
		       "allocate em on interface %s\n", fcoe->netdev->name);
		return -ENOMEM;
	}

	return 0;
}

/**
 * fcoe_if_destroy() - Tear down a SW FCoE instance
 * @lport: The local port to be destroyed
 *
 */
static void fcoe_if_destroy(struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);
	struct fcoe_interface *fcoe = port->priv;
	struct net_device *netdev = fcoe->netdev;

	FCOE_NETDEV_DBG(netdev, "Destroying interface\n");

	/* Logout of the fabric */
	fc_fabric_logoff(lport);

	/* Cleanup the fc_lport */
	fc_lport_destroy(lport);

	/* Stop the transmit retry timer */
	del_timer_sync(&port->timer);

	/* Free existing transmit skbs */
	fcoe_clean_pending_queue(lport);

	rtnl_lock();
	if (!is_zero_ether_addr(port->data_src_addr))
		dev_uc_del(netdev, port->data_src_addr);
	if (lport->vport)
		synchronize_net();
	else
		fcoe_interface_remove(fcoe);
	rtnl_unlock();

	/* Free queued packets for the per-CPU receive threads */
	fcoe_percpu_clean(lport);

	/* Detach from the scsi-ml */
	fc_remove_host(lport->host);
	scsi_remove_host(lport->host);

	/* Destroy lport scsi_priv */
	fc_fcp_destroy(lport);

	/* There are no more rports or I/O, free the EM */
	fc_exch_mgr_free(lport);

	/* Free memory used by statistical counters */
	fc_lport_free_stats(lport);

	/*
	 * Release the Scsi_Host for a vport, but hold on to the
	 * master lport until its fcoe interface is fully cleaned up.
	 */
	if (lport->vport)
		scsi_host_put(lport->host);
}

/**
 * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device
 * @lport: The local port to setup DDP for
 * @xid:   The exchange ID for this DDP transfer
 * @sgl:   The scatterlist describing this transfer
 * @sgc:   The number of sg items
 *
 * Returns: 0 if the DDP context was not configured
 */
static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	struct net_device *netdev = fcoe_netdev(lport);

	if (netdev->netdev_ops->ndo_fcoe_ddp_setup)
		return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev,
							      xid, sgl,
							      sgc);

	return 0;
}

/**
 * fcoe_ddp_target() - Call a LLD's ddp_target through the net device
 * @lport: The local port to setup DDP for
 * @xid:   The exchange ID for this DDP transfer
 * @sgl:   The scatterlist describing this transfer
 * @sgc:   The number of sg items
 *
 * Returns: 0 if the DDP context was not configured
 */
static int fcoe_ddp_target(struct fc_lport *lport, u16 xid,
			   struct scatterlist *sgl, unsigned int sgc)
{
	struct net_device *netdev = fcoe_netdev(lport);

	if (netdev->netdev_ops->ndo_fcoe_ddp_target)
		return netdev->netdev_ops->ndo_fcoe_ddp_target(netdev, xid,
							       sgl, sgc);

	return 0;
}


/**
 * fcoe_ddp_done() - Call a LLD's ddp_done through the net device
 * @lport: The local port to complete DDP on
 * @xid:   The exchange ID for this DDP transfer
 *
 * Returns: the length of data that has been completed by DDP
 */
static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
{
	struct net_device *netdev = fcoe_netdev(lport);

	if (netdev->netdev_ops->ndo_fcoe_ddp_done)
		return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
	return 0;
}

/**
 * fcoe_if_create() - Create a FCoE instance on an interface
 * @fcoe:   The FCoE interface to create a local port on
 * @parent: The device pointer to be the parent in sysfs for the SCSI host
 * @npiv:   Indicates if the port is a vport or not
 *
 * Creates a fc_lport instance and a Scsi_Host instance and configures them.
 *
 * Returns: The allocated fc_lport or an error pointer
 */
static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
				       struct device *parent, int npiv)
{
	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
	struct net_device *netdev = fcoe->netdev;
	struct fc_lport *lport, *n_port;
	struct fcoe_port *port;
	struct Scsi_Host *shost;
	int rc;
	/*
	 * parent is only a vport if npiv is 1,
	 * but we'll only use vport in that case so go ahead and set it
	 */
	struct fc_vport *vport = dev_to_vport(parent);

	FCOE_NETDEV_DBG(netdev, "Create Interface\n");

	if (!npiv)
		lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port));
	else
		lport = libfc_vport_create(vport, sizeof(*port));

	if (!lport) {
		FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
		rc = -ENOMEM;
		goto out;
	}
	port = lport_priv(lport);
	port->lport = lport;
	port->priv = fcoe;
	port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
	port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
	INIT_WORK(&port->destroy_work, fcoe_destroy_work);

	/* configure a fc_lport including the exchange manager */
	rc = fcoe_lport_config(lport);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
				"interface\n");
		goto out_host_put;
	}

	if (npiv) {
		FCOE_NETDEV_DBG(netdev, "Setting vport names, "
				"%16.16llx %16.16llx\n",
				vport->node_name, vport->port_name);
		fc_set_wwnn(lport, vport->node_name);
		fc_set_wwpn(lport, vport->port_name);
	}

	/* configure lport network properties */
	rc = fcoe_netdev_config(lport, netdev);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/* configure lport scsi host properties */
	rc = fcoe_shost_config(lport, parent);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/* Initialize the library */
	rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/* Initialize FDMI information */
	fcoe_fdmi_info(lport, netdev);

	/*
	 * fcoe_em_alloc() and fcoe_hostlist_add() both
	 * need to be atomic with respect to other changes to the
	 * hostlist since fcoe_em_alloc() looks for an existing EM
	 * instance on host list updated by fcoe_hostlist_add().
	 *
	 * This is currently handled through the fcoe_config_mutex
	 * being held.
	 */
	if (!npiv)
		/* lport exch manager allocation */
		rc = fcoe_em_config(lport);
	else {
		shost = vport_to_shost(vport);
		n_port = shost_priv(shost);
		rc = fc_exch_mgr_list_clone(n_port, lport);
	}

	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n");
		goto out_lp_destroy;
	}

	return lport;

out_lp_destroy:
	fc_exch_mgr_free(lport);
out_host_put:
	scsi_host_put(lport->host);
out:
	return ERR_PTR(rc);
}

/**
 * fcoe_if_init() - Initialization routine for fcoe.ko
 *
 * Attaches the SW FCoE transport to the FC transport
 *
 * Returns: 0 on success
 */
static int __init fcoe_if_init(void)
{
	/* attach to scsi transport */
	fcoe_nport_scsi_transport =
		fc_attach_transport(&fcoe_nport_fc_functions);
	fcoe_vport_scsi_transport =
		fc_attach_transport(&fcoe_vport_fc_functions);

	if (!fcoe_nport_scsi_transport) {
		printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
		return -ENODEV;
	}

	return 0;
}

/**
 * fcoe_if_exit() - Tear down fcoe.ko
 *
 * Detaches the SW FCoE transport from the FC transport
 *
 * Returns: 0 on success
 */
static int __exit fcoe_if_exit(void)
{
	fc_release_transport(fcoe_nport_scsi_transport);
	fc_release_transport(fcoe_vport_scsi_transport);
	fcoe_nport_scsi_transport = NULL;
	fcoe_vport_scsi_transport = NULL;
	return 0;
}

/**
 * fcoe_percpu_thread_create() - Create a receive thread for an online CPU
 * @cpu: The CPU index of the CPU to create a receive thread for
 */
static void fcoe_percpu_thread_create(unsigned int cpu)
{
	struct fcoe_percpu_s *p;
	struct task_struct *thread;

	p = &per_cpu(fcoe_percpu, cpu);

	thread = kthread_create_on_node(fcoe_percpu_receive_thread,
					(void *)p, cpu_to_node(cpu),
					"fcoethread/%d", cpu);

	if (likely(!IS_ERR(thread))) {
		kthread_bind(thread, cpu);
		wake_up_process(thread);

		spin_lock_bh(&p->fcoe_rx_list.lock);
		p->thread = thread;
		spin_unlock_bh(&p->fcoe_rx_list.lock);
	}
}

/**
 * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU
 * @cpu: The CPU index of the CPU whose receive thread is to be destroyed
 *
 * Destroys a per-CPU Rx thread.  Any pending skbs are moved to the
 * current CPU's Rx thread.  If the thread being destroyed is bound to
 * the CPU processing this context the skbs will be freed.
 */
static void fcoe_percpu_thread_destroy(unsigned int cpu)
{
	struct fcoe_percpu_s *p;
	struct task_struct *thread;
	struct page *crc_eof;
	struct sk_buff *skb;
#ifdef CONFIG_SMP
	struct fcoe_percpu_s *p0;
	unsigned targ_cpu = get_cpu();
#endif /* CONFIG_SMP */

	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);

	/* Prevent any new skbs from being queued for this CPU. */
	p = &per_cpu(fcoe_percpu, cpu);
	spin_lock_bh(&p->fcoe_rx_list.lock);
	thread = p->thread;
	p->thread = NULL;
	crc_eof = p->crc_eof_page;
	p->crc_eof_page = NULL;
	p->crc_eof_offset = 0;
	spin_unlock_bh(&p->fcoe_rx_list.lock);

#ifdef CONFIG_SMP
	/*
	 * Don't bother moving the skbs if this context is running
	 * on the same CPU that is having its thread destroyed.
	 * This can easily happen when the module is removed.
	 */
	if (cpu != targ_cpu) {
		p0 = &per_cpu(fcoe_percpu, targ_cpu);
		spin_lock_bh(&p0->fcoe_rx_list.lock);
		if (p0->thread) {
			FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
				 cpu, targ_cpu);

			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				__skb_queue_tail(&p0->fcoe_rx_list, skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		} else {
			/*
			 * The targeted CPU is not initialized and cannot accept
			 * new skbs.  Unlock the targeted CPU and drop the skbs
			 * on the CPU that is going offline.
			 */
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				kfree_skb(skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		}
	} else {
		/*
		 * This scenario occurs when the module is being removed
		 * and all threads are being destroyed.  skbs will continue
		 * to be shifted from the CPU thread that is being removed
		 * to the CPU thread associated with the CPU that is processing
		 * the module removal.  Once there is only one CPU Rx thread it
		 * will reach this case and we will drop all skbs and later
		 * stop the thread.
		 */
		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
			kfree_skb(skb);
		spin_unlock_bh(&p->fcoe_rx_list.lock);
	}
	put_cpu();
#else
	/*
	 * This is a non-SMP scenario where the singular Rx thread is
	 * being removed.  Free all skbs and stop the thread.
	 */
	spin_lock_bh(&p->fcoe_rx_list.lock);
	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
		kfree_skb(skb);
	spin_unlock_bh(&p->fcoe_rx_list.lock);
#endif

	if (thread)
		kthread_stop(thread);

	if (crc_eof)
		put_page(crc_eof);
}

/**
 * fcoe_cpu_callback() - Handler for CPU hotplug events
 * @nfb:    The callback data block
 * @action: The event triggering the callback
 * @hcpu:   The index of the CPU that the event is for
 *
 * This creates or destroys per-CPU data for fcoe
 *
 * Returns NOTIFY_OK always.
 */
static int fcoe_cpu_callback(struct notifier_block *nfb,
			     unsigned long action, void *hcpu)
{
	unsigned cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
		fcoe_percpu_thread_create(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
		fcoe_percpu_thread_destroy(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

/**
 * fcoe_select_cpu() - Select the CPU to handle post-processing of an
 *		       incoming command
 *
 * This routine selects the next CPU based on the online cpumask to
 * distribute incoming requests in round robin.
 *
 * Returns: int CPU number
 */
static inline unsigned int fcoe_select_cpu(void)
{
	static unsigned int selected_cpu;

	selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
	if (selected_cpu >= nr_cpu_ids)
		selected_cpu = cpumask_first(cpu_online_mask);

	return selected_cpu;
}
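/*
 * Editor's illustration (not in the original source): because selected_cpu
 * is static and is simply advanced through cpu_online_mask, successive
 * calls on a 4-CPU system with all CPUs online return 1, 2, 3, 0, 1, ...,
 * spreading unsolicited frames with an unknown RX_ID across the per-CPU
 * receive threads.
 */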
/**
 * fcoe_rcv() - Receive packets from a net device
 * @skb:    The received packet
 * @netdev: The net device that the packet was received on
 * @ptype:  The packet type context
 * @olddev: The last device net device
 *
 * This routine is called by NET_RX_SOFTIRQ.  It receives a packet, builds a
 * FC frame and passes the frame to libfc.
 *
 * Returns: 0 for success
 */
static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
		    struct packet_type *ptype, struct net_device *olddev)
{
	struct fc_lport *lport;
	struct fcoe_rcv_info *fr;
	struct fcoe_ctlr *ctlr;
	struct fcoe_interface *fcoe;
	struct fc_frame_header *fh;
	struct fcoe_percpu_s *fps;
	struct ethhdr *eh;
	unsigned int cpu;

	fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
	ctlr = fcoe_to_ctlr(fcoe);
	lport = ctlr->lp;
	if (unlikely(!lport)) {
		FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
		goto err2;
	}
	if (!lport->link_up)
		goto err2;

	FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p "
			"data:%p tail:%p end:%p sum:%d dev:%s",
			skb->len, skb->data_len, skb->head, skb->data,
			skb_tail_pointer(skb), skb_end_pointer(skb),
			skb->csum, skb->dev ? skb->dev->name : "<NULL>");

	eh = eth_hdr(skb);

	if (is_fip_mode(ctlr) &&
	    compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
		FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
				eh->h_source);
		goto err;
	}

	/*
	 * Check for minimum frame length, and make sure required FCoE
	 * and FC headers are pulled into the linear data area.
	 */
	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
		     !pskb_may_pull(skb, FCOE_HEADER_LEN)))
		goto err;

	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
	fh = (struct fc_frame_header *) skb_transport_header(skb);

	if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) {
		FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n",
				eh->h_dest);
		goto err;
	}

	fr = fcoe_dev_from_skb(skb);
	fr->fr_dev = lport;

	/*
	 * If the incoming frame's exchange was originated by the initiator,
	 * AND the received frame's exchange ID with the fc_cpu_mask bits to
	 * get the same CPU on which the exchange was originated; otherwise
	 * select the CPU using the RX exchange ID or fcoe_select_cpu().
	 */
	if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
		cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
	else {
		if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)
			cpu = fcoe_select_cpu();
		else
			cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
	}
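	/*
	 * Editor's illustration (not in the original source): with four Rx
	 * threads and fc_cpu_mask == 0x3, a response frame carrying
	 * OX_ID 0x0012 is queued to CPU 0x0012 & 0x3 == 2, i.e. back to the
	 * CPU that allocated the exchange, keeping exchange state CPU-local.
	 */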
	if (cpu >= nr_cpu_ids)
		goto err;

	fps = &per_cpu(fcoe_percpu, cpu);
	spin_lock(&fps->fcoe_rx_list.lock);
	if (unlikely(!fps->thread)) {
		/*
		 * The targeted CPU is not ready, let's target
		 * the first CPU now.  For non-SMP systems this
		 * will check the same CPU twice.
		 */
		FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
				"ready for incoming skb- using first online "
				"CPU.\n");

		spin_unlock(&fps->fcoe_rx_list.lock);
		cpu = cpumask_first(cpu_online_mask);
		fps = &per_cpu(fcoe_percpu, cpu);
		spin_lock(&fps->fcoe_rx_list.lock);
		if (!fps->thread) {
			spin_unlock(&fps->fcoe_rx_list.lock);
			goto err;
		}
	}

	/*
	 * We now have a valid CPU that we're targeting for
	 * this skb.  We also have this receive thread locked,
	 * so we're free to queue skbs into its queue.
	 */

	/*
	 * Note: We used to have a set of conditions under which we would
	 * call fcoe_recv_frame directly, rather than queuing to the rx list
	 * as it could save a few cycles, but doing so is prohibited, as
	 * fcoe_recv_frame has several paths that may sleep, which is forbidden
	 * in softirq context.
	 */
	__skb_queue_tail(&fps->fcoe_rx_list, skb);
	if (fps->thread->state == TASK_INTERRUPTIBLE)
		wake_up_process(fps->thread);
	spin_unlock(&fps->fcoe_rx_list.lock);

	return 0;
err:
	per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
	put_cpu();
err2:
	kfree_skb(skb);
	return -1;
}

/**
 * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC
 * @skb:  The packet to be transmitted
 * @tlen: The total length of the trailer
 *
 * Returns: 0 for success
 */
static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
	struct fcoe_percpu_s *fps;
	int rc;

	fps = &get_cpu_var(fcoe_percpu);
	rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
	put_cpu_var(fcoe_percpu);

	return rc;
}

/**
 * fcoe_xmit() - Transmit a FCoE frame
 * @lport: The local port that the frame is to be transmitted for
 * @fp:    The frame to be transmitted
 *
 * Return: 0 for success
 */
static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
	int wlen;
	u32 crc;
	struct ethhdr *eh;
	struct fcoe_crc_eof *cp;
	struct sk_buff *skb;
	struct fc_stats *stats;
	struct fc_frame_header *fh;
	unsigned int hlen;		/* header length implies the version */
	unsigned int tlen;		/* trailer length */
	unsigned int elen;		/* eth header, may include vlan */
	struct fcoe_port *port = lport_priv(lport);
	struct fcoe_interface *fcoe = port->priv;
	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
	u8 sof, eof;
	struct fcoe_hdr *hp;

	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);
	wlen = skb->len / FCOE_WORD_TO_BYTE;

	if (!lport->link_up) {
		kfree_skb(skb);
		return 0;
	}

	if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
	    fcoe_ctlr_els_send(ctlr, lport, skb))
		return 0;

	sof = fr_sof(fp);
	eof = fr_eof(fp);

	elen = sizeof(struct ethhdr);
	hlen = sizeof(struct fcoe_hdr);
	tlen = sizeof(struct fcoe_crc_eof);
	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;

	/* crc offload */
	if (likely(lport->crc_offload)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_start = skb_headroom(skb);
		skb->csum_offset = skb->len;
		crc = 0;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		crc = fcoe_fc_crc(fp);
	}

	/* copy port crc and eof to the skb buff */
	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag;
		if (fcoe_alloc_paged_crc_eof(skb, tlen)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
		cp = kmap_atomic(skb_frag_page(frag))
			+ frag->page_offset;
	} else {
		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
	}

	memset(cp, 0, sizeof(*cp));
	cp->fcoe_eof = eof;
	cp->fcoe_crc32 = cpu_to_le32(~crc);

	if (skb_is_nonlinear(skb)) {
		kunmap_atomic(cp);
		cp = NULL;
	}

	/* adjust skb network/transport offsets to match mac/fcoe/port */
	skb_push(skb, elen + hlen);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->mac_len = elen;
	skb->protocol = htons(ETH_P_FCOE);
	skb->priority = port->priority;

	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
	    fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
		skb->vlan_tci = VLAN_TAG_PRESENT |
				vlan_dev_vlan_id(fcoe->netdev);
		skb->dev = fcoe->realdev;
	} else
		skb->dev = fcoe->netdev;

	/* fill up mac and fcoe headers */
	eh = eth_hdr(skb);
	eh->h_proto = htons(ETH_P_FCOE);
	memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
	if (ctlr->map_dest)
		memcpy(eh->h_dest + 3, fh->fh_d_id, 3);

	if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
		memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
	else
		memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);

	hp = (struct fcoe_hdr *)(eh + 1);
	memset(hp, 0, sizeof(*hp));
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
	hp->fcoe_sof = sof;

	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
	if (lport->seq_offload && fr_max_payload(fp)) {
		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
	} else {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
	}
	/* update tx stats: regardless if LLD fails */
	stats = per_cpu_ptr(lport->stats, get_cpu());
	stats->TxFrames++;
	stats->TxWords += wlen;
	put_cpu();

	/* send down to lld */
	fr_dev(fp) = lport;
	fcoe_port_send(port, skb);
	return 0;
}

/**
 * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion
 * @skb: The completed skb (argument required by destructor)
 */
static void fcoe_percpu_flush_done(struct sk_buff *skb)
{
	complete(&fcoe_flush_completion);
}

/**
 * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC
 * @lport: The local port the frame was received on
 * @fp:    The received frame
 *
 * Return: 0 on passing filtering checks
 */
static inline int fcoe_filter_frames(struct fc_lport *lport,
				     struct fc_frame *fp)
{
	struct fcoe_ctlr *ctlr;
	struct fcoe_interface *fcoe;
	struct fc_frame_header *fh;
	struct sk_buff *skb = (struct sk_buff *)fp;
	struct fc_stats *stats;

	/*
	 * We only check the CRC if no offload is available and if it is
	 * solicited data, in which case the FCP layer would
	 * check it during the copy.
	 */
	if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
	else
		fr_flags(fp) |= FCPHF_CRC_UNCHECKED;

	fh = (struct fc_frame_header *) skb_transport_header(skb);
	fh = fc_frame_header_get(fp);
	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
		return 0;

	fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
	ctlr = fcoe_to_ctlr(fcoe);
	if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
	    ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
		FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
		return -EINVAL;
	}

	if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) ||
	    le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) {
		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
		return 0;
	}

	stats = per_cpu_ptr(lport->stats, get_cpu());
	stats->InvalidCRCCount++;
	if (stats->InvalidCRCCount < 5)
		printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
	put_cpu();
	return -EINVAL;
}

/**
 * fcoe_recv_frame() - process a single received frame
 * @skb: frame to process
 */
static void fcoe_recv_frame(struct sk_buff *skb)
{
	u32 fr_len;
	struct fc_lport *lport;
	struct fcoe_rcv_info *fr;
	struct fc_stats *stats;
	struct fcoe_crc_eof crc_eof;
	struct fc_frame *fp;
	struct fcoe_port *port;
	struct fcoe_hdr *hp;

	fr = fcoe_dev_from_skb(skb);
	lport = fr->fr_dev;
	if (unlikely(!lport)) {
		if (skb->destructor != fcoe_percpu_flush_done)
			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
		kfree_skb(skb);
		return;
	}

	FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
			"head:%p data:%p tail:%p end:%p sum:%d dev:%s",
			skb->len, skb->data_len,
			skb->head, skb->data, skb_tail_pointer(skb),
			skb_end_pointer(skb), skb->csum,
			skb->dev ? skb->dev->name : "<NULL>");

	port = lport_priv(lport);
	skb_linearize(skb); /* check for skb_is_nonlinear is within skb_linearize */

	/*
	 * Frame length checks and setting up the header pointers
	 * were done in fcoe_rcv already.
	 */
	hp = (struct fcoe_hdr *) skb_network_header(skb);

	stats = per_cpu_ptr(lport->stats, get_cpu());
	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
		if (stats->ErrorFrames < 5)
			printk(KERN_WARNING "fcoe: FCoE version "
			       "mismatch: The frame has "
			       "version %x, but the "
			       "initiator supports version "
			       "%x\n", FC_FCOE_DECAPS_VER(hp),
			       FC_FCOE_VER);
		goto drop;
	}

	skb_pull(skb, sizeof(struct fcoe_hdr));
	fr_len = skb->len - sizeof(struct fcoe_crc_eof);

	stats->RxFrames++;
	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = hp->fcoe_sof;

	/* Copy out the CRC and EOF trailer for access */
	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof)))
		goto drop;
	fr_eof(fp) = crc_eof.fcoe_eof;
	fr_crc(fp) = crc_eof.fcoe_crc32;
	if (pskb_trim(skb, fr_len))
		goto drop;

	if (!fcoe_filter_frames(lport, fp)) {
		put_cpu();
		fc_exch_recv(lport, fp);
		return;
	}
drop:
	stats->ErrorFrames++;
	put_cpu();
	kfree_skb(skb);
}

/**
 * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
 * @arg: The per-CPU context
 *
 * Return: 0 for success
 */
static int fcoe_percpu_receive_thread(void *arg)
{
	struct fcoe_percpu_s *p = arg;
	struct sk_buff *skb;
	struct sk_buff_head tmp;

	skb_queue_head_init(&tmp);

	set_user_nice(current, -20);

retry:
	while (!kthread_should_stop()) {

		spin_lock_bh(&p->fcoe_rx_list.lock);
		skb_queue_splice_init(&p->fcoe_rx_list, &tmp);

		if (!skb_queue_len(&tmp)) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			schedule();
			set_current_state(TASK_RUNNING);
			goto retry;
		}

		spin_unlock_bh(&p->fcoe_rx_list.lock);

		while ((skb = __skb_dequeue(&tmp)) != NULL)
			fcoe_recv_frame(skb);

	}
	return 0;
}

/**
 * fcoe_dev_setup() - Setup the link change notification interface
 */
static void fcoe_dev_setup(void)
{
	register_dcbevent_notifier(&dcb_notifier);
	register_netdevice_notifier(&fcoe_notifier);
}

/**
 * fcoe_dev_cleanup() - Cleanup the link change notification interface
 */
static void fcoe_dev_cleanup(void)
{
	unregister_dcbevent_notifier(&dcb_notifier);
	unregister_netdevice_notifier(&fcoe_notifier);
}

static struct fcoe_interface *
fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
{
	struct fcoe_interface *fcoe;
	struct net_device *real_dev;

	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
		if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
			real_dev = vlan_dev_real_dev(fcoe->netdev);
		else
			real_dev = fcoe->netdev;

		if (netdev == real_dev)
			return fcoe;
	}
	return NULL;
}

static int fcoe_dcb_app_notification(struct notifier_block *notifier,
				     ulong event, void *ptr)
{
	struct dcb_app_type *entry = ptr;
	struct fcoe_ctlr *ctlr;
	struct fcoe_interface *fcoe;
	struct net_device *netdev;
	struct fcoe_port *port;
	int prio;

	if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
		return NOTIFY_OK;

	netdev = dev_get_by_index(&init_net, entry->ifindex);
	if (!netdev)
NOTIFY_OK; 1929 1930 fcoe = fcoe_hostlist_lookup_realdev_port(netdev); 1931 dev_put(netdev); 1932 if (!fcoe) 1933 return NOTIFY_OK; 1934 1935 ctlr = fcoe_to_ctlr(fcoe); 1936 1937 if (entry->dcbx & DCB_CAP_DCBX_VER_CEE) 1938 prio = ffs(entry->app.priority) - 1; 1939 else 1940 prio = entry->app.priority; 1941 1942 if (prio < 0) 1943 return NOTIFY_OK; 1944 1945 if (entry->app.protocol == ETH_P_FIP || 1946 entry->app.protocol == ETH_P_FCOE) 1947 ctlr->priority = prio; 1948 1949 if (entry->app.protocol == ETH_P_FCOE) { 1950 port = lport_priv(ctlr->lp); 1951 port->priority = prio; 1952 } 1953 1954 return NOTIFY_OK; 1955 } 1956 1957 /** 1958 * fcoe_device_notification() - Handler for net device events 1959 * @notifier: The context of the notification 1960 * @event: The type of event 1961 * @ptr: The net device that the event was on 1962 * 1963 * This function is called from the netdevice notifier chain on net device events such as link changes. 1964 * 1965 * Returns: NOTIFY_OK if the event was handled, NOTIFY_DONE otherwise 1966 */ 1967 static int fcoe_device_notification(struct notifier_block *notifier, 1968 ulong event, void *ptr) 1969 { 1970 struct fc_lport *lport = NULL; 1971 struct net_device *netdev = ptr; 1972 struct fcoe_ctlr *ctlr; 1973 struct fcoe_interface *fcoe; 1974 struct fcoe_port *port; 1975 struct fc_stats *stats; 1976 u32 link_possible = 1; 1977 u32 mfs; 1978 int rc = NOTIFY_OK; 1979 1980 list_for_each_entry(fcoe, &fcoe_hostlist, list) { 1981 if (fcoe->netdev == netdev) { 1982 ctlr = fcoe_to_ctlr(fcoe); 1983 lport = ctlr->lp; 1984 break; 1985 } 1986 } 1987 if (!lport) { 1988 rc = NOTIFY_DONE; 1989 goto out; 1990 } 1991 1992 switch (event) { 1993 case NETDEV_DOWN: 1994 case NETDEV_GOING_DOWN: 1995 link_possible = 0; 1996 break; 1997 case NETDEV_UP: 1998 case NETDEV_CHANGE: 1999 break; 2000 case NETDEV_CHANGEMTU: 2001 if (netdev->features & NETIF_F_FCOE_MTU) 2002 break; 2003 mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + 2004 sizeof(struct fcoe_crc_eof)); 2005 if (mfs >= FC_MIN_MAX_FRAME) 2006 fc_set_mfs(lport, mfs); 2007 break; 2008 case NETDEV_REGISTER: 2009 break; 2010 case NETDEV_UNREGISTER: 2011 list_del(&fcoe->list); 2012 port = lport_priv(ctlr->lp); 2013 queue_work(fcoe_wq, &port->destroy_work); 2014 goto out; 2016 case NETDEV_FEAT_CHANGE: 2017 fcoe_netdev_features_change(lport, netdev); 2018 break; 2019 default: 2020 FCOE_NETDEV_DBG(netdev, "Unknown event %ld " 2021 "from netdev netlink\n", event); 2022 } 2023 2024 fcoe_link_speed_update(lport); 2025 2026 if (link_possible && !fcoe_link_ok(lport)) 2027 fcoe_ctlr_link_up(ctlr); 2028 else if (fcoe_ctlr_link_down(ctlr)) { 2029 stats = per_cpu_ptr(lport->stats, get_cpu()); 2030 stats->LinkFailureCount++; 2031 put_cpu(); 2032 fcoe_clean_pending_queue(lport); 2033 } 2034 out: 2035 return rc; 2036 } 2037 2038 /** 2039 * fcoe_disable() - Disables a FCoE interface 2040 * @netdev: The net_device object of the Ethernet interface to disable 2041 * 2042 * Called from fcoe transport.
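 * Disabling only takes the FIP controller link down and flushes the pending frame queue; the lport and its Scsi_Host are kept around so the interface can later be brought back up with fcoe_enable().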
2043 * 2044 * Returns: 0 for success 2045 */ 2046 static int fcoe_disable(struct net_device *netdev) 2047 { 2048 struct fcoe_ctlr *ctlr; 2049 struct fcoe_interface *fcoe; 2050 int rc = 0; 2051 2052 mutex_lock(&fcoe_config_mutex); 2053 2054 rtnl_lock(); 2055 fcoe = fcoe_hostlist_lookup_port(netdev); 2056 rtnl_unlock(); 2057 2058 if (fcoe) { 2059 ctlr = fcoe_to_ctlr(fcoe); 2060 fcoe_ctlr_link_down(ctlr); 2061 fcoe_clean_pending_queue(ctlr->lp); 2062 } else 2063 rc = -ENODEV; 2064 2065 mutex_unlock(&fcoe_config_mutex); 2066 return rc; 2067 } 2068 2069 /** 2070 * fcoe_enable() - Enables a FCoE interface 2071 * @netdev: The net_device object of the Ethernet interface to enable 2072 * 2073 * Called from fcoe transport. 2074 * 2075 * Returns: 0 for success 2076 */ 2077 static int fcoe_enable(struct net_device *netdev) 2078 { 2079 struct fcoe_ctlr *ctlr; 2080 struct fcoe_interface *fcoe; 2081 int rc = 0; 2082 2083 mutex_lock(&fcoe_config_mutex); 2084 rtnl_lock(); 2085 fcoe = fcoe_hostlist_lookup_port(netdev); 2086 rtnl_unlock(); 2087 2088 if (!fcoe) { 2089 rc = -ENODEV; 2090 goto out; 2091 } 2092 2093 ctlr = fcoe_to_ctlr(fcoe); 2094 2095 if (!fcoe_link_ok(ctlr->lp)) 2096 fcoe_ctlr_link_up(ctlr); 2097 2098 out: 2099 mutex_unlock(&fcoe_config_mutex); 2100 return rc; 2101 } 2102 2103 /** 2104 * fcoe_destroy() - Destroy a FCoE interface 2105 * @netdev: The net_device object of the Ethernet interface to destroy 2106 * 2107 * Called from fcoe transport 2108 * 2109 * Returns: 0 for success 2110 */ 2111 static int fcoe_destroy(struct net_device *netdev) 2112 { 2113 struct fcoe_ctlr *ctlr; 2114 struct fcoe_interface *fcoe; 2115 struct fc_lport *lport; 2116 struct fcoe_port *port; 2117 int rc = 0; 2118 2119 mutex_lock(&fcoe_config_mutex); 2120 rtnl_lock(); 2121 fcoe = fcoe_hostlist_lookup_port(netdev); 2122 if (!fcoe) { 2123 rc = -ENODEV; 2124 goto out_nodev; 2125 } 2126 ctlr = fcoe_to_ctlr(fcoe); 2127 lport = ctlr->lp; 2128 port = lport_priv(lport); 2129 list_del(&fcoe->list); 2130 queue_work(fcoe_wq, &port->destroy_work); 2131 out_nodev: 2132 rtnl_unlock(); 2133 mutex_unlock(&fcoe_config_mutex); 2134 return rc; 2135 } 2136 2137 /** 2138 * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context 2139 * @work: Handle to the FCoE port to be destroyed 2140 */ 2141 static void fcoe_destroy_work(struct work_struct *work) 2142 { 2143 struct fcoe_port *port; 2144 struct fcoe_interface *fcoe; 2145 2146 port = container_of(work, struct fcoe_port, destroy_work); 2147 mutex_lock(&fcoe_config_mutex); 2148 2149 fcoe = port->priv; 2150 fcoe_if_destroy(port->lport); 2151 fcoe_interface_cleanup(fcoe); 2152 2153 mutex_unlock(&fcoe_config_mutex); 2154 } 2155 2156 /** 2157 * fcoe_match() - Check if FCoE is supported on the given netdevice 2158 * @netdev: The net_device object of the Ethernet interface to check 2159 * 2160 * Called from fcoe transport. 2161 * 2162 * Returns: always returns true as this is the default FCoE transport, 2163 * i.e., it supports all netdevs.
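 * As an illustration only (hypothetical, not part of this driver), a transport tied to specific hardware would narrow the check, e.g.: * static bool foo_match(struct net_device *netdev) * { * return netdev->netdev_ops == &foo_netdev_ops; * } * The default software transport deliberately does no such filtering and simply returns true.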
2164 */ 2165 static bool fcoe_match(struct net_device *netdev) 2166 { 2167 return true; 2168 } 2169 2170 /** 2171 * fcoe_dcb_create() - Initialize DCB attributes and hooks 2172 * @fcoe: The FCoE interface for which the DCB APP priorities are set up 2175 */ 2176 static void fcoe_dcb_create(struct fcoe_interface *fcoe) 2177 { 2178 #ifdef CONFIG_DCB 2179 int dcbx; 2180 u8 fup, up; 2181 struct net_device *netdev = fcoe->realdev; 2182 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); 2183 struct fcoe_port *port = lport_priv(ctlr->lp); 2184 struct dcb_app app = { 2185 .priority = 0, 2186 .protocol = ETH_P_FCOE 2187 }; 2188 2189 /* setup DCB priority attributes. */ 2190 if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) { 2191 dcbx = netdev->dcbnl_ops->getdcbx(netdev); 2192 2193 if (dcbx & DCB_CAP_DCBX_VER_IEEE) { 2194 app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE; 2195 up = dcb_ieee_getapp_mask(netdev, &app); 2196 app.protocol = ETH_P_FIP; 2197 fup = dcb_ieee_getapp_mask(netdev, &app); 2198 } else { 2199 app.selector = DCB_APP_IDTYPE_ETHTYPE; 2200 up = dcb_getapp(netdev, &app); 2201 app.protocol = ETH_P_FIP; 2202 fup = dcb_getapp(netdev, &app); 2203 } 2204 2205 port->priority = ffs(up) ? ffs(up) - 1 : 0; 2206 ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority; 2207 } 2208 #endif 2209 } 2210 2211 /** 2212 * fcoe_create() - Create a fcoe interface 2213 * @netdev: The net_device object of the Ethernet interface to create on 2214 * @fip_mode: The FIP mode for this creation 2215 * 2216 * Called from fcoe transport 2217 * 2218 * Returns: 0 for success 2219 */ 2220 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) 2221 { 2222 int rc = 0; 2223 struct fcoe_ctlr_device *ctlr_dev; 2224 struct fcoe_ctlr *ctlr; 2225 struct fcoe_interface *fcoe; 2226 struct fc_lport *lport; 2227 2228 mutex_lock(&fcoe_config_mutex); 2229 rtnl_lock(); 2230 2231 /* look for existing lport */ 2232 if (fcoe_hostlist_lookup(netdev)) { 2233 rc = -EEXIST; 2234 goto out_nodev; 2235 } 2236 2237 fcoe = fcoe_interface_create(netdev, fip_mode); 2238 if (IS_ERR(fcoe)) { 2239 rc = PTR_ERR(fcoe); 2240 goto out_nodev; 2241 } 2242 2243 ctlr = fcoe_to_ctlr(fcoe); 2244 ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr); 2245 lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0); 2246 if (IS_ERR(lport)) { 2247 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", 2248 netdev->name); 2249 rc = -EIO; 2250 rtnl_unlock(); 2251 fcoe_interface_cleanup(fcoe); 2252 goto out_nortnl; 2253 } 2254 2255 /* Make this the "master" N_Port */ 2256 ctlr->lp = lport; 2257 2258 /* setup DCB priority attributes.
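 * fcoe_dcb_create() below seeds port->priority and ctlr->priority from the DCB APP table (IEEE or CEE flavour); later APP table changes are propagated through dcb_notifier/fcoe_dcb_app_notification().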
*/ 2259 fcoe_dcb_create(fcoe); 2260 2261 /* add to lports list */ 2262 fcoe_hostlist_add(lport); 2263 2264 /* start FIP Discovery and FLOGI */ 2265 lport->boot_time = jiffies; 2266 fc_fabric_login(lport); 2267 if (!fcoe_link_ok(lport)) { 2268 rtnl_unlock(); 2269 fcoe_ctlr_link_up(ctlr); 2270 mutex_unlock(&fcoe_config_mutex); 2271 return rc; 2272 } 2273 2274 out_nodev: 2275 rtnl_unlock(); 2276 out_nortnl: 2277 mutex_unlock(&fcoe_config_mutex); 2278 return rc; 2279 } 2280 2281 /** 2282 * fcoe_link_speed_update() - Update the supported and actual link speeds 2283 * @lport: The local port to update speeds for 2284 * 2285 * Returns: 0 if the ethtool query was successful 2286 * -1 if the ethtool query failed 2287 */ 2288 static int fcoe_link_speed_update(struct fc_lport *lport) 2289 { 2290 struct net_device *netdev = fcoe_netdev(lport); 2291 struct ethtool_cmd ecmd; 2292 2293 if (!__ethtool_get_settings(netdev, &ecmd)) { 2294 lport->link_supported_speeds &= 2295 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); 2296 if (ecmd.supported & (SUPPORTED_1000baseT_Half | 2297 SUPPORTED_1000baseT_Full)) 2298 lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; 2299 if (ecmd.supported & SUPPORTED_10000baseT_Full) 2300 lport->link_supported_speeds |= 2301 FC_PORTSPEED_10GBIT; 2302 switch (ethtool_cmd_speed(&ecmd)) { 2303 case SPEED_1000: 2304 lport->link_speed = FC_PORTSPEED_1GBIT; 2305 break; 2306 case SPEED_10000: 2307 lport->link_speed = FC_PORTSPEED_10GBIT; 2308 break; 2309 } 2310 return 0; 2311 } 2312 return -1; 2313 } 2314 2315 /** 2316 * fcoe_link_ok() - Check if the link is OK for a local port 2317 * @lport: The local port to check link on 2318 * 2319 * Returns: 0 if link is UP and OK, -1 if not 2320 * 2321 */ 2322 static int fcoe_link_ok(struct fc_lport *lport) 2323 { 2324 struct net_device *netdev = fcoe_netdev(lport); 2325 2326 if (netif_oper_up(netdev)) 2327 return 0; 2328 return -1; 2329 } 2330 2331 /** 2332 * fcoe_percpu_clean() - Clear all pending skbs for a local port 2333 * @lport: The local port whose skbs are to be cleared 2334 * 2335 * Must be called with fcoe_create_mutex held to single-thread completion. 2336 * 2337 * This flushes the pending skbs by adding a new skb to each queue and 2338 * waiting until they are all freed. This assures us that not only are 2339 * there no packets that will be handled by the lport, but also that any 2340 * threads already handling packets have returned.
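 * The "new skb" is a zero-length skb whose destructor is set to fcoe_percpu_flush_done(); once the per-CPU receive thread frees it, that destructor is expected to complete fcoe_flush_completion, which this routine waits for before moving on to the next CPU.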
2341 */ 2342 static void fcoe_percpu_clean(struct fc_lport *lport) 2343 { 2344 struct fcoe_percpu_s *pp; 2345 struct sk_buff *skb; 2346 unsigned int cpu; 2347 2348 for_each_possible_cpu(cpu) { 2349 pp = &per_cpu(fcoe_percpu, cpu); 2350 2351 if (!pp->thread || !cpu_online(cpu)) 2352 continue; 2353 2354 skb = dev_alloc_skb(0); 2355 if (!skb) 2356 continue; 2357 2358 skb->destructor = fcoe_percpu_flush_done; 2359 2360 spin_lock_bh(&pp->fcoe_rx_list.lock); 2361 __skb_queue_tail(&pp->fcoe_rx_list, skb); 2362 if (pp->fcoe_rx_list.qlen == 1) 2363 wake_up_process(pp->thread); 2364 spin_unlock_bh(&pp->fcoe_rx_list.lock); 2365 2366 wait_for_completion(&fcoe_flush_completion); 2367 } 2368 } 2369 2370 /** 2371 * fcoe_reset() - Reset a local port 2372 * @shost: The SCSI host associated with the local port to be reset 2373 * 2374 * Returns: Always 0 (return value required by FC transport template) 2375 */ 2376 static int fcoe_reset(struct Scsi_Host *shost) 2377 { 2378 struct fc_lport *lport = shost_priv(shost); 2379 struct fcoe_port *port = lport_priv(lport); 2380 struct fcoe_interface *fcoe = port->priv; 2381 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); 2382 2383 fcoe_ctlr_link_down(ctlr); 2384 fcoe_clean_pending_queue(ctlr->lp); 2385 if (!fcoe_link_ok(ctlr->lp)) 2386 fcoe_ctlr_link_up(ctlr); 2387 return 0; 2388 } 2389 2390 /** 2391 * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device 2392 * @netdev: The net device used as a key 2393 * 2394 * Locking: Must be called with the RTNL mutex held. 2395 * 2396 * Returns: NULL or the FCoE interface 2397 */ 2398 static struct fcoe_interface * 2399 fcoe_hostlist_lookup_port(const struct net_device *netdev) 2400 { 2401 struct fcoe_interface *fcoe; 2402 2403 list_for_each_entry(fcoe, &fcoe_hostlist, list) { 2404 if (fcoe->netdev == netdev) 2405 return fcoe; 2406 } 2407 return NULL; 2408 } 2409 2410 /** 2411 * fcoe_hostlist_lookup() - Find the local port associated with a 2412 * given net device 2413 * @netdev: The netdevice used as a key 2414 * 2415 * Locking: Must be called with the RTNL mutex held 2416 * 2417 * Returns: NULL or the local port 2418 */ 2419 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) 2420 { 2421 struct fcoe_ctlr *ctlr; 2422 struct fcoe_interface *fcoe; 2423 2424 fcoe = fcoe_hostlist_lookup_port(netdev); 2425 ctlr = fcoe_to_ctlr(fcoe); 2426 return (fcoe) ?
ctlr->lp : NULL; 2427 } 2428 2429 /** 2430 * fcoe_hostlist_add() - Add the FCoE interface identified by a local 2431 * port to the hostlist 2432 * @lport: The local port that identifies the FCoE interface to be added 2433 * 2434 * Locking: must be called with the RTNL mutex held 2435 * 2436 * Returns: 0 for success 2437 */ 2438 static int fcoe_hostlist_add(const struct fc_lport *lport) 2439 { 2440 struct fcoe_interface *fcoe; 2441 struct fcoe_port *port; 2442 2443 fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport)); 2444 if (!fcoe) { 2445 port = lport_priv(lport); 2446 fcoe = port->priv; 2447 list_add_tail(&fcoe->list, &fcoe_hostlist); 2448 } 2449 return 0; 2450 } 2451 2452 2453 static struct fcoe_transport fcoe_sw_transport = { 2454 .name = {FCOE_TRANSPORT_DEFAULT}, 2455 .attached = false, 2456 .list = LIST_HEAD_INIT(fcoe_sw_transport.list), 2457 .match = fcoe_match, 2458 .create = fcoe_create, 2459 .destroy = fcoe_destroy, 2460 .enable = fcoe_enable, 2461 .disable = fcoe_disable, 2462 }; 2463 2464 /** 2465 * fcoe_init() - Initialize fcoe.ko 2466 * 2467 * Returns: 0 on success, or a negative value on failure 2468 */ 2469 static int __init fcoe_init(void) 2470 { 2471 struct fcoe_percpu_s *p; 2472 unsigned int cpu; 2473 int rc = 0; 2474 2475 fcoe_wq = alloc_workqueue("fcoe", 0, 0); 2476 if (!fcoe_wq) 2477 return -ENOMEM; 2478 2479 /* register as a fcoe transport */ 2480 rc = fcoe_transport_attach(&fcoe_sw_transport); 2481 if (rc) { 2482 printk(KERN_ERR "failed to register an fcoe transport, check " 2483 "if libfcoe is loaded\n"); destroy_workqueue(fcoe_wq); 2484 return rc; 2485 } 2486 2487 mutex_lock(&fcoe_config_mutex); 2488 2489 for_each_possible_cpu(cpu) { 2490 p = &per_cpu(fcoe_percpu, cpu); 2491 skb_queue_head_init(&p->fcoe_rx_list); 2492 } 2493 2494 for_each_online_cpu(cpu) 2495 fcoe_percpu_thread_create(cpu); 2496 2497 /* Initialize per CPU interrupt thread */ 2498 rc = register_hotcpu_notifier(&fcoe_cpu_notifier); 2499 if (rc) 2500 goto out_free; 2501 2502 /* Setup link change notification */ 2503 fcoe_dev_setup(); 2504 2505 rc = fcoe_if_init(); 2506 if (rc) 2507 goto out_free; 2508 2509 mutex_unlock(&fcoe_config_mutex); 2510 return 0; 2511 2512 out_free: 2513 for_each_online_cpu(cpu) { 2514 fcoe_percpu_thread_destroy(cpu); 2515 } 2516 mutex_unlock(&fcoe_config_mutex); 2517 destroy_workqueue(fcoe_wq); 2518 return rc; 2519 } 2520 module_init(fcoe_init); 2521 2522 /** 2523 * fcoe_exit() - Clean up fcoe.ko 2526 */ 2527 static void __exit fcoe_exit(void) 2528 { 2529 struct fcoe_interface *fcoe, *tmp; 2530 struct fcoe_ctlr *ctlr; 2531 struct fcoe_port *port; 2532 unsigned int cpu; 2533 2534 mutex_lock(&fcoe_config_mutex); 2535 2536 fcoe_dev_cleanup(); 2537 2538 /* releases the associated fcoe hosts */ 2539 rtnl_lock(); 2540 list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) { 2541 list_del(&fcoe->list); 2542 ctlr = fcoe_to_ctlr(fcoe); 2543 port = lport_priv(ctlr->lp); 2544 queue_work(fcoe_wq, &port->destroy_work); 2545 } 2546 rtnl_unlock(); 2547 2548 unregister_hotcpu_notifier(&fcoe_cpu_notifier); 2549 2550 for_each_online_cpu(cpu) 2551 fcoe_percpu_thread_destroy(cpu); 2552 2553 mutex_unlock(&fcoe_config_mutex); 2554 2555 /* 2556 * destroy_work items may be chained, but destroy_workqueue() 2557 * can take care of them. Just kill the fcoe_wq. 2558 */ 2559 destroy_workqueue(fcoe_wq); 2560 2561 /* 2562 * Detaching from the scsi transport must happen after all 2563 * destroys are done on the fcoe_wq.
destroy_workqueue will 2564 * ensure the fcoe_wq is flushed. 2565 */ 2566 fcoe_if_exit(); 2567 2568 /* detach from fcoe transport */ 2569 fcoe_transport_detach(&fcoe_sw_transport); 2570 } 2571 module_exit(fcoe_exit); 2572 2573 /** 2574 * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler 2575 * @seq: active sequence in the FLOGI or FDISC exchange 2576 * @fp: response frame, or error encoded in a pointer (timeout) 2577 * @arg: pointer to the fcoe_ctlr structure 2578 * 2579 * This handles MAC address management for FCoE, then passes control on to 2580 * the libfc FLOGI response handler. 2581 */ 2582 static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 2583 { 2584 struct fcoe_ctlr *fip = arg; 2585 struct fc_exch *exch = fc_seq_exch(seq); 2586 struct fc_lport *lport = exch->lp; 2587 u8 *mac; 2588 2589 if (IS_ERR(fp)) 2590 goto done; 2591 2592 mac = fr_cb(fp)->granted_mac; 2593 /* pre-FIP */ 2594 if (is_zero_ether_addr(mac)) 2595 fcoe_ctlr_recv_flogi(fip, lport, fp); 2596 if (!is_zero_ether_addr(mac)) 2597 fcoe_update_src_mac(lport, mac); 2598 done: 2599 fc_lport_flogi_resp(seq, fp, lport); 2600 } 2601 2602 /** 2603 * fcoe_logo_resp() - FCoE specific LOGO response handler 2604 * @seq: active sequence in the LOGO exchange 2605 * @fp: response frame, or error encoded in a pointer (timeout) 2606 * @arg: pointer to the fc_lport structure 2607 * 2608 * This handles MAC address management for FCoE, then passes control on to 2609 * the libfc LOGO response handler. 2610 */ 2611 static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 2612 { 2613 struct fc_lport *lport = arg; 2614 static u8 zero_mac[ETH_ALEN] = { 0 }; 2615 2616 if (!IS_ERR(fp)) 2617 fcoe_update_src_mac(lport, zero_mac); 2618 fc_lport_logo_resp(seq, fp, lport); 2619 } 2620 2621 /** 2622 * fcoe_elsct_send() - FCoE specific ELS handler 2623 * 2624 * This does special case handling of FIP encapsulated ELS exchanges for FCoE, 2625 * using FCoE specific response handlers and passing the FIP controller as 2626 * the argument (the lport is still available from the exchange). 2627 * 2628 * Most of the work here is just handed off to the libfc routine. 2629 */ 2630 static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did, 2631 struct fc_frame *fp, unsigned int op, 2632 void (*resp)(struct fc_seq *, 2633 struct fc_frame *, 2634 void *), 2635 void *arg, u32 timeout) 2636 { 2637 struct fcoe_port *port = lport_priv(lport); 2638 struct fcoe_interface *fcoe = port->priv; 2639 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); 2640 struct fc_frame_header *fh = fc_frame_header_get(fp); 2641 2642 switch (op) { 2643 case ELS_FLOGI: 2644 case ELS_FDISC: 2645 if (lport->point_to_multipoint) 2646 break; 2647 return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp, 2648 fip, timeout); 2649 case ELS_LOGO: 2650 /* only hook onto fabric logouts, not port logouts */ 2651 if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) 2652 break; 2653 return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp, 2654 lport, timeout); 2655 } 2656 return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); 2657 } 2658 2659 /** 2660 * fcoe_vport_create() - create an fc_host/scsi_host for a vport 2661 * @vport: fc_vport object to create a new fc_host for 2662 * @disabled: start the new fc_host in a disabled state by default?
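 * The vport is backed by its own lport and Scsi_Host created through fcoe_if_create() on the same fcoe_interface as the physical N_Port; the final argument (1 here, versus 0 for the master lport in fcoe_create()) appears to mark the NPIV case.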
2663 * 2664 * Returns: 0 for success 2665 */ 2666 static int fcoe_vport_create(struct fc_vport *vport, bool disabled) 2667 { 2668 struct Scsi_Host *shost = vport_to_shost(vport); 2669 struct fc_lport *n_port = shost_priv(shost); 2670 struct fcoe_port *port = lport_priv(n_port); 2671 struct fcoe_interface *fcoe = port->priv; 2672 struct net_device *netdev = fcoe->netdev; 2673 struct fc_lport *vn_port; 2674 int rc; 2675 char buf[32]; 2676 2677 rc = fcoe_validate_vport_create(vport); 2678 if (rc) { 2679 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); 2680 printk(KERN_ERR "fcoe: Failed to create vport, " 2681 "WWPN (0x%s) already exists\n", 2682 buf); 2683 return rc; 2684 } 2685 2686 mutex_lock(&fcoe_config_mutex); 2687 rtnl_lock(); 2688 vn_port = fcoe_if_create(fcoe, &vport->dev, 1); 2689 rtnl_unlock(); 2690 mutex_unlock(&fcoe_config_mutex); 2691 2692 if (IS_ERR(vn_port)) { 2693 printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n", 2694 netdev->name); 2695 return -EIO; 2696 } 2697 2698 if (disabled) { 2699 fc_vport_set_state(vport, FC_VPORT_DISABLED); 2700 } else { 2701 vn_port->boot_time = jiffies; 2702 fc_fabric_login(vn_port); 2703 fc_vport_setlink(vn_port); 2704 } 2705 return 0; 2706 } 2707 2708 /** 2709 * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport 2710 * @vport: fc_vport object that is being destroyed 2711 * 2712 * Returns: 0 for success 2713 */ 2714 static int fcoe_vport_destroy(struct fc_vport *vport) 2715 { 2716 struct Scsi_Host *shost = vport_to_shost(vport); 2717 struct fc_lport *n_port = shost_priv(shost); 2718 struct fc_lport *vn_port = vport->dd_data; 2719 2720 mutex_lock(&n_port->lp_mutex); 2721 list_del(&vn_port->list); 2722 mutex_unlock(&n_port->lp_mutex); 2723 2724 mutex_lock(&fcoe_config_mutex); 2725 fcoe_if_destroy(vn_port); 2726 mutex_unlock(&fcoe_config_mutex); 2727 2728 return 0; 2729 } 2730 2731 /** 2732 * fcoe_vport_disable() - change vport state 2733 * @vport: vport to bring online/offline 2734 * @disable: should the vport be disabled? 2735 */ 2736 static int fcoe_vport_disable(struct fc_vport *vport, bool disable) 2737 { 2738 struct fc_lport *lport = vport->dd_data; 2739 2740 if (disable) { 2741 fc_vport_set_state(vport, FC_VPORT_DISABLED); 2742 fc_fabric_logoff(lport); 2743 } else { 2744 lport->boot_time = jiffies; 2745 fc_fabric_login(lport); 2746 fc_vport_setlink(lport); 2747 } 2748 2749 return 0; 2750 } 2751 2752 /** 2753 * fcoe_vport_set_symbolic_name() - append vport string to symbolic name 2754 * @vport: fc_vport with a new symbolic name string 2755 * 2756 * After generating a new symbolic name string, a new RSPN_ID request is 2757 * sent to the name server. There is no response handler, so if it fails 2758 * for some reason it will not be retried. 
2759 */ 2760 static void fcoe_set_vport_symbolic_name(struct fc_vport *vport) 2761 { 2762 struct fc_lport *lport = vport->dd_data; 2763 struct fc_frame *fp; 2764 size_t len; 2765 2766 snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, 2767 "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION, 2768 fcoe_netdev(lport)->name, vport->symbolic_name); 2769 2770 if (lport->state != LPORT_ST_READY) 2771 return; 2772 2773 len = strnlen(fc_host_symbolic_name(lport->host), 255); 2774 fp = fc_frame_alloc(lport, 2775 sizeof(struct fc_ct_hdr) + 2776 sizeof(struct fc_ns_rspn) + len); 2777 if (!fp) 2778 return; 2779 lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID, 2780 NULL, NULL, 3 * lport->r_a_tov); 2781 } 2782 2783 /** 2784 * fcoe_get_lesb() - Fill the FCoE Link Error Status Block 2785 * @lport: the local port 2786 * @fc_lesb: the link error status block 2787 */ 2788 static void fcoe_get_lesb(struct fc_lport *lport, 2789 struct fc_els_lesb *fc_lesb) 2790 { 2791 struct net_device *netdev = fcoe_netdev(lport); 2792 2793 __fcoe_get_lesb(lport, fc_lesb, netdev); 2794 } 2795 2796 static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev) 2797 { 2798 struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev); 2799 struct net_device *netdev = fcoe_netdev(fip->lp); 2800 struct fcoe_fc_els_lesb *fcoe_lesb; 2801 struct fc_els_lesb fc_lesb; 2802 2803 __fcoe_get_lesb(fip->lp, &fc_lesb, netdev); 2804 fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb); 2805 2806 ctlr_dev->lesb.lesb_link_fail = 2807 ntohl(fcoe_lesb->lesb_link_fail); 2808 ctlr_dev->lesb.lesb_vlink_fail = 2809 ntohl(fcoe_lesb->lesb_vlink_fail); 2810 ctlr_dev->lesb.lesb_miss_fka = 2811 ntohl(fcoe_lesb->lesb_miss_fka); 2812 ctlr_dev->lesb.lesb_symb_err = 2813 ntohl(fcoe_lesb->lesb_symb_err); 2814 ctlr_dev->lesb.lesb_err_block = 2815 ntohl(fcoe_lesb->lesb_err_block); 2816 ctlr_dev->lesb.lesb_fcs_error = 2817 ntohl(fcoe_lesb->lesb_fcs_error); 2818 } 2819 2820 static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev) 2821 { 2822 struct fcoe_ctlr_device *ctlr_dev = 2823 fcoe_fcf_dev_to_ctlr_dev(fcf_dev); 2824 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); 2825 struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr); 2826 2827 fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev); 2828 } 2829 2830 /** 2831 * fcoe_set_port_id() - Callback from libfc when Port_ID is set. 2832 * @lport: the local port 2833 * @port_id: the port ID 2834 * @fp: the received frame, if any, that caused the port_id to be set. 2835 * 2836 * This routine handles the case where we received a FLOGI and are 2837 * entering point-to-point mode. We need to call fcoe_ctlr_recv_flogi() 2838 * so it can set the non-mapped mode and gateway address. 2839 * 2840 * The FLOGI LS_ACC is handled by fcoe_flogi_resp(). 2841 */ 2842 static void fcoe_set_port_id(struct fc_lport *lport, 2843 u32 port_id, struct fc_frame *fp) 2844 { 2845 struct fcoe_port *port = lport_priv(lport); 2846 struct fcoe_interface *fcoe = port->priv; 2847 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); 2848 2849 if (fp && fc_frame_payload_op(fp) == ELS_FLOGI) 2850 fcoe_ctlr_recv_flogi(ctlr, lport, fp); 2851 } 2852