xref: /openbmc/linux/drivers/scsi/fcoe/fcoe.c (revision 81d67439)
1 /*
2  * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 #include <linux/module.h>
21 #include <linux/version.h>
22 #include <linux/spinlock.h>
23 #include <linux/netdevice.h>
24 #include <linux/etherdevice.h>
25 #include <linux/ethtool.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/crc32.h>
29 #include <linux/slab.h>
30 #include <linux/cpu.h>
31 #include <linux/fs.h>
32 #include <linux/sysfs.h>
33 #include <linux/ctype.h>
34 #include <linux/workqueue.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsicam.h>
37 #include <scsi/scsi_transport.h>
38 #include <scsi/scsi_transport_fc.h>
39 #include <net/rtnetlink.h>
40 
41 #include <scsi/fc/fc_encaps.h>
42 #include <scsi/fc/fc_fip.h>
43 
44 #include <scsi/libfc.h>
45 #include <scsi/fc_frame.h>
46 #include <scsi/libfcoe.h>
47 
48 #include "fcoe.h"
49 
50 MODULE_AUTHOR("Open-FCoE.org");
51 MODULE_DESCRIPTION("FCoE");
52 MODULE_LICENSE("GPL v2");
53 
54 /* Performance tuning parameters for fcoe */
55 static unsigned int fcoe_ddp_min;
56 module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
57 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for "	\
58 		 "Direct Data Placement (DDP).");
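/*
 * Usage sketch (assuming the module is built and loaded as fcoe.ko): the
 * threshold can be given at load time or changed later through sysfs,
 * since the parameter above is declared with S_IRUGO | S_IWUSR:
 *
 *	modprobe fcoe ddp_min=4096
 *	echo 4096 > /sys/module/fcoe/parameters/ddp_min
 *
 * I/O with less data than this threshold is not considered for DDP; see
 * fcoe_oem_match() below, which compares the FCP data length against
 * fcoe_ddp_min.
 */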
59 
60 DEFINE_MUTEX(fcoe_config_mutex);
61 
62 static struct workqueue_struct *fcoe_wq;
63 
64 /* fcoe_percpu_clean completion.  Waiter protected by fcoe_config_mutex */
65 static DECLARE_COMPLETION(fcoe_flush_completion);
66 
67 /* fcoe host list */
68 /* must only be accessed under the RTNL mutex */
69 LIST_HEAD(fcoe_hostlist);
70 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
71 
72 /* Function Prototypes */
73 static int fcoe_reset(struct Scsi_Host *);
74 static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
75 static int fcoe_rcv(struct sk_buff *, struct net_device *,
76 		    struct packet_type *, struct net_device *);
77 static int fcoe_percpu_receive_thread(void *);
78 static void fcoe_percpu_clean(struct fc_lport *);
79 static int fcoe_link_speed_update(struct fc_lport *);
80 static int fcoe_link_ok(struct fc_lport *);
81 
82 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
83 static int fcoe_hostlist_add(const struct fc_lport *);
84 
85 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
86 static void fcoe_dev_setup(void);
87 static void fcoe_dev_cleanup(void);
88 static struct fcoe_interface
89 *fcoe_hostlist_lookup_port(const struct net_device *);
90 
91 static int fcoe_fip_recv(struct sk_buff *, struct net_device *,
92 			 struct packet_type *, struct net_device *);
93 
94 static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *);
95 static void fcoe_update_src_mac(struct fc_lport *, u8 *);
96 static u8 *fcoe_get_src_mac(struct fc_lport *);
97 static void fcoe_destroy_work(struct work_struct *);
98 
99 static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
100 			  unsigned int);
101 static int fcoe_ddp_done(struct fc_lport *, u16);
102 static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
103 			   unsigned int);
104 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
105 
106 static bool fcoe_match(struct net_device *netdev);
107 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
108 static int fcoe_destroy(struct net_device *netdev);
109 static int fcoe_enable(struct net_device *netdev);
110 static int fcoe_disable(struct net_device *netdev);
111 
112 static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
113 				      u32 did, struct fc_frame *,
114 				      unsigned int op,
115 				      void (*resp)(struct fc_seq *,
116 						   struct fc_frame *,
117 						   void *),
118 				      void *, u32 timeout);
119 static void fcoe_recv_frame(struct sk_buff *skb);
120 
121 static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
122 
123 /* notification function for packets from net device */
124 static struct notifier_block fcoe_notifier = {
125 	.notifier_call = fcoe_device_notification,
126 };
127 
128 /* notification function for CPU hotplug events */
129 static struct notifier_block fcoe_cpu_notifier = {
130 	.notifier_call = fcoe_cpu_callback,
131 };
132 
133 static struct scsi_transport_template *fcoe_nport_scsi_transport;
134 static struct scsi_transport_template *fcoe_vport_scsi_transport;
135 
136 static int fcoe_vport_destroy(struct fc_vport *);
137 static int fcoe_vport_create(struct fc_vport *, bool disabled);
138 static int fcoe_vport_disable(struct fc_vport *, bool disable);
139 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
140 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
141 static int fcoe_validate_vport_create(struct fc_vport *);
142 
143 static struct libfc_function_template fcoe_libfc_fcn_templ = {
144 	.frame_send = fcoe_xmit,
145 	.ddp_setup = fcoe_ddp_setup,
146 	.ddp_done = fcoe_ddp_done,
147 	.ddp_target = fcoe_ddp_target,
148 	.elsct_send = fcoe_elsct_send,
149 	.get_lesb = fcoe_get_lesb,
150 	.lport_set_port_id = fcoe_set_port_id,
151 };
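/*
 * This template is handed to fcoe_libfc_config() in fcoe_if_create()
 * below; through it libfc calls back into fcoe_xmit(), the fcoe_ddp_*()
 * wrappers, fcoe_elsct_send(), fcoe_get_lesb() and fcoe_set_port_id()
 * for the transport-specific parts of its work.
 */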
152 
153 struct fc_function_template fcoe_nport_fc_functions = {
154 	.show_host_node_name = 1,
155 	.show_host_port_name = 1,
156 	.show_host_supported_classes = 1,
157 	.show_host_supported_fc4s = 1,
158 	.show_host_active_fc4s = 1,
159 	.show_host_maxframe_size = 1,
160 
161 	.show_host_port_id = 1,
162 	.show_host_supported_speeds = 1,
163 	.get_host_speed = fc_get_host_speed,
164 	.show_host_speed = 1,
165 	.show_host_port_type = 1,
166 	.get_host_port_state = fc_get_host_port_state,
167 	.show_host_port_state = 1,
168 	.show_host_symbolic_name = 1,
169 
170 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
171 	.show_rport_maxframe_size = 1,
172 	.show_rport_supported_classes = 1,
173 
174 	.show_host_fabric_name = 1,
175 	.show_starget_node_name = 1,
176 	.show_starget_port_name = 1,
177 	.show_starget_port_id = 1,
178 	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
179 	.show_rport_dev_loss_tmo = 1,
180 	.get_fc_host_stats = fc_get_host_stats,
181 	.issue_fc_host_lip = fcoe_reset,
182 
183 	.terminate_rport_io = fc_rport_terminate_io,
184 
185 	.vport_create = fcoe_vport_create,
186 	.vport_delete = fcoe_vport_destroy,
187 	.vport_disable = fcoe_vport_disable,
188 	.set_vport_symbolic_name = fcoe_set_vport_symbolic_name,
189 
190 	.bsg_request = fc_lport_bsg_request,
191 };
192 
193 struct fc_function_template fcoe_vport_fc_functions = {
194 	.show_host_node_name = 1,
195 	.show_host_port_name = 1,
196 	.show_host_supported_classes = 1,
197 	.show_host_supported_fc4s = 1,
198 	.show_host_active_fc4s = 1,
199 	.show_host_maxframe_size = 1,
200 
201 	.show_host_port_id = 1,
202 	.show_host_supported_speeds = 1,
203 	.get_host_speed = fc_get_host_speed,
204 	.show_host_speed = 1,
205 	.show_host_port_type = 1,
206 	.get_host_port_state = fc_get_host_port_state,
207 	.show_host_port_state = 1,
208 	.show_host_symbolic_name = 1,
209 
210 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
211 	.show_rport_maxframe_size = 1,
212 	.show_rport_supported_classes = 1,
213 
214 	.show_host_fabric_name = 1,
215 	.show_starget_node_name = 1,
216 	.show_starget_port_name = 1,
217 	.show_starget_port_id = 1,
218 	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
219 	.show_rport_dev_loss_tmo = 1,
220 	.get_fc_host_stats = fc_get_host_stats,
221 	.issue_fc_host_lip = fcoe_reset,
222 
223 	.terminate_rport_io = fc_rport_terminate_io,
224 
225 	.bsg_request = fc_lport_bsg_request,
226 };
227 
228 static struct scsi_host_template fcoe_shost_template = {
229 	.module = THIS_MODULE,
230 	.name = "FCoE Driver",
231 	.proc_name = FCOE_NAME,
232 	.queuecommand = fc_queuecommand,
233 	.eh_abort_handler = fc_eh_abort,
234 	.eh_device_reset_handler = fc_eh_device_reset,
235 	.eh_host_reset_handler = fc_eh_host_reset,
236 	.slave_alloc = fc_slave_alloc,
237 	.change_queue_depth = fc_change_queue_depth,
238 	.change_queue_type = fc_change_queue_type,
239 	.this_id = -1,
240 	.cmd_per_lun = 3,
241 	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
242 	.use_clustering = ENABLE_CLUSTERING,
243 	.sg_tablesize = SG_ALL,
244 	.max_sectors = 0xffff,
245 };
246 
247 /**
248  * fcoe_interface_setup() - Setup a FCoE interface
249  * @fcoe:   The new FCoE interface
250  * @netdev: The net device that the fcoe interface is on
251  *
252  * Returns: 0 for success
253  * Locking: must be called with the RTNL mutex held
254  */
255 static int fcoe_interface_setup(struct fcoe_interface *fcoe,
256 				struct net_device *netdev)
257 {
258 	struct fcoe_ctlr *fip = &fcoe->ctlr;
259 	struct netdev_hw_addr *ha;
260 	struct net_device *real_dev;
261 	u8 flogi_maddr[ETH_ALEN];
262 	const struct net_device_ops *ops;
263 
264 	fcoe->netdev = netdev;
265 
266 	/* Let LLD initialize for FCoE */
267 	ops = netdev->netdev_ops;
268 	if (ops->ndo_fcoe_enable) {
269 		if (ops->ndo_fcoe_enable(netdev))
270 			FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE"
271 					" specific feature for LLD.\n");
272 	}
273 
274 	/* Bonding devices are not supported */
275 	if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) {
276 		FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
277 		return -EOPNOTSUPP;
278 	}
279 
280 	/* Look for a SAN MAC address; if multiple SAN MACs exist, only
281 	 * use the first one for SPMA */
282 	real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
283 		vlan_dev_real_dev(netdev) : netdev;
284 	rcu_read_lock();
285 	for_each_dev_addr(real_dev, ha) {
286 		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
287 		    (is_valid_ether_addr(ha->addr))) {
288 			memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
289 			fip->spma = 1;
290 			break;
291 		}
292 	}
293 	rcu_read_unlock();
294 
295 	/* Set up the source MAC address */
296 	if (!fip->spma)
297 		memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);
298 
299 	/*
300 	 * Add FCoE MAC address as second unicast MAC address
301 	 * or enter promiscuous mode if not capable of listening
302 	 * for multiple unicast MACs.
303 	 */
304 	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
305 	dev_uc_add(netdev, flogi_maddr);
306 	if (fip->spma)
307 		dev_uc_add(netdev, fip->ctl_src_addr);
308 	if (fip->mode == FIP_MODE_VN2VN) {
309 		dev_mc_add(netdev, FIP_ALL_VN2VN_MACS);
310 		dev_mc_add(netdev, FIP_ALL_P2P_MACS);
311 	} else
312 		dev_mc_add(netdev, FIP_ALL_ENODE_MACS);
313 
314 	/*
315 	 * Set up the receive handler from the Ethernet driver
316 	 * on the FCoE ethertype for the given device.
317 	 */
318 	fcoe->fcoe_packet_type.func = fcoe_rcv;
319 	fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
320 	fcoe->fcoe_packet_type.dev = netdev;
321 	dev_add_pack(&fcoe->fcoe_packet_type);
322 
323 	fcoe->fip_packet_type.func = fcoe_fip_recv;
324 	fcoe->fip_packet_type.type = htons(ETH_P_FIP);
325 	fcoe->fip_packet_type.dev = netdev;
326 	dev_add_pack(&fcoe->fip_packet_type);
327 
328 	return 0;
329 }
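/*
 * fcoe_interface_cleanup() below is the symmetric teardown of the setup
 * done here: it removes both packet handlers, deletes the FLOGI/SPMA
 * unicast and FIP multicast filters, and notifies the LLD through
 * ndo_fcoe_disable().
 */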
330 
331 /**
332  * fcoe_interface_create() - Create a FCoE interface on a net device
333  * @netdev: The net device to create the FCoE interface on
334  * @fip_mode: The mode to use for FIP
335  *
336  * Returns: pointer to a struct fcoe_interface or NULL on error
337  */
338 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
339 						    enum fip_state fip_mode)
340 {
341 	struct fcoe_interface *fcoe;
342 	int err;
343 
344 	if (!try_module_get(THIS_MODULE)) {
345 		FCOE_NETDEV_DBG(netdev,
346 				"Could not get a reference to the module\n");
347 		fcoe = ERR_PTR(-EBUSY);
348 		goto out;
349 	}
350 
351 	fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
352 	if (!fcoe) {
353 		FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
354 		fcoe = ERR_PTR(-ENOMEM);
355 		goto out_nomod;
356 	}
357 
358 	dev_hold(netdev);
359 	kref_init(&fcoe->kref);
360 
361 	/*
362 	 * Initialize FIP.
363 	 */
364 	fcoe_ctlr_init(&fcoe->ctlr, fip_mode);
365 	fcoe->ctlr.send = fcoe_fip_send;
366 	fcoe->ctlr.update_mac = fcoe_update_src_mac;
367 	fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
368 
369 	err = fcoe_interface_setup(fcoe, netdev);
370 	if (err) {
371 		fcoe_ctlr_destroy(&fcoe->ctlr);
372 		kfree(fcoe);
373 		dev_put(netdev);
374 		fcoe = ERR_PTR(err);
375 		goto out_nomod;
376 	}
377 
378 	goto out;
379 
380 out_nomod:
381 	module_put(THIS_MODULE);
382 out:
383 	return fcoe;
384 }
385 
386 /**
387  * fcoe_interface_release() - fcoe_interface kref release function
388  * @kref: Embedded reference count in an fcoe_interface struct
389  */
390 static void fcoe_interface_release(struct kref *kref)
391 {
392 	struct fcoe_interface *fcoe;
393 	struct net_device *netdev;
394 
395 	fcoe = container_of(kref, struct fcoe_interface, kref);
396 	netdev = fcoe->netdev;
397 	/* tear-down the FCoE controller */
398 	fcoe_ctlr_destroy(&fcoe->ctlr);
399 	kfree(fcoe);
400 	dev_put(netdev);
401 	module_put(THIS_MODULE);
402 }
403 
404 /**
405  * fcoe_interface_get() - Get a reference to a FCoE interface
406  * @fcoe: The FCoE interface to be held
407  */
408 static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
409 {
410 	kref_get(&fcoe->kref);
411 }
412 
413 /**
414  * fcoe_interface_put() - Put a reference to a FCoE interface
415  * @fcoe: The FCoE interface to be released
416  */
417 static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
418 {
419 	kref_put(&fcoe->kref, fcoe_interface_release);
420 }
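/*
 * Rough reference-count lifetime for a struct fcoe_interface, pieced
 * together from the helpers in this file (the create/destroy entry
 * points appear further down):
 *
 *	fcoe_interface_create()    kref_init()           refcount = 1
 *	fcoe_if_create()           fcoe_interface_get()  refcount = 2
 *	fcoe_if_destroy()          fcoe_interface_put()  refcount = 1
 *	fcoe_interface_cleanup()   fcoe_interface_put()  refcount = 0,
 *	                           fcoe_interface_release() frees it
 */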
421 
422 /**
423  * fcoe_interface_cleanup() - Clean up a FCoE interface
424  * @fcoe: The FCoE interface to be cleaned up
425  *
426  * Caller must be holding the RTNL mutex
427  */
428 void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
429 {
430 	struct net_device *netdev = fcoe->netdev;
431 	struct fcoe_ctlr *fip = &fcoe->ctlr;
432 	u8 flogi_maddr[ETH_ALEN];
433 	const struct net_device_ops *ops;
434 
435 	/*
436 	 * Stop listening for Ethernet packets on this interface.
437 	 * synchronize_net() ensures that the packet handlers are no longer
438 	 * running on another CPU. dev_remove_pack() would do that per call;
439 	 * using the unsynchronized __dev_remove_pack() avoids multiple delays.
440 	 */
441 	__dev_remove_pack(&fcoe->fcoe_packet_type);
442 	__dev_remove_pack(&fcoe->fip_packet_type);
443 	synchronize_net();
444 
445 	/* Delete secondary MAC addresses */
446 	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
447 	dev_uc_del(netdev, flogi_maddr);
448 	if (fip->spma)
449 		dev_uc_del(netdev, fip->ctl_src_addr);
450 	if (fip->mode == FIP_MODE_VN2VN) {
451 		dev_mc_del(netdev, FIP_ALL_VN2VN_MACS);
452 		dev_mc_del(netdev, FIP_ALL_P2P_MACS);
453 	} else
454 		dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
455 
456 	/* Tell the LLD we are done w/ FCoE */
457 	ops = netdev->netdev_ops;
458 	if (ops->ndo_fcoe_disable) {
459 		if (ops->ndo_fcoe_disable(netdev))
460 			FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
461 					" specific feature for LLD.\n");
462 	}
463 
464 	/* Release the self-reference taken during fcoe_interface_create() */
465 	fcoe_interface_put(fcoe);
466 }
467 
468 /**
469  * fcoe_fip_recv() - Handler for received FIP frames
470  * @skb:      The receive skb
471  * @netdev:   The associated net device
472  * @ptype:    The packet_type structure which was used to register this handler
473  * @orig_dev: The original net_device that the skb was received on.
474  *	      (in case dev is a bond)
475  *
476  * Returns: 0 for success
477  */
478 static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
479 			 struct packet_type *ptype,
480 			 struct net_device *orig_dev)
481 {
482 	struct fcoe_interface *fcoe;
483 
484 	fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
485 	fcoe_ctlr_recv(&fcoe->ctlr, skb);
486 	return 0;
487 }
488 
489 /**
490  * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
491  * @fip: The FCoE controller
492  * @skb: The FIP packet to be sent
493  */
494 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
495 {
496 	skb->dev = fcoe_from_ctlr(fip)->netdev;
497 	dev_queue_xmit(skb);
498 }
499 
500 /**
501  * fcoe_update_src_mac() - Update the Ethernet MAC filters
502  * @lport: The local port to update the source MAC on
503  * @addr:  Unicast MAC address to add
504  *
505  * Remove any previously-set unicast MAC filter.
506  * Add secondary FCoE MAC address filter for our OUI.
507  */
508 static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
509 {
510 	struct fcoe_port *port = lport_priv(lport);
511 	struct fcoe_interface *fcoe = port->priv;
512 
513 	rtnl_lock();
514 	if (!is_zero_ether_addr(port->data_src_addr))
515 		dev_uc_del(fcoe->netdev, port->data_src_addr);
516 	if (!is_zero_ether_addr(addr))
517 		dev_uc_add(fcoe->netdev, addr);
518 	memcpy(port->data_src_addr, addr, ETH_ALEN);
519 	rtnl_unlock();
520 }
521 
522 /**
523  * fcoe_get_src_mac() - return the Ethernet source address for an lport
524  * @lport: libfc lport
525  */
526 static u8 *fcoe_get_src_mac(struct fc_lport *lport)
527 {
528 	struct fcoe_port *port = lport_priv(lport);
529 
530 	return port->data_src_addr;
531 }
532 
533 /**
534  * fcoe_lport_config() - Set up a local port
535  * @lport: The local port to be setup
536  *
537  * Returns: 0 for success
538  */
539 static int fcoe_lport_config(struct fc_lport *lport)
540 {
541 	lport->link_up = 0;
542 	lport->qfull = 0;
543 	lport->max_retry_count = 3;
544 	lport->max_rport_retry_count = 3;
545 	lport->e_d_tov = 2 * 1000;	/* FC-FS default */
546 	lport->r_a_tov = 2 * 2 * 1000;
547 	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
548 				 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
549 	lport->does_npiv = 1;
550 
551 	fc_lport_init_stats(lport);
552 
553 	/* lport fc_lport related configuration */
554 	fc_lport_config(lport);
555 
556 	/* offload related configuration */
557 	lport->crc_offload = 0;
558 	lport->seq_offload = 0;
559 	lport->lro_enabled = 0;
560 	lport->lro_xid = 0;
561 	lport->lso_max = 0;
562 
563 	return 0;
564 }
565 
566 /**
567  * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
568  * @netdev: the associated net device
569  * @wwn: the output WWN
570  * @type: the type of WWN (WWPN or WWNN)
571  *
572  * Returns: 0 for success
573  */
574 static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
575 {
576 	const struct net_device_ops *ops = netdev->netdev_ops;
577 
578 	if (ops->ndo_fcoe_get_wwn)
579 		return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
580 	return -EINVAL;
581 }
582 
583 /**
584  * fcoe_netdev_features_change() - Update the lport's offload flags based
585  * on the LLD netdev's FCoE feature flags
586  */
587 static void fcoe_netdev_features_change(struct fc_lport *lport,
588 					struct net_device *netdev)
589 {
590 	mutex_lock(&lport->lp_mutex);
591 
592 	if (netdev->features & NETIF_F_SG)
593 		lport->sg_supp = 1;
594 	else
595 		lport->sg_supp = 0;
596 
597 	if (netdev->features & NETIF_F_FCOE_CRC) {
598 		lport->crc_offload = 1;
599 		FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
600 	} else {
601 		lport->crc_offload = 0;
602 	}
603 
604 	if (netdev->features & NETIF_F_FSO) {
605 		lport->seq_offload = 1;
606 		lport->lso_max = netdev->gso_max_size;
607 		FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
608 				lport->lso_max);
609 	} else {
610 		lport->seq_offload = 0;
611 		lport->lso_max = 0;
612 	}
613 
614 	if (netdev->fcoe_ddp_xid) {
615 		lport->lro_enabled = 1;
616 		lport->lro_xid = netdev->fcoe_ddp_xid;
617 		FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
618 				lport->lro_xid);
619 	} else {
620 		lport->lro_enabled = 0;
621 		lport->lro_xid = 0;
622 	}
623 
624 	mutex_unlock(&lport->lp_mutex);
625 }
626 
627 /**
628  * fcoe_netdev_config() - Set up a net device for SW FCoE
629  * @lport:  The local port that is associated with the net device
630  * @netdev: The associated net device
631  *
632  * Must be called after fcoe_lport_config() as it will use local port mutex
633  *
634  * Returns: 0 for success
635  */
636 static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
637 {
638 	u32 mfs;
639 	u64 wwnn, wwpn;
640 	struct fcoe_interface *fcoe;
641 	struct fcoe_port *port;
642 
643 	/* Setup lport private data to point to fcoe softc */
644 	port = lport_priv(lport);
645 	fcoe = port->priv;
646 
647 	/*
648 	 * Determine max frame size based on underlying device and optional
649 	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
650 	 * will return 0, so do this first.
651 	 */
652 	mfs = netdev->mtu;
653 	if (netdev->features & NETIF_F_FCOE_MTU) {
654 		mfs = FCOE_MTU;
655 		FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs);
656 	}
657 	mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
658 	if (fc_set_mfs(lport, mfs))
659 		return -EINVAL;
660 
661 	/* offload features support */
662 	fcoe_netdev_features_change(lport, netdev);
663 
664 	skb_queue_head_init(&port->fcoe_pending_queue);
665 	port->fcoe_pending_queue_active = 0;
666 	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
667 
668 	fcoe_link_speed_update(lport);
669 
670 	if (!lport->vport) {
671 		if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
672 			wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
673 		fc_set_wwnn(lport, wwnn);
674 		if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
675 			wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
676 						 2, 0);
677 		fc_set_wwpn(lport, wwpn);
678 	}
679 
680 	return 0;
681 }
682 
683 /**
684  * fcoe_shost_config() - Set up the SCSI host associated with a local port
685  * @lport: The local port
686  * @dev:   The device associated with the SCSI host
687  *
688  * Must be called after fcoe_lport_config() and fcoe_netdev_config()
689  *
690  * Returns: 0 for success
691  */
692 static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
693 {
694 	int rc = 0;
695 
696 	/* lport scsi host config */
697 	lport->host->max_lun = FCOE_MAX_LUN;
698 	lport->host->max_id = FCOE_MAX_FCP_TARGET;
699 	lport->host->max_channel = 0;
700 	lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;
701 
702 	if (lport->vport)
703 		lport->host->transportt = fcoe_vport_scsi_transport;
704 	else
705 		lport->host->transportt = fcoe_nport_scsi_transport;
706 
707 	/* add the new host to the SCSI-ml */
708 	rc = scsi_add_host(lport->host, dev);
709 	if (rc) {
710 		FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: "
711 				"error on scsi_add_host\n");
712 		return rc;
713 	}
714 
715 	if (!lport->vport)
716 		fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
717 
718 	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
719 		 "%s v%s over %s", FCOE_NAME, FCOE_VERSION,
720 		 fcoe_netdev(lport)->name);
721 
722 	return 0;
723 }
724 
725 /**
726  * fcoe_oem_match() - The match routine for the offloaded exchange manager
727  * @fp: The I/O frame
728  *
729  * This routine will be associated with an exchange manager (EM). When
730  * the libfc exchange handling code is looking for an EM to use it will
731  * call this routine and pass it the frame that it wishes to send. This
732  * routine will return True if the associated EM is to be used and False
733  * if the exchange code should continue looking for an EM.
734  *
735  * The offload EM that this routine is associated with will handle any
736  * packets that are for SCSI read requests.
737  *
738  * This has been enhanced to work when FCoE stack is operating in target
739  * mode.
740  *
741  * Returns: True for read-type I/O and, in target mode, for large write commands; otherwise false.
742  */
743 bool fcoe_oem_match(struct fc_frame *fp)
744 {
745 	struct fc_frame_header *fh = fc_frame_header_get(fp);
746 	struct fcp_cmnd *fcp;
747 
748 	if (fc_fcp_is_read(fr_fsp(fp)) &&
749 	    (fr_fsp(fp)->data_len > fcoe_ddp_min))
750 		return true;
751 	else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
752 		fcp = fc_frame_payload_get(fp, sizeof(*fcp));
753 		if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
754 		    fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
755 		    (fcp->fc_flags & FCP_CFL_WRDATA))
756 			return true;
757 	}
758 	return false;
759 }
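/*
 * Illustrative examples of the match above: with ddp_min left at its
 * default of 0, any SCSI READ (fc_fcp_is_read() true and data_len above
 * the threshold) is steered to the offload EM.  For target mode, an FCP
 * command with FCP_CFL_WRDATA set, fc_dl above the threshold and an
 * rx_id that is still FC_XID_UNKNOWN matches as well, so write data can
 * also use a DDP-capable exchange ID.
 */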
760 
761 /**
762  * fcoe_em_config() - Allocate and configure an exchange manager
763  * @lport: The local port that the new EM will be associated with
764  *
765  * Returns: 0 on success
766  */
767 static inline int fcoe_em_config(struct fc_lport *lport)
768 {
769 	struct fcoe_port *port = lport_priv(lport);
770 	struct fcoe_interface *fcoe = port->priv;
771 	struct fcoe_interface *oldfcoe = NULL;
772 	struct net_device *old_real_dev, *cur_real_dev;
773 	u16 min_xid = FCOE_MIN_XID;
774 	u16 max_xid = FCOE_MAX_XID;
775 
776 	/*
777 	 * Check if we need to allocate an EM instance for offload
778 	 * exchange IDs to be shared across all VN_PORTs/lports.
779 	 */
780 	if (!lport->lro_enabled || !lport->lro_xid ||
781 	    (lport->lro_xid >= max_xid)) {
782 		lport->lro_xid = 0;
783 		goto skip_oem;
784 	}
785 
786 	/*
787 	 * Reuse an existing offload EM instance if one is already
788 	 * allocated on the real Ethernet device.
789 	 */
790 	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
791 		cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
792 	else
793 		cur_real_dev = fcoe->netdev;
794 
795 	list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
796 		if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
797 			old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
798 		else
799 			old_real_dev = oldfcoe->netdev;
800 
801 		if (cur_real_dev == old_real_dev) {
802 			fcoe->oem = oldfcoe->oem;
803 			break;
804 		}
805 	}
806 
807 	if (fcoe->oem) {
808 		if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) {
809 			printk(KERN_ERR "fcoe_em_config: failed to add "
810 			       "offload em:%p on interface:%s\n",
811 			       fcoe->oem, fcoe->netdev->name);
812 			return -ENOMEM;
813 		}
814 	} else {
815 		fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3,
816 					      FCOE_MIN_XID, lport->lro_xid,
817 					      fcoe_oem_match);
818 		if (!fcoe->oem) {
819 			printk(KERN_ERR "fcoe_em_config: failed to allocate "
820 			       "em for offload exches on interface:%s\n",
821 			       fcoe->netdev->name);
822 			return -ENOMEM;
823 		}
824 	}
825 
826 	/*
827 	 * Exclude offload EM xid range from next EM xid range.
828 	 */
829 	min_xid += lport->lro_xid + 1;
830 
831 skip_oem:
832 	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) {
833 		printk(KERN_ERR "fcoe_em_config: failed to "
834 		       "allocate em on interface %s\n", fcoe->netdev->name);
835 		return -ENOMEM;
836 	}
837 
838 	return 0;
839 }
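/*
 * XID partitioning sketch: when DDP offload is usable, the offload EM
 * covers [FCOE_MIN_XID, lport->lro_xid] and the general EM allocated at
 * skip_oem covers [FCOE_MIN_XID + lro_xid + 1, FCOE_MAX_XID]; without
 * offload, a single EM spans [FCOE_MIN_XID, FCOE_MAX_XID].
 */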
840 
841 /**
842  * fcoe_if_destroy() - Tear down a SW FCoE instance
843  * @lport: The local port to be destroyed
844  *
845  */
846 static void fcoe_if_destroy(struct fc_lport *lport)
847 {
848 	struct fcoe_port *port = lport_priv(lport);
849 	struct fcoe_interface *fcoe = port->priv;
850 	struct net_device *netdev = fcoe->netdev;
851 
852 	FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
853 
854 	/* Logout of the fabric */
855 	fc_fabric_logoff(lport);
856 
857 	/* Cleanup the fc_lport */
858 	fc_lport_destroy(lport);
859 
860 	/* Stop the transmit retry timer */
861 	del_timer_sync(&port->timer);
862 
863 	/* Free existing transmit skbs */
864 	fcoe_clean_pending_queue(lport);
865 
866 	rtnl_lock();
867 	if (!is_zero_ether_addr(port->data_src_addr))
868 		dev_uc_del(netdev, port->data_src_addr);
869 	rtnl_unlock();
870 
871 	/* Release reference held in fcoe_if_create() */
872 	fcoe_interface_put(fcoe);
873 
874 	/* Free queued packets for the per-CPU receive threads */
875 	fcoe_percpu_clean(lport);
876 
877 	/* Detach from the scsi-ml */
878 	fc_remove_host(lport->host);
879 	scsi_remove_host(lport->host);
880 
881 	/* Destroy lport scsi_priv */
882 	fc_fcp_destroy(lport);
883 
884 	/* There are no more rports or I/O, free the EM */
885 	fc_exch_mgr_free(lport);
886 
887 	/* Free memory used by statistical counters */
888 	fc_lport_free_stats(lport);
889 
890 	/* Release the Scsi_Host */
891 	scsi_host_put(lport->host);
892 }
893 
894 /**
895  * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device
896  * @lport: The local port to setup DDP for
897  * @xid:   The exchange ID for this DDP transfer
898  * @sgl:   The scatterlist describing this transfer
899  * @sgc:   The number of sg items
900  *
901  * Returns: 0 if the DDP context was not configured
902  */
903 static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
904 			  struct scatterlist *sgl, unsigned int sgc)
905 {
906 	struct net_device *netdev = fcoe_netdev(lport);
907 
908 	if (netdev->netdev_ops->ndo_fcoe_ddp_setup)
909 		return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev,
910 							      xid, sgl,
911 							      sgc);
912 
913 	return 0;
914 }
915 
916 /**
917  * fcoe_ddp_target() - Call a LLD's ddp_target through the net device
918  * @lport: The local port to setup DDP for
919  * @xid:   The exchange ID for this DDP transfer
920  * @sgl:   The scatterlist describing this transfer
921  * @sgc:   The number of sg items
922  *
923  * Returns: 0 if the DDP context was not configured
924  */
925 static int fcoe_ddp_target(struct fc_lport *lport, u16 xid,
926 			   struct scatterlist *sgl, unsigned int sgc)
927 {
928 	struct net_device *netdev = fcoe_netdev(lport);
929 
930 	if (netdev->netdev_ops->ndo_fcoe_ddp_target)
931 		return netdev->netdev_ops->ndo_fcoe_ddp_target(netdev, xid,
932 							       sgl, sgc);
933 
934 	return 0;
935 }
936 
937 
938 /**
939  * fcoe_ddp_done() - Call a LLD's ddp_done through the net device
940  * @lport: The local port to complete DDP on
941  * @xid:   The exchange ID for this DDP transfer
942  *
943  * Returns: the length of data that has been completed by DDP
944  */
945 static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
946 {
947 	struct net_device *netdev = fcoe_netdev(lport);
948 
949 	if (netdev->netdev_ops->ndo_fcoe_ddp_done)
950 		return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
951 	return 0;
952 }
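/*
 * The three wrappers above simply pass DDP setup/target/done requests
 * through to the underlying LLD via its net_device_ops.  When the LLD
 * does not implement the corresponding ndo_fcoe_* hook they return 0,
 * i.e. no DDP context is configured (or no data was placed) and libfc
 * proceeds without offloaded data placement.
 */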
953 
954 /**
955  * fcoe_if_create() - Create a FCoE instance on an interface
956  * @fcoe:   The FCoE interface to create a local port on
957  * @parent: The device pointer to be the parent in sysfs for the SCSI host
958  * @npiv:   Indicates if the port is a vport or not
959  *
960  * Creates an fc_lport instance and a Scsi_Host instance and configures them.
961  *
962  * Returns: The allocated fc_lport or an error pointer
963  */
964 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
965 				       struct device *parent, int npiv)
966 {
967 	struct net_device *netdev = fcoe->netdev;
968 	struct fc_lport *lport, *n_port;
969 	struct fcoe_port *port;
970 	struct Scsi_Host *shost;
971 	int rc;
972 	/*
973 	 * parent is only a vport if npiv is 1,
974 	 * but we'll only use vport in that case so go ahead and set it
975 	 */
976 	struct fc_vport *vport = dev_to_vport(parent);
977 
978 	FCOE_NETDEV_DBG(netdev, "Create Interface\n");
979 
980 	if (!npiv)
981 		lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port));
982 	else
983 		lport = libfc_vport_create(vport, sizeof(*port));
984 
985 	if (!lport) {
986 		FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
987 		rc = -ENOMEM;
988 		goto out;
989 	}
990 	port = lport_priv(lport);
991 	port->lport = lport;
992 	port->priv = fcoe;
993 	port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
994 	port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
995 	INIT_WORK(&port->destroy_work, fcoe_destroy_work);
996 
997 	/* configure a fc_lport including the exchange manager */
998 	rc = fcoe_lport_config(lport);
999 	if (rc) {
1000 		FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
1001 				"interface\n");
1002 		goto out_host_put;
1003 	}
1004 
1005 	if (npiv) {
1006 		FCOE_NETDEV_DBG(netdev, "Setting vport names, "
1007 				"%16.16llx %16.16llx\n",
1008 				vport->node_name, vport->port_name);
1009 		fc_set_wwnn(lport, vport->node_name);
1010 		fc_set_wwpn(lport, vport->port_name);
1011 	}
1012 
1013 	/* configure lport network properties */
1014 	rc = fcoe_netdev_config(lport, netdev);
1015 	if (rc) {
1016 		FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
1017 				"interface\n");
1018 		goto out_lp_destroy;
1019 	}
1020 
1021 	/* configure lport scsi host properties */
1022 	rc = fcoe_shost_config(lport, parent);
1023 	if (rc) {
1024 		FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
1025 				"interface\n");
1026 		goto out_lp_destroy;
1027 	}
1028 
1029 	/* Initialize the library */
1030 	rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1);
1031 	if (rc) {
1032 		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
1033 				"interface\n");
1034 		goto out_lp_destroy;
1035 	}
1036 
1037 	/*
1038 	 * fcoe_em_config() and fcoe_hostlist_add() both
1039 	 * need to be atomic with respect to other changes to the
1040 	 * hostlist since fcoe_em_config() looks for an existing EM
1041 	 * instance on the host list updated by fcoe_hostlist_add().
1042 	 *
1043 	 * This is currently handled through the fcoe_config_mutex
1044 	 * being held.
1045 	 */
1046 	if (!npiv)
1047 		/* lport exch manager allocation */
1048 		rc = fcoe_em_config(lport);
1049 	else {
1050 		shost = vport_to_shost(vport);
1051 		n_port = shost_priv(shost);
1052 		rc = fc_exch_mgr_list_clone(n_port, lport);
1053 	}
1054 
1055 	if (rc) {
1056 		FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n");
1057 		goto out_lp_destroy;
1058 	}
1059 
1060 	fcoe_interface_get(fcoe);
1061 	return lport;
1062 
1063 out_lp_destroy:
1064 	fc_exch_mgr_free(lport);
1065 out_host_put:
1066 	scsi_host_put(lport->host);
1067 out:
1068 	return ERR_PTR(rc);
1069 }
1070 
1071 /**
1072  * fcoe_if_init() - Initialization routine for fcoe.ko
1073  *
1074  * Attaches the SW FCoE transport to the FC transport
1075  *
1076  * Returns: 0 on success
1077  */
1078 static int __init fcoe_if_init(void)
1079 {
1080 	/* attach to scsi transport */
1081 	fcoe_nport_scsi_transport =
1082 		fc_attach_transport(&fcoe_nport_fc_functions);
1083 	fcoe_vport_scsi_transport =
1084 		fc_attach_transport(&fcoe_vport_fc_functions);
1085 
1086 	if (!fcoe_nport_scsi_transport) {
1087 		printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
1088 		return -ENODEV;
1089 	}
1090 
1091 	return 0;
1092 }
1093 
1094 /**
1095  * fcoe_if_exit() - Tear down fcoe.ko
1096  *
1097  * Detaches the SW FCoE transport from the FC transport
1098  *
1099  * Returns: 0 on success
1100  */
1101 int __exit fcoe_if_exit(void)
1102 {
1103 	fc_release_transport(fcoe_nport_scsi_transport);
1104 	fc_release_transport(fcoe_vport_scsi_transport);
1105 	fcoe_nport_scsi_transport = NULL;
1106 	fcoe_vport_scsi_transport = NULL;
1107 	return 0;
1108 }
1109 
1110 /**
1111  * fcoe_percpu_thread_create() - Create a receive thread for an online CPU
1112  * @cpu: The CPU index of the CPU to create a receive thread for
1113  */
1114 static void fcoe_percpu_thread_create(unsigned int cpu)
1115 {
1116 	struct fcoe_percpu_s *p;
1117 	struct task_struct *thread;
1118 
1119 	p = &per_cpu(fcoe_percpu, cpu);
1120 
1121 	thread = kthread_create(fcoe_percpu_receive_thread,
1122 				(void *)p, "fcoethread/%d", cpu);
1123 
1124 	if (likely(!IS_ERR(thread))) {
1125 		kthread_bind(thread, cpu);
1126 		wake_up_process(thread);
1127 
1128 		spin_lock_bh(&p->fcoe_rx_list.lock);
1129 		p->thread = thread;
1130 		spin_unlock_bh(&p->fcoe_rx_list.lock);
1131 	}
1132 }
1133 
1134 /**
1135  * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU
1136  * @cpu: The CPU index of the CPU whose receive thread is to be destroyed
1137  *
1138  * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
1139  * current CPU's Rx thread. If the thread being destroyed is bound to
1140  * the CPU processing this context the skbs will be freed.
1141  */
1142 static void fcoe_percpu_thread_destroy(unsigned int cpu)
1143 {
1144 	struct fcoe_percpu_s *p;
1145 	struct task_struct *thread;
1146 	struct page *crc_eof;
1147 	struct sk_buff *skb;
1148 #ifdef CONFIG_SMP
1149 	struct fcoe_percpu_s *p0;
1150 	unsigned targ_cpu = get_cpu();
1151 #endif /* CONFIG_SMP */
1152 
1153 	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
1154 
1155 	/* Prevent any new skbs from being queued for this CPU. */
1156 	p = &per_cpu(fcoe_percpu, cpu);
1157 	spin_lock_bh(&p->fcoe_rx_list.lock);
1158 	thread = p->thread;
1159 	p->thread = NULL;
1160 	crc_eof = p->crc_eof_page;
1161 	p->crc_eof_page = NULL;
1162 	p->crc_eof_offset = 0;
1163 	spin_unlock_bh(&p->fcoe_rx_list.lock);
1164 
1165 #ifdef CONFIG_SMP
1166 	/*
1167 	 * Don't bother moving the skbs if this context is running
1168 	 * on the same CPU that is having its thread destroyed. This
1169 	 * can easily happen when the module is removed.
1170 	 */
1171 	if (cpu != targ_cpu) {
1172 		p0 = &per_cpu(fcoe_percpu, targ_cpu);
1173 		spin_lock_bh(&p0->fcoe_rx_list.lock);
1174 		if (p0->thread) {
1175 			FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
1176 				 cpu, targ_cpu);
1177 
1178 			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1179 				__skb_queue_tail(&p0->fcoe_rx_list, skb);
1180 			spin_unlock_bh(&p0->fcoe_rx_list.lock);
1181 		} else {
1182 			/*
1183 			 * The targeted CPU is not initialized and cannot accept
1184 			 * new	skbs. Unlock the targeted CPU and drop the skbs
1185 			 * on the CPU that is going offline.
1186 			 */
1187 			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1188 				kfree_skb(skb);
1189 			spin_unlock_bh(&p0->fcoe_rx_list.lock);
1190 		}
1191 	} else {
1192 		/*
1193 		 * This scenario occurs when the module is being removed
1194 		 * and all threads are being destroyed. skbs will continue
1195 		 * to be shifted from the CPU thread that is being removed
1196 		 * to the CPU thread associated with the CPU that is processing
1197 		 * the module removal. Once there is only one CPU Rx thread it
1198 		 * will reach this case and we will drop all skbs and later
1199 		 * stop the thread.
1200 		 */
1201 		spin_lock_bh(&p->fcoe_rx_list.lock);
1202 		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1203 			kfree_skb(skb);
1204 		spin_unlock_bh(&p->fcoe_rx_list.lock);
1205 	}
1206 	put_cpu();
1207 #else
1208 	/*
1209 	 * This a non-SMP scenario where the singular Rx thread is
1210 	 * being removed. Free all skbs and stop the thread.
1211 	 */
1212 	spin_lock_bh(&p->fcoe_rx_list.lock);
1213 	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1214 		kfree_skb(skb);
1215 	spin_unlock_bh(&p->fcoe_rx_list.lock);
1216 #endif
1217 
1218 	if (thread)
1219 		kthread_stop(thread);
1220 
1221 	if (crc_eof)
1222 		put_page(crc_eof);
1223 }
1224 
1225 /**
1226  * fcoe_cpu_callback() - Handler for CPU hotplug events
1227  * @nfb:    The callback data block
1228  * @action: The event triggering the callback
1229  * @hcpu:   The index of the CPU that the event is for
1230  *
1231  * This creates or destroys per-CPU data for fcoe
1232  *
1233  * Returns NOTIFY_OK always.
1234  */
1235 static int fcoe_cpu_callback(struct notifier_block *nfb,
1236 			     unsigned long action, void *hcpu)
1237 {
1238 	unsigned cpu = (unsigned long)hcpu;
1239 
1240 	switch (action) {
1241 	case CPU_ONLINE:
1242 	case CPU_ONLINE_FROZEN:
1243 		FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
1244 		fcoe_percpu_thread_create(cpu);
1245 		break;
1246 	case CPU_DEAD:
1247 	case CPU_DEAD_FROZEN:
1248 		FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
1249 		fcoe_percpu_thread_destroy(cpu);
1250 		break;
1251 	default:
1252 		break;
1253 	}
1254 	return NOTIFY_OK;
1255 }
1256 
1257 /**
1258  * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming
1259  *			command.
1260  * @curr_cpu:   CPU which received request
1261  *
1262  * This routine selects the next CPU based on the online CPU mask.
1263  *
1264  * Returns: int (CPU number). The caller must verify that the returned CPU is online.
1265  */
1266 static unsigned int fcoe_select_cpu(unsigned int curr_cpu)
1267 {
1268 	static unsigned int selected_cpu;
1269 
1270 	if (num_online_cpus() == 1)
1271 		return curr_cpu;
1272 	/*
1273 	 * Skipping "curr_cpu" (smp_processor_id()) in the selection is
1274 	 * intentional: it keeps the same CPU from also doing the
1275 	 * post-processing of the command. "curr_cpu" just receives the
1276 	 * incoming request when rx_id is UNKNOWN, while the other CPUs
1277 	 * actually process the command(s).
1278 	 */
1279 	do {
1280 		selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
1281 		if (selected_cpu >= nr_cpu_ids)
1282 			selected_cpu = cpumask_first(cpu_online_mask);
1283 	} while (selected_cpu == curr_cpu);
1284 	return selected_cpu;
1285 }
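/*
 * Typical call pattern, as used in fcoe_rcv() below for FCP frames
 * whose rx_id is still FC_XID_UNKNOWN:
 *
 *	do {
 *		cpu = fcoe_select_cpu(cpu);
 *	} while (!cpu_online(cpu));
 */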
1286 
1287 /**
1288  * fcoe_rcv() - Receive packets from a net device
1289  * @skb:    The received packet
1290  * @netdev: The net device that the packet was received on
1291  * @ptype:  The packet type context
1292  * @olddev: The original net device that the packet was received on
1293  *
1294  * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds a
1295  * FC frame and passes the frame to libfc.
1296  *
1297  * Returns: 0 for success
1298  */
1299 int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1300 	     struct packet_type *ptype, struct net_device *olddev)
1301 {
1302 	struct fc_lport *lport;
1303 	struct fcoe_rcv_info *fr;
1304 	struct fcoe_interface *fcoe;
1305 	struct fc_frame_header *fh;
1306 	struct fcoe_percpu_s *fps;
1307 	struct ethhdr *eh;
1308 	unsigned int cpu;
1309 
1310 	fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
1311 	lport = fcoe->ctlr.lp;
1312 	if (unlikely(!lport)) {
1313 		FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
1314 		goto err2;
1315 	}
1316 	if (!lport->link_up)
1317 		goto err2;
1318 
1319 	FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p "
1320 			"data:%p tail:%p end:%p sum:%d dev:%s",
1321 			skb->len, skb->data_len, skb->head, skb->data,
1322 			skb_tail_pointer(skb), skb_end_pointer(skb),
1323 			skb->csum, skb->dev ? skb->dev->name : "<NULL>");
1324 
1325 	eh = eth_hdr(skb);
1326 
1327 	if (is_fip_mode(&fcoe->ctlr) &&
1328 	    compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
1329 		FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
1330 				eh->h_source);
1331 		goto err;
1332 	}
1333 
1334 	/*
1335 	 * Check for minimum frame length, and make sure required FCoE
1336 	 * and FC headers are pulled into the linear data area.
1337 	 */
1338 	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
1339 		     !pskb_may_pull(skb, FCOE_HEADER_LEN)))
1340 		goto err;
1341 
1342 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
1343 	fh = (struct fc_frame_header *) skb_transport_header(skb);
1344 
1345 	if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) {
1346 		FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n",
1347 				eh->h_dest);
1348 		goto err;
1349 	}
1350 
1351 	fr = fcoe_dev_from_skb(skb);
1352 	fr->fr_dev = lport;
1353 	fr->ptype = ptype;
1354 
1355 	/*
1356 	 * If the incoming frame's exchange was originated by this
1357 	 * initiator, the received frame's exchange ID is ANDed with
1358 	 * the fc_cpu_mask bits to get the same CPU on which the
1359 	 * exchange was originated; otherwise just use the current CPU.
1360 	 */
1361 	if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
1362 		cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
1363 	else {
1364 		cpu = smp_processor_id();
1365 
1366 		if ((fh->fh_type == FC_TYPE_FCP) &&
1367 		    (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
1368 			do {
1369 				cpu = fcoe_select_cpu(cpu);
1370 			} while (!cpu_online(cpu));
1371 		} else  if ((fh->fh_type == FC_TYPE_FCP) &&
1372 			    (ntohs(fh->fh_rx_id) != FC_XID_UNKNOWN)) {
1373 			cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
1374 		} else
1375 			cpu = smp_processor_id();
1376 	}
1377 	fps = &per_cpu(fcoe_percpu, cpu);
1378 	spin_lock_bh(&fps->fcoe_rx_list.lock);
1379 	if (unlikely(!fps->thread)) {
1380 		/*
1381 		 * The targeted CPU is not ready, let's target
1382 		 * the first CPU now. For non-SMP systems this
1383 		 * will check the same CPU twice.
1384 		 */
1385 		FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
1386 				"ready for incoming skb- using first online "
1387 				"CPU.\n");
1388 
1389 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
1390 		cpu = cpumask_first(cpu_online_mask);
1391 		fps = &per_cpu(fcoe_percpu, cpu);
1392 		spin_lock_bh(&fps->fcoe_rx_list.lock);
1393 		if (!fps->thread) {
1394 			spin_unlock_bh(&fps->fcoe_rx_list.lock);
1395 			goto err;
1396 		}
1397 	}
1398 
1399 	/*
1400 	 * We now have a valid CPU that we're targeting for
1401 	 * this skb. We also have this receive thread locked,
1402 	 * so we're free to queue skbs into its queue.
1403 	 */
1404 
1405 	/* If this is a SCSI-FCP frame, and this is already executing on the
1406 	 * correct CPU, and the queue for this CPU is empty, then go ahead
1407 	 * and process the frame directly in the softirq context.
1408 	 * This lets us process completions without context switching from the
1409 	 * NET_RX softirq, to our receive processing thread, and then back to
1410 	 * BLOCK softirq context.
1411 	 */
1412 	if (fh->fh_type == FC_TYPE_FCP &&
1413 	    cpu == smp_processor_id() &&
1414 	    skb_queue_empty(&fps->fcoe_rx_list)) {
1415 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
1416 		fcoe_recv_frame(skb);
1417 	} else {
1418 		__skb_queue_tail(&fps->fcoe_rx_list, skb);
1419 		if (fps->fcoe_rx_list.qlen == 1)
1420 			wake_up_process(fps->thread);
1421 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
1422 	}
1423 
1424 	return 0;
1425 err:
1426 	per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++;
1427 	put_cpu();
1428 err2:
1429 	kfree_skb(skb);
1430 	return -1;
1431 }
1432 
1433 /**
1434  * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC
1435  * @skb:  The packet to be transmitted
1436  * @tlen: The total length of the trailer
1437  *
1438  * Returns: 0 for success
1439  */
1440 static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
1441 {
1442 	struct fcoe_percpu_s *fps;
1443 	int rc;
1444 
1445 	fps = &get_cpu_var(fcoe_percpu);
1446 	rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
1447 	put_cpu_var(fcoe_percpu);
1448 
1449 	return rc;
1450 }
1451 
1452 /**
1453  * fcoe_xmit() - Transmit a FCoE frame
1454  * @lport: The local port that the frame is to be transmitted for
1455  * @fp:	   The frame to be transmitted
1456  *
1457  * Return: 0 for success
1458  */
1459 int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1460 {
1461 	int wlen;
1462 	u32 crc;
1463 	struct ethhdr *eh;
1464 	struct fcoe_crc_eof *cp;
1465 	struct sk_buff *skb;
1466 	struct fcoe_dev_stats *stats;
1467 	struct fc_frame_header *fh;
1468 	unsigned int hlen;		/* header length implies the version */
1469 	unsigned int tlen;		/* trailer length */
1470 	unsigned int elen;		/* eth header, may include vlan */
1471 	struct fcoe_port *port = lport_priv(lport);
1472 	struct fcoe_interface *fcoe = port->priv;
1473 	u8 sof, eof;
1474 	struct fcoe_hdr *hp;
1475 
1476 	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
1477 
1478 	fh = fc_frame_header_get(fp);
1479 	skb = fp_skb(fp);
1480 	wlen = skb->len / FCOE_WORD_TO_BYTE;
1481 
1482 	if (!lport->link_up) {
1483 		kfree_skb(skb);
1484 		return 0;
1485 	}
1486 
1487 	if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
1488 	    fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
1489 		return 0;
1490 
1491 	sof = fr_sof(fp);
1492 	eof = fr_eof(fp);
1493 
1494 	elen = sizeof(struct ethhdr);
1495 	hlen = sizeof(struct fcoe_hdr);
1496 	tlen = sizeof(struct fcoe_crc_eof);
1497 	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1498 
1499 	/* crc offload */
1500 	if (likely(lport->crc_offload)) {
1501 		skb->ip_summed = CHECKSUM_PARTIAL;
1502 		skb->csum_start = skb_headroom(skb);
1503 		skb->csum_offset = skb->len;
1504 		crc = 0;
1505 	} else {
1506 		skb->ip_summed = CHECKSUM_NONE;
1507 		crc = fcoe_fc_crc(fp);
1508 	}
1509 
1510 	/* copy port crc and eof to the skb buff */
1511 	if (skb_is_nonlinear(skb)) {
1512 		skb_frag_t *frag;
1513 		if (fcoe_alloc_paged_crc_eof(skb, tlen)) {
1514 			kfree_skb(skb);
1515 			return -ENOMEM;
1516 		}
1517 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1518 		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
1519 			+ frag->page_offset;
1520 	} else {
1521 		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
1522 	}
1523 
1524 	memset(cp, 0, sizeof(*cp));
1525 	cp->fcoe_eof = eof;
1526 	cp->fcoe_crc32 = cpu_to_le32(~crc);
1527 
1528 	if (skb_is_nonlinear(skb)) {
1529 		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
1530 		cp = NULL;
1531 	}
1532 
1533 	/* adjust skb network/transport offsets to match mac/fcoe/port */
1534 	skb_push(skb, elen + hlen);
1535 	skb_reset_mac_header(skb);
1536 	skb_reset_network_header(skb);
1537 	skb->mac_len = elen;
1538 	skb->protocol = htons(ETH_P_FCOE);
1539 	skb->dev = fcoe->netdev;
1540 
1541 	/* fill up mac and fcoe headers */
1542 	eh = eth_hdr(skb);
1543 	eh->h_proto = htons(ETH_P_FCOE);
1544 	memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
1545 	if (fcoe->ctlr.map_dest)
1546 		memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
1547 
1548 	if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1549 		memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
1550 	else
1551 		memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
1552 
1553 	hp = (struct fcoe_hdr *)(eh + 1);
1554 	memset(hp, 0, sizeof(*hp));
1555 	if (FC_FCOE_VER)
1556 		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1557 	hp->fcoe_sof = sof;
1558 
1559 	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1560 	if (lport->seq_offload && fr_max_payload(fp)) {
1561 		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
1562 		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
1563 	} else {
1564 		skb_shinfo(skb)->gso_type = 0;
1565 		skb_shinfo(skb)->gso_size = 0;
1566 	}
1567 	/* update tx stats: regardless if LLD fails */
1568 	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1569 	stats->TxFrames++;
1570 	stats->TxWords += wlen;
1571 	put_cpu();
1572 
1573 	/* send down to lld */
1574 	fr_dev(fp) = lport;
1575 	if (port->fcoe_pending_queue.qlen)
1576 		fcoe_check_wait_queue(lport, skb);
1577 	else if (fcoe_start_io(skb))
1578 		fcoe_check_wait_queue(lport, skb);
1579 
1580 	return 0;
1581 }
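/*
 * On-the-wire layout built above (illustrative):
 *
 *	[ struct ethhdr ][ struct fcoe_hdr ][ FC header + payload ]
 *	[ struct fcoe_crc_eof ]
 *
 * elen, hlen and tlen correspond to these fixed-size pieces; for
 * non-linear skbs the CRC/EOF trailer is placed in the last page
 * fragment via fcoe_alloc_paged_crc_eof() instead of skb_put().
 */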
1582 
1583 /**
1584  * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion
1585  * @skb: The completed skb (argument required by destructor)
1586  */
1587 static void fcoe_percpu_flush_done(struct sk_buff *skb)
1588 {
1589 	complete(&fcoe_flush_completion);
1590 }
1591 
1592 /**
1593  * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC
1594  * @lport: The local port the frame was received on
1595  * @fp:	   The received frame
1596  *
1597  * Return: 0 on passing filtering checks
1598  */
1599 static inline int fcoe_filter_frames(struct fc_lport *lport,
1600 				     struct fc_frame *fp)
1601 {
1602 	struct fcoe_interface *fcoe;
1603 	struct fc_frame_header *fh;
1604 	struct sk_buff *skb = (struct sk_buff *)fp;
1605 	struct fcoe_dev_stats *stats;
1606 
1607 	/*
1608 	 * We only check the CRC here if no offload is available; if
1609 	 * the frame is solicited data, the FCP layer will check it
1610 	 * during the copy instead.
1611 	 */
1612 	if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
1613 		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1614 	else
1615 		fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1616 
1617 	fh = (struct fc_frame_header *) skb_transport_header(skb);
1618 	fh = fc_frame_header_get(fp);
1619 	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
1620 		return 0;
1621 
1622 	fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
1623 	if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
1624 	    ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
1625 		FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
1626 		return -EINVAL;
1627 	}
1628 
1629 	if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) ||
1630 	    le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) {
1631 		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1632 		return 0;
1633 	}
1634 
1635 	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1636 	stats->InvalidCRCCount++;
1637 	if (stats->InvalidCRCCount < 5)
1638 		printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
1639 	return -EINVAL;
1640 }
1641 
1642 /**
1643  * fcoe_recv_frame() - process a single received frame
1644  * @skb: frame to process
1645  */
1646 static void fcoe_recv_frame(struct sk_buff *skb)
1647 {
1648 	u32 fr_len;
1649 	struct fc_lport *lport;
1650 	struct fcoe_rcv_info *fr;
1651 	struct fcoe_dev_stats *stats;
1652 	struct fcoe_crc_eof crc_eof;
1653 	struct fc_frame *fp;
1654 	struct fcoe_port *port;
1655 	struct fcoe_hdr *hp;
1656 
1657 	fr = fcoe_dev_from_skb(skb);
1658 	lport = fr->fr_dev;
1659 	if (unlikely(!lport)) {
1660 		if (skb->destructor != fcoe_percpu_flush_done)
1661 			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
1662 		kfree_skb(skb);
1663 		return;
1664 	}
1665 
1666 	FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
1667 			"head:%p data:%p tail:%p end:%p sum:%d dev:%s",
1668 			skb->len, skb->data_len,
1669 			skb->head, skb->data, skb_tail_pointer(skb),
1670 			skb_end_pointer(skb), skb->csum,
1671 			skb->dev ? skb->dev->name : "<NULL>");
1672 
1673 	port = lport_priv(lport);
1674 	if (skb_is_nonlinear(skb))
1675 		skb_linearize(skb);	/* not ideal */
1676 
1677 	/*
1678 	 * Frame length checks and setting up the header pointers
1679 	 * was done in fcoe_rcv already.
1680 	 */
1681 	hp = (struct fcoe_hdr *) skb_network_header(skb);
1682 
1683 	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1684 	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1685 		if (stats->ErrorFrames < 5)
1686 			printk(KERN_WARNING "fcoe: FCoE version "
1687 			       "mismatch: The frame has "
1688 			       "version %x, but the "
1689 			       "initiator supports version "
1690 			       "%x\n", FC_FCOE_DECAPS_VER(hp),
1691 			       FC_FCOE_VER);
1692 		goto drop;
1693 	}
1694 
1695 	skb_pull(skb, sizeof(struct fcoe_hdr));
1696 	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1697 
1698 	stats->RxFrames++;
1699 	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1700 
1701 	fp = (struct fc_frame *)skb;
1702 	fc_frame_init(fp);
1703 	fr_dev(fp) = lport;
1704 	fr_sof(fp) = hp->fcoe_sof;
1705 
1706 	/* Copy out the CRC and EOF trailer for access */
1707 	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof)))
1708 		goto drop;
1709 	fr_eof(fp) = crc_eof.fcoe_eof;
1710 	fr_crc(fp) = crc_eof.fcoe_crc32;
1711 	if (pskb_trim(skb, fr_len))
1712 		goto drop;
1713 
1714 	if (!fcoe_filter_frames(lport, fp)) {
1715 		put_cpu();
1716 		fc_exch_recv(lport, fp);
1717 		return;
1718 	}
1719 drop:
1720 	stats->ErrorFrames++;
1721 	put_cpu();
1722 	kfree_skb(skb);
1723 }
1724 
1725 /**
1726  * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
1727  * @arg: The per-CPU context
1728  *
1729  * Return: 0 for success
1730  */
1731 int fcoe_percpu_receive_thread(void *arg)
1732 {
1733 	struct fcoe_percpu_s *p = arg;
1734 	struct sk_buff *skb;
1735 
1736 	set_user_nice(current, -20);
1737 
1738 	while (!kthread_should_stop()) {
1739 
1740 		spin_lock_bh(&p->fcoe_rx_list.lock);
1741 		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
1742 			set_current_state(TASK_INTERRUPTIBLE);
1743 			spin_unlock_bh(&p->fcoe_rx_list.lock);
1744 			schedule();
1745 			set_current_state(TASK_RUNNING);
1746 			if (kthread_should_stop())
1747 				return 0;
1748 			spin_lock_bh(&p->fcoe_rx_list.lock);
1749 		}
1750 		spin_unlock_bh(&p->fcoe_rx_list.lock);
1751 		fcoe_recv_frame(skb);
1752 	}
1753 	return 0;
1754 }
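/*
 * Note on the queue/wake handshake: fcoe_rcv() above appends to
 * fcoe_rx_list with the list lock held and wakes this thread only when
 * the queue length transitions from 0 to 1, so an already-running
 * thread keeps draining the list without redundant wakeups.
 */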
1755 
1756 /**
1757  * fcoe_dev_setup() - Setup the link change notification interface
1758  */
1759 static void fcoe_dev_setup(void)
1760 {
1761 	register_netdevice_notifier(&fcoe_notifier);
1762 }
1763 
1764 /**
1765  * fcoe_dev_cleanup() - Cleanup the link change notification interface
1766  */
1767 static void fcoe_dev_cleanup(void)
1768 {
1769 	unregister_netdevice_notifier(&fcoe_notifier);
1770 }
1771 
1772 /**
1773  * fcoe_device_notification() - Handler for net device events
1774  * @notifier: The context of the notification
1775  * @event:    The type of event
1776  * @ptr:      The net device that the event was on
1777  *
1778  * This function is called from the netdevice notifier chain on net device
 * events such as link changes.
1779  *
1780  * Returns: NOTIFY_OK, or NOTIFY_DONE if the netdev is not an FCoE interface
1781  */
1782 static int fcoe_device_notification(struct notifier_block *notifier,
1783 				    ulong event, void *ptr)
1784 {
1785 	struct fc_lport *lport = NULL;
1786 	struct net_device *netdev = ptr;
1787 	struct fcoe_interface *fcoe;
1788 	struct fcoe_port *port;
1789 	struct fcoe_dev_stats *stats;
1790 	u32 link_possible = 1;
1791 	u32 mfs;
1792 	int rc = NOTIFY_OK;
1793 
1794 	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1795 		if (fcoe->netdev == netdev) {
1796 			lport = fcoe->ctlr.lp;
1797 			break;
1798 		}
1799 	}
1800 	if (!lport) {
1801 		rc = NOTIFY_DONE;
1802 		goto out;
1803 	}
1804 
1805 	switch (event) {
1806 	case NETDEV_DOWN:
1807 	case NETDEV_GOING_DOWN:
1808 		link_possible = 0;
1809 		break;
1810 	case NETDEV_UP:
1811 	case NETDEV_CHANGE:
1812 		break;
1813 	case NETDEV_CHANGEMTU:
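		/*
		 * Devices advertising NETIF_F_FCOE_MTU carry full-sized FCoE
		 * frames regardless of the LAN MTU, so an MTU change does
		 * not affect the FC maximum frame size in that case.
		 */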
1814 		if (netdev->features & NETIF_F_FCOE_MTU)
1815 			break;
1816 		mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
1817 				     sizeof(struct fcoe_crc_eof));
1818 		if (mfs >= FC_MIN_MAX_FRAME)
1819 			fc_set_mfs(lport, mfs);
1820 		break;
1821 	case NETDEV_REGISTER:
1822 		break;
1823 	case NETDEV_UNREGISTER:
1824 		list_del(&fcoe->list);
1825 		port = lport_priv(fcoe->ctlr.lp);
1826 		queue_work(fcoe_wq, &port->destroy_work);
1827 		goto out;
1829 	case NETDEV_FEAT_CHANGE:
1830 		fcoe_netdev_features_change(lport, netdev);
1831 		break;
1832 	default:
1833 		FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
1834 				"from netdev netlink\n", event);
1835 	}
1836 
1837 	fcoe_link_speed_update(lport);
1838 
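	/*
	 * Report the link state to the FIP controller.  A link failure is
	 * counted only when fcoe_ctlr_link_down() indicates the link was
	 * previously up.
	 */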
1839 	if (link_possible && !fcoe_link_ok(lport))
1840 		fcoe_ctlr_link_up(&fcoe->ctlr);
1841 	else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
1842 		stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1843 		stats->LinkFailureCount++;
1844 		put_cpu();
1845 		fcoe_clean_pending_queue(lport);
1846 	}
1847 out:
1848 	return rc;
1849 }
1850 
1851 /**
1852  * fcoe_disable() - Disables a FCoE interface
1853  * @netdev: The net_device object of the FCoE interface to be disabled
1854  *
1855  * Called from fcoe transport.
1856  *
1857  * Returns: 0 for success
1858  */
1859 static int fcoe_disable(struct net_device *netdev)
1860 {
1861 	struct fcoe_interface *fcoe;
1862 	int rc = 0;
1863 
1864 	mutex_lock(&fcoe_config_mutex);
1865 
1866 	rtnl_lock();
1867 	fcoe = fcoe_hostlist_lookup_port(netdev);
1868 	rtnl_unlock();
1869 
1870 	if (fcoe) {
1871 		fcoe_ctlr_link_down(&fcoe->ctlr);
1872 		fcoe_clean_pending_queue(fcoe->ctlr.lp);
1873 	} else
1874 		rc = -ENODEV;
1875 
1876 	mutex_unlock(&fcoe_config_mutex);
1877 	return rc;
1878 }
1879 
1880 /**
1881  * fcoe_enable() - Enables a FCoE interface
1882  * @netdev: The net_device object of the FCoE interface to be enabled
1883  *
1884  * Called from fcoe transport.
1885  *
1886  * Returns: 0 for success
1887  */
1888 static int fcoe_enable(struct net_device *netdev)
1889 {
1890 	struct fcoe_interface *fcoe;
1891 	int rc = 0;
1892 
1893 	mutex_lock(&fcoe_config_mutex);
1894 	rtnl_lock();
1895 	fcoe = fcoe_hostlist_lookup_port(netdev);
1896 	rtnl_unlock();
1897 
1898 	if (!fcoe)
1899 		rc = -ENODEV;
1900 	else if (!fcoe_link_ok(fcoe->ctlr.lp))
1901 		fcoe_ctlr_link_up(&fcoe->ctlr);
1902 
1903 	mutex_unlock(&fcoe_config_mutex);
1904 	return rc;
1905 }
1906 
1907 /**
1908  * fcoe_destroy() - Destroy a FCoE interface
1909  * @netdev: The net_device object of the FCoE interface to be destroyed
1910  *
1911  * Called from fcoe transport
1912  *
1913  * Returns: 0 for success
1914  */
1915 static int fcoe_destroy(struct net_device *netdev)
1916 {
1917 	struct fcoe_interface *fcoe;
1918 	struct fc_lport *lport;
1919 	struct fcoe_port *port;
1920 	int rc = 0;
1921 
1922 	mutex_lock(&fcoe_config_mutex);
1923 	rtnl_lock();
1924 	fcoe = fcoe_hostlist_lookup_port(netdev);
1925 	if (!fcoe) {
1926 		rc = -ENODEV;
1927 		goto out_nodev;
1928 	}
1929 	lport = fcoe->ctlr.lp;
1930 	port = lport_priv(lport);
1931 	list_del(&fcoe->list);
1932 	queue_work(fcoe_wq, &port->destroy_work);
1933 out_nodev:
1934 	rtnl_unlock();
1935 	mutex_unlock(&fcoe_config_mutex);
1936 	return rc;
1937 }
1938 
1939 /**
1940  * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context
1941  * @work: Handle to the FCoE port to be destroyed
1942  */
1943 static void fcoe_destroy_work(struct work_struct *work)
1944 {
1945 	struct fcoe_port *port;
1946 	struct fcoe_interface *fcoe;
1947 	int npiv = 0;
1948 
1949 	port = container_of(work, struct fcoe_port, destroy_work);
1950 	mutex_lock(&fcoe_config_mutex);
1951 
1952 	/* set if this is an NPIV port */
1953 	npiv = port->lport->vport ? 1 : 0;
1954 
1955 	fcoe = port->priv;
1956 	fcoe_if_destroy(port->lport);
1957 
1958 	/* Do not tear down the fcoe interface for NPIV port */
1959 	if (!npiv) {
1960 		rtnl_lock();
1961 		fcoe_interface_cleanup(fcoe);
1962 		rtnl_unlock();
1963 	}
1964 
1965 	mutex_unlock(&fcoe_config_mutex);
1966 }
1967 
1968 /**
1969  * fcoe_match() - Check if the FCoE is supported on the given netdevice
1970  * @netdev: The net_device object to check for FCoE support
1971  *
1972  * Called from fcoe transport.
1973  *
1974  * Returns: always returns true as this is the default FCoE transport,
1975  * i.e., support all netdevs.
1976  */
1977 static bool fcoe_match(struct net_device *netdev)
1978 {
1979 	return true;
1980 }
1981 
1982 /**
1983  * fcoe_create() - Create a fcoe interface
1984  * @netdev: The net_device object of the Ethernet interface to create on
1985  * @fip_mode: The FIP mode for this creation
1986  *
1987  * Called from fcoe transport
1988  *
1989  * Returns: 0 for success
1990  */
1991 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
1992 {
1993 	int rc = 0;
1994 	struct fcoe_interface *fcoe;
1995 	struct fc_lport *lport;
1996 
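	/*
	 * fcoe_config_mutex serializes create/destroy/enable/disable, while
	 * the RTNL mutex protects fcoe_hostlist and the underlying netdev.
	 */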
1997 	mutex_lock(&fcoe_config_mutex);
1998 	rtnl_lock();
1999 
2000 	/* look for existing lport */
2001 	if (fcoe_hostlist_lookup(netdev)) {
2002 		rc = -EEXIST;
2003 		goto out_nodev;
2004 	}
2005 
2006 	fcoe = fcoe_interface_create(netdev, fip_mode);
2007 	if (IS_ERR(fcoe)) {
2008 		rc = PTR_ERR(fcoe);
2009 		goto out_nodev;
2010 	}
2011 
2012 	lport = fcoe_if_create(fcoe, &netdev->dev, 0);
2013 	if (IS_ERR(lport)) {
2014 		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
2015 		       netdev->name);
2016 		rc = -EIO;
2017 		fcoe_interface_cleanup(fcoe);
2018 		goto out_nodev;
2019 	}
2020 
2021 	/* Make this the "master" N_Port */
2022 	fcoe->ctlr.lp = lport;
2023 
2024 	/* add to lports list */
2025 	fcoe_hostlist_add(lport);
2026 
2027 	/* start FIP Discovery and FLOGI */
2028 	lport->boot_time = jiffies;
2029 	fc_fabric_login(lport);
2030 	if (!fcoe_link_ok(lport))
2031 		fcoe_ctlr_link_up(&fcoe->ctlr);
2032 
2033 out_nodev:
2034 	rtnl_unlock();
2035 	mutex_unlock(&fcoe_config_mutex);
2036 	return rc;
2037 }
2038 
2039 /**
2040  * fcoe_link_speed_update() - Update the supported and actual link speeds
2041  * @lport: The local port to update speeds for
2042  *
2043  * Returns: 0 if the ethtool query was successful
2044  *          -1 if the ethtool query failed
2045  */
2046 int fcoe_link_speed_update(struct fc_lport *lport)
2047 {
2048 	struct net_device *netdev = fcoe_netdev(lport);
2049 	struct ethtool_cmd ecmd;
2050 
2051 	if (!dev_ethtool_get_settings(netdev, &ecmd)) {
2052 		lport->link_supported_speeds &=
2053 			~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
2054 		if (ecmd.supported & (SUPPORTED_1000baseT_Half |
2055 				      SUPPORTED_1000baseT_Full))
2056 			lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
2057 		if (ecmd.supported & SUPPORTED_10000baseT_Full)
2058 			lport->link_supported_speeds |=
2059 				FC_PORTSPEED_10GBIT;
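		/*
		 * Map the current ethtool speed onto an FC port speed; other
		 * speeds leave lport->link_speed unchanged.
		 */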
2060 		switch (ethtool_cmd_speed(&ecmd)) {
2061 		case SPEED_1000:
2062 			lport->link_speed = FC_PORTSPEED_1GBIT;
2063 			break;
2064 		case SPEED_10000:
2065 			lport->link_speed = FC_PORTSPEED_10GBIT;
2066 			break;
2067 		}
2068 		return 0;
2069 	}
2070 	return -1;
2071 }
2072 
2073 /**
2074  * fcoe_link_ok() - Check if the link is OK for a local port
2075  * @lport: The local port to check link on
2076  *
2077  * Returns: 0 if link is UP and OK, -1 if not
2078  *
2079  */
2080 int fcoe_link_ok(struct fc_lport *lport)
2081 {
2082 	struct net_device *netdev = fcoe_netdev(lport);
2083 
2084 	if (netif_oper_up(netdev))
2085 		return 0;
2086 	return -1;
2087 }
2088 
2089 /**
2090  * fcoe_percpu_clean() - Clear all pending skbs for a local port
2091  * @lport: The local port whose skbs are to be cleared
2092  *
2093  * Must be called with fcoe_config_mutex held to single-thread completion.
2094  *
2095  * This flushes the pending skbs by adding a new skb to each queue and
2096  * waiting until they are all freed.  This assures us that not only are
2097  * there no packets that will be handled by the lport, but also that any
2098  * threads already handling packets have returned.
2099  */
2100 void fcoe_percpu_clean(struct fc_lport *lport)
2101 {
2102 	struct fcoe_percpu_s *pp;
2103 	struct fcoe_rcv_info *fr;
2104 	struct sk_buff_head *list;
2105 	struct sk_buff *skb, *next;
2106 	struct sk_buff *head;
2107 	unsigned int cpu;
2108 
2109 	for_each_possible_cpu(cpu) {
2110 		pp = &per_cpu(fcoe_percpu, cpu);
2111 		spin_lock_bh(&pp->fcoe_rx_list.lock);
2112 		list = &pp->fcoe_rx_list;
2113 		head = list->next;
2114 		for (skb = head; skb != (struct sk_buff *)list;
2115 		     skb = next) {
2116 			next = skb->next;
2117 			fr = fcoe_dev_from_skb(skb);
2118 			if (fr->fr_dev == lport) {
2119 				__skb_unlink(skb, list);
2120 				kfree_skb(skb);
2121 			}
2122 		}
2123 
2124 		if (!pp->thread || !cpu_online(cpu)) {
2125 			spin_unlock_bh(&pp->fcoe_rx_list.lock);
2126 			continue;
2127 		}
2128 
2129 		skb = dev_alloc_skb(0);
2130 		if (!skb) {
2131 			spin_unlock_bh(&pp->fcoe_rx_list.lock);
2132 			continue;
2133 		}
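		/*
		 * Queue a zero-length sentinel skb; its destructor,
		 * fcoe_percpu_flush_done(), completes fcoe_flush_completion
		 * once the per-CPU thread has freed it, which proves that
		 * everything queued ahead of it has been processed.
		 */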
2134 		skb->destructor = fcoe_percpu_flush_done;
2135 
2136 		__skb_queue_tail(&pp->fcoe_rx_list, skb);
2137 		if (pp->fcoe_rx_list.qlen == 1)
2138 			wake_up_process(pp->thread);
2139 		spin_unlock_bh(&pp->fcoe_rx_list.lock);
2140 
2141 		wait_for_completion(&fcoe_flush_completion);
2142 	}
2143 }
2144 
2145 /**
2146  * fcoe_reset() - Reset a local port
2147  * @shost: The SCSI host associated with the local port to be reset
2148  *
2149  * Returns: Always 0 (return value required by FC transport template)
2150  */
2151 int fcoe_reset(struct Scsi_Host *shost)
2152 {
2153 	struct fc_lport *lport = shost_priv(shost);
2154 	struct fcoe_port *port = lport_priv(lport);
2155 	struct fcoe_interface *fcoe = port->priv;
2156 
2157 	fcoe_ctlr_link_down(&fcoe->ctlr);
2158 	fcoe_clean_pending_queue(fcoe->ctlr.lp);
2159 	if (!fcoe_link_ok(fcoe->ctlr.lp))
2160 		fcoe_ctlr_link_up(&fcoe->ctlr);
2161 	return 0;
2162 }
2163 
2164 /**
2165  * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device
2166  * @netdev: The net device used as a key
2167  *
2168  * Locking: Must be called with the RTNL mutex held.
2169  *
2170  * Returns: NULL or the FCoE interface
2171  */
2172 static struct fcoe_interface *
2173 fcoe_hostlist_lookup_port(const struct net_device *netdev)
2174 {
2175 	struct fcoe_interface *fcoe;
2176 
2177 	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
2178 		if (fcoe->netdev == netdev)
2179 			return fcoe;
2180 	}
2181 	return NULL;
2182 }
2183 
2184 /**
2185  * fcoe_hostlist_lookup() - Find the local port associated with a
2186  *			    given net device
2187  * @netdev: The netdevice used as a key
2188  *
2189  * Locking: Must be called with the RTNL mutex held
2190  *
2191  * Returns: NULL or the local port
2192  */
2193 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
2194 {
2195 	struct fcoe_interface *fcoe;
2196 
2197 	fcoe = fcoe_hostlist_lookup_port(netdev);
2198 	return (fcoe) ? fcoe->ctlr.lp : NULL;
2199 }
2200 
2201 /**
2202  * fcoe_hostlist_add() - Add the FCoE interface identified by a local
2203  *			 port to the hostlist
2204  * @lport: The local port that identifies the FCoE interface to be added
2205  *
2206  * Locking: must be called with the RTNL mutex held
2207  *
2208  * Returns: 0 for success
2209  */
2210 static int fcoe_hostlist_add(const struct fc_lport *lport)
2211 {
2212 	struct fcoe_interface *fcoe;
2213 	struct fcoe_port *port;
2214 
2215 	fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
2216 	if (!fcoe) {
2217 		port = lport_priv(lport);
2218 		fcoe = port->priv;
2219 		list_add_tail(&fcoe->list, &fcoe_hostlist);
2220 	}
2221 	return 0;
2222 }
2223 
2224 
2225 static struct fcoe_transport fcoe_sw_transport = {
2226 	.name = {FCOE_TRANSPORT_DEFAULT},
2227 	.attached = false,
2228 	.list = LIST_HEAD_INIT(fcoe_sw_transport.list),
2229 	.match = fcoe_match,
2230 	.create = fcoe_create,
2231 	.destroy = fcoe_destroy,
2232 	.enable = fcoe_enable,
2233 	.disable = fcoe_disable,
2234 };
2235 
2236 /**
2237  * fcoe_init() - Initialize fcoe.ko
2238  *
2239  * Returns: 0 on success, or a negative value on failure
2240  */
2241 static int __init fcoe_init(void)
2242 {
2243 	struct fcoe_percpu_s *p;
2244 	unsigned int cpu;
2245 	int rc = 0;
2246 
2247 	fcoe_wq = alloc_workqueue("fcoe", 0, 0);
2248 	if (!fcoe_wq)
2249 		return -ENOMEM;
2250 
2251 	/* register as a fcoe transport */
2252 	rc = fcoe_transport_attach(&fcoe_sw_transport);
2253 	if (rc) {
2254 		printk(KERN_ERR "failed to register an fcoe transport, check "
2255 			"if libfcoe is loaded\n");
		destroy_workqueue(fcoe_wq);
2256 		return rc;
2257 	}
2258 
2259 	mutex_lock(&fcoe_config_mutex);
2260 
2261 	for_each_possible_cpu(cpu) {
2262 		p = &per_cpu(fcoe_percpu, cpu);
2263 		skb_queue_head_init(&p->fcoe_rx_list);
2264 	}
2265 
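	/*
	 * Rx queues exist for every possible CPU, but receive threads are
	 * only created for CPUs that are currently online; the hotplug
	 * notifier below handles CPUs that come and go later.
	 */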
2266 	for_each_online_cpu(cpu)
2267 		fcoe_percpu_thread_create(cpu);
2268 
2269 	/* Track CPU hotplug so per-CPU receive threads follow online CPUs */
2270 	rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
2271 	if (rc)
2272 		goto out_free;
2273 
2274 	/* Setup link change notification */
2275 	fcoe_dev_setup();
2276 
2277 	rc = fcoe_if_init();
2278 	if (rc)
2279 		goto out_free;
2280 
2281 	mutex_unlock(&fcoe_config_mutex);
2282 	return 0;
2283 
2284 out_free:
2285 	for_each_online_cpu(cpu)
2286 		fcoe_percpu_thread_destroy(cpu);
2288 	mutex_unlock(&fcoe_config_mutex);
2289 	destroy_workqueue(fcoe_wq);
2290 	return rc;
2291 }
2292 module_init(fcoe_init);
2293 
2294 /**
2295  * fcoe_exit() - Clean up fcoe.ko
2298  */
2299 static void __exit fcoe_exit(void)
2300 {
2301 	struct fcoe_interface *fcoe, *tmp;
2302 	struct fcoe_port *port;
2303 	unsigned int cpu;
2304 
2305 	mutex_lock(&fcoe_config_mutex);
2306 
2307 	fcoe_dev_cleanup();
2308 
2309 	/* releases the associated fcoe hosts */
2310 	rtnl_lock();
2311 	list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
2312 		list_del(&fcoe->list);
2313 		port = lport_priv(fcoe->ctlr.lp);
2314 		queue_work(fcoe_wq, &port->destroy_work);
2315 	}
2316 	rtnl_unlock();
2317 
2318 	unregister_hotcpu_notifier(&fcoe_cpu_notifier);
2319 
2320 	for_each_online_cpu(cpu)
2321 		fcoe_percpu_thread_destroy(cpu);
2322 
2323 	mutex_unlock(&fcoe_config_mutex);
2324 
2325 	/*
2326 	 * destroy_work's may be chained but destroy_workqueue()
2327 	 * can take care of them. Just kill the fcoe_wq.
2328 	 */
2329 	destroy_workqueue(fcoe_wq);
2330 
2331 	/*
2332 	 * Detaching from the scsi transport must happen after all
2333 	 * destroys are done on the fcoe_wq. destroy_workqueue will
2334  * ensure the fcoe_wq is flushed.
2335 	 */
2336 	fcoe_if_exit();
2337 
2338 	/* detach from fcoe transport */
2339 	fcoe_transport_detach(&fcoe_sw_transport);
2340 }
2341 module_exit(fcoe_exit);
2342 
2343 /**
2344  * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
2345  * @seq: active sequence in the FLOGI or FDISC exchange
2346  * @fp: response frame, or error encoded in a pointer (timeout)
2347  * @arg: pointer to the fcoe_ctlr structure
2348  *
2349  * This handles MAC address management for FCoE, then passes control on to
2350  * the libfc FLOGI response handler.
2351  */
2352 static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2353 {
2354 	struct fcoe_ctlr *fip = arg;
2355 	struct fc_exch *exch = fc_seq_exch(seq);
2356 	struct fc_lport *lport = exch->lp;
2357 	u8 *mac;
2358 
2359 	if (IS_ERR(fp))
2360 		goto done;
2361 
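	/*
	 * granted_mac is filled in by the FIP code when the FLOGI response
	 * arrived FIP-encapsulated; an all-zero MAC indicates a pre-FIP
	 * (non-FIP) response.
	 */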
2362 	mac = fr_cb(fp)->granted_mac;
2363 	if (is_zero_ether_addr(mac)) {
2364 		/* pre-FIP */
2365 		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
2366 			fc_frame_free(fp);
2367 			return;
2368 		}
2369 	}
2370 	fcoe_update_src_mac(lport, mac);
2371 done:
2372 	fc_lport_flogi_resp(seq, fp, lport);
2373 }
2374 
2375 /**
2376  * fcoe_logo_resp() - FCoE specific LOGO response handler
2377  * @seq: active sequence in the LOGO exchange
2378  * @fp: response frame, or error encoded in a pointer (timeout)
2379  * @arg: pointer to the fc_lport structure
2380  *
2381  * This handles MAC address management for FCoE, then passes control on to
2382  * the libfc LOGO response handler.
2383  */
2384 static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2385 {
2386 	struct fc_lport *lport = arg;
2387 	static u8 zero_mac[ETH_ALEN] = { 0 };
2388 
2389 	if (!IS_ERR(fp))
2390 		fcoe_update_src_mac(lport, zero_mac);
2391 	fc_lport_logo_resp(seq, fp, lport);
2392 }
2393 
2394 /**
2395  * fcoe_elsct_send() - FCoE specific ELS handler
 * @lport:   The local port sending the request
 * @did:     The destination ID (D_ID) of the request
 * @fp:      The frame containing the request
 * @op:      The opcode of the request
 * @resp:    The response handler
 * @arg:     The argument to pass to the response handler
 * @timeout: The timeout for the exchange
2396  *
2397  * This does special case handling of FIP encapsulated ELS exchanges for FCoE,
2398  * using FCoE specific response handlers and passing the FIP controller as
2399  * the argument (the lport is still available from the exchange).
2400  *
2401  * Most of the work here is just handed off to the libfc routine.
2402  */
2403 static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
2404 				      struct fc_frame *fp, unsigned int op,
2405 				      void (*resp)(struct fc_seq *,
2406 						   struct fc_frame *,
2407 						   void *),
2408 				      void *arg, u32 timeout)
2409 {
2410 	struct fcoe_port *port = lport_priv(lport);
2411 	struct fcoe_interface *fcoe = port->priv;
2412 	struct fcoe_ctlr *fip = &fcoe->ctlr;
2413 	struct fc_frame_header *fh = fc_frame_header_get(fp);
2414 
2415 	switch (op) {
2416 	case ELS_FLOGI:
2417 	case ELS_FDISC:
2418 		if (lport->point_to_multipoint)
2419 			break;
2420 		return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp,
2421 				     fip, timeout);
2422 	case ELS_LOGO:
2423 		/* only hook onto fabric logouts, not port logouts */
2424 		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
2425 			break;
2426 		return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp,
2427 				     lport, timeout);
2428 	}
2429 	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
2430 }
2431 
2432 /**
2433  * fcoe_vport_create() - create an fc_host/scsi_host for a vport
2434  * @vport: fc_vport object to create a new fc_host for
2435  * @disabled: start the new fc_host in a disabled state by default?
2436  *
2437  * Returns: 0 for success
2438  */
2439 static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
2440 {
2441 	struct Scsi_Host *shost = vport_to_shost(vport);
2442 	struct fc_lport *n_port = shost_priv(shost);
2443 	struct fcoe_port *port = lport_priv(n_port);
2444 	struct fcoe_interface *fcoe = port->priv;
2445 	struct net_device *netdev = fcoe->netdev;
2446 	struct fc_lport *vn_port;
2447 	int rc;
2448 	char buf[32];
2449 
2450 	rc = fcoe_validate_vport_create(vport);
2451 	if (rc) {
2452 		wwn_to_str(vport->port_name, buf, sizeof(buf));
2453 		printk(KERN_ERR "fcoe: Failed to create vport, "
2454 			"WWPN (0x%s) already exists\n",
2455 			buf);
2456 		return rc;
2457 	}
2458 
2459 	mutex_lock(&fcoe_config_mutex);
2460 	vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
2461 	mutex_unlock(&fcoe_config_mutex);
2462 
2463 	if (IS_ERR(vn_port)) {
2464 		printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n",
2465 		       netdev->name);
2466 		return -EIO;
2467 	}
2468 
2469 	if (disabled) {
2470 		fc_vport_set_state(vport, FC_VPORT_DISABLED);
2471 	} else {
2472 		vn_port->boot_time = jiffies;
2473 		fc_fabric_login(vn_port);
2474 		fc_vport_setlink(vn_port);
2475 	}
2476 	return 0;
2477 }
2478 
2479 /**
2480  * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport
2481  * @vport: fc_vport object that is being destroyed
2482  *
2483  * Returns: 0 for success
2484  */
2485 static int fcoe_vport_destroy(struct fc_vport *vport)
2486 {
2487 	struct Scsi_Host *shost = vport_to_shost(vport);
2488 	struct fc_lport *n_port = shost_priv(shost);
2489 	struct fc_lport *vn_port = vport->dd_data;
2490 	struct fcoe_port *port = lport_priv(vn_port);
2491 
2492 	mutex_lock(&n_port->lp_mutex);
2493 	list_del(&vn_port->list);
2494 	mutex_unlock(&n_port->lp_mutex);
2495 	queue_work(fcoe_wq, &port->destroy_work);
2496 	return 0;
2497 }
2498 
2499 /**
2500  * fcoe_vport_disable() - change vport state
2501  * @vport: vport to bring online/offline
2502  * @disable: should the vport be disabled?
2503  */
2504 static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
2505 {
2506 	struct fc_lport *lport = vport->dd_data;
2507 
2508 	if (disable) {
2509 		fc_vport_set_state(vport, FC_VPORT_DISABLED);
2510 		fc_fabric_logoff(lport);
2511 	} else {
2512 		lport->boot_time = jiffies;
2513 		fc_fabric_login(lport);
2514 		fc_vport_setlink(lport);
2515 	}
2516 
2517 	return 0;
2518 }
2519 
2520 /**
2521  * fcoe_set_vport_symbolic_name() - append vport string to symbolic name
2522  * @vport: fc_vport with a new symbolic name string
2523  *
2524  * After generating a new symbolic name string, a new RSPN_ID request is
2525  * sent to the name server.  There is no response handler, so if it fails
2526  * for some reason it will not be retried.
2527  */
2528 static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
2529 {
2530 	struct fc_lport *lport = vport->dd_data;
2531 	struct fc_frame *fp;
2532 	size_t len;
2533 
2534 	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
2535 		 "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION,
2536 		 fcoe_netdev(lport)->name, vport->symbolic_name);
2537 
2538 	if (lport->state != LPORT_ST_READY)
2539 		return;
2540 
2541 	len = strnlen(fc_host_symbolic_name(lport->host), 255);
2542 	fp = fc_frame_alloc(lport,
2543 			    sizeof(struct fc_ct_hdr) +
2544 			    sizeof(struct fc_ns_rspn) + len);
2545 	if (!fp)
2546 		return;
2547 	lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID,
2548 			     NULL, NULL, 3 * lport->r_a_tov);
2549 }
2550 
2551 /**
2552  * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
2553  * @lport: the local port
2554  * @fc_lesb: the link error status block
2555  */
2556 static void fcoe_get_lesb(struct fc_lport *lport,
2557 			 struct fc_els_lesb *fc_lesb)
2558 {
2559 	unsigned int cpu;
2560 	u32 lfc, vlfc, mdac;
2561 	struct fcoe_dev_stats *devst;
2562 	struct fcoe_fc_els_lesb *lesb;
2563 	struct rtnl_link_stats64 temp;
2564 	struct net_device *netdev = fcoe_netdev(lport);
2565 
2566 	lfc = 0;
2567 	vlfc = 0;
2568 	mdac = 0;
2569 	lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
2570 	memset(lesb, 0, sizeof(*lesb));
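	/* Sum the per-CPU FCoE error counters into the LESB fields. */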
2571 	for_each_possible_cpu(cpu) {
2572 		devst = per_cpu_ptr(lport->dev_stats, cpu);
2573 		lfc += devst->LinkFailureCount;
2574 		vlfc += devst->VLinkFailureCount;
2575 		mdac += devst->MissDiscAdvCount;
2576 	}
2577 	lesb->lesb_link_fail = htonl(lfc);
2578 	lesb->lesb_vlink_fail = htonl(vlfc);
2579 	lesb->lesb_miss_fka = htonl(mdac);
2580 	lesb->lesb_fcs_error = htonl(dev_get_stats(netdev, &temp)->rx_crc_errors);
2581 }
2582 
2583 /**
2584  * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
2585  * @lport: the local port
2586  * @port_id: the port ID
2587  * @fp: the received frame, if any, that caused the port_id to be set.
2588  *
2589  * This routine handles the case where we received a FLOGI and are
2590  * entering point-to-point mode.  We need to call fcoe_ctlr_recv_flogi()
2591  * so it can set the non-mapped mode and gateway address.
2592  *
2593  * The FLOGI LS_ACC is handled by fcoe_flogi_resp().
2594  */
2595 static void fcoe_set_port_id(struct fc_lport *lport,
2596 			     u32 port_id, struct fc_frame *fp)
2597 {
2598 	struct fcoe_port *port = lport_priv(lport);
2599 	struct fcoe_interface *fcoe = port->priv;
2600 
2601 	if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
2602 		fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
2603 }
2604 
2605 /**
2606  * fcoe_validate_vport_create() - Validate a vport before creating it
2607  * @vport: NPIV port to be created
2608  *
2609  * This routine is meant to add validation for a vport before creating it
2610  * via fcoe_vport_create().
2611  * Current validations are:
2612  *      - WWPN supplied is unique for given lport
2615  */
2616 static int fcoe_validate_vport_create(struct fc_vport *vport)
2617 {
2618 	struct Scsi_Host *shost = vport_to_shost(vport);
2619 	struct fc_lport *n_port = shost_priv(shost);
2620 	struct fc_lport *vn_port;
2621 	int rc = 0;
2622 	char buf[32];
2623 
2624 	mutex_lock(&n_port->lp_mutex);
2625 
2626 	wwn_to_str(vport->port_name, buf, sizeof(buf));
2627 	/* Check if the wwpn is not same as that of the lport */
2628 	if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
2629 		FCOE_DBG("vport WWPN 0x%s is same as that of the "
2630 			"base port WWPN\n", buf);
2631 		rc = -EINVAL;
2632 		goto out;
2633 	}
2634 
2635 	/* Check if there is any existing vport with same wwpn */
2636 	list_for_each_entry(vn_port, &n_port->vports, list) {
2637 		if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
2638 			FCOE_DBG("vport with given WWPN 0x%s already "
2639 			"exists\n", buf);
2640 			rc = -EINVAL;
2641 			break;
2642 		}
2643 	}
2644 
2645 out:
2646 	mutex_unlock(&n_port->lp_mutex);
2647 
2648 	return rc;
2649 }
2650