/* drivers/scsi/fnic/fnic_fcs.c (revision c01461a6d7b35c9ccc4621332d4db6b029ffb006) */
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);

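/*
 * Work handler for link events: reads the current link state and port
 * speed from the vNIC, updates the fc_host attributes, and notifies
 * libfcoe of the UP/DOWN transition. On a FIP-capable adapter, a
 * transition to link-up (re)starts FCoE VLAN discovery instead of
 * reporting link-up directly.
 */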
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	switch (vnic_dev_port_speed(fnic->vdev)) {
	case DCEM_PORTSPEED_10G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_10GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
		break;
	case DCEM_PORTSPEED_20G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_20GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
		break;
	case DCEM_PORTSPEED_25G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_25GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
		break;
	case DCEM_PORTSPEED_40G:
	case DCEM_PORTSPEED_4x10G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_40GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
		break;
	case DCEM_PORTSPEED_100G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_100GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
		break;
	default:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_UNKNOWN;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
		break;
	}

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fc_trace_set_data(fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN->DOWN",
				strlen("Link Status: DOWN->DOWN"));
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no,
					FNIC_FC_LE,
					"Link Status: UP_DOWN_UP",
					strlen("Link Status: UP_DOWN_UP"));
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fc_trace_set_data(
						fnic->lport->host->host_no,
						FNIC_FC_LE,
						"Link Status: UP_DOWN_UP_VLAN",
						strlen(
						"Link Status: UP_DOWN_UP_VLAN")
						);
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no, FNIC_FC_LE,
					"Link Status: UP_UP",
					strlen("Link Status: UP_UP"));
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fc_trace_set_data(
				fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
				strlen("Link Status: DOWN_UP_VLAN"));
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fnic_fc_trace_set_data(
			fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: UP_DOWN",
			strlen("Link Status: UP_DOWN"));
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				"deleting fip-timer during link-down\n");
			del_timer_sync(&fnic->fip_timer);
		}
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}

/*
 * This function passes incoming fabric frames to libFC
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}

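/*
 * Discard all pending fnic events: walk fnic->evlist under the fnic
 * lock and free every queued event without processing it.
 */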
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

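/*
 * Work handler for queued fnic events: drains fnic->evlist under the
 * fnic lock and dispatches each event (VLAN discovery, FCF discovery).
 * Events are left queued while the fnic is in a transitional state, and
 * processing stops early once rx/link events have been disabled.
 */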
void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just leave the event
		 * queued and return. The queue will be serviced when we
		 * get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * is_fnic_fip_flogi_reject() - check if the received FIP FLOGI frame is rejected.
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the FIP-encapsulated FLOGI reply is an ELS reject
 * (LS_RJT), typically sent with an unsupported-command reason and an
 * insufficient-resources explanation.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
					 struct sk_buff *skb)
{
	struct fc_lport *lport = fip->lp;
	struct fip_header *fiph;
	struct fc_frame_header *fh = NULL;
	struct fip_desc *desc;
	struct fip_encaps *els;
	enum fip_desc_type els_dtype = 0;
	u16 op;
	u8 els_op;
	u8 sub;

	size_t els_len = 0;
	size_t rlen;
	size_t dlen = 0;

	if (skb_linearize(skb))
		return 0;

	if (skb->len < sizeof(*fiph))
		return 0;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (op != FIP_OP_LS)
		return 0;

	if (sub != FIP_SC_REP)
		return 0;

	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return 0;

	desc = (struct fip_desc *)(fiph + 1);
	dlen = desc->fip_dlen * FIP_BPW;

	if (desc->fip_dtype == FIP_DT_FLOGI) {
		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
			return 0;

		els_len = dlen - sizeof(*els);
		els = (struct fip_encaps *)desc;
		fh = (struct fc_frame_header *)(els + 1);
		els_dtype = desc->fip_dtype;

		if (!fh)
			return 0;

		/*
		 * On a reject, the ELS command code is LS_RJT, typically
		 * with an unsupported-command reason and an insufficient
		 * resources explanation.
		 */
		els_op = *(u8 *)(fh + 1);
		if (els_op == ELS_LS_RJT) {
			shost_printk(KERN_INFO, lport->host,
				  "Flogi Request Rejected by Switch\n");
			return 1;
		}
		shost_printk(KERN_INFO, lport->host,
				"Flogi Request Accepted by Switch\n");
	}
	return 0;
}

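/*
 * Send a FIP VLAN discovery request: reset any previously discovered
 * VLANs, build a FIP VLAN request addressed to ALL-FCF-MACs carrying
 * our MAC and WWNN descriptors, transmit it, and arm the FIP timer so
 * the request is retried if no response arrives.
 */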
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct sk_buff *skb;
	char *eth_fr;
	int fr_len;
	struct fip_vlan *vlan;
	u64 vlan_tov;

	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);

	if (printk_ratelimit())
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			  "Sending VLAN request...\n");

	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	fr_len = sizeof(*vlan);
	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there is no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}

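/*
 * Parse a FIP VLAN notification: collect the VLAN descriptors into
 * fnic->vlans, select the first VLAN for FCF solicitation, and arm the
 * FIP timer. If the response carries no VLAN descriptors, discovery is
 * retried from the timer.
 */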
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u16 vid;
	size_t rlen;
	size_t dlen;
	struct fcoe_vlan *vlan;
	u64 sol_time;
	unsigned long flags;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		  "Received VLAN response...\n");

	fiph = (struct fip_header *) skb->data;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		  "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
		  ntohs(fiph->fip_op), fiph->fip_subcode);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	fnic_fcoe_reset_vlans(fnic);
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			shost_printk(KERN_INFO, fnic->lport->host,
				  "process_vlan_resp: FIP VLAN %d\n", vid);
			vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
			if (!vlan) {
				/* retry from timer */
				spin_unlock_irqrestore(&fnic->vlans_lock,
							flags);
				goto out;
			}
			vlan->vid = vid & 0x0fff;
			vlan->state = FIP_VLAN_AVAIL;
			list_add_tail(&vlan->list, &fnic->vlans);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/* any VLAN descriptors present? */
	if (list_empty(&fnic->vlans)) {
		/* retry from timer */
		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			  "No VLAN descriptors in FIP VLAN response\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		goto out;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(fip);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
	return;
}

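/*
 * Start FCF discovery: program the first discovered VLAN into the
 * hardware, mark it as solicited, report link-up to libfcoe to kick off
 * FCF solicitation, and arm the FIP timer.
 */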
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count = 1;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}

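/*
 * Validate the currently selected VLAN. Returns 0 if the first VLAN on
 * the list is in use (promoting it from SENT to USED on the first FCF
 * advertisement), -EINVAL otherwise. @flag carries the FIP flags of the
 * received frame but is currently unused.
 */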
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
	unsigned long flags;
	struct fcoe_vlan *fvlan;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return -EINVAL;
	}

	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	if (fvlan->state == FIP_VLAN_USED) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}

	if (fvlan->state == FIP_VLAN_SENT) {
		fvlan->state = FIP_VLAN_USED;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return -EINVAL;
}

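/*
 * Queue an event for fnic_handle_event(): allocate it atomically, add
 * it to fnic->evlist under the fnic lock, and schedule the event work.
 * Allocation failures are silently dropped.
 */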
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
	struct fnic_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->fnic = fnic;
	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}

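/*
 * Pre-process a received FIP frame (FIP header at skb->data). Validates
 * the header, consumes VLAN notifications, and restarts VLAN discovery
 * on a clear-virtual-link request. Returns 1 when the frame should
 * still be handed to libfcoe, 0 when it was consumed here, and -1 on a
 * NULL skb.
 */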
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
	struct fip_header *fiph;
	int ret = 1;
	u16 op;
	u8 sub;

	if (!skb || !(skb->data))
		return -1;

	if (skb_linearize(skb))
		goto drop;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
		goto drop;

	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
		goto drop;

	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
			goto drop;
		/* pass it on to fcoe */
		ret = 1;
	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
		/* set the vlan as used */
		fnic_fcoe_process_vlan_resp(fnic, skb);
		ret = 0;
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		/* received CVL request, restart vlan disc */
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		/* pass it on to fcoe */
		ret = 1;
	}
drop:
	return ret;
}

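/*
 * Work handler for received FIP frames: drains fnic->fip_frame_queue,
 * pre-processing each frame. A FLOGI reject from the switch forces a
 * link-down and restarts FCoE VLAN discovery; all other FIP frames are
 * handed to libfcoe via fcoe_ctlr_recv().
 */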
void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	struct sk_buff *skb;
	struct ethhdr *eh;

	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->fip_frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		eh = (struct ethhdr *)skb->data;
		if (eh->h_proto == htons(ETH_P_FIP)) {
			skb_pull(skb, sizeof(*eh));
			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
				dev_kfree_skb(skb);
				continue;
			}
			/*
			 * If there are FLOGI rejects, clear all
			 * FCFs and restart from scratch
			 */
			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
				atomic64_inc(
					&fnic_stats->vlan_stats.flogi_rejects);
				shost_printk(KERN_INFO, fnic->lport->host,
					  "Trigger a Link down - VLAN Disc\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				/* start FCoE VLAN discovery */
				fnic_fcoe_send_vlan_req(fnic);
				dev_kfree_skb(skb);
				continue;
			}
			fcoe_ctlr_recv(&fnic->ctlr, skb);
			continue;
		}
	}
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic:	fnic instance.
 * @skb:	Ethernet Frame.
 *
 * Returns 1 if the frame was a FIP frame and has been queued for the FIP
 * worker, 0 if it was an FCoE frame converted in place to an fc_frame,
 * and -1 if it was dropped.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame, as firmware "
					"uses non-FIP mode; enable FIP "
					"using UCSM\n");
			goto drop;
		}
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic:	fnic instance.
 * @new:	newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (ether_addr_equal(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (!ether_addr_equal(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport:	local port.
 * @new:	newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport:	local port.
 * @port_id:	assigned FC_ID.
 * @fp:		received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}

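/*
 * Process one received-frame RQ completion: decode the descriptor (FCP
 * or enet type), trim the skb to the bytes actually written, drop
 * frames with CRC/encapsulation errors, and queue good FC frames for
 * the frame work handler. FIP and other Ethernet frames are diverted
 * through fnic_import_rq_eth_pkt().
 */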
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error.  dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
					(char *)skb->data, skb->len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}

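/*
 * Per-descriptor RQ completion callback: invoked by vnic_cq_service()
 * for each completed receive descriptor, it hands the descriptor to
 * vnic_rq_service() with fnic_rq_cmpl_frame_recv() as the frame handler.
 */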
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

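/*
 * Service up to rq_work_to_do completions on each receive CQ, refilling
 * the corresponding RQ whenever work was done. Returns the total number
 * of completions processed.
 */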
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;
	int r;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		r = -ENOMEM;
		printk(KERN_ERR "DMA mapping failed with error %d\n", r);
		goto free_skb;
	}

	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;

free_skb:
	kfree_skb(skb);
	return r;
}
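
/*
 * Unmap the DMA buffer and free the skb backing an RQ entry; used when
 * the receive queues are cleaned up.
 */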
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip:	fcoe_ctlr instance.
 * @skb:	Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	} else {
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	}

	pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		printk(KERN_ERR "DMA mapping failed\n");
		goto free_skb;
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq))
		goto irq_restore;

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	return;

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
free_skb:
	kfree_skb(skb);
}

/*
 * Send an FC frame: encapsulate it in FCoE (adding an 802.1Q tag when the
 * hardware does not insert the VLAN) and post it to the work queue.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		ret = -ENOMEM;
		printk(KERN_ERR "DMA map failed with error %d\n", ret);
		goto free_skb_on_err;
	}

	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
				(char *)eth_hdr, tot_len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
		ret = -1;
		goto irq_restore;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

free_skb_on_err:
	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

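/*
 * WQ completion callback for a transmitted frame: unmap the DMA buffer
 * and free the skb once the hardware has finished with it.
 */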
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

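/*
 * Service the raw work-queue completion queues (which follow the
 * receive CQs in fnic->cq[]) and return the number of completions
 * processed.
 */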
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						 work_to_do,
						 fnic_wq_cmpl_handler_cont,
						 NULL);
	}

	return wq_work_done;
}

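/*
 * Unmap the DMA buffer and free the skb backing a WQ entry; used when
 * the work queues are cleaned up.
 */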
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fcoe_vlan *next;

	/*
	 * Indicating a link down to fcoe here (so that all FCFs are
	 * freed) might not be required, since we already do that before
	 * sending the VLAN discovery request.
	 */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (!list_empty(&fnic->vlans)) {
		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
			list_del(&vlan->list);
			kfree(vlan);
		}
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}

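/*
 * FIP timer handler: runs when a VLAN request or FCF solicitation times
 * out. Restarts VLAN discovery when no usable VLAN remains, or moves on
 * to the next VLAN in the list and re-arms the timer.
 */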
void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* no vlans available, try again */
		if (printk_ratelimit())
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	shost_printk(KERN_DEBUG, fnic->lport->host,
		  "fip_timer: vlan %d state %d sol_count %d\n",
		  vlan->vid, vlan->state, vlan->sol_count);
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			  "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* if all vlans are in failed state, restart vlan disc */
		if (printk_ratelimit())
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove from the list.
			 * Try the next vlan
			 */
			shost_printk(KERN_INFO, fnic->lport->host,
				  "Dequeue this VLAN ID %d from list\n",
				  vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
							flags);
				shost_printk(KERN_INFO, fnic->lport->host,
					  "fip_timer: vlan list empty, "
					  "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
							list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
		vlan->sol_count++;
		sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}
1403