/* xref: /openbmc/linux/drivers/scsi/fnic/fnic_fcs.c (revision a09d2831) */
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

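/* Work queue used to defer link and frame event handling to process context */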
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);

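/**
 * fnic_handle_link() - handle link up/down notifications from the firmware.
 * @work: the link_work member of the fnic instance.
 *
 * Compares the cached link state and link-down count against the current
 * values from the vNIC device and informs the FCoE controller of any
 * transition, including a link bounce (UP -> DOWN -> UP) detected via the
 * link-down counter.
 */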
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status)
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}

/**
 * fnic_handle_frame() - pass incoming fabric frames to libFC.
 * @work: the frame_work member of the fnic instance.
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic:	fnic instance.
 * @skb:	Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		skb_pull(skb, sizeof(*eh));
		fcoe_ctlr_recv(&fnic->ctlr, skb);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic:	fnic instance.
 * @new:	newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (!compare_ether_addr(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (compare_ether_addr(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport:	local port.
 * @new:	newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport:	local port.
 * @port_id:	assigned FC_ID.
 * @fp:		received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and set up the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set Ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}

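/**
 * fnic_rq_cmpl_frame_recv() - process one received-frame RQ completion.
 * @rq: receive queue on which the frame arrived.
 * @cq_desc: completion queue descriptor for the frame.
 * @buf: receive queue buffer holding the frame.
 * @skipped: unused.
 * @opaque: unused.
 *
 * Decodes the completion descriptor (FCP or plain Ethernet), trims the
 * skb to the received length, and queues good frames to frame_queue for
 * fnic_handle_frame(). Bad or unwanted frames are dropped here.
 */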
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error.  dropping packet.\n");
			goto drop;
		}
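		/* A nonzero return means the skb was consumed (FIP) or freed */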
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}

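/**
 * fnic_rq_cmpl_handler_cont() - per-descriptor receive completion callback.
 * @vdev: vNIC device.
 * @cq_desc: completion queue descriptor.
 * @type: completion type.
 * @q_number: receive queue number.
 * @completed_index: index of the completed descriptor.
 * @opaque: unused.
 *
 * Invoked by vnic_cq_service() for each completed receive descriptor;
 * hands the descriptor to fnic_rq_cmpl_frame_recv().
 */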
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

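/**
 * fnic_rq_cmpl_handler() - service receive queue completions.
 * @fnic: fnic instance.
 * @rq_work_to_do: maximum number of completions to process per queue.
 *
 * Services up to @rq_work_to_do completions on each receive queue and
 * replenishes the queues with fresh buffers. Returns the total number
 * of completions processed.
 */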
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't"
					     " allocate frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/**
 * fnic_alloc_rq_frame() - allocate and map a receive buffer for the RQ.
 * @rq: receive queue to replenish.
 *
 * Called once at init time to allocate and fill the RQ buffers.
 * Subsequently called from interrupt context, after RQ buffer processing,
 * to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;
}

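/**
 * fnic_free_rq_buf() - unmap and free a receive queue buffer.
 * @rq: receive queue being cleaned up.
 * @buf: receive buffer to release.
 */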
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - send an Ethernet frame.
 * @fip:	fcoe_ctlr instance.
 * @skb:	FIP Ethernet frame, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
				sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
	}

	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
		spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
		kfree_skb(skb);
		return;
	}

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       fnic->vlan_hw_insert, fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}

/*
 * Send an FC frame: encapsulate it in an FCoE/Ethernet header and queue
 * it to the work queue. Returns 0 on success, -1 (after freeing the
 * frame) if no work queue descriptor is available.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto fnic_send_frame_end;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}

/**
 * fnic_set_eth_mode() - put fnic into Ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

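/**
 * fnic_wq_complete_frame_send() - free a frame once transmission completes.
 * @wq: work queue the frame was sent on.
 * @cq_desc: completion queue descriptor.
 * @buf: work queue buffer holding the transmitted frame.
 * @opaque: unused.
 */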
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

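/**
 * fnic_wq_cmpl_handler_cont() - per-descriptor transmit completion callback.
 * @vdev: vNIC device.
 * @cq_desc: completion queue descriptor.
 * @type: completion type.
 * @q_number: work queue number.
 * @completed_index: index of the completed descriptor.
 * @opaque: unused.
 *
 * Invoked by vnic_cq_service() for each completed transmit descriptor;
 * releases the frame via fnic_wq_complete_frame_send().
 */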
static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

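/**
 * fnic_wq_cmpl_handler() - service raw work queue (transmit) completions.
 * @fnic: fnic instance.
 * @work_to_do: maximum number of completions to process per queue.
 *
 * Returns the total number of completions processed.
 */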
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count + i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}

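/**
 * fnic_free_wq_buf() - unmap and free an unsent work queue buffer.
 * @wq: work queue being cleaned up.
 * @buf: transmit buffer to release.
 */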
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}
720