/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 *
 * Returns : none
 *
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
	ddp->len = 0;
	ddp->err = 1;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->sgl = NULL;
	ddp->sgc = 0;
}

/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the exchange id whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by ULD, i.e., the FCP layer of libfc,
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len = 0;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	u32 fcbuff;

	if (!netdev)
		goto out_ddp_put;

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto out_ddp_put;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto out_ddp_put;

	len = ddp->len;
	/* if there is an error, force invalidation of the ddp context */
	if (ddp->err) {
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* guaranteed to be invalidated after 100us */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
		if (fcbuff & IXGBE_FCBUFF_VALID)
			udelay(100);
	}
	if (ddp->sgl)
		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);

out_ddp_put:
	return len;
}

/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: non-zero to set up the ddp context for target mode
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;
	struct pci_pool *pool;
	unsigned int cpu;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	if (!fcoe->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		return 0;
	}

	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	/* setup dma from scsi command sgl */
	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		return 0;
	}

	/* alloc the udl from per cpu ddp pool */
	cpu = get_cpu();
	pool = *per_cpu_ptr(fcoe->pool, cpu);
	ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed to allocate ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

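	/*
	 * walk the DMA-mapped scatterlist and carve it into bufflen-sized,
	 * bufflen-aligned chunks; each chunk address goes into the user
	 * descriptor list (udl) that the hardware walks during DDP
	 */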
	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				*per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
				goto out_noddp_free;
			}

			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have non-full bufflen */
	lastsize = thisoff + thislen;

	/*
	 * lastsize must not equal bufflen; if it does, add one more
	 * buffer with lastsize = 1.
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
			goto out_noddp_free;
		}

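		/* point the extra descriptor at the shared workaround buffer */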
		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
	put_cpu();

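	/*
	 * FCBUFF encodes the DDP buffer size (4KB), the buffer count,
	 * the offset into the first buffer and the valid bit
	 */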
	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	/* Set WRCONTX bit to allow DDP for target */
	if (target_mode)
		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
	fcbuff |= (IXGBE_FCBUFF_VALID);

	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;

	/* program DMA context */
	hw = &adapter->hw;
	spin_lock_bh(&fcoe->lock);

	/* turn on last frame indication for target mode as the target is
	 * supposed to send FCP_RSP when it is done. */
	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
	IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
	/* program filter context */
	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

	spin_unlock_bh(&fcoe->lock);

	return 1;

out_noddp_free:
	pci_pool_free(pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
	put_cpu();
	return 0;
}

/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}

/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. The DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			    struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}

/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates
 * the skb should not be passed to the ULD, and > 0 indicates the
 * length of data being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb)
{
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof *crc;
	__le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
	__le32 ddp_err;
	u32 fctl;
	u16 xid;

	if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

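	/* locate the FC frame header, skipping the VLAN tag if present */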
	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));

	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid = be16_to_cpu(fh->fh_ox_id);
	else
		xid = be16_to_cpu(fh->fh_rx_id);

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto ddp_out;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto ddp_out;

	ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
					      IXGBE_RXDADV_ERR_FCERR);
	if (ddp_err)
		goto ddp_out;

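	/* decode the FCoE DDP status the hardware wrote back in the descriptor */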
	switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
	/* return 0 to bypass going to ULD for DDPed data */
	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		rc = 0;
		break;
	/* unmap the sg list when FCPRSP is received */
	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
		pci_unmap_sg(adapter->pdev, ddp->sgl,
			     ddp->sgc, DMA_FROM_DEVICE);
		ddp->err = ddp_err;
		ddp->sgl = NULL;
		ddp->sgc = 0;
		/* fall through */
	/* if DDP length is present pass it through to ULD */
	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		if (ddp->len)
			rc = ddp->len;
		break;
	/* no match will return as an error */
	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
	default:
		break;
	}

	/* In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, data is already DDPed but the header
	 * indication of the last data frame would allow us to tell if we
	 * got all the data and the ULP can send FCP_RSP back. As this is
	 * not a full fcoe frame, we fill the trailer here so it won't be
	 * dropped by the ULP stack.
	 */
	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
	    (fctl & FC_FC_END_SEQ)) {
		skb_linearize(skb);
		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
		crc->fcoe_eof = FC_EOF_T;
	}
ddp_out:
	return rc;
}

/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates success, < 0 for error
 */
int ixgbe_fso(struct ixgbe_ring *tx_ring,
	      struct ixgbe_tx_buffer *first,
	      u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct fc_frame_header *fh;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof = 0;
	u32 mss_l4len_idx;
	u8 sof, eof;

	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
			skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* reset the headers to point to the fcoe/fc headers */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));

	/* sets up SOF and ORIS */
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
			       IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}

	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);
	/* sets up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb))
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
					IXGBE_ADVTXD_FCOEF_ORIE;
		else
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}

	/* sets up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

	/* include trailer in headlen as it is replicated per frame */
	*hdr_len = sizeof(struct fcoe_crc_eof);

	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
	if (skb_is_gso(skb)) {
		*hdr_len += skb_transport_offset(skb) +
			    sizeof(struct fc_frame_header);
		/* update gso_segs and bytecount */
		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
					       skb_shinfo(skb)->gso_size);
		first->bytecount += (first->gso_segs - 1) * *hdr_len;
		first->tx_flags |= IXGBE_TX_FLAGS_FSO;
	}

	/* set flag indicating FCOE to ixgbe_tx_map call */
	first->tx_flags |= IXGBE_TX_FLAGS_FCOE;

	/* mss_l4len_idx: use 1 for FSO as for TSO, no need for L4LEN */
	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_transport_offset(skb) +
			  sizeof(struct fc_frame_header);
	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
			   << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	/* write context desc */
	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
			  IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);

	return 0;
}

static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
{
	unsigned int cpu;
	struct pci_pool **pool;

	for_each_possible_cpu(cpu) {
		pool = per_cpu_ptr(fcoe->pool, cpu);
		if (*pool)
			pci_pool_destroy(*pool);
	}
	free_percpu(fcoe->pool);
	fcoe->pool = NULL;
}

static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	unsigned int cpu;
	struct pci_pool **pool;
	char pool_name[32];

	fcoe->pool = alloc_percpu(struct pci_pool *);
	if (!fcoe->pool)
		return;

	/* allocate pci pool for each cpu */
	for_each_possible_cpu(cpu) {
		snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
		pool = per_cpu_ptr(fcoe->pool, cpu);
		*pool = pci_pool_create(pool_name,
					adapter->pdev, IXGBE_FCPTR_MAX,
					IXGBE_FCPTR_ALIGN, PAGE_SIZE);
		if (!*pool) {
			e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
			ixgbe_fcoe_ddp_pools_free(fcoe);
			return;
		}
	}
}

/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	int i, fcoe_q, fcoe_i;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
	unsigned int cpu;

	if (!fcoe->pool) {
		spin_lock_init(&fcoe->lock);

		ixgbe_fcoe_ddp_pools_alloc(adapter);
		if (!fcoe->pool) {
			e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
			return;
		}

		/* Extra buffer to be shared by all DDPs for HW workaround */
		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
		if (fcoe->extra_ddp_buffer == NULL) {
			e_err(drv, "failed to allocate extra DDP buffer\n");
			goto out_ddp_pools;
		}

		fcoe->extra_ddp_buffer_dma =
			dma_map_single(&adapter->pdev->dev,
				       fcoe->extra_ddp_buffer,
				       IXGBE_FCBUFF_MIN,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev,
				      fcoe->extra_ddp_buffer_dma)) {
			e_err(drv, "failed to map extra DDP buffer\n");
			goto out_extra_ddp_buffer;
		}

		/* Alloc per cpu memory to count ddp allocation failures */
		fcoe->pcpu_noddp = alloc_percpu(u64);
		if (!fcoe->pcpu_noddp) {
			e_err(drv, "failed to alloc noddp counter\n");
			goto out_pcpu_noddp_alloc_fail;
		}

		fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
		if (!fcoe->pcpu_noddp_ext_buff) {
			e_err(drv, "failed to alloc noddp extra buff cnt\n");
			goto out_pcpu_noddp_extra_buff_alloc_fail;
		}

		for_each_possible_cpu(cpu) {
			*per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
		}
	}

	/* Enable L2 eth type filter for FCoE */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
	/* Enable L2 eth type filter for FIP */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
	if (adapter->ring_feature[RING_F_FCOE].indices) {
		/* Use multiple rx queues for FCoE by redirection table */
		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
			fcoe_i = f->mask + i % f->indices;
			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
		}
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
	} else {
		/* Use single rx queue for FCoE */
		fcoe_i = f->mask;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
				IXGBE_ETQS_QUEUE_EN |
				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
	}
	/* send FIP frames to the first FCoE queue */
	fcoe_i = f->mask;
	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
	return;
out_pcpu_noddp_extra_buff_alloc_fail:
	free_percpu(fcoe->pcpu_noddp);
out_pcpu_noddp_alloc_fail:
	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
out_extra_ddp_buffer:
	kfree(fcoe->extra_ddp_buffer);
out_ddp_pools:
	ixgbe_fcoe_ddp_pools_free(fcoe);
}

/**
 * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */
void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
{
	int i;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!fcoe->pool)
		return;

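	/* release any ddp contexts that are still outstanding */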
	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
		ixgbe_fcoe_ddp_put(adapter->netdev, i);
	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
	free_percpu(fcoe->pcpu_noddp);
	free_percpu(fcoe->pcpu_noddp_ext_buff);
	kfree(fcoe->extra_ddp_buffer);
	ixgbe_fcoe_ddp_pools_free(fcoe);
}

/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_enable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_enable;

	atomic_inc(&fcoe->refcnt);
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		goto out_enable;

	e_info(drv, "Enabling FCoE offload features.\n");
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

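	/* tear down the current queue/interrupt layout before adding FCoE rings */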
	ixgbe_clear_interrupt_scheme(adapter);

	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
	netdev->features |= NETIF_F_FCOE_CRC;
	netdev->features |= NETIF_F_FSO;
	netdev->features |= NETIF_F_FCOE_MTU;
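	/* advertise the highest exchange id usable for DDP offload */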
	netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;

	ixgbe_init_interrupt_scheme(adapter);
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_enable:
	return rc;
}

/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_disable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_disable;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		goto out_disable;

	if (!atomic_dec_and_test(&fcoe->refcnt))
		goto out_disable;

	e_info(drv, "Disabling FCoE offload features.\n");
	netdev->features &= ~NETIF_F_FCOE_CRC;
	netdev->features &= ~NETIF_F_FSO;
	netdev->features &= ~NETIF_F_FCOE_MTU;
	netdev->fcoe_ddp_xid = 0;
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	ixgbe_clear_interrupt_scheme(adapter);
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = 0;
	ixgbe_cleanup_fcoe(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_disable:
	return rc;
}

/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev: the corresponding netdev
 * @wwn: the world wide name
 * @type: the type of world wide name
 *
 * If both the prefix and the san mac address are valid, the node or port
 * world wide name is formed based on the NAA-2 IEEE Extended name
 * identifier (ref. to T10 FC-LS Spec., Sec. 15.3).
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	int rc = -EINVAL;
	u16 prefix = 0xffff;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		prefix = mac->wwnn_prefix;
		break;
	case NETDEV_FCOE_WWPN:
		prefix = mac->wwpn_prefix;
		break;
	default:
		break;
	}

	if ((prefix != 0xffff) &&
	    is_valid_ether_addr(mac->san_addr)) {
		*wwn = ((u64) prefix << 48) |
		       ((u64) mac->san_addr[0] << 40) |
		       ((u64) mac->san_addr[1] << 32) |
		       ((u64) mac->san_addr[2] << 24) |
		       ((u64) mac->san_addr[3] << 16) |
		       ((u64) mac->san_addr[4] << 8)  |
		       ((u64) mac->san_addr[5]);
		rc = 0;
	}
	return rc;
}

/**
 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
 * @netdev: the corresponding netdev
 * @info: HBA information
 *
 * Fills in the FCoE HBA information for the given ixgbe device.
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int i, pos;
	u8 buf[8];

	if (!info)
		return -EINVAL;

	/* Don't return information on unsupported devices */
	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540)
		return -EINVAL;

	/* Manufacturer */
	snprintf(info->manufacturer, sizeof(info->manufacturer),
		 "Intel Corporation");

	/* Serial Number */

	/* Get the PCI-e Device Serial Number Capability */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
	if (pos) {
		pos += 4;
		for (i = 0; i < 8; i++)
			pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);

		snprintf(info->serial_number, sizeof(info->serial_number),
			 "%02X%02X%02X%02X%02X%02X%02X%02X",
			 buf[7], buf[6], buf[5], buf[4],
			 buf[3], buf[2], buf[1], buf[0]);
	} else
		snprintf(info->serial_number, sizeof(info->serial_number),
			 "Unknown");

	/* Hardware Version */
	snprintf(info->hardware_version,
		 sizeof(info->hardware_version),
		 "Rev %d", hw->revision_id);
	/* Driver Name/Version */
	snprintf(info->driver_version,
		 sizeof(info->driver_version),
		 "%s v%s",
		 ixgbe_driver_name,
		 ixgbe_driver_version);
	/* Firmware Version */
	snprintf(info->firmware_version,
		 sizeof(info->firmware_version),
		 "0x%08x",
		 (adapter->eeprom_verh << 16) |
		  adapter->eeprom_verl);

	/* Model */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel 82599");
	} else {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel X540");
	}

	/* Model Description */
	snprintf(info->model_description,
		 sizeof(info->model_description),
		 "%s",
		 ixgbe_default_device_descr);

	return 0;
}