/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 *
 * Returns : none
 *
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
	ddp->len = 0;
	ddp->err = 1;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->sgl = NULL;
	ddp->sgc = 0;
}

/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by ULD, i.e., FCP layer of libfc
 * to release the corresponding ddp context when the I/O is done.
64 * 65 * Returns : data length already ddp-ed in bytes 66 */ 67 int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) 68 { 69 int len = 0; 70 struct ixgbe_fcoe *fcoe; 71 struct ixgbe_adapter *adapter; 72 struct ixgbe_fcoe_ddp *ddp; 73 u32 fcbuff; 74 75 if (!netdev) 76 goto out_ddp_put; 77 78 if (xid >= IXGBE_FCOE_DDP_MAX) 79 goto out_ddp_put; 80 81 adapter = netdev_priv(netdev); 82 fcoe = &adapter->fcoe; 83 ddp = &fcoe->ddp[xid]; 84 if (!ddp->udl) 85 goto out_ddp_put; 86 87 len = ddp->len; 88 /* if there an error, force to invalidate ddp context */ 89 if (ddp->err) { 90 spin_lock_bh(&fcoe->lock); 91 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0); 92 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW, 93 (xid | IXGBE_FCFLTRW_WE)); 94 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); 95 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, 96 (xid | IXGBE_FCDMARW_WE)); 97 98 /* guaranteed to be invalidated after 100us */ 99 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, 100 (xid | IXGBE_FCDMARW_RE)); 101 fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF); 102 spin_unlock_bh(&fcoe->lock); 103 if (fcbuff & IXGBE_FCBUFF_VALID) 104 udelay(100); 105 } 106 if (ddp->sgl) 107 dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, 108 DMA_FROM_DEVICE); 109 if (ddp->pool) { 110 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); 111 ddp->pool = NULL; 112 } 113 114 ixgbe_fcoe_clear_ddp(ddp); 115 116 out_ddp_put: 117 return len; 118 } 119 120 /** 121 * ixgbe_fcoe_ddp_setup - called to set up ddp context 122 * @netdev: the corresponding net_device 123 * @xid: the exchange id requesting ddp 124 * @sgl: the scatter-gather list for this request 125 * @sgc: the number of scatter-gather items 126 * 127 * Returns : 1 for success and 0 for no ddp 128 */ 129 static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, 130 struct scatterlist *sgl, unsigned int sgc, 131 int target_mode) 132 { 133 struct ixgbe_adapter *adapter; 134 struct ixgbe_hw *hw; 135 struct ixgbe_fcoe *fcoe; 136 struct ixgbe_fcoe_ddp *ddp; 137 struct ixgbe_fcoe_ddp_pool *ddp_pool; 138 struct scatterlist *sg; 139 unsigned int i, j, dmacount; 140 unsigned int len; 141 static const unsigned int bufflen = IXGBE_FCBUFF_MIN; 142 unsigned int firstoff = 0; 143 unsigned int lastsize; 144 unsigned int thisoff = 0; 145 unsigned int thislen = 0; 146 u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; 147 dma_addr_t addr = 0; 148 149 if (!netdev || !sgl) 150 return 0; 151 152 adapter = netdev_priv(netdev); 153 if (xid >= IXGBE_FCOE_DDP_MAX) { 154 e_warn(drv, "xid=0x%x out-of-range\n", xid); 155 return 0; 156 } 157 158 /* no DDP if we are already down or resetting */ 159 if (test_bit(__IXGBE_DOWN, &adapter->state) || 160 test_bit(__IXGBE_RESETTING, &adapter->state)) 161 return 0; 162 163 fcoe = &adapter->fcoe; 164 ddp = &fcoe->ddp[xid]; 165 if (ddp->sgl) { 166 e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", 167 xid, ddp->sgl, ddp->sgc); 168 return 0; 169 } 170 ixgbe_fcoe_clear_ddp(ddp); 171 172 173 if (!fcoe->ddp_pool) { 174 e_warn(drv, "No ddp_pool resources allocated\n"); 175 return 0; 176 } 177 178 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); 179 if (!ddp_pool->pool) { 180 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); 181 goto out_noddp; 182 } 183 184 /* setup dma from scsi command sgl */ 185 dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); 186 if (dmacount == 0) { 187 e_err(drv, "xid 0x%x DMA map error\n", xid); 188 goto out_noddp; 189 } 190 191 /* alloc the udl from per cpu ddp pool */ 192 ddp->udl = dma_pool_alloc(ddp_pool->pool, 

/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to set up DDP in target mode, 0 for initiator mode
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed to allocate ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
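	/*
	 * Worked example of the loop above (illustrative numbers only):
	 * with bufflen = 4KB, a single SG element with DMA address
	 * 0x10001200 and length 10000 splits into three user descriptors:
	 *
	 *	j = 0: thisoff = 0x200, thislen = 4096 - 512 = 3584
	 *	j = 1: thisoff = 0,     thislen = 4096
	 *	j = 2: thisoff = 0,     thislen = 10000 - 3584 - 4096 = 2320
	 *
	 * Only the first chunk may start at a non-zero offset and only the
	 * last may end short of bufflen; any other layout aborts the DDP
	 * setup via out_noddp_free.
	 */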
	/* only the last buffer may have non-full bufflen */
	lastsize = thisoff + thislen;

	/*
	 * lastsize can not be bufflen.
	 * If it is, then add another buffer with lastsize = 1.
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
			ddp_pool->noddp_ext_buff++;
			goto out_noddp_free;
		}

		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
	put_cpu();

	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	/* Set WRCONTX bit to allow DDP for target */
	if (target_mode)
		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
	fcbuff |= (IXGBE_FCBUFF_VALID);

	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;

	/* program DMA context */
	hw = &adapter->hw;
	spin_lock_bh(&fcoe->lock);

	/* turn on last frame indication for target mode as the FCP_RSP
	 * target is supposed to send FCP_RSP when it is done. */
	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
	IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
	/* program filter context */
	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

	spin_unlock_bh(&fcoe->lock);

	return 1;

out_noddp_free:
	dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
out_noddp:
	put_cpu();
	return 0;
}

/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}

/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. The DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}

/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
 * not passing the skb to ULD, > 0 is the length of data being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb)
{
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof *crc;
	__le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
	__le32 ddp_err;
	u32 fctl;
	u16 xid;

	if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));

	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid = be16_to_cpu(fh->fh_ox_id);
	else
		xid = be16_to_cpu(fh->fh_rx_id);

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto ddp_out;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto ddp_out;

	ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
					      IXGBE_RXDADV_ERR_FCERR);
	if (ddp_err)
		goto ddp_out;

	switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
	/* return 0 to bypass going to ULD for DDPed data */
	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		rc = 0;
		break;
	/* unmap the sg list when FCPRSP is received */
	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
			     ddp->sgc, DMA_FROM_DEVICE);
		ddp->err = ddp_err;
		ddp->sgl = NULL;
		ddp->sgc = 0;
		/* fall through */
	/* if DDP length is present pass it through to ULD */
	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		if (ddp->len)
			rc = ddp->len;
		break;
	/* no match will return as an error */
	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
	default:
		break;
	}

	/* In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, data is already DDPed but the header
	 * indication of the last data frame would allow us to tell if we
	 * got all the data and the ULP can send FCP_RSP back; as this is
	 * not a full fcoe frame, we fill the trailer here so it won't be
	 * dropped by the ULP stack.
	 */
	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
	    (fctl & FC_FC_END_SEQ)) {
		skb_linearize(skb);
		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
		crc->fcoe_eof = FC_EOF_T;
	}
ddp_out:
	return rc;
}
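
/*
 * Illustrative only: ixgbe_fcoe_ddp() is invoked from the driver's Rx
 * cleanup path once a frame has been identified as FCoE. A simplified,
 * hypothetical sketch of that call site:
 *
 *	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
 *		ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
 *		if (!ddp_bytes) {
 *			dev_kfree_skb_any(skb);	// payload already placed
 *			continue;		// do not hand skb to ULD
 *		}
 *	}
 */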

/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates success, < 0 for error
 */
int ixgbe_fso(struct ixgbe_ring *tx_ring,
	      struct ixgbe_tx_buffer *first,
	      u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct fc_frame_header *fh;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof = 0;
	u32 mss_l4len_idx;
	u8 sof, eof;

	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
			skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* reset the headers to point to the FCoE/FC headers */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));

	/* sets up SOF and ORIS */
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
			       IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}

	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);
	/* sets up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb))
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
					IXGBE_ADVTXD_FCOEF_ORIE;
		else
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}

	/* sets up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

	/* include trailer in headlen as it is replicated per frame */
	*hdr_len = sizeof(struct fcoe_crc_eof);

	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
	if (skb_is_gso(skb)) {
		*hdr_len += skb_transport_offset(skb) +
			    sizeof(struct fc_frame_header);
		/* update gso_segs and bytecount */
		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
					       skb_shinfo(skb)->gso_size);
		first->bytecount += (first->gso_segs - 1) * *hdr_len;
		first->tx_flags |= IXGBE_TX_FLAGS_FSO;
	}

	/* set flag indicating FCOE to ixgbe_tx_map call */
	first->tx_flags |= IXGBE_TX_FLAGS_FCOE;

	/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_transport_offset(skb) +
			  sizeof(struct fc_frame_header);
	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
			   << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	/* write context desc */
	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
			  IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);

	return 0;
}
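
/*
 * Worked example of the FSO accounting above (hypothetical numbers):
 * for an untagged frame, skb_transport_offset() = 14 (Ethernet) +
 * 14 (FCoE header) = 28, so for a GSO skb
 *
 *	*hdr_len = 8 (CRC/EOF trailer) + 28 + 24 (FC header) = 60
 *
 * and with gso_size = 2048 and skb->len = 60 + 3 * 2048 = 6204:
 *
 *	gso_segs  = DIV_ROUND_UP(6204 - 60, 2048) = 3
 *	bytecount += (3 - 1) * 60, the headers replicated per frame
 */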

static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
{
	struct ixgbe_fcoe_ddp_pool *ddp_pool;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	if (ddp_pool->pool)
		dma_pool_destroy(ddp_pool->pool);
	ddp_pool->pool = NULL;
}

static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
				     struct device *dev,
				     unsigned int cpu)
{
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct dma_pool *pool;
	char pool_name[32];

	snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);

	pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
			       IXGBE_FCPTR_ALIGN, PAGE_SIZE);
	if (!pool)
		return -ENOMEM;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	ddp_pool->pool = pool;
	ddp_pool->noddp = 0;
	ddp_pool->noddp_ext_buff = 0;

	return 0;
}

/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
	struct ixgbe_hw *hw = &adapter->hw;
	int i, fcoe_q, fcoe_i;
	u32 etqf;

	/* Minimal functionality for FCoE requires at least CRC offloads */
	if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
		return;

	/* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
	etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);

	/* leave registers un-configured if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return;

	/* Use one or more Rx queues for FCoE by redirection table */
	for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
		fcoe_i = fcoe->offset + (i % fcoe->indices);
		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);

	/* Enable L2 EtherType filter for FIP */
	etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);

	/* Send FIP frames to the first FCoE queue */
	fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

	/* Configure FCoE Rx control */
	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
			IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
}
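
/*
 * Worked example of the redirection table fill above (hypothetical ring
 * layout): with fcoe->offset = 8 and fcoe->indices = 2, the FCRETA
 * entries alternate between the register indices of rx_ring[8] and
 * rx_ring[9], so incoming FCoE traffic is spread across those two
 * queues.
 */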

/**
 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	int cpu, i;

	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return;

	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
		ixgbe_fcoe_ddp_put(adapter->netdev, i);

	for_each_possible_cpu(cpu)
		ixgbe_fcoe_dma_pool_free(fcoe, cpu);

	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
	kfree(fcoe->extra_ddp_buffer);

	fcoe->extra_ddp_buffer = NULL;
	fcoe->extra_ddp_buffer_dma = 0;
}

/**
 * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Sets up ddp context resources
 *
 * Returns : 0 indicates success or -ENOMEM on failure
 */
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	struct device *dev = &adapter->pdev->dev;
	void *buffer;
	dma_addr_t dma;
	unsigned int cpu;

	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return 0;

	/* Extra buffer to be shared by all DDPs for HW work around */
	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
	if (!buffer) {
		e_err(drv, "failed to allocate extra DDP buffer\n");
		return -ENOMEM;
	}

	dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		e_err(drv, "failed to map extra DDP buffer\n");
		kfree(buffer);
		return -ENOMEM;
	}

	fcoe->extra_ddp_buffer = buffer;
	fcoe->extra_ddp_buffer_dma = dma;

	/* allocate pci pool for each cpu */
	for_each_possible_cpu(cpu) {
		int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
		if (!err)
			continue;

		e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
		ixgbe_free_fcoe_ddp_resources(adapter);
		return -ENOMEM;
	}

	return 0;
}

static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		return -EINVAL;

	fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);

	if (!fcoe->ddp_pool) {
		e_err(drv, "failed to allocate percpu DDP resources\n");
		return -ENOMEM;
	}

	adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;

	return 0;
}

static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	adapter->netdev->fcoe_ddp_xid = 0;

	if (!fcoe->ddp_pool)
		return;

	free_percpu(fcoe->ddp_pool);
	fcoe->ddp_pool = NULL;
}
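
/*
 * Illustrative only: ixgbe_fcoe_enable() and ixgbe_fcoe_disable() below
 * are exposed through net_device_ops (as the ndo_fcoe_enable and
 * ndo_fcoe_disable callbacks), so the FCoE transport toggles the offload
 * roughly like this (hypothetical sketch):
 *
 *	err = netdev->netdev_ops->ndo_fcoe_enable(netdev);
 *	...
 *	err = netdev->netdev_ops->ndo_fcoe_disable(netdev);
 */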
786 * 787 * Returns : 0 indicates success or -EINVAL on failure 788 */ 789 int ixgbe_fcoe_enable(struct net_device *netdev) 790 { 791 struct ixgbe_adapter *adapter = netdev_priv(netdev); 792 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 793 794 atomic_inc(&fcoe->refcnt); 795 796 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) 797 return -EINVAL; 798 799 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 800 return -EINVAL; 801 802 e_info(drv, "Enabling FCoE offload features.\n"); 803 804 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 805 e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n"); 806 807 if (netif_running(netdev)) 808 netdev->netdev_ops->ndo_stop(netdev); 809 810 /* Allocate per CPU memory to track DDP pools */ 811 ixgbe_fcoe_ddp_enable(adapter); 812 813 /* enable FCoE and notify stack */ 814 adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; 815 netdev->features |= NETIF_F_FCOE_MTU; 816 netdev_features_change(netdev); 817 818 /* release existing queues and reallocate them */ 819 ixgbe_clear_interrupt_scheme(adapter); 820 ixgbe_init_interrupt_scheme(adapter); 821 822 if (netif_running(netdev)) 823 netdev->netdev_ops->ndo_open(netdev); 824 825 return 0; 826 } 827 828 /** 829 * ixgbe_fcoe_disable - turn off FCoE offload feature 830 * @netdev: the corresponding netdev 831 * 832 * Turns off FCoE offload feature in 82599. 833 * 834 * Returns : 0 indicates success or -EINVAL on failure 835 */ 836 int ixgbe_fcoe_disable(struct net_device *netdev) 837 { 838 struct ixgbe_adapter *adapter = netdev_priv(netdev); 839 840 if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) 841 return -EINVAL; 842 843 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 844 return -EINVAL; 845 846 e_info(drv, "Disabling FCoE offload features.\n"); 847 if (netif_running(netdev)) 848 netdev->netdev_ops->ndo_stop(netdev); 849 850 /* Free per CPU memory to track DDP pools */ 851 ixgbe_fcoe_ddp_disable(adapter); 852 853 /* disable FCoE and notify stack */ 854 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 855 netdev->features &= ~NETIF_F_FCOE_MTU; 856 857 netdev_features_change(netdev); 858 859 /* release existing queues and reallocate them */ 860 ixgbe_clear_interrupt_scheme(adapter); 861 ixgbe_init_interrupt_scheme(adapter); 862 863 if (netif_running(netdev)) 864 netdev->netdev_ops->ndo_open(netdev); 865 866 return 0; 867 } 868 869 /** 870 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port 871 * @netdev : ixgbe adapter 872 * @wwn : the world wide name 873 * @type: the type of world wide name 874 * 875 * Returns the node or port world wide name if both the prefix and the san 876 * mac address are valid, then the wwn is formed based on the NAA-2 for 877 * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). 
878 * 879 * Returns : 0 on success 880 */ 881 int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) 882 { 883 int rc = -EINVAL; 884 u16 prefix = 0xffff; 885 struct ixgbe_adapter *adapter = netdev_priv(netdev); 886 struct ixgbe_mac_info *mac = &adapter->hw.mac; 887 888 switch (type) { 889 case NETDEV_FCOE_WWNN: 890 prefix = mac->wwnn_prefix; 891 break; 892 case NETDEV_FCOE_WWPN: 893 prefix = mac->wwpn_prefix; 894 break; 895 default: 896 break; 897 } 898 899 if ((prefix != 0xffff) && 900 is_valid_ether_addr(mac->san_addr)) { 901 *wwn = ((u64) prefix << 48) | 902 ((u64) mac->san_addr[0] << 40) | 903 ((u64) mac->san_addr[1] << 32) | 904 ((u64) mac->san_addr[2] << 24) | 905 ((u64) mac->san_addr[3] << 16) | 906 ((u64) mac->san_addr[4] << 8) | 907 ((u64) mac->san_addr[5]); 908 rc = 0; 909 } 910 return rc; 911 } 912 913 /** 914 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information 915 * @netdev : ixgbe adapter 916 * @info : HBA information 917 * 918 * Returns ixgbe HBA information 919 * 920 * Returns : 0 on success 921 */ 922 int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, 923 struct netdev_fcoe_hbainfo *info) 924 { 925 struct ixgbe_adapter *adapter = netdev_priv(netdev); 926 struct ixgbe_hw *hw = &adapter->hw; 927 int i, pos; 928 u8 buf[8]; 929 930 if (!info) 931 return -EINVAL; 932 933 /* Don't return information on unsupported devices */ 934 if (hw->mac.type != ixgbe_mac_82599EB && 935 hw->mac.type != ixgbe_mac_X540) 936 return -EINVAL; 937 938 /* Manufacturer */ 939 snprintf(info->manufacturer, sizeof(info->manufacturer), 940 "Intel Corporation"); 941 942 /* Serial Number */ 943 944 /* Get the PCI-e Device Serial Number Capability */ 945 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN); 946 if (pos) { 947 pos += 4; 948 for (i = 0; i < 8; i++) 949 pci_read_config_byte(adapter->pdev, pos + i, &buf[i]); 950 951 snprintf(info->serial_number, sizeof(info->serial_number), 952 "%02X%02X%02X%02X%02X%02X%02X%02X", 953 buf[7], buf[6], buf[5], buf[4], 954 buf[3], buf[2], buf[1], buf[0]); 955 } else 956 snprintf(info->serial_number, sizeof(info->serial_number), 957 "Unknown"); 958 959 /* Hardware Version */ 960 snprintf(info->hardware_version, 961 sizeof(info->hardware_version), 962 "Rev %d", hw->revision_id); 963 /* Driver Name/Version */ 964 snprintf(info->driver_version, 965 sizeof(info->driver_version), 966 "%s v%s", 967 ixgbe_driver_name, 968 ixgbe_driver_version); 969 /* Firmware Version */ 970 snprintf(info->firmware_version, 971 sizeof(info->firmware_version), 972 "0x%08x", 973 (adapter->eeprom_verh << 16) | 974 adapter->eeprom_verl); 975 976 /* Model */ 977 if (hw->mac.type == ixgbe_mac_82599EB) { 978 snprintf(info->model, 979 sizeof(info->model), 980 "Intel 82599"); 981 } else { 982 snprintf(info->model, 983 sizeof(info->model), 984 "Intel X540"); 985 } 986 987 /* Model Description */ 988 snprintf(info->model_description, 989 sizeof(info->model_description), 990 "%s", 991 ixgbe_default_device_descr); 992 993 return 0; 994 } 995 996 /** 997 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to 998 * @adapter - pointer to the device adapter structure 999 * 1000 * Return : TC that FCoE is mapped to 1001 */ 1002 u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) 1003 { 1004 #ifdef CONFIG_IXGBE_DCB 1005 return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); 1006 #else 1007 return 0; 1008 #endif 1009 } 1010