1 /******************************************************************************* 2 3 Intel 10 Gigabit PCI Express Linux driver 4 Copyright(c) 1999 - 2014 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 Linux NICS <linux.nics@intel.com> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 27 *******************************************************************************/ 28 29 #include "ixgbe.h" 30 #include <linux/if_ether.h> 31 #include <linux/gfp.h> 32 #include <linux/if_vlan.h> 33 #include <scsi/scsi_cmnd.h> 34 #include <scsi/scsi_device.h> 35 #include <scsi/fc/fc_fs.h> 36 #include <scsi/fc/fc_fcoe.h> 37 #include <scsi/libfc.h> 38 #include <scsi/libfcoe.h> 39 40 /** 41 * ixgbe_fcoe_clear_ddp - clear the given ddp context 42 * @ddp: ptr to the ixgbe_fcoe_ddp 43 * 44 * Returns : none 45 * 46 */ 47 static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) 48 { 49 ddp->len = 0; 50 ddp->err = 1; 51 ddp->udl = NULL; 52 ddp->udp = 0UL; 53 ddp->sgl = NULL; 54 ddp->sgc = 0; 55 } 56 57 /** 58 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid 59 * @netdev: the corresponding net_device 60 * @xid: the xid that corresponding ddp will be freed 61 * 62 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done 63 * and it is expected to be called by ULD, i.e., FCP layer of libfc 64 * to release the corresponding ddp context when the I/O is done. 65 * 66 * Returns : data length already ddp-ed in bytes 67 */ 68 int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) 69 { 70 int len; 71 struct ixgbe_fcoe *fcoe; 72 struct ixgbe_adapter *adapter; 73 struct ixgbe_fcoe_ddp *ddp; 74 struct ixgbe_hw *hw; 75 u32 fcbuff; 76 77 if (!netdev) 78 return 0; 79 80 if (xid >= netdev->fcoe_ddp_xid) 81 return 0; 82 83 adapter = netdev_priv(netdev); 84 fcoe = &adapter->fcoe; 85 ddp = &fcoe->ddp[xid]; 86 if (!ddp->udl) 87 return 0; 88 89 hw = &adapter->hw; 90 len = ddp->len; 91 /* if no error then skip ddp context invalidation */ 92 if (!ddp->err) 93 goto skip_ddpinv; 94 95 if (hw->mac.type == ixgbe_mac_X550) { 96 /* X550 does not require DDP FCoE lock */ 97 98 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); 99 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), 100 (xid | IXGBE_FCFLTRW_WE)); 101 102 /* program FCBUFF */ 103 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); 104 105 /* program FCDMARW */ 106 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), 107 (xid | IXGBE_FCDMARW_WE)); 108 109 /* read FCBUFF to check context invalidated */ 110 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), 111 (xid | IXGBE_FCDMARW_RE)); 112 fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid)); 113 } else { 114 /* other hardware requires DDP FCoE lock */ 115 spin_lock_bh(&fcoe->lock); 116 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); 117 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, 118 (xid | IXGBE_FCFLTRW_WE)); 119 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); 120 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, 121 (xid | IXGBE_FCDMARW_WE)); 122 123 /* guaranteed to be invalidated after 100us */ 124 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, 125 (xid | IXGBE_FCDMARW_RE)); 126 fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); 127 spin_unlock_bh(&fcoe->lock); 128 } 129 130 if (fcbuff & IXGBE_FCBUFF_VALID) 131 usleep_range(100, 150); 132 133 skip_ddpinv: 134 if (ddp->sgl) 135 dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, 136 DMA_FROM_DEVICE); 137 if (ddp->pool) { 138 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); 139 ddp->pool = NULL; 140 } 141 142 ixgbe_fcoe_clear_ddp(ddp); 143 144 return len; 145 } 146 147 /** 148 * ixgbe_fcoe_ddp_setup - called to set up ddp context 149 * @netdev: the corresponding net_device 150 * @xid: the exchange id requesting ddp 151 * @sgl: the scatter-gather list for this request 152 * @sgc: the number of scatter-gather items 153 * 154 * Returns : 1 for success and 0 for no ddp 155 */ 156 static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, 157 struct scatterlist *sgl, unsigned int sgc, 158 int target_mode) 159 { 160 struct ixgbe_adapter *adapter; 161 struct ixgbe_hw *hw; 162 struct ixgbe_fcoe *fcoe; 163 struct ixgbe_fcoe_ddp *ddp; 164 struct ixgbe_fcoe_ddp_pool *ddp_pool; 165 struct scatterlist *sg; 166 unsigned int i, j, dmacount; 167 unsigned int len; 168 static const unsigned int bufflen = IXGBE_FCBUFF_MIN; 169 unsigned int firstoff = 0; 170 unsigned int lastsize; 171 unsigned int thisoff = 0; 172 unsigned int thislen = 0; 173 u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; 174 dma_addr_t addr = 0; 175 176 if (!netdev || !sgl) 177 return 0; 178 179 adapter = netdev_priv(netdev); 180 if (xid >= netdev->fcoe_ddp_xid) { 181 e_warn(drv, "xid=0x%x out-of-range\n", xid); 182 return 0; 183 } 184 185 /* no DDP if we are already down or resetting */ 186 if (test_bit(__IXGBE_DOWN, &adapter->state) || 187 test_bit(__IXGBE_RESETTING, &adapter->state)) 188 return 0; 189 190 fcoe = &adapter->fcoe; 191 ddp = &fcoe->ddp[xid]; 192 if (ddp->sgl) { 193 e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", 194 xid, ddp->sgl, ddp->sgc); 195 return 0; 196 } 197 ixgbe_fcoe_clear_ddp(ddp); 198 199 200 if (!fcoe->ddp_pool) { 201 e_warn(drv, "No ddp_pool resources allocated\n"); 202 return 0; 203 } 204 205 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); 206 if (!ddp_pool->pool) { 207 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); 208 goto out_noddp; 209 } 210 211 /* setup dma from scsi command sgl */ 212 dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); 213 if (dmacount == 0) { 214 e_err(drv, "xid 0x%x DMA map error\n", xid); 215 goto out_noddp; 216 } 217 218 /* alloc the udl from per cpu ddp pool */ 219 ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); 220 if (!ddp->udl) { 221 e_err(drv, "failed allocated ddp context\n"); 222 goto out_noddp_unmap; 223 } 224 ddp->pool = ddp_pool->pool; 225 ddp->sgl = sgl; 226 ddp->sgc = sgc; 227 228 j = 0; 229 for_each_sg(sgl, sg, dmacount, i) { 230 addr = sg_dma_address(sg); 231 len = sg_dma_len(sg); 232 while (len) { 233 /* max number of buffers allowed in one DDP context */ 234 if (j >= IXGBE_BUFFCNT_MAX) { 235 ddp_pool->noddp++; 236 goto out_noddp_free; 237 } 238 239 /* get the offset of length of current buffer */ 240 thisoff = addr & ((dma_addr_t)bufflen - 1); 241 thislen = min((bufflen - thisoff), len); 242 /* 243 * all but the 1st buffer (j == 0) 244 * must be aligned on bufflen 245 */ 246 if ((j != 0) && (thisoff)) 247 goto out_noddp_free; 248 /* 249 * all but the last buffer 250 * ((i == (dmacount - 1)) && (thislen == len)) 251 * must end at bufflen 252 */ 253 if (((i != (dmacount - 1)) || (thislen != len)) 254 && ((thislen + thisoff) != bufflen)) 255 goto out_noddp_free; 256 257 ddp->udl[j] = (u64)(addr - thisoff); 258 /* only the first buffer may have none-zero offset */ 259 if (j == 0) 260 firstoff = thisoff; 261 len -= thislen; 262 addr += thislen; 263 j++; 264 } 265 } 266 /* only the last buffer may have non-full bufflen */ 267 lastsize = thisoff + thislen; 268 269 /* 270 * lastsize can not be buffer len. 271 * If it is then adding another buffer with lastsize = 1. 272 */ 273 if (lastsize == bufflen) { 274 if (j >= IXGBE_BUFFCNT_MAX) { 275 ddp_pool->noddp_ext_buff++; 276 goto out_noddp_free; 277 } 278 279 ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); 280 j++; 281 lastsize = 1; 282 } 283 put_cpu(); 284 285 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); 286 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); 287 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); 288 /* Set WRCONTX bit to allow DDP for target */ 289 if (target_mode) 290 fcbuff |= (IXGBE_FCBUFF_WRCONTX); 291 fcbuff |= (IXGBE_FCBUFF_VALID); 292 293 fcdmarw = xid; 294 fcdmarw |= IXGBE_FCDMARW_WE; 295 fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT); 296 297 fcfltrw = xid; 298 fcfltrw |= IXGBE_FCFLTRW_WE; 299 300 /* program DMA context */ 301 hw = &adapter->hw; 302 303 /* turn on last frame indication for target mode as FCP_RSPtarget is 304 * supposed to send FCP_RSP when it is done. */ 305 if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) { 306 set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode); 307 fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL); 308 fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH; 309 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); 310 } 311 312 if (hw->mac.type == ixgbe_mac_X550) { 313 /* X550 does not require DDP lock */ 314 315 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid), 316 ddp->udp & DMA_BIT_MASK(32)); 317 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32); 318 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff); 319 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw); 320 /* program filter context */ 321 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID); 322 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0); 323 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw); 324 } else { 325 /* DDP lock for indirect DDP context access */ 326 spin_lock_bh(&fcoe->lock); 327 328 IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); 329 IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); 330 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); 331 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); 332 /* program filter context */ 333 IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); 334 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); 335 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); 336 337 spin_unlock_bh(&fcoe->lock); 338 } 339 340 return 1; 341 342 out_noddp_free: 343 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); 344 ixgbe_fcoe_clear_ddp(ddp); 345 346 out_noddp_unmap: 347 dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); 348 out_noddp: 349 put_cpu(); 350 return 0; 351 } 352 353 /** 354 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode 355 * @netdev: the corresponding net_device 356 * @xid: the exchange id requesting ddp 357 * @sgl: the scatter-gather list for this request 358 * @sgc: the number of scatter-gather items 359 * 360 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup 361 * and is expected to be called from ULD, e.g., FCP layer of libfc 362 * to set up ddp for the corresponding xid of the given sglist for 363 * the corresponding I/O. 364 * 365 * Returns : 1 for success and 0 for no ddp 366 */ 367 int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, 368 struct scatterlist *sgl, unsigned int sgc) 369 { 370 return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); 371 } 372 373 /** 374 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode 375 * @netdev: the corresponding net_device 376 * @xid: the exchange id requesting ddp 377 * @sgl: the scatter-gather list for this request 378 * @sgc: the number of scatter-gather items 379 * 380 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target 381 * and is expected to be called from ULD, e.g., FCP layer of libfc 382 * to set up ddp for the corresponding xid of the given sglist for 383 * the corresponding I/O. The DDP in target mode is a write I/O request 384 * from the initiator. 385 * 386 * Returns : 1 for success and 0 for no ddp 387 */ 388 int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, 389 struct scatterlist *sgl, unsigned int sgc) 390 { 391 return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); 392 } 393 394 /** 395 * ixgbe_fcoe_ddp - check ddp status and mark it done 396 * @adapter: ixgbe adapter 397 * @rx_desc: advanced rx descriptor 398 * @skb: the skb holding the received data 399 * 400 * This checks ddp status. 401 * 402 * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates 403 * not passing the skb to ULD, > 0 indicates is the length of data 404 * being ddped. 405 */ 406 int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, 407 union ixgbe_adv_rx_desc *rx_desc, 408 struct sk_buff *skb) 409 { 410 int rc = -EINVAL; 411 struct ixgbe_fcoe *fcoe; 412 struct ixgbe_fcoe_ddp *ddp; 413 struct fc_frame_header *fh; 414 struct fcoe_crc_eof *crc; 415 __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); 416 __le32 ddp_err; 417 int ddp_max; 418 u32 fctl; 419 u16 xid; 420 421 if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC)) 422 skb->ip_summed = CHECKSUM_NONE; 423 else 424 skb->ip_summed = CHECKSUM_UNNECESSARY; 425 426 if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) 427 fh = (struct fc_frame_header *)(skb->data + 428 sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr)); 429 else 430 fh = (struct fc_frame_header *)(skb->data + 431 sizeof(struct fcoe_hdr)); 432 433 fctl = ntoh24(fh->fh_f_ctl); 434 if (fctl & FC_FC_EX_CTX) 435 xid = be16_to_cpu(fh->fh_ox_id); 436 else 437 xid = be16_to_cpu(fh->fh_rx_id); 438 439 ddp_max = IXGBE_FCOE_DDP_MAX; 440 /* X550 has different DDP Max limit */ 441 if (adapter->hw.mac.type == ixgbe_mac_X550) 442 ddp_max = IXGBE_FCOE_DDP_MAX_X550; 443 if (xid >= ddp_max) 444 return -EINVAL; 445 446 fcoe = &adapter->fcoe; 447 ddp = &fcoe->ddp[xid]; 448 if (!ddp->udl) 449 return -EINVAL; 450 451 ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE | 452 IXGBE_RXDADV_ERR_FCERR); 453 if (ddp_err) 454 return -EINVAL; 455 456 switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) { 457 /* return 0 to bypass going to ULD for DDPed data */ 458 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP): 459 /* update length of DDPed data */ 460 ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 461 rc = 0; 462 break; 463 /* unmap the sg list when FCPRSP is received */ 464 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): 465 dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, 466 ddp->sgc, DMA_FROM_DEVICE); 467 ddp->err = ddp_err; 468 ddp->sgl = NULL; 469 ddp->sgc = 0; 470 /* fall through */ 471 /* if DDP length is present pass it through to ULD */ 472 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP): 473 /* update length of DDPed data */ 474 ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 475 if (ddp->len) 476 rc = ddp->len; 477 break; 478 /* no match will return as an error */ 479 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH): 480 default: 481 break; 482 } 483 484 /* In target mode, check the last data frame of the sequence. 485 * For DDP in target mode, data is already DDPed but the header 486 * indication of the last data frame ould allow is to tell if we 487 * got all the data and the ULP can send FCP_RSP back, as this is 488 * not a full fcoe frame, we fill the trailer here so it won't be 489 * dropped by the ULP stack. 490 */ 491 if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && 492 (fctl & FC_FC_END_SEQ)) { 493 skb_linearize(skb); 494 crc = skb_put(skb, sizeof(*crc)); 495 crc->fcoe_eof = FC_EOF_T; 496 } 497 498 return rc; 499 } 500 501 /** 502 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) 503 * @tx_ring: tx desc ring 504 * @first: first tx_buffer structure containing skb, tx_flags, and protocol 505 * @hdr_len: hdr_len to be returned 506 * 507 * This sets up large send offload for FCoE 508 * 509 * Returns : 0 indicates success, < 0 for error 510 */ 511 int ixgbe_fso(struct ixgbe_ring *tx_ring, 512 struct ixgbe_tx_buffer *first, 513 u8 *hdr_len) 514 { 515 struct sk_buff *skb = first->skb; 516 struct fc_frame_header *fh; 517 u32 vlan_macip_lens; 518 u32 fcoe_sof_eof = 0; 519 u32 mss_l4len_idx; 520 u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; 521 u8 sof, eof; 522 523 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { 524 dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", 525 skb_shinfo(skb)->gso_type); 526 return -EINVAL; 527 } 528 529 /* resets the header to point fcoe/fc */ 530 skb_set_network_header(skb, skb->mac_len); 531 skb_set_transport_header(skb, skb->mac_len + 532 sizeof(struct fcoe_hdr)); 533 534 /* sets up SOF and ORIS */ 535 sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; 536 switch (sof) { 537 case FC_SOF_I2: 538 fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS; 539 break; 540 case FC_SOF_I3: 541 fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF | 542 IXGBE_ADVTXD_FCOEF_ORIS; 543 break; 544 case FC_SOF_N2: 545 break; 546 case FC_SOF_N3: 547 fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF; 548 break; 549 default: 550 dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); 551 return -EINVAL; 552 } 553 554 /* the first byte of the last dword is EOF */ 555 skb_copy_bits(skb, skb->len - 4, &eof, 1); 556 /* sets up EOF and ORIE */ 557 switch (eof) { 558 case FC_EOF_N: 559 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N; 560 break; 561 case FC_EOF_T: 562 /* lso needs ORIE */ 563 if (skb_is_gso(skb)) 564 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N | 565 IXGBE_ADVTXD_FCOEF_ORIE; 566 else 567 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T; 568 break; 569 case FC_EOF_NI: 570 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI; 571 break; 572 case FC_EOF_A: 573 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; 574 break; 575 default: 576 dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); 577 return -EINVAL; 578 } 579 580 /* sets up PARINC indicating data offset */ 581 fh = (struct fc_frame_header *)skb_transport_header(skb); 582 if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) 583 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC; 584 585 /* include trailer in headlen as it is replicated per frame */ 586 *hdr_len = sizeof(struct fcoe_crc_eof); 587 588 /* hdr_len includes fc_hdr if FCoE LSO is enabled */ 589 if (skb_is_gso(skb)) { 590 *hdr_len += skb_transport_offset(skb) + 591 sizeof(struct fc_frame_header); 592 /* update gso_segs and bytecount */ 593 first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, 594 skb_shinfo(skb)->gso_size); 595 first->bytecount += (first->gso_segs - 1) * *hdr_len; 596 first->tx_flags |= IXGBE_TX_FLAGS_TSO; 597 /* Hardware expects L4T to be RSV for FCoE TSO */ 598 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV; 599 } 600 601 /* set flag indicating FCOE to ixgbe_tx_map call */ 602 first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC; 603 604 /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */ 605 mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 606 607 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 608 vlan_macip_lens = skb_transport_offset(skb) + 609 sizeof(struct fc_frame_header); 610 vlan_macip_lens |= (skb_transport_offset(skb) - 4) 611 << IXGBE_ADVTXD_MACLEN_SHIFT; 612 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 613 614 /* write context desc */ 615 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, 616 type_tucmd, mss_l4len_idx); 617 618 return 0; 619 } 620 621 static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) 622 { 623 struct ixgbe_fcoe_ddp_pool *ddp_pool; 624 625 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); 626 dma_pool_destroy(ddp_pool->pool); 627 ddp_pool->pool = NULL; 628 } 629 630 static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, 631 struct device *dev, 632 unsigned int cpu) 633 { 634 struct ixgbe_fcoe_ddp_pool *ddp_pool; 635 struct dma_pool *pool; 636 char pool_name[32]; 637 638 snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu); 639 640 pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, 641 IXGBE_FCPTR_ALIGN, PAGE_SIZE); 642 if (!pool) 643 return -ENOMEM; 644 645 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); 646 ddp_pool->pool = pool; 647 ddp_pool->noddp = 0; 648 ddp_pool->noddp_ext_buff = 0; 649 650 return 0; 651 } 652 653 /** 654 * ixgbe_configure_fcoe - configures registers for fcoe at start 655 * @adapter: ptr to ixgbe adapter 656 * 657 * This sets up FCoE related registers 658 * 659 * Returns : none 660 */ 661 void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) 662 { 663 struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; 664 struct ixgbe_hw *hw = &adapter->hw; 665 int i, fcoe_q, fcoe_i, fcoe_q_h = 0; 666 int fcreta_size; 667 u32 etqf; 668 669 /* Minimal functionality for FCoE requires at least CRC offloads */ 670 if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) 671 return; 672 673 /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */ 674 etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN; 675 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 676 etqf |= IXGBE_ETQF_POOL_ENABLE; 677 etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; 678 } 679 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); 680 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); 681 682 /* leave registers un-configured if FCoE is disabled */ 683 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 684 return; 685 686 /* Use one or more Rx queues for FCoE by redirection table */ 687 fcreta_size = IXGBE_FCRETA_SIZE; 688 if (adapter->hw.mac.type == ixgbe_mac_X550) 689 fcreta_size = IXGBE_FCRETA_SIZE_X550; 690 691 for (i = 0; i < fcreta_size; i++) { 692 if (adapter->hw.mac.type == ixgbe_mac_X550) { 693 int fcoe_i_h = fcoe->offset + ((i + fcreta_size) % 694 fcoe->indices); 695 fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx; 696 fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & 697 IXGBE_FCRETA_ENTRY_HIGH_MASK; 698 } 699 700 fcoe_i = fcoe->offset + (i % fcoe->indices); 701 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; 702 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; 703 fcoe_q |= fcoe_q_h; 704 IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); 705 } 706 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); 707 708 /* Enable L2 EtherType filter for FIP */ 709 etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN; 710 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 711 etqf |= IXGBE_ETQF_POOL_ENABLE; 712 etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; 713 } 714 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf); 715 716 /* Send FIP frames to the first FCoE queue */ 717 fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx; 718 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), 719 IXGBE_ETQS_QUEUE_EN | 720 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); 721 722 /* Configure FCoE Rx control */ 723 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, 724 IXGBE_FCRXCTRL_FCCRCBO | 725 (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); 726 } 727 728 /** 729 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources 730 * @adapter : ixgbe adapter 731 * 732 * Cleans up outstanding ddp context resources 733 * 734 * Returns : none 735 */ 736 void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) 737 { 738 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 739 int cpu, i, ddp_max; 740 741 /* do nothing if no DDP pools were allocated */ 742 if (!fcoe->ddp_pool) 743 return; 744 745 ddp_max = IXGBE_FCOE_DDP_MAX; 746 /* X550 has different DDP Max limit */ 747 if (adapter->hw.mac.type == ixgbe_mac_X550) 748 ddp_max = IXGBE_FCOE_DDP_MAX_X550; 749 750 for (i = 0; i < ddp_max; i++) 751 ixgbe_fcoe_ddp_put(adapter->netdev, i); 752 753 for_each_possible_cpu(cpu) 754 ixgbe_fcoe_dma_pool_free(fcoe, cpu); 755 756 dma_unmap_single(&adapter->pdev->dev, 757 fcoe->extra_ddp_buffer_dma, 758 IXGBE_FCBUFF_MIN, 759 DMA_FROM_DEVICE); 760 kfree(fcoe->extra_ddp_buffer); 761 762 fcoe->extra_ddp_buffer = NULL; 763 fcoe->extra_ddp_buffer_dma = 0; 764 } 765 766 /** 767 * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources 768 * @adapter: ixgbe adapter 769 * 770 * Sets up ddp context resouces 771 * 772 * Returns : 0 indicates success or -EINVAL on failure 773 */ 774 int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) 775 { 776 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 777 struct device *dev = &adapter->pdev->dev; 778 void *buffer; 779 dma_addr_t dma; 780 unsigned int cpu; 781 782 /* do nothing if no DDP pools were allocated */ 783 if (!fcoe->ddp_pool) 784 return 0; 785 786 /* Extra buffer to be shared by all DDPs for HW work around */ 787 buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); 788 if (!buffer) 789 return -ENOMEM; 790 791 dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); 792 if (dma_mapping_error(dev, dma)) { 793 e_err(drv, "failed to map extra DDP buffer\n"); 794 kfree(buffer); 795 return -ENOMEM; 796 } 797 798 fcoe->extra_ddp_buffer = buffer; 799 fcoe->extra_ddp_buffer_dma = dma; 800 801 /* allocate pci pool for each cpu */ 802 for_each_possible_cpu(cpu) { 803 int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); 804 if (!err) 805 continue; 806 807 e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); 808 ixgbe_free_fcoe_ddp_resources(adapter); 809 return -ENOMEM; 810 } 811 812 return 0; 813 } 814 815 static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) 816 { 817 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 818 819 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) 820 return -EINVAL; 821 822 fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool); 823 824 if (!fcoe->ddp_pool) { 825 e_err(drv, "failed to allocate percpu DDP resources\n"); 826 return -ENOMEM; 827 } 828 829 adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; 830 /* X550 has different DDP Max limit */ 831 if (adapter->hw.mac.type == ixgbe_mac_X550) 832 adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; 833 834 return 0; 835 } 836 837 static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) 838 { 839 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 840 841 adapter->netdev->fcoe_ddp_xid = 0; 842 843 if (!fcoe->ddp_pool) 844 return; 845 846 free_percpu(fcoe->ddp_pool); 847 fcoe->ddp_pool = NULL; 848 } 849 850 /** 851 * ixgbe_fcoe_enable - turn on FCoE offload feature 852 * @netdev: the corresponding netdev 853 * 854 * Turns on FCoE offload feature in 82599. 855 * 856 * Returns : 0 indicates success or -EINVAL on failure 857 */ 858 int ixgbe_fcoe_enable(struct net_device *netdev) 859 { 860 struct ixgbe_adapter *adapter = netdev_priv(netdev); 861 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 862 863 atomic_inc(&fcoe->refcnt); 864 865 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) 866 return -EINVAL; 867 868 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 869 return -EINVAL; 870 871 e_info(drv, "Enabling FCoE offload features.\n"); 872 873 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 874 e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n"); 875 876 if (netif_running(netdev)) 877 netdev->netdev_ops->ndo_stop(netdev); 878 879 /* Allocate per CPU memory to track DDP pools */ 880 ixgbe_fcoe_ddp_enable(adapter); 881 882 /* enable FCoE and notify stack */ 883 adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; 884 netdev->features |= NETIF_F_FCOE_MTU; 885 netdev_features_change(netdev); 886 887 /* release existing queues and reallocate them */ 888 ixgbe_clear_interrupt_scheme(adapter); 889 ixgbe_init_interrupt_scheme(adapter); 890 891 if (netif_running(netdev)) 892 netdev->netdev_ops->ndo_open(netdev); 893 894 return 0; 895 } 896 897 /** 898 * ixgbe_fcoe_disable - turn off FCoE offload feature 899 * @netdev: the corresponding netdev 900 * 901 * Turns off FCoE offload feature in 82599. 902 * 903 * Returns : 0 indicates success or -EINVAL on failure 904 */ 905 int ixgbe_fcoe_disable(struct net_device *netdev) 906 { 907 struct ixgbe_adapter *adapter = netdev_priv(netdev); 908 909 if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) 910 return -EINVAL; 911 912 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 913 return -EINVAL; 914 915 e_info(drv, "Disabling FCoE offload features.\n"); 916 if (netif_running(netdev)) 917 netdev->netdev_ops->ndo_stop(netdev); 918 919 /* Free per CPU memory to track DDP pools */ 920 ixgbe_fcoe_ddp_disable(adapter); 921 922 /* disable FCoE and notify stack */ 923 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 924 netdev->features &= ~NETIF_F_FCOE_MTU; 925 926 netdev_features_change(netdev); 927 928 /* release existing queues and reallocate them */ 929 ixgbe_clear_interrupt_scheme(adapter); 930 ixgbe_init_interrupt_scheme(adapter); 931 932 if (netif_running(netdev)) 933 netdev->netdev_ops->ndo_open(netdev); 934 935 return 0; 936 } 937 938 /** 939 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port 940 * @netdev : ixgbe adapter 941 * @wwn : the world wide name 942 * @type: the type of world wide name 943 * 944 * Returns the node or port world wide name if both the prefix and the san 945 * mac address are valid, then the wwn is formed based on the NAA-2 for 946 * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). 947 * 948 * Returns : 0 on success 949 */ 950 int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) 951 { 952 u16 prefix = 0xffff; 953 struct ixgbe_adapter *adapter = netdev_priv(netdev); 954 struct ixgbe_mac_info *mac = &adapter->hw.mac; 955 956 switch (type) { 957 case NETDEV_FCOE_WWNN: 958 prefix = mac->wwnn_prefix; 959 break; 960 case NETDEV_FCOE_WWPN: 961 prefix = mac->wwpn_prefix; 962 break; 963 default: 964 break; 965 } 966 967 if ((prefix != 0xffff) && 968 is_valid_ether_addr(mac->san_addr)) { 969 *wwn = ((u64) prefix << 48) | 970 ((u64) mac->san_addr[0] << 40) | 971 ((u64) mac->san_addr[1] << 32) | 972 ((u64) mac->san_addr[2] << 24) | 973 ((u64) mac->san_addr[3] << 16) | 974 ((u64) mac->san_addr[4] << 8) | 975 ((u64) mac->san_addr[5]); 976 return 0; 977 } 978 return -EINVAL; 979 } 980 981 /** 982 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information 983 * @netdev : ixgbe adapter 984 * @info : HBA information 985 * 986 * Returns ixgbe HBA information 987 * 988 * Returns : 0 on success 989 */ 990 int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, 991 struct netdev_fcoe_hbainfo *info) 992 { 993 struct ixgbe_adapter *adapter = netdev_priv(netdev); 994 struct ixgbe_hw *hw = &adapter->hw; 995 int i, pos; 996 u8 buf[8]; 997 998 if (!info) 999 return -EINVAL; 1000 1001 /* Don't return information on unsupported devices */ 1002 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 1003 return -EINVAL; 1004 1005 /* Manufacturer */ 1006 snprintf(info->manufacturer, sizeof(info->manufacturer), 1007 "Intel Corporation"); 1008 1009 /* Serial Number */ 1010 1011 /* Get the PCI-e Device Serial Number Capability */ 1012 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN); 1013 if (pos) { 1014 pos += 4; 1015 for (i = 0; i < 8; i++) 1016 pci_read_config_byte(adapter->pdev, pos + i, &buf[i]); 1017 1018 snprintf(info->serial_number, sizeof(info->serial_number), 1019 "%02X%02X%02X%02X%02X%02X%02X%02X", 1020 buf[7], buf[6], buf[5], buf[4], 1021 buf[3], buf[2], buf[1], buf[0]); 1022 } else 1023 snprintf(info->serial_number, sizeof(info->serial_number), 1024 "Unknown"); 1025 1026 /* Hardware Version */ 1027 snprintf(info->hardware_version, 1028 sizeof(info->hardware_version), 1029 "Rev %d", hw->revision_id); 1030 /* Driver Name/Version */ 1031 snprintf(info->driver_version, 1032 sizeof(info->driver_version), 1033 "%s v%s", 1034 ixgbe_driver_name, 1035 ixgbe_driver_version); 1036 /* Firmware Version */ 1037 snprintf(info->firmware_version, 1038 sizeof(info->firmware_version), 1039 "0x%08x", 1040 (adapter->eeprom_verh << 16) | 1041 adapter->eeprom_verl); 1042 1043 /* Model */ 1044 if (hw->mac.type == ixgbe_mac_82599EB) { 1045 snprintf(info->model, 1046 sizeof(info->model), 1047 "Intel 82599"); 1048 } else if (hw->mac.type == ixgbe_mac_X550) { 1049 snprintf(info->model, 1050 sizeof(info->model), 1051 "Intel X550"); 1052 } else { 1053 snprintf(info->model, 1054 sizeof(info->model), 1055 "Intel X540"); 1056 } 1057 1058 /* Model Description */ 1059 snprintf(info->model_description, 1060 sizeof(info->model_description), 1061 "%s", 1062 ixgbe_default_device_descr); 1063 1064 return 0; 1065 } 1066 1067 /** 1068 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to 1069 * @adapter - pointer to the device adapter structure 1070 * 1071 * Return : TC that FCoE is mapped to 1072 */ 1073 u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) 1074 { 1075 #ifdef CONFIG_IXGBE_DCB 1076 return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); 1077 #else 1078 return 0; 1079 #endif 1080 } 1081