1 /******************************************************************************* 2 3 Intel 10 Gigabit PCI Express Linux driver 4 Copyright(c) 1999 - 2014 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 Linux NICS <linux.nics@intel.com> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 27 *******************************************************************************/ 28 29 #include "ixgbe.h" 30 #include <linux/if_ether.h> 31 #include <linux/gfp.h> 32 #include <linux/if_vlan.h> 33 #include <scsi/scsi_cmnd.h> 34 #include <scsi/scsi_device.h> 35 #include <scsi/fc/fc_fs.h> 36 #include <scsi/fc/fc_fcoe.h> 37 #include <scsi/libfc.h> 38 #include <scsi/libfcoe.h> 39 40 /** 41 * ixgbe_fcoe_clear_ddp - clear the given ddp context 42 * @ddp: ptr to the ixgbe_fcoe_ddp 43 * 44 * Returns : none 45 * 46 */ 47 static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) 48 { 49 ddp->len = 0; 50 ddp->err = 1; 51 ddp->udl = NULL; 52 ddp->udp = 0UL; 53 ddp->sgl = NULL; 54 ddp->sgc = 0; 55 } 56 57 /** 58 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid 59 * @netdev: the corresponding net_device 60 * @xid: the xid that corresponding ddp will be freed 61 * 62 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done 63 * and it is expected to be called by ULD, i.e., FCP layer of libfc 64 * to release the corresponding ddp context when the I/O is done. 65 * 66 * Returns : data length already ddp-ed in bytes 67 */ 68 int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) 69 { 70 int len; 71 struct ixgbe_fcoe *fcoe; 72 struct ixgbe_adapter *adapter; 73 struct ixgbe_fcoe_ddp *ddp; 74 struct ixgbe_hw *hw; 75 u32 fcbuff; 76 77 if (!netdev) 78 return 0; 79 80 if (xid >= netdev->fcoe_ddp_xid) 81 return 0; 82 83 adapter = netdev_priv(netdev); 84 fcoe = &adapter->fcoe; 85 ddp = &fcoe->ddp[xid]; 86 if (!ddp->udl) 87 return 0; 88 89 hw = &adapter->hw; 90 len = ddp->len; 91 /* if no error then skip ddp context invalidation */ 92 if (!ddp->err) 93 goto skip_ddpinv; 94 95 if (hw->mac.type == ixgbe_mac_X550) { 96 /* X550 does not require DDP FCoE lock */ 97 98 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); 99 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), 100 (xid | IXGBE_FCFLTRW_WE)); 101 102 /* program FCBUFF */ 103 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); 104 105 /* program FCDMARW */ 106 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), 107 (xid | IXGBE_FCDMARW_WE)); 108 109 /* read FCBUFF to check context invalidated */ 110 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), 111 (xid | IXGBE_FCDMARW_RE)); 112 fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid)); 113 } else { 114 /* other hardware requires DDP FCoE lock */ 115 spin_lock_bh(&fcoe->lock); 116 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); 117 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, 118 (xid | IXGBE_FCFLTRW_WE)); 119 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); 120 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, 121 (xid | IXGBE_FCDMARW_WE)); 122 123 /* guaranteed to be invalidated after 100us */ 124 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, 125 (xid | IXGBE_FCDMARW_RE)); 126 fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); 127 spin_unlock_bh(&fcoe->lock); 128 } 129 130 if (fcbuff & IXGBE_FCBUFF_VALID) 131 usleep_range(100, 150); 132 133 skip_ddpinv: 134 if (ddp->sgl) 135 dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, 136 DMA_FROM_DEVICE); 137 if (ddp->pool) { 138 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); 139 ddp->pool = NULL; 140 } 141 142 ixgbe_fcoe_clear_ddp(ddp); 143 144 return len; 145 } 146 147 /** 148 * ixgbe_fcoe_ddp_setup - called to set up ddp context 149 * @netdev: the corresponding net_device 150 * @xid: the exchange id requesting ddp 151 * @sgl: the scatter-gather list for this request 152 * @sgc: the number of scatter-gather items 153 * @target_mode: 1 to setup target mode, 0 to setup initiator mode 154 * 155 * Returns : 1 for success and 0 for no ddp 156 */ 157 static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, 158 struct scatterlist *sgl, unsigned int sgc, 159 int target_mode) 160 { 161 struct ixgbe_adapter *adapter; 162 struct ixgbe_hw *hw; 163 struct ixgbe_fcoe *fcoe; 164 struct ixgbe_fcoe_ddp *ddp; 165 struct ixgbe_fcoe_ddp_pool *ddp_pool; 166 struct scatterlist *sg; 167 unsigned int i, j, dmacount; 168 unsigned int len; 169 static const unsigned int bufflen = IXGBE_FCBUFF_MIN; 170 unsigned int firstoff = 0; 171 unsigned int lastsize; 172 unsigned int thisoff = 0; 173 unsigned int thislen = 0; 174 u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; 175 dma_addr_t addr = 0; 176 177 if (!netdev || !sgl) 178 return 0; 179 180 adapter = netdev_priv(netdev); 181 if (xid >= netdev->fcoe_ddp_xid) { 182 e_warn(drv, "xid=0x%x out-of-range\n", xid); 183 return 0; 184 } 185 186 /* no DDP if we are already down or resetting */ 187 if (test_bit(__IXGBE_DOWN, &adapter->state) || 188 test_bit(__IXGBE_RESETTING, &adapter->state)) 189 return 0; 190 191 fcoe = &adapter->fcoe; 192 ddp = &fcoe->ddp[xid]; 193 if (ddp->sgl) { 194 e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", 195 xid, ddp->sgl, ddp->sgc); 196 return 0; 197 } 198 ixgbe_fcoe_clear_ddp(ddp); 199 200 201 if (!fcoe->ddp_pool) { 202 e_warn(drv, "No ddp_pool resources allocated\n"); 203 return 0; 204 } 205 206 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); 207 if (!ddp_pool->pool) { 208 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); 209 goto out_noddp; 210 } 211 212 /* setup dma from scsi command sgl */ 213 dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); 214 if (dmacount == 0) { 215 e_err(drv, "xid 0x%x DMA map error\n", xid); 216 goto out_noddp; 217 } 218 219 /* alloc the udl from per cpu ddp pool */ 220 ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); 221 if (!ddp->udl) { 222 e_err(drv, "failed allocated ddp context\n"); 223 goto out_noddp_unmap; 224 } 225 ddp->pool = ddp_pool->pool; 226 ddp->sgl = sgl; 227 ddp->sgc = sgc; 228 229 j = 0; 230 for_each_sg(sgl, sg, dmacount, i) { 231 addr = sg_dma_address(sg); 232 len = sg_dma_len(sg); 233 while (len) { 234 /* max number of buffers allowed in one DDP context */ 235 if (j >= IXGBE_BUFFCNT_MAX) { 236 ddp_pool->noddp++; 237 goto out_noddp_free; 238 } 239 240 /* get the offset of length of current buffer */ 241 thisoff = addr & ((dma_addr_t)bufflen - 1); 242 thislen = min((bufflen - thisoff), len); 243 /* 244 * all but the 1st buffer (j == 0) 245 * must be aligned on bufflen 246 */ 247 if ((j != 0) && (thisoff)) 248 goto out_noddp_free; 249 /* 250 * all but the last buffer 251 * ((i == (dmacount - 1)) && (thislen == len)) 252 * must end at bufflen 253 */ 254 if (((i != (dmacount - 1)) || (thislen != len)) 255 && ((thislen + thisoff) != bufflen)) 256 goto out_noddp_free; 257 258 ddp->udl[j] = (u64)(addr - thisoff); 259 /* only the first buffer may have none-zero offset */ 260 if (j == 0) 261 firstoff = thisoff; 262 len -= thislen; 263 addr += thislen; 264 j++; 265 } 266 } 267 /* only the last buffer may have non-full bufflen */ 268 lastsize = thisoff + thislen; 269 270 /* 271 * lastsize can not be buffer len. 272 * If it is then adding another buffer with lastsize = 1. 273 */ 274 if (lastsize == bufflen) { 275 if (j >= IXGBE_BUFFCNT_MAX) { 276 ddp_pool->noddp_ext_buff++; 277 goto out_noddp_free; 278 } 279 280 ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); 281 j++; 282 lastsize = 1; 283 } 284 put_cpu(); 285 286 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); 287 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); 288 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); 289 /* Set WRCONTX bit to allow DDP for target */ 290 if (target_mode) 291 fcbuff |= (IXGBE_FCBUFF_WRCONTX); 292 fcbuff |= (IXGBE_FCBUFF_VALID); 293 294 fcdmarw = xid; 295 fcdmarw |= IXGBE_FCDMARW_WE; 296 fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT); 297 298 fcfltrw = xid; 299 fcfltrw |= IXGBE_FCFLTRW_WE; 300 301 /* program DMA context */ 302 hw = &adapter->hw; 303 304 /* turn on last frame indication for target mode as FCP_RSPtarget is 305 * supposed to send FCP_RSP when it is done. */ 306 if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) { 307 set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode); 308 fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL); 309 fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH; 310 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); 311 } 312 313 if (hw->mac.type == ixgbe_mac_X550) { 314 /* X550 does not require DDP lock */ 315 316 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid), 317 ddp->udp & DMA_BIT_MASK(32)); 318 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32); 319 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff); 320 IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw); 321 /* program filter context */ 322 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID); 323 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0); 324 IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw); 325 } else { 326 /* DDP lock for indirect DDP context access */ 327 spin_lock_bh(&fcoe->lock); 328 329 IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); 330 IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); 331 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); 332 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); 333 /* program filter context */ 334 IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); 335 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); 336 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); 337 338 spin_unlock_bh(&fcoe->lock); 339 } 340 341 return 1; 342 343 out_noddp_free: 344 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); 345 ixgbe_fcoe_clear_ddp(ddp); 346 347 out_noddp_unmap: 348 dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); 349 out_noddp: 350 put_cpu(); 351 return 0; 352 } 353 354 /** 355 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode 356 * @netdev: the corresponding net_device 357 * @xid: the exchange id requesting ddp 358 * @sgl: the scatter-gather list for this request 359 * @sgc: the number of scatter-gather items 360 * 361 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup 362 * and is expected to be called from ULD, e.g., FCP layer of libfc 363 * to set up ddp for the corresponding xid of the given sglist for 364 * the corresponding I/O. 365 * 366 * Returns : 1 for success and 0 for no ddp 367 */ 368 int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, 369 struct scatterlist *sgl, unsigned int sgc) 370 { 371 return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); 372 } 373 374 /** 375 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode 376 * @netdev: the corresponding net_device 377 * @xid: the exchange id requesting ddp 378 * @sgl: the scatter-gather list for this request 379 * @sgc: the number of scatter-gather items 380 * 381 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target 382 * and is expected to be called from ULD, e.g., FCP layer of libfc 383 * to set up ddp for the corresponding xid of the given sglist for 384 * the corresponding I/O. The DDP in target mode is a write I/O request 385 * from the initiator. 386 * 387 * Returns : 1 for success and 0 for no ddp 388 */ 389 int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, 390 struct scatterlist *sgl, unsigned int sgc) 391 { 392 return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); 393 } 394 395 /** 396 * ixgbe_fcoe_ddp - check ddp status and mark it done 397 * @adapter: ixgbe adapter 398 * @rx_desc: advanced rx descriptor 399 * @skb: the skb holding the received data 400 * 401 * This checks ddp status. 402 * 403 * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates 404 * not passing the skb to ULD, > 0 indicates is the length of data 405 * being ddped. 406 */ 407 int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, 408 union ixgbe_adv_rx_desc *rx_desc, 409 struct sk_buff *skb) 410 { 411 int rc = -EINVAL; 412 struct ixgbe_fcoe *fcoe; 413 struct ixgbe_fcoe_ddp *ddp; 414 struct fc_frame_header *fh; 415 struct fcoe_crc_eof *crc; 416 __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); 417 __le32 ddp_err; 418 int ddp_max; 419 u32 fctl; 420 u16 xid; 421 422 if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC)) 423 skb->ip_summed = CHECKSUM_NONE; 424 else 425 skb->ip_summed = CHECKSUM_UNNECESSARY; 426 427 if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) 428 fh = (struct fc_frame_header *)(skb->data + 429 sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr)); 430 else 431 fh = (struct fc_frame_header *)(skb->data + 432 sizeof(struct fcoe_hdr)); 433 434 fctl = ntoh24(fh->fh_f_ctl); 435 if (fctl & FC_FC_EX_CTX) 436 xid = be16_to_cpu(fh->fh_ox_id); 437 else 438 xid = be16_to_cpu(fh->fh_rx_id); 439 440 ddp_max = IXGBE_FCOE_DDP_MAX; 441 /* X550 has different DDP Max limit */ 442 if (adapter->hw.mac.type == ixgbe_mac_X550) 443 ddp_max = IXGBE_FCOE_DDP_MAX_X550; 444 if (xid >= ddp_max) 445 return -EINVAL; 446 447 fcoe = &adapter->fcoe; 448 ddp = &fcoe->ddp[xid]; 449 if (!ddp->udl) 450 return -EINVAL; 451 452 ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE | 453 IXGBE_RXDADV_ERR_FCERR); 454 if (ddp_err) 455 return -EINVAL; 456 457 switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) { 458 /* return 0 to bypass going to ULD for DDPed data */ 459 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP): 460 /* update length of DDPed data */ 461 ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 462 rc = 0; 463 break; 464 /* unmap the sg list when FCPRSP is received */ 465 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): 466 dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, 467 ddp->sgc, DMA_FROM_DEVICE); 468 ddp->err = ddp_err; 469 ddp->sgl = NULL; 470 ddp->sgc = 0; 471 /* fall through */ 472 /* if DDP length is present pass it through to ULD */ 473 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP): 474 /* update length of DDPed data */ 475 ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 476 if (ddp->len) 477 rc = ddp->len; 478 break; 479 /* no match will return as an error */ 480 case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH): 481 default: 482 break; 483 } 484 485 /* In target mode, check the last data frame of the sequence. 486 * For DDP in target mode, data is already DDPed but the header 487 * indication of the last data frame ould allow is to tell if we 488 * got all the data and the ULP can send FCP_RSP back, as this is 489 * not a full fcoe frame, we fill the trailer here so it won't be 490 * dropped by the ULP stack. 491 */ 492 if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && 493 (fctl & FC_FC_END_SEQ)) { 494 skb_linearize(skb); 495 crc = skb_put(skb, sizeof(*crc)); 496 crc->fcoe_eof = FC_EOF_T; 497 } 498 499 return rc; 500 } 501 502 /** 503 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) 504 * @tx_ring: tx desc ring 505 * @first: first tx_buffer structure containing skb, tx_flags, and protocol 506 * @hdr_len: hdr_len to be returned 507 * 508 * This sets up large send offload for FCoE 509 * 510 * Returns : 0 indicates success, < 0 for error 511 */ 512 int ixgbe_fso(struct ixgbe_ring *tx_ring, 513 struct ixgbe_tx_buffer *first, 514 u8 *hdr_len) 515 { 516 struct sk_buff *skb = first->skb; 517 struct fc_frame_header *fh; 518 u32 vlan_macip_lens; 519 u32 fcoe_sof_eof = 0; 520 u32 mss_l4len_idx; 521 u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; 522 u8 sof, eof; 523 524 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { 525 dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", 526 skb_shinfo(skb)->gso_type); 527 return -EINVAL; 528 } 529 530 /* resets the header to point fcoe/fc */ 531 skb_set_network_header(skb, skb->mac_len); 532 skb_set_transport_header(skb, skb->mac_len + 533 sizeof(struct fcoe_hdr)); 534 535 /* sets up SOF and ORIS */ 536 sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; 537 switch (sof) { 538 case FC_SOF_I2: 539 fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS; 540 break; 541 case FC_SOF_I3: 542 fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF | 543 IXGBE_ADVTXD_FCOEF_ORIS; 544 break; 545 case FC_SOF_N2: 546 break; 547 case FC_SOF_N3: 548 fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF; 549 break; 550 default: 551 dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); 552 return -EINVAL; 553 } 554 555 /* the first byte of the last dword is EOF */ 556 skb_copy_bits(skb, skb->len - 4, &eof, 1); 557 /* sets up EOF and ORIE */ 558 switch (eof) { 559 case FC_EOF_N: 560 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N; 561 break; 562 case FC_EOF_T: 563 /* lso needs ORIE */ 564 if (skb_is_gso(skb)) 565 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N | 566 IXGBE_ADVTXD_FCOEF_ORIE; 567 else 568 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T; 569 break; 570 case FC_EOF_NI: 571 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI; 572 break; 573 case FC_EOF_A: 574 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; 575 break; 576 default: 577 dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); 578 return -EINVAL; 579 } 580 581 /* sets up PARINC indicating data offset */ 582 fh = (struct fc_frame_header *)skb_transport_header(skb); 583 if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) 584 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC; 585 586 /* include trailer in headlen as it is replicated per frame */ 587 *hdr_len = sizeof(struct fcoe_crc_eof); 588 589 /* hdr_len includes fc_hdr if FCoE LSO is enabled */ 590 if (skb_is_gso(skb)) { 591 *hdr_len += skb_transport_offset(skb) + 592 sizeof(struct fc_frame_header); 593 /* update gso_segs and bytecount */ 594 first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, 595 skb_shinfo(skb)->gso_size); 596 first->bytecount += (first->gso_segs - 1) * *hdr_len; 597 first->tx_flags |= IXGBE_TX_FLAGS_TSO; 598 /* Hardware expects L4T to be RSV for FCoE TSO */ 599 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV; 600 } 601 602 /* set flag indicating FCOE to ixgbe_tx_map call */ 603 first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC; 604 605 /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */ 606 mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 607 608 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 609 vlan_macip_lens = skb_transport_offset(skb) + 610 sizeof(struct fc_frame_header); 611 vlan_macip_lens |= (skb_transport_offset(skb) - 4) 612 << IXGBE_ADVTXD_MACLEN_SHIFT; 613 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 614 615 /* write context desc */ 616 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, 617 type_tucmd, mss_l4len_idx); 618 619 return 0; 620 } 621 622 static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) 623 { 624 struct ixgbe_fcoe_ddp_pool *ddp_pool; 625 626 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); 627 dma_pool_destroy(ddp_pool->pool); 628 ddp_pool->pool = NULL; 629 } 630 631 static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, 632 struct device *dev, 633 unsigned int cpu) 634 { 635 struct ixgbe_fcoe_ddp_pool *ddp_pool; 636 struct dma_pool *pool; 637 char pool_name[32]; 638 639 snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu); 640 641 pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, 642 IXGBE_FCPTR_ALIGN, PAGE_SIZE); 643 if (!pool) 644 return -ENOMEM; 645 646 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); 647 ddp_pool->pool = pool; 648 ddp_pool->noddp = 0; 649 ddp_pool->noddp_ext_buff = 0; 650 651 return 0; 652 } 653 654 /** 655 * ixgbe_configure_fcoe - configures registers for fcoe at start 656 * @adapter: ptr to ixgbe adapter 657 * 658 * This sets up FCoE related registers 659 * 660 * Returns : none 661 */ 662 void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) 663 { 664 struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; 665 struct ixgbe_hw *hw = &adapter->hw; 666 int i, fcoe_q, fcoe_i, fcoe_q_h = 0; 667 int fcreta_size; 668 u32 etqf; 669 670 /* Minimal functionality for FCoE requires at least CRC offloads */ 671 if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) 672 return; 673 674 /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */ 675 etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN; 676 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 677 etqf |= IXGBE_ETQF_POOL_ENABLE; 678 etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; 679 } 680 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); 681 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); 682 683 /* leave registers un-configured if FCoE is disabled */ 684 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 685 return; 686 687 /* Use one or more Rx queues for FCoE by redirection table */ 688 fcreta_size = IXGBE_FCRETA_SIZE; 689 if (adapter->hw.mac.type == ixgbe_mac_X550) 690 fcreta_size = IXGBE_FCRETA_SIZE_X550; 691 692 for (i = 0; i < fcreta_size; i++) { 693 if (adapter->hw.mac.type == ixgbe_mac_X550) { 694 int fcoe_i_h = fcoe->offset + ((i + fcreta_size) % 695 fcoe->indices); 696 fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx; 697 fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & 698 IXGBE_FCRETA_ENTRY_HIGH_MASK; 699 } 700 701 fcoe_i = fcoe->offset + (i % fcoe->indices); 702 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; 703 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; 704 fcoe_q |= fcoe_q_h; 705 IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); 706 } 707 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); 708 709 /* Enable L2 EtherType filter for FIP */ 710 etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN; 711 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 712 etqf |= IXGBE_ETQF_POOL_ENABLE; 713 etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; 714 } 715 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf); 716 717 /* Send FIP frames to the first FCoE queue */ 718 fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx; 719 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), 720 IXGBE_ETQS_QUEUE_EN | 721 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); 722 723 /* Configure FCoE Rx control */ 724 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, 725 IXGBE_FCRXCTRL_FCCRCBO | 726 (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); 727 } 728 729 /** 730 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources 731 * @adapter : ixgbe adapter 732 * 733 * Cleans up outstanding ddp context resources 734 * 735 * Returns : none 736 */ 737 void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) 738 { 739 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 740 int cpu, i, ddp_max; 741 742 /* do nothing if no DDP pools were allocated */ 743 if (!fcoe->ddp_pool) 744 return; 745 746 ddp_max = IXGBE_FCOE_DDP_MAX; 747 /* X550 has different DDP Max limit */ 748 if (adapter->hw.mac.type == ixgbe_mac_X550) 749 ddp_max = IXGBE_FCOE_DDP_MAX_X550; 750 751 for (i = 0; i < ddp_max; i++) 752 ixgbe_fcoe_ddp_put(adapter->netdev, i); 753 754 for_each_possible_cpu(cpu) 755 ixgbe_fcoe_dma_pool_free(fcoe, cpu); 756 757 dma_unmap_single(&adapter->pdev->dev, 758 fcoe->extra_ddp_buffer_dma, 759 IXGBE_FCBUFF_MIN, 760 DMA_FROM_DEVICE); 761 kfree(fcoe->extra_ddp_buffer); 762 763 fcoe->extra_ddp_buffer = NULL; 764 fcoe->extra_ddp_buffer_dma = 0; 765 } 766 767 /** 768 * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources 769 * @adapter: ixgbe adapter 770 * 771 * Sets up ddp context resouces 772 * 773 * Returns : 0 indicates success or -EINVAL on failure 774 */ 775 int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) 776 { 777 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 778 struct device *dev = &adapter->pdev->dev; 779 void *buffer; 780 dma_addr_t dma; 781 unsigned int cpu; 782 783 /* do nothing if no DDP pools were allocated */ 784 if (!fcoe->ddp_pool) 785 return 0; 786 787 /* Extra buffer to be shared by all DDPs for HW work around */ 788 buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); 789 if (!buffer) 790 return -ENOMEM; 791 792 dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); 793 if (dma_mapping_error(dev, dma)) { 794 e_err(drv, "failed to map extra DDP buffer\n"); 795 kfree(buffer); 796 return -ENOMEM; 797 } 798 799 fcoe->extra_ddp_buffer = buffer; 800 fcoe->extra_ddp_buffer_dma = dma; 801 802 /* allocate pci pool for each cpu */ 803 for_each_possible_cpu(cpu) { 804 int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); 805 if (!err) 806 continue; 807 808 e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); 809 ixgbe_free_fcoe_ddp_resources(adapter); 810 return -ENOMEM; 811 } 812 813 return 0; 814 } 815 816 static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) 817 { 818 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 819 820 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) 821 return -EINVAL; 822 823 fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool); 824 825 if (!fcoe->ddp_pool) { 826 e_err(drv, "failed to allocate percpu DDP resources\n"); 827 return -ENOMEM; 828 } 829 830 adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; 831 /* X550 has different DDP Max limit */ 832 if (adapter->hw.mac.type == ixgbe_mac_X550) 833 adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; 834 835 return 0; 836 } 837 838 static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) 839 { 840 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 841 842 adapter->netdev->fcoe_ddp_xid = 0; 843 844 if (!fcoe->ddp_pool) 845 return; 846 847 free_percpu(fcoe->ddp_pool); 848 fcoe->ddp_pool = NULL; 849 } 850 851 /** 852 * ixgbe_fcoe_enable - turn on FCoE offload feature 853 * @netdev: the corresponding netdev 854 * 855 * Turns on FCoE offload feature in 82599. 856 * 857 * Returns : 0 indicates success or -EINVAL on failure 858 */ 859 int ixgbe_fcoe_enable(struct net_device *netdev) 860 { 861 struct ixgbe_adapter *adapter = netdev_priv(netdev); 862 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 863 864 atomic_inc(&fcoe->refcnt); 865 866 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) 867 return -EINVAL; 868 869 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 870 return -EINVAL; 871 872 e_info(drv, "Enabling FCoE offload features.\n"); 873 874 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 875 e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n"); 876 877 if (netif_running(netdev)) 878 netdev->netdev_ops->ndo_stop(netdev); 879 880 /* Allocate per CPU memory to track DDP pools */ 881 ixgbe_fcoe_ddp_enable(adapter); 882 883 /* enable FCoE and notify stack */ 884 adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; 885 netdev->features |= NETIF_F_FCOE_MTU; 886 netdev_features_change(netdev); 887 888 /* release existing queues and reallocate them */ 889 ixgbe_clear_interrupt_scheme(adapter); 890 ixgbe_init_interrupt_scheme(adapter); 891 892 if (netif_running(netdev)) 893 netdev->netdev_ops->ndo_open(netdev); 894 895 return 0; 896 } 897 898 /** 899 * ixgbe_fcoe_disable - turn off FCoE offload feature 900 * @netdev: the corresponding netdev 901 * 902 * Turns off FCoE offload feature in 82599. 903 * 904 * Returns : 0 indicates success or -EINVAL on failure 905 */ 906 int ixgbe_fcoe_disable(struct net_device *netdev) 907 { 908 struct ixgbe_adapter *adapter = netdev_priv(netdev); 909 910 if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) 911 return -EINVAL; 912 913 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 914 return -EINVAL; 915 916 e_info(drv, "Disabling FCoE offload features.\n"); 917 if (netif_running(netdev)) 918 netdev->netdev_ops->ndo_stop(netdev); 919 920 /* Free per CPU memory to track DDP pools */ 921 ixgbe_fcoe_ddp_disable(adapter); 922 923 /* disable FCoE and notify stack */ 924 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 925 netdev->features &= ~NETIF_F_FCOE_MTU; 926 927 netdev_features_change(netdev); 928 929 /* release existing queues and reallocate them */ 930 ixgbe_clear_interrupt_scheme(adapter); 931 ixgbe_init_interrupt_scheme(adapter); 932 933 if (netif_running(netdev)) 934 netdev->netdev_ops->ndo_open(netdev); 935 936 return 0; 937 } 938 939 /** 940 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port 941 * @netdev : ixgbe adapter 942 * @wwn : the world wide name 943 * @type: the type of world wide name 944 * 945 * Returns the node or port world wide name if both the prefix and the san 946 * mac address are valid, then the wwn is formed based on the NAA-2 for 947 * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). 948 * 949 * Returns : 0 on success 950 */ 951 int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) 952 { 953 u16 prefix = 0xffff; 954 struct ixgbe_adapter *adapter = netdev_priv(netdev); 955 struct ixgbe_mac_info *mac = &adapter->hw.mac; 956 957 switch (type) { 958 case NETDEV_FCOE_WWNN: 959 prefix = mac->wwnn_prefix; 960 break; 961 case NETDEV_FCOE_WWPN: 962 prefix = mac->wwpn_prefix; 963 break; 964 default: 965 break; 966 } 967 968 if ((prefix != 0xffff) && 969 is_valid_ether_addr(mac->san_addr)) { 970 *wwn = ((u64) prefix << 48) | 971 ((u64) mac->san_addr[0] << 40) | 972 ((u64) mac->san_addr[1] << 32) | 973 ((u64) mac->san_addr[2] << 24) | 974 ((u64) mac->san_addr[3] << 16) | 975 ((u64) mac->san_addr[4] << 8) | 976 ((u64) mac->san_addr[5]); 977 return 0; 978 } 979 return -EINVAL; 980 } 981 982 /** 983 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information 984 * @netdev : ixgbe adapter 985 * @info : HBA information 986 * 987 * Returns ixgbe HBA information 988 * 989 * Returns : 0 on success 990 */ 991 int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, 992 struct netdev_fcoe_hbainfo *info) 993 { 994 struct ixgbe_adapter *adapter = netdev_priv(netdev); 995 struct ixgbe_hw *hw = &adapter->hw; 996 int i, pos; 997 u8 buf[8]; 998 999 if (!info) 1000 return -EINVAL; 1001 1002 /* Don't return information on unsupported devices */ 1003 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 1004 return -EINVAL; 1005 1006 /* Manufacturer */ 1007 snprintf(info->manufacturer, sizeof(info->manufacturer), 1008 "Intel Corporation"); 1009 1010 /* Serial Number */ 1011 1012 /* Get the PCI-e Device Serial Number Capability */ 1013 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN); 1014 if (pos) { 1015 pos += 4; 1016 for (i = 0; i < 8; i++) 1017 pci_read_config_byte(adapter->pdev, pos + i, &buf[i]); 1018 1019 snprintf(info->serial_number, sizeof(info->serial_number), 1020 "%02X%02X%02X%02X%02X%02X%02X%02X", 1021 buf[7], buf[6], buf[5], buf[4], 1022 buf[3], buf[2], buf[1], buf[0]); 1023 } else 1024 snprintf(info->serial_number, sizeof(info->serial_number), 1025 "Unknown"); 1026 1027 /* Hardware Version */ 1028 snprintf(info->hardware_version, 1029 sizeof(info->hardware_version), 1030 "Rev %d", hw->revision_id); 1031 /* Driver Name/Version */ 1032 snprintf(info->driver_version, 1033 sizeof(info->driver_version), 1034 "%s v%s", 1035 ixgbe_driver_name, 1036 ixgbe_driver_version); 1037 /* Firmware Version */ 1038 strlcpy(info->firmware_version, adapter->eeprom_id, 1039 sizeof(info->firmware_version)); 1040 1041 /* Model */ 1042 if (hw->mac.type == ixgbe_mac_82599EB) { 1043 snprintf(info->model, 1044 sizeof(info->model), 1045 "Intel 82599"); 1046 } else if (hw->mac.type == ixgbe_mac_X550) { 1047 snprintf(info->model, 1048 sizeof(info->model), 1049 "Intel X550"); 1050 } else { 1051 snprintf(info->model, 1052 sizeof(info->model), 1053 "Intel X540"); 1054 } 1055 1056 /* Model Description */ 1057 snprintf(info->model_description, 1058 sizeof(info->model_description), 1059 "%s", 1060 ixgbe_default_device_descr); 1061 1062 return 0; 1063 } 1064 1065 /** 1066 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to 1067 * @adapter: pointer to the device adapter structure 1068 * 1069 * Return : TC that FCoE is mapped to 1070 */ 1071 u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) 1072 { 1073 #ifdef CONFIG_IXGBE_DCB 1074 return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); 1075 #else 1076 return 0; 1077 #endif 1078 } 1079