/* bnx2x_cmn.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the content of bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
			   (bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

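/* Clarifying note (not in the original source): ETH txdata entries
 * occupy bnx2x_txq[0 .. num_eth_queues * max_cos - 1], and the FCoE
 * entry sits just past that block, at FCOE_TXQ_IDX_OFFSET beyond the
 * last ETH slot. When the number of ETH queues shrinks, the FCoE
 * txdata must therefore be copied down to the end of the new, smaller
 * ETH region - which is exactly the old/new_txdata_index arithmetic
 * performed by bnx2x_move_fp() above.
 */
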
/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		dev_kfree_skb_any(skb);
	}

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

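/* Clarifying note (not in the original source) - the BD chain walked
 * by bnx2x_free_tx_pkt() above is laid out roughly as:
 *
 *   start BD -> parse BD [-> second parse BD] [-> TSO split header BD]
 *            -> frag BD ... frag BD
 *
 * nbd (taken from the start BD, minus one) covers the BDs that follow
 * the start BD. The optional TSO split BD shares its DMA mapping with
 * the start BD, which is why both are released with a single
 * dma_unmap_single() of the combined length.
 */
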
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

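/* Clarifying note (not in the original source), assuming 64-bit mask
 * elements as the BIT_VEC64_* names suggest: each bit of fp->sge_mask
 * stands for one SGE ring entry, and bnx2x_update_sge_prod() above
 * clears a bit whenever the FW consumes the corresponding entry. The
 * producer is only advanced across mask elements that have gone fully
 * to zero - 64 entries at a time - so a partially consumed 64-entry
 * block parks the producer until the whole block is replenished.
 */
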
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 * @num_of_coalesced_segs: number of segments aggregated by the FW
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

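/* Worked example (illustrative, not from the original source): for an
 * IPv4 aggregation with timestamps, hdrs_len = 14 (ETH_HLEN) + 20
 * (struct iphdr) + 20 (struct tcphdr) + 12 (TPA_TSTAMP_OPT_LEN) = 66,
 * so a first packet with len_on_bd = 1514 yields gso_size = 1448 -
 * exactly the per-segment TCP payload on a 1500-byte MTU link.
 */
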
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	struct bnx2x_alloc_pool *pool = &fp->page_pool;
	dma_addr_t mapping;

	if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {

		/* put page reference used by the memory pool, since we
		 * won't be using this page as the mempool anymore.
		 */
		if (pool->page)
			put_page(pool->page);

		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
		if (unlikely(!pool->page))
			return -ENOMEM;

		pool->offset = 0;
	}

	mapping = dma_map_page(&bp->pdev->dev, pool->page,
			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	get_page(pool->page);
	sw_buf->page = pool->page;
	sw_buf->offset = pool->offset;

	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	pool->offset += SGE_PAGE_SIZE;

	return 0;
}

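/* Clarifying note (not in the original source): the page pool hands
 * out SGE_PAGE_SIZE slices of one higher-order page (order
 * PAGES_PER_SGE_SHIFT). Every slice given to the ring takes its own
 * reference via get_page(); the pool drops its hold with put_page()
 * once the remainder of the page can no longer fit a full slice, so
 * the page is finally freed when the last slice owner lets go.
 */
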
CQE index is %d\n", 617 pages, cqe_idx); 618 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); 619 bnx2x_panic(); 620 return -EINVAL; 621 } 622 #endif 623 624 /* Run through the SGL and compose the fragmented skb */ 625 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { 626 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j])); 627 628 /* FW gives the indices of the SGE as if the ring is an array 629 (meaning that "next" element will consume 2 indices) */ 630 if (fp->mode == TPA_MODE_GRO) 631 frag_len = min_t(u32, frag_size, (u32)full_page); 632 else /* LRO */ 633 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES); 634 635 rx_pg = &fp->rx_page_ring[sge_idx]; 636 old_rx_pg = *rx_pg; 637 638 /* If we fail to allocate a substitute page, we simply stop 639 where we are and drop the whole packet */ 640 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC); 641 if (unlikely(err)) { 642 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; 643 return err; 644 } 645 646 dma_unmap_page(&bp->pdev->dev, 647 dma_unmap_addr(&old_rx_pg, mapping), 648 SGE_PAGE_SIZE, DMA_FROM_DEVICE); 649 /* Add one frag and update the appropriate fields in the skb */ 650 if (fp->mode == TPA_MODE_LRO) 651 skb_fill_page_desc(skb, j, old_rx_pg.page, 652 old_rx_pg.offset, frag_len); 653 else { /* GRO */ 654 int rem; 655 int offset = 0; 656 for (rem = frag_len; rem > 0; rem -= gro_size) { 657 int len = rem > gro_size ? gro_size : rem; 658 skb_fill_page_desc(skb, frag_id++, 659 old_rx_pg.page, 660 old_rx_pg.offset + offset, 661 len); 662 if (offset) 663 get_page(old_rx_pg.page); 664 offset += len; 665 } 666 } 667 668 skb->data_len += frag_len; 669 skb->truesize += SGE_PAGES; 670 skb->len += frag_len; 671 672 frag_size -= frag_len; 673 } 674 675 return 0; 676 } 677 678 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data) 679 { 680 if (fp->rx_frag_size) 681 skb_free_frag(data); 682 else 683 kfree(data); 684 } 685 686 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask) 687 { 688 if (fp->rx_frag_size) { 689 /* GFP_KERNEL allocations are used only during initialization */ 690 if (unlikely(gfpflags_allow_blocking(gfp_mask))) 691 return (void *)__get_free_page(gfp_mask); 692 693 return netdev_alloc_frag(fp->rx_frag_size); 694 } 695 696 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask); 697 } 698 699 #ifdef CONFIG_INET 700 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb) 701 { 702 const struct iphdr *iph = ip_hdr(skb); 703 struct tcphdr *th; 704 705 skb_set_transport_header(skb, sizeof(struct iphdr)); 706 th = tcp_hdr(skb); 707 708 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), 709 iph->saddr, iph->daddr, 0); 710 } 711 712 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb) 713 { 714 struct ipv6hdr *iph = ipv6_hdr(skb); 715 struct tcphdr *th; 716 717 skb_set_transport_header(skb, sizeof(struct ipv6hdr)); 718 th = tcp_hdr(skb); 719 720 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), 721 &iph->saddr, &iph->daddr, 0); 722 } 723 724 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb, 725 void (*gro_func)(struct bnx2x*, struct sk_buff*)) 726 { 727 skb_set_network_header(skb, 0); 728 gro_func(bp, skb); 729 tcp_gro_complete(skb); 730 } 731 #endif 732 733 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, 734 struct sk_buff *skb) 735 { 736 #ifdef CONFIG_INET 737 if (skb_shinfo(skb)->gso_size) { 738 switch (be16_to_cpu(skb->protocol)) { 739 case ETH_P_IP: 740 bnx2x_gro_csum(bp, 
static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
						       tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

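/* Clarifying note (not in the original source) - layout of an Rx
 * buffer allocated above:
 *
 *   data --> | NET_SKB_PAD | up to rx_buf_size bytes DMA'd by the NIC |
 *
 * Only the region past NET_SKB_PAD is mapped for the device; the pad
 * stays CPU-owned headroom, and skb_reserve(pad + NET_SKB_PAD) in the
 * Rx paths skips both it and the HW placement offset.
 */
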
static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA. PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data. Without the barrier TPA state-machine might
		 * enter inconsistent state and kernel stack might be
		 * provided with incorrect packet description - these lead
		 * to various kernel crashes.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->mode == TPA_MODE_DISABLED &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = napi_alloc_skb(&fp->napi, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		/* Check if this packet was timestamped */
		if (unlikely(cqe->fast_path_cqe.type_error_flags &
			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
			bnx2x_set_rx_ts(bp, skb);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

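/* Clarifying note (not in the original source): the handler above acks
 * the status block with IGU_INT_DISABLE, so this vector stays quiet
 * while the scheduled NAPI poller runs; per the usual NAPI contract the
 * poll routine re-enables the interrupt once it has drained its rings.
 */
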
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_PERCENT_BW(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

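/* Worked example (illustrative numbers, not from the original source):
 * on a 10000 Mbps link with maxCfg = 30, percent-BW mode reports
 * 10000 * 30 / 100 = 3000 Mbps; in SD mode maxCfg is in units of
 * 100 Mbps, so maxCfg = 30 likewise caps the result at 3000 Mbps.
 */
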
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	memset(data, 0, sizeof(*data));

	if (IS_PF(bp)) {
		/* Fill the report data: effective line speed */
		data->line_speed = bnx2x_get_mf_speed(bp);

		/* Link is down */
		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		if (!BNX2X_NUM_ETH_QUEUES(bp))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		/* Full DUPLEX */
		if (bp->link_vars.duplex == DUPLEX_FULL)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &data->link_report_flags);

		/* Rx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &data->link_report_flags);

		/* Tx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &data->link_report_flags);
	} else { /* VF */
		*data = bp->vf_link_vars;
	}
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	/* propagate status to VFs */
	if (IS_PF(bp))
		bnx2x_iov_link_update(bp);

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

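/* Clarifying note (not in the original source): the last two entries
 * of each SGE page are reserved, and the one programmed above (index
 * RX_SGE_CNT * i - 2) holds the DMA address of the following page; the
 * "i % NUM_RX_SGE_PAGES" wrap points the final page back at the first,
 * closing the pages into a ring.
 */
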
static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (fp->mode != TPA_MODE_DISABLED) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data =
					bnx2x_frag_alloc(fp, GFP_KERNEL);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->mode = TPA_MODE_DISABLED;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
						       GFP_KERNEL) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->mode = TPA_MODE_DISABLED;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (fp->mode != TPA_MODE_DISABLED)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}

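/* Clarifying note (not in the original source) - msix_table layout:
 *
 *   [0]              slowpath / default SB  (PF only)
 *   [1]              CNIC                   (if CNIC_SUPPORT)
 *   [2 .. 2 + N - 1] one vector per ETH queue
 *
 * This is the offset arithmetic used by bnx2x_free_msix_irqs() above
 * and by bnx2x_enable_msix()/bnx2x_req_msix_irqs() below.
 */
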
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc;

	/* VFs don't have a default status block */
	if (IS_PF(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
			       bp->msix_table[0].entry);
		msix_vec++;
	}

	/* Cnic requires an msix vector for itself */
	if (CNIC_SUPPORT(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
			       msix_vec, bp->msix_table[msix_vec].entry);
		msix_vec++;
	}

	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
	   msix_vec);

	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc == -ENOSPC) {
		/* Get by with single vector */
		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
		if (rc < 0) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

		BNX2X_DEV_INFO("set number of queues to 1\n");
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	} else if (rc < msix_vec) {
		/* how many fewer vectors did we get? */
		int diff = msix_vec - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* no default status block for vf */
	if (IS_PF(bp)) {
		rc = request_irq(bp->msix_table[offset++].vector,
				 bnx2x_msix_sp_int, 0,
				 bp->dev->name, bp->dev);
		if (rc) {
			BNX2X_ERR("request sp irq failed\n");
			return -EBUSY;
		}
	}

	if (CNIC_SUPPORT(bp))
		offset++;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
				  bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	if (IS_PF(bp)) {
		offset = 1 + CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
			    bp->msix_table[0].vector,
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	} else {
		offset = CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	}
	return 0;
}

fp[%d] %d\n", 1788 0, bp->msix_table[offset].vector, 1789 i - 1, bp->msix_table[offset + i - 1].vector); 1790 } 1791 return 0; 1792 } 1793 1794 int bnx2x_enable_msi(struct bnx2x *bp) 1795 { 1796 int rc; 1797 1798 rc = pci_enable_msi(bp->pdev); 1799 if (rc) { 1800 BNX2X_DEV_INFO("MSI is not attainable\n"); 1801 return -1; 1802 } 1803 bp->flags |= USING_MSI_FLAG; 1804 1805 return 0; 1806 } 1807 1808 static int bnx2x_req_irq(struct bnx2x *bp) 1809 { 1810 unsigned long flags; 1811 unsigned int irq; 1812 1813 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG)) 1814 flags = 0; 1815 else 1816 flags = IRQF_SHARED; 1817 1818 if (bp->flags & USING_MSIX_FLAG) 1819 irq = bp->msix_table[0].vector; 1820 else 1821 irq = bp->pdev->irq; 1822 1823 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); 1824 } 1825 1826 static int bnx2x_setup_irqs(struct bnx2x *bp) 1827 { 1828 int rc = 0; 1829 if (bp->flags & USING_MSIX_FLAG && 1830 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { 1831 rc = bnx2x_req_msix_irqs(bp); 1832 if (rc) 1833 return rc; 1834 } else { 1835 rc = bnx2x_req_irq(bp); 1836 if (rc) { 1837 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); 1838 return rc; 1839 } 1840 if (bp->flags & USING_MSI_FLAG) { 1841 bp->dev->irq = bp->pdev->irq; 1842 netdev_info(bp->dev, "using MSI IRQ %d\n", 1843 bp->dev->irq); 1844 } 1845 if (bp->flags & USING_MSIX_FLAG) { 1846 bp->dev->irq = bp->msix_table[0].vector; 1847 netdev_info(bp->dev, "using MSIX IRQ %d\n", 1848 bp->dev->irq); 1849 } 1850 } 1851 1852 return 0; 1853 } 1854 1855 static void bnx2x_napi_enable_cnic(struct bnx2x *bp) 1856 { 1857 int i; 1858 1859 for_each_rx_queue_cnic(bp, i) { 1860 napi_enable(&bnx2x_fp(bp, i, napi)); 1861 } 1862 } 1863 1864 static void bnx2x_napi_enable(struct bnx2x *bp) 1865 { 1866 int i; 1867 1868 for_each_eth_queue(bp, i) { 1869 napi_enable(&bnx2x_fp(bp, i, napi)); 1870 } 1871 } 1872 1873 static void bnx2x_napi_disable_cnic(struct bnx2x *bp) 1874 { 1875 int i; 1876 1877 for_each_rx_queue_cnic(bp, i) { 1878 napi_disable(&bnx2x_fp(bp, i, napi)); 1879 } 1880 } 1881 1882 static void bnx2x_napi_disable(struct bnx2x *bp) 1883 { 1884 int i; 1885 1886 for_each_eth_queue(bp, i) { 1887 napi_disable(&bnx2x_fp(bp, i, napi)); 1888 } 1889 } 1890 1891 void bnx2x_netif_start(struct bnx2x *bp) 1892 { 1893 if (netif_running(bp->dev)) { 1894 bnx2x_napi_enable(bp); 1895 if (CNIC_LOADED(bp)) 1896 bnx2x_napi_enable_cnic(bp); 1897 bnx2x_int_enable(bp); 1898 if (bp->state == BNX2X_STATE_OPEN) 1899 netif_tx_wake_all_queues(bp->dev); 1900 } 1901 } 1902 1903 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) 1904 { 1905 bnx2x_int_disable_sync(bp, disable_hw); 1906 bnx2x_napi_disable(bp); 1907 if (CNIC_LOADED(bp)) 1908 bnx2x_napi_disable_cnic(bp); 1909 } 1910 1911 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, 1912 void *accel_priv, select_queue_fallback_t fallback) 1913 { 1914 struct bnx2x *bp = netdev_priv(dev); 1915 1916 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) { 1917 struct ethhdr *hdr = (struct ethhdr *)skb->data; 1918 u16 ether_type = ntohs(hdr->h_proto); 1919 1920 /* Skip VLAN tag if present */ 1921 if (ether_type == ETH_P_8021Q) { 1922 struct vlan_ethhdr *vhdr = 1923 (struct vlan_ethhdr *)skb->data; 1924 1925 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); 1926 } 1927 1928 /* If ethertype is FCoE or FIP - use FCoE ring */ 1929 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) 1930 return bnx2x_fcoe_tx(bp, txq_index); 1931 } 1932 1933 /* select a non-FCoE queue */ 1934 return fallback(dev, skb) % 
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	/* RSS queues */
	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);

	/* override in STORAGE SD modes */
	if (IS_MF_STORAGE_ONLY(bp))
		bp->num_ethernet_queues = 1;

	/* Add special queues */
	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}

/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:		Driver handle
 *
 * We currently support at most 16 Tx queues for each CoS, thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
{
	int rc, tx, rx;

	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

	/* account for fcoe queue */
	if (include_cnic && !NO_FCOE(bp)) {
		rx++;
		tx++;
	}

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
	   tx, rx);

	return rc;
}

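/* Worked example (illustrative numbers, not from the original source):
 * with 8 ETH queues and max_cos = 3, the netdev exposes 8 * 3 = 24 real
 * Tx queues and 8 Rx queues; including the FCoE L2 queue grows both
 * counts by one, with the extra Tx queue taking the index right after
 * the ETH block.
 */
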
2020 */ 2021 mtu = BNX2X_FCOE_MINI_JUMBO_MTU; 2022 else 2023 mtu = bp->dev->mtu; 2024 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + 2025 IP_HEADER_ALIGNMENT_PADDING + 2026 ETH_OVREHEAD + 2027 mtu + 2028 BNX2X_FW_RX_ALIGN_END; 2029 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */ 2030 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) 2031 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; 2032 else 2033 fp->rx_frag_size = 0; 2034 } 2035 } 2036 2037 static int bnx2x_init_rss(struct bnx2x *bp) 2038 { 2039 int i; 2040 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); 2041 2042 /* Prepare the initial contents for the indirection table if RSS is 2043 * enabled 2044 */ 2045 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) 2046 bp->rss_conf_obj.ind_table[i] = 2047 bp->fp->cl_id + 2048 ethtool_rxfh_indir_default(i, num_eth_queues); 2049 2050 /* 2051 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 2052 * per-port, so if explicit configuration is needed, do it only 2053 * for a PMF. 2054 * 2055 * For 57712 and newer on the other hand it's a per-function 2056 * configuration. 2057 */ 2058 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); 2059 } 2060 2061 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, 2062 bool config_hash, bool enable) 2063 { 2064 struct bnx2x_config_rss_params params = {NULL}; 2065 2066 /* Although RSS is meaningless when there is a single HW queue we 2067 * still need it enabled in order to have HW Rx hash generated. 2068 * 2069 * if (!is_eth_multi(bp)) 2070 * bp->multi_mode = ETH_RSS_MODE_DISABLED; 2071 */ 2072 2073 params.rss_obj = rss_obj; 2074 2075 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 2076 2077 if (enable) { 2078 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags); 2079 2080 /* RSS configuration */ 2081 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags); 2082 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags); 2083 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags); 2084 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags); 2085 if (rss_obj->udp_rss_v4) 2086 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags); 2087 if (rss_obj->udp_rss_v6) 2088 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags); 2089 2090 if (!CHIP_IS_E1x(bp)) { 2091 /* valid only for TUNN_MODE_VXLAN tunnel mode */ 2092 __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags); 2093 __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags); 2094 2095 /* valid only for TUNN_MODE_GRE tunnel mode */ 2096 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags); 2097 } 2098 } else { 2099 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags); 2100 } 2101 2102 /* Hash bits */ 2103 params.rss_result_mask = MULTI_MASK; 2104 2105 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); 2106 2107 if (config_hash) { 2108 /* RSS keys */ 2109 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4); 2110 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags); 2111 } 2112 2113 if (IS_PF(bp)) 2114 return bnx2x_config_rss(bp, &params); 2115 else 2116 return bnx2x_vfpf_config_rss(bp, &params); 2117 } 2118 2119 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) 2120 { 2121 struct bnx2x_func_state_params func_params = {NULL}; 2122 2123 /* Prepare parameters for function state transitions */ 2124 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 2125 2126 func_params.f_obj = &bp->func_obj; 2127 func_params.cmd = BNX2X_F_CMD_HW_INIT; 2128 2129 func_params.params.hw_init.load_phase = load_code; 2130 2131 return bnx2x_func_state_change(bp, &func_params); 2132 } 2133 2134 /* 2135 * Cleans objects that have internal lists without 
sending 2136 * ramrods. Should be run when interrupts are disabled. 2137 */ 2138 void bnx2x_squeeze_objects(struct bnx2x *bp) 2139 { 2140 int rc; 2141 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 2142 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 2143 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; 2144 2145 /***************** Cleanup MACs' object first *************************/ 2146 2147 /* Wait for completion of requested commands */ 2148 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2149 /* Perform a dry cleanup */ 2150 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); 2151 2152 /* Clean ETH primary MAC */ 2153 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); 2154 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, 2155 &ramrod_flags); 2156 if (rc != 0) 2157 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); 2158 2159 /* Cleanup UC list */ 2160 vlan_mac_flags = 0; 2161 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags); 2162 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, 2163 &ramrod_flags); 2164 if (rc != 0) 2165 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc); 2166 2167 /***************** Now clean mcast object *****************************/ 2168 rparam.mcast_obj = &bp->mcast_obj; 2169 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); 2170 2171 /* Add a DEL command... - Since we're doing a driver cleanup only, 2172 * we take a lock surrounding both the initial send and the CONTs, 2173 * as we don't want a true completion to disrupt us in the middle. 2174 */ 2175 netif_addr_lock_bh(bp->dev); 2176 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 2177 if (rc < 0) 2178 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n", 2179 rc); 2180 2181 /* ...and wait until all pending commands are cleared */ 2182 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 2183 while (rc != 0) { 2184 if (rc < 0) { 2185 BNX2X_ERR("Failed to clean multi-cast object: %d\n", 2186 rc); 2187 netif_addr_unlock_bh(bp->dev); 2188 return; 2189 } 2190 2191 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 2192 } 2193 netif_addr_unlock_bh(bp->dev); 2194 } 2195 2196 #ifndef BNX2X_STOP_ON_ERROR 2197 #define LOAD_ERROR_EXIT(bp, label) \ 2198 do { \ 2199 (bp)->state = BNX2X_STATE_ERROR; \ 2200 goto label; \ 2201 } while (0) 2202 2203 #define LOAD_ERROR_EXIT_CNIC(bp, label) \ 2204 do { \ 2205 bp->cnic_loaded = false; \ 2206 goto label; \ 2207 } while (0) 2208 #else /*BNX2X_STOP_ON_ERROR*/ 2209 #define LOAD_ERROR_EXIT(bp, label) \ 2210 do { \ 2211 (bp)->state = BNX2X_STATE_ERROR; \ 2212 (bp)->panic = 1; \ 2213 return -EBUSY; \ 2214 } while (0) 2215 #define LOAD_ERROR_EXIT_CNIC(bp, label) \ 2216 do { \ 2217 bp->cnic_loaded = false; \ 2218 (bp)->panic = 1; \ 2219 return -EBUSY; \ 2220 } while (0) 2221 #endif /*BNX2X_STOP_ON_ERROR*/ 2222 2223 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp) 2224 { 2225 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, 2226 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 2227 return; 2228 } 2229 2230 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) 2231 { 2232 int num_groups, vf_headroom = 0; 2233 int is_fcoe_stats = NO_FCOE(bp) ? 
0 : 1; 2234 2235 /* number of queues for statistics is number of eth queues + FCoE */ 2236 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; 2237 2238 /* Total number of FW statistics requests = 2239 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper 2240 * and fcoe l2 queue) stats + num of queues (which includes another 1 2241 * for fcoe l2 queue if applicable) 2242 */ 2243 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; 2244 2245 /* vf stats appear in the request list, but their data is allocated by 2246 * the VFs themselves. We don't include them in the bp->fw_stats_num as 2247 * it is used to determine where to place the vf stats queries in the 2248 * request struct 2249 */ 2250 if (IS_SRIOV(bp)) 2251 vf_headroom = bnx2x_vf_headroom(bp); 2252 2253 /* Request is built from stats_query_header and an array of 2254 * stats_query_cmd_group each of which contains 2255 * STATS_QUERY_CMD_COUNT rules. The real number of requests is 2256 * configured in the stats_query_header. 2257 */ 2258 num_groups = 2259 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + 2260 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? 2261 1 : 0)); 2262 2263 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n", 2264 bp->fw_stats_num, vf_headroom, num_groups); 2265 bp->fw_stats_req_sz = sizeof(struct stats_query_header) + 2266 num_groups * sizeof(struct stats_query_cmd_group); 2267 2268 /* Data for statistics requests + stats_counter 2269 * stats_counter holds per-STORM counters that are incremented 2270 * when STORM has finished with the current request. 2271 * Memory for FCoE offloaded statistics is counted anyway, 2272 * even if it will not be sent. 2273 * VF stats are not accounted for here as the data of VF stats is stored 2274 * in memory allocated by the VF, not here. 
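* As a worked example (assuming STATS_QUERY_CMD_COUNT is 16, as defined in the HSI headers): with 8 ETH queues plus FCoE, num_queue_stats = 9 and fw_stats_num = 2 + 1 + 9 = 12, so with no VF headroom a single stats_query_cmd_group suffices; a vf_headroom of, say, 8 pushes the total to 20 and num_groups to 2.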
2275 */ 2276 bp->fw_stats_data_sz = sizeof(struct per_port_stats) + 2277 sizeof(struct per_pf_stats) + 2278 sizeof(struct fcoe_statistics_params) + 2279 sizeof(struct per_queue_stats) * num_queue_stats + 2280 sizeof(struct stats_counter); 2281 2282 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, 2283 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 2284 if (!bp->fw_stats) 2285 goto alloc_mem_err; 2286 2287 /* Set shortcuts */ 2288 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; 2289 bp->fw_stats_req_mapping = bp->fw_stats_mapping; 2290 bp->fw_stats_data = (struct bnx2x_fw_stats_data *) 2291 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); 2292 bp->fw_stats_data_mapping = bp->fw_stats_mapping + 2293 bp->fw_stats_req_sz; 2294 2295 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n", 2296 U64_HI(bp->fw_stats_req_mapping), 2297 U64_LO(bp->fw_stats_req_mapping)); 2298 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n", 2299 U64_HI(bp->fw_stats_data_mapping), 2300 U64_LO(bp->fw_stats_data_mapping)); 2301 return 0; 2302 2303 alloc_mem_err: 2304 bnx2x_free_fw_stats_mem(bp); 2305 BNX2X_ERR("Can't allocate FW stats memory\n"); 2306 return -ENOMEM; 2307 } 2308 2309 /* send load request to mcp and analyze response */ 2310 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) 2311 { 2312 u32 param; 2313 2314 /* init fw_seq */ 2315 bp->fw_seq = 2316 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 2317 DRV_MSG_SEQ_NUMBER_MASK); 2318 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 2319 2320 /* Get current FW pulse sequence */ 2321 bp->fw_drv_pulse_wr_seq = 2322 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) & 2323 DRV_PULSE_SEQ_MASK); 2324 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); 2325 2326 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA; 2327 2328 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) 2329 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA; 2330 2331 /* load request */ 2332 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); 2333 2334 /* if mcp fails to respond we must abort */ 2335 if (!(*load_code)) { 2336 BNX2X_ERR("MCP response failure, aborting\n"); 2337 return -EBUSY; 2338 } 2339 2340 /* If mcp refused (e.g. other port is in diagnostic mode) we 2341 * must abort 2342 */ 2343 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { 2344 BNX2X_ERR("MCP refused load request, aborting\n"); 2345 return -EBUSY; 2346 } 2347 return 0; 2348 } 2349 2350 /* check whether another PF has already loaded FW to chip. In 2351 * virtualized environments a pf from another VM may have already 2352 * initialized the device including loading FW 2353 */ 2354 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) 2355 { 2356 /* is another pf loaded on this engine? */ 2357 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && 2358 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { 2359 /* build my FW version dword */ 2360 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) + 2361 (BCM_5710_FW_MINOR_VERSION << 8) + 2362 (BCM_5710_FW_REVISION_VERSION << 16) + 2363 (BCM_5710_FW_ENGINEERING_VERSION << 24); 2364 2365 /* read loaded FW from chip */ 2366 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); 2367 2368 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n", 2369 loaded_fw, my_fw); 2370 2371 /* abort nic load if version mismatch */ 2372 if (my_fw != loaded_fw) { 2373 if (print_err) 2374 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. 
Aborting\n", 2375 loaded_fw, my_fw); 2376 else 2377 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n", 2378 loaded_fw, my_fw); 2379 return -EBUSY; 2380 } 2381 } 2382 return 0; 2383 } 2384 2385 /* returns the "mcp load_code" according to global load_count array */ 2386 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) 2387 { 2388 int path = BP_PATH(bp); 2389 2390 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", 2391 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], 2392 bnx2x_load_count[path][2]); 2393 bnx2x_load_count[path][0]++; 2394 bnx2x_load_count[path][1 + port]++; 2395 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", 2396 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], 2397 bnx2x_load_count[path][2]); 2398 if (bnx2x_load_count[path][0] == 1) 2399 return FW_MSG_CODE_DRV_LOAD_COMMON; 2400 else if (bnx2x_load_count[path][1 + port] == 1) 2401 return FW_MSG_CODE_DRV_LOAD_PORT; 2402 else 2403 return FW_MSG_CODE_DRV_LOAD_FUNCTION; 2404 } 2405 2406 /* mark PMF if applicable */ 2407 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code) 2408 { 2409 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 2410 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || 2411 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { 2412 bp->port.pmf = 1; 2413 /* We need the barrier to ensure the ordering between the 2414 * writing to bp->port.pmf here and reading it from the 2415 * bnx2x_periodic_task(). 2416 */ 2417 smp_mb(); 2418 } else { 2419 bp->port.pmf = 0; 2420 } 2421 2422 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); 2423 } 2424 2425 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) 2426 { 2427 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 2428 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && 2429 (bp->common.shmem2_base)) { 2430 if (SHMEM2_HAS(bp, dcc_support)) 2431 SHMEM2_WR(bp, dcc_support, 2432 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | 2433 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); 2434 if (SHMEM2_HAS(bp, afex_driver_support)) 2435 SHMEM2_WR(bp, afex_driver_support, 2436 SHMEM_AFEX_SUPPORTED_VERSION_ONE); 2437 } 2438 2439 /* Set AFEX default VLAN tag to an invalid value */ 2440 bp->afex_def_vlan_tag = -1; 2441 } 2442 2443 /** 2444 * bnx2x_bz_fp - zero content of the fastpath structure. 2445 * 2446 * @bp: driver handle 2447 * @index: fastpath index to be zeroed 2448 * 2449 * Makes sure the contents of the bp->fp[index].napi is kept 2450 * intact. 
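* The tpa_info pointer is preserved in the same way - only the aggregation info it points to is zeroed - since that memory is allocated once and must not be shared between two fastpath structures.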
2451 */ 2452 static void bnx2x_bz_fp(struct bnx2x *bp, int index) 2453 { 2454 struct bnx2x_fastpath *fp = &bp->fp[index]; 2455 int cos; 2456 struct napi_struct orig_napi = fp->napi; 2457 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; 2458 2459 /* bzero bnx2x_fastpath contents */ 2460 if (fp->tpa_info) 2461 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * 2462 sizeof(struct bnx2x_agg_info)); 2463 memset(fp, 0, sizeof(*fp)); 2464 2465 /* Restore the NAPI object as it has been already initialized */ 2466 fp->napi = orig_napi; 2467 fp->tpa_info = orig_tpa_info; 2468 fp->bp = bp; 2469 fp->index = index; 2470 if (IS_ETH_FP(fp)) 2471 fp->max_cos = bp->max_cos; 2472 else 2473 /* Special queues support only one CoS */ 2474 fp->max_cos = 1; 2475 2476 /* Init txdata pointers */ 2477 if (IS_FCOE_FP(fp)) 2478 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; 2479 if (IS_ETH_FP(fp)) 2480 for_each_cos_in_tx_queue(fp, cos) 2481 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * 2482 BNX2X_NUM_ETH_QUEUES(bp) + index]; 2483 2484 /* set the tpa flag for each queue. The tpa flag determines the queue 2485 * minimal size so it must be set prior to queue memory allocation 2486 */ 2487 if (bp->dev->features & NETIF_F_LRO) 2488 fp->mode = TPA_MODE_LRO; 2489 else if (bp->dev->features & NETIF_F_GRO && 2490 bnx2x_mtu_allows_gro(bp->dev->mtu)) 2491 fp->mode = TPA_MODE_GRO; 2492 else 2493 fp->mode = TPA_MODE_DISABLED; 2494 2495 /* We don't want TPA if it's disabled in bp 2496 * or if this is an FCoE L2 ring. 2497 */ 2498 if (bp->disable_tpa || IS_FCOE_FP(fp)) 2499 fp->mode = TPA_MODE_DISABLED; 2500 } 2501 2502 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state) 2503 { 2504 u32 cur; 2505 2506 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp)) 2507 return; 2508 2509 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]); 2510 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n", 2511 cur, state); 2512 2513 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state); 2514 } 2515 2516 int bnx2x_load_cnic(struct bnx2x *bp) 2517 { 2518 int i, rc, port = BP_PORT(bp); 2519 2520 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n"); 2521 2522 mutex_init(&bp->cnic_mutex); 2523 2524 if (IS_PF(bp)) { 2525 rc = bnx2x_alloc_mem_cnic(bp); 2526 if (rc) { 2527 BNX2X_ERR("Unable to allocate bp memory for cnic\n"); 2528 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); 2529 } 2530 } 2531 2532 rc = bnx2x_alloc_fp_mem_cnic(bp); 2533 if (rc) { 2534 BNX2X_ERR("Unable to allocate memory for cnic fps\n"); 2535 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); 2536 } 2537 2538 /* Update the number of queues with the cnic queues */ 2539 rc = bnx2x_set_real_num_queues(bp, 1); 2540 if (rc) { 2541 BNX2X_ERR("Unable to set real_num_queues including cnic\n"); 2542 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); 2543 } 2544 2545 /* Add all CNIC NAPI objects */ 2546 bnx2x_add_all_napi_cnic(bp); 2547 DP(NETIF_MSG_IFUP, "cnic napi added\n"); 2548 bnx2x_napi_enable_cnic(bp); 2549 2550 rc = bnx2x_init_hw_func_cnic(bp); 2551 if (rc) 2552 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1); 2553 2554 bnx2x_nic_init_cnic(bp); 2555 2556 if (IS_PF(bp)) { 2557 /* Enable Timer scan */ 2558 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); 2559 2560 /* setup cnic queues */ 2561 for_each_cnic_queue(bp, i) { 2562 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); 2563 if (rc) { 2564 BNX2X_ERR("Queue setup failed\n"); 2565 LOAD_ERROR_EXIT(bp, load_error_cnic2); 2566 } 2567 } 2568 } 2569 2570 /* Initialize Rx filter. 
*/ 2571 bnx2x_set_rx_mode_inner(bp); 2572 2573 /* re-read iscsi info */ 2574 bnx2x_get_iscsi_info(bp); 2575 bnx2x_setup_cnic_irq_info(bp); 2576 bnx2x_setup_cnic_info(bp); 2577 bp->cnic_loaded = true; 2578 if (bp->state == BNX2X_STATE_OPEN) 2579 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); 2580 2581 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n"); 2582 2583 return 0; 2584 2585 #ifndef BNX2X_STOP_ON_ERROR 2586 load_error_cnic2: 2587 /* Disable Timer scan */ 2588 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 2589 2590 load_error_cnic1: 2591 bnx2x_napi_disable_cnic(bp); 2592 /* Update the number of queues without the cnic queues */ 2593 if (bnx2x_set_real_num_queues(bp, 0)) 2594 BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); 2595 load_error_cnic0: 2596 BNX2X_ERR("CNIC-related load failed\n"); 2597 bnx2x_free_fp_mem_cnic(bp); 2598 bnx2x_free_mem_cnic(bp); 2599 return rc; 2600 #endif /* ! BNX2X_STOP_ON_ERROR */ 2601 } 2602 2603 /* must be called with rtnl_lock */ 2604 int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 2605 { 2606 int port = BP_PORT(bp); 2607 int i, rc = 0, load_code = 0; 2608 2609 DP(NETIF_MSG_IFUP, "Starting NIC load\n"); 2610 DP(NETIF_MSG_IFUP, 2611 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled"); 2612 2613 #ifdef BNX2X_STOP_ON_ERROR 2614 if (unlikely(bp->panic)) { 2615 BNX2X_ERR("Can't load NIC when there is panic\n"); 2616 return -EPERM; 2617 } 2618 #endif 2619 2620 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 2621 2622 /* zero the structure w/o any lock, before SP handler is initialized */ 2623 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); 2624 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, 2625 &bp->last_reported_link.link_report_flags); 2626 2627 if (IS_PF(bp)) 2628 /* must be called before memory allocation and HW init */ 2629 bnx2x_ilt_set_info(bp); 2630 2631 /* 2632 * Zero fastpath structures preserving invariants like napi, which are 2633 * allocated only once, fp index, max_cos, bp pointer. 2634 * Also set fp->mode and txdata_ptr. 2635 */ 2636 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); 2637 for_each_queue(bp, i) 2638 bnx2x_bz_fp(bp, i); 2639 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + 2640 bp->num_cnic_queues) * 2641 sizeof(struct bnx2x_fp_txdata)); 2642 2643 bp->fcoe_init = false; 2644 2645 /* Set the receive queues buffer size */ 2646 bnx2x_set_rx_buf_size(bp); 2647 2648 if (IS_PF(bp)) { 2649 rc = bnx2x_alloc_mem(bp); 2650 if (rc) { 2651 BNX2X_ERR("Unable to allocate bp memory\n"); 2652 return rc; 2653 } 2654 } 2655 2656 /* This needs to be done after alloc mem, since it's self-adjusting to the 2657 * amount of memory available for RSS queues 2658 */ 2659 rc = bnx2x_alloc_fp_mem(bp); 2660 if (rc) { 2661 BNX2X_ERR("Unable to allocate memory for fps\n"); 2662 LOAD_ERROR_EXIT(bp, load_error0); 2663 } 2664 2665 /* Allocate memory for FW statistics */ 2666 if (bnx2x_alloc_fw_stats_mem(bp)) 2667 LOAD_ERROR_EXIT(bp, load_error0); 2668 2669 /* request pf to initialize status blocks */ 2670 if (IS_VF(bp)) { 2671 rc = bnx2x_vfpf_init(bp); 2672 if (rc) 2673 LOAD_ERROR_EXIT(bp, load_error0); 2674 } 2675 2676 /* Since bnx2x_alloc_mem() may possibly update 2677 * bp->num_queues, bnx2x_set_real_num_queues() should always 2678 * come after it. At this stage cnic queues are not counted. 
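* For example (illustrative numbers only): with num_ethernet_queues = 8 and max_cos = 3 this registers 24 real Tx queues and 8 Rx queues here; the FCoE queue is only added later, when bnx2x_load_cnic() calls bnx2x_set_real_num_queues(bp, 1).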
2679 */ 2680 rc = bnx2x_set_real_num_queues(bp, 0); 2681 if (rc) { 2682 BNX2X_ERR("Unable to set real_num_queues\n"); 2683 LOAD_ERROR_EXIT(bp, load_error0); 2684 } 2685 2686 /* configure multi cos mappings in kernel. 2687 * this configuration may be overridden by a multi class queue 2688 * discipline or by a dcbx negotiation result. 2689 */ 2690 bnx2x_setup_tc(bp->dev, bp->max_cos); 2691 2692 /* Add all NAPI objects */ 2693 bnx2x_add_all_napi(bp); 2694 DP(NETIF_MSG_IFUP, "napi added\n"); 2695 bnx2x_napi_enable(bp); 2696 2697 if (IS_PF(bp)) { 2698 /* set pf load just before approaching the MCP */ 2699 bnx2x_set_pf_load(bp); 2700 2701 /* if mcp exists send load request and analyze response */ 2702 if (!BP_NOMCP(bp)) { 2703 /* attempt to load pf */ 2704 rc = bnx2x_nic_load_request(bp, &load_code); 2705 if (rc) 2706 LOAD_ERROR_EXIT(bp, load_error1); 2707 2708 /* what did mcp say? */ 2709 rc = bnx2x_compare_fw_ver(bp, load_code, true); 2710 if (rc) { 2711 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 2712 LOAD_ERROR_EXIT(bp, load_error2); 2713 } 2714 } else { 2715 load_code = bnx2x_nic_load_no_mcp(bp, port); 2716 } 2717 2718 /* mark pmf if applicable */ 2719 bnx2x_nic_load_pmf(bp, load_code); 2720 2721 /* Init Function state controlling object */ 2722 bnx2x__init_func_obj(bp); 2723 2724 /* Initialize HW */ 2725 rc = bnx2x_init_hw(bp, load_code); 2726 if (rc) { 2727 BNX2X_ERR("HW init failed, aborting\n"); 2728 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 2729 LOAD_ERROR_EXIT(bp, load_error2); 2730 } 2731 } 2732 2733 bnx2x_pre_irq_nic_init(bp); 2734 2735 /* Connect to IRQs */ 2736 rc = bnx2x_setup_irqs(bp); 2737 if (rc) { 2738 BNX2X_ERR("setup irqs failed\n"); 2739 if (IS_PF(bp)) 2740 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 2741 LOAD_ERROR_EXIT(bp, load_error2); 2742 } 2743 2744 /* Init per-function objects */ 2745 if (IS_PF(bp)) { 2746 /* Setup NIC internals and enable interrupts */ 2747 bnx2x_post_irq_nic_init(bp, load_code); 2748 2749 bnx2x_init_bp_objs(bp); 2750 bnx2x_iov_nic_init(bp); 2751 2752 /* Set AFEX default VLAN tag to an invalid value */ 2753 bp->afex_def_vlan_tag = -1; 2754 bnx2x_nic_load_afex_dcc(bp, load_code); 2755 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; 2756 rc = bnx2x_func_start(bp); 2757 if (rc) { 2758 BNX2X_ERR("Function start failed!\n"); 2759 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 2760 2761 LOAD_ERROR_EXIT(bp, load_error3); 2762 } 2763 2764 /* Send LOAD_DONE command to MCP */ 2765 if (!BP_NOMCP(bp)) { 2766 load_code = bnx2x_fw_command(bp, 2767 DRV_MSG_CODE_LOAD_DONE, 0); 2768 if (!load_code) { 2769 BNX2X_ERR("MCP response failure, aborting\n"); 2770 rc = -EBUSY; 2771 LOAD_ERROR_EXIT(bp, load_error3); 2772 } 2773 } 2774 2775 /* initialize FW coalescing state machines in RAM */ 2776 bnx2x_update_coalesce(bp); 2777 } 2778 2779 /* setup the leading queue */ 2780 rc = bnx2x_setup_leading(bp); 2781 if (rc) { 2782 BNX2X_ERR("Setup leading failed!\n"); 2783 LOAD_ERROR_EXIT(bp, load_error3); 2784 } 2785 2786 /* set up the rest of the queues */ 2787 for_each_nondefault_eth_queue(bp, i) { 2788 if (IS_PF(bp)) 2789 rc = bnx2x_setup_queue(bp, &bp->fp[i], false); 2790 else /* VF */ 2791 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); 2792 if (rc) { 2793 BNX2X_ERR("Queue %d setup failed\n", i); 2794 LOAD_ERROR_EXIT(bp, load_error3); 2795 } 2796 } 2797 2798 /* setup rss */ 2799 rc = bnx2x_init_rss(bp); 2800 if (rc) { 2801 BNX2X_ERR("PF RSS init failed\n"); 2802 LOAD_ERROR_EXIT(bp, load_error3); 2803 } 2804 2805 /* Now when Clients are configured we are ready 
to work */ 2806 bp->state = BNX2X_STATE_OPEN; 2807 2808 /* Configure a ucast MAC */ 2809 if (IS_PF(bp)) 2810 rc = bnx2x_set_eth_mac(bp, true); 2811 else /* vf */ 2812 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, 2813 true); 2814 if (rc) { 2815 BNX2X_ERR("Setting Ethernet MAC failed\n"); 2816 LOAD_ERROR_EXIT(bp, load_error3); 2817 } 2818 2819 if (IS_PF(bp) && bp->pending_max) { 2820 bnx2x_update_max_mf_config(bp, bp->pending_max); 2821 bp->pending_max = 0; 2822 } 2823 2824 if (bp->port.pmf) { 2825 rc = bnx2x_initial_phy_init(bp, load_mode); 2826 if (rc) 2827 LOAD_ERROR_EXIT(bp, load_error3); 2828 } 2829 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN; 2830 2831 /* Start fast path */ 2832 2833 /* Re-configure vlan filters */ 2834 rc = bnx2x_vlan_reconfigure_vid(bp); 2835 if (rc) 2836 LOAD_ERROR_EXIT(bp, load_error3); 2837 2838 /* Initialize Rx filter. */ 2839 bnx2x_set_rx_mode_inner(bp); 2840 2841 if (bp->flags & PTP_SUPPORTED) { 2842 bnx2x_init_ptp(bp); 2843 bnx2x_configure_ptp_filters(bp); 2844 } 2845 /* Start Tx */ 2846 switch (load_mode) { 2847 case LOAD_NORMAL: 2848 /* Tx queue should be only re-enabled */ 2849 netif_tx_wake_all_queues(bp->dev); 2850 break; 2851 2852 case LOAD_OPEN: 2853 netif_tx_start_all_queues(bp->dev); 2854 smp_mb__after_atomic(); 2855 break; 2856 2857 case LOAD_DIAG: 2858 case LOAD_LOOPBACK_EXT: 2859 bp->state = BNX2X_STATE_DIAG; 2860 break; 2861 2862 default: 2863 break; 2864 } 2865 2866 if (bp->port.pmf) 2867 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0); 2868 else 2869 bnx2x__link_status_update(bp); 2870 2871 /* start the timer */ 2872 mod_timer(&bp->timer, jiffies + bp->current_interval); 2873 2874 if (CNIC_ENABLED(bp)) 2875 bnx2x_load_cnic(bp); 2876 2877 if (IS_PF(bp)) 2878 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); 2879 2880 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 2881 /* mark driver is loaded in shmem2 */ 2882 u32 val; 2883 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); 2884 val &= ~DRV_FLAGS_MTU_MASK; 2885 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT); 2886 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], 2887 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | 2888 DRV_FLAGS_CAPABILITIES_LOADED_L2); 2889 } 2890 2891 /* Wait for all pending SP commands to complete */ 2892 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) { 2893 BNX2X_ERR("Timeout waiting for SP elements to complete\n"); 2894 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); 2895 return -EBUSY; 2896 } 2897 2898 /* Update driver data for On-Chip MFW dump. 
*/ 2899 if (IS_PF(bp)) 2900 bnx2x_update_mfw_dump(bp); 2901 2902 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ 2903 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) 2904 bnx2x_dcbx_init(bp, false); 2905 2906 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) 2907 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE); 2908 2909 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n"); 2910 2911 return 0; 2912 2913 #ifndef BNX2X_STOP_ON_ERROR 2914 load_error3: 2915 if (IS_PF(bp)) { 2916 bnx2x_int_disable_sync(bp, 1); 2917 2918 /* Clean queueable objects */ 2919 bnx2x_squeeze_objects(bp); 2920 } 2921 2922 /* Free SKBs, SGEs, TPA pool and driver internals */ 2923 bnx2x_free_skbs(bp); 2924 for_each_rx_queue(bp, i) 2925 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 2926 2927 /* Release IRQs */ 2928 bnx2x_free_irq(bp); 2929 load_error2: 2930 if (IS_PF(bp) && !BP_NOMCP(bp)) { 2931 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 2932 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 2933 } 2934 2935 bp->port.pmf = 0; 2936 load_error1: 2937 bnx2x_napi_disable(bp); 2938 bnx2x_del_all_napi(bp); 2939 2940 /* clear pf_load status, as it was already set */ 2941 if (IS_PF(bp)) 2942 bnx2x_clear_pf_load(bp); 2943 load_error0: 2944 bnx2x_free_fw_stats_mem(bp); 2945 bnx2x_free_fp_mem(bp); 2946 bnx2x_free_mem(bp); 2947 2948 return rc; 2949 #endif /* ! BNX2X_STOP_ON_ERROR */ 2950 } 2951 2952 int bnx2x_drain_tx_queues(struct bnx2x *bp) 2953 { 2954 u8 rc = 0, cos, i; 2955 2956 /* Wait until tx fastpath tasks complete */ 2957 for_each_tx_queue(bp, i) { 2958 struct bnx2x_fastpath *fp = &bp->fp[i]; 2959 2960 for_each_cos_in_tx_queue(fp, cos) 2961 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); 2962 if (rc) 2963 return rc; 2964 } 2965 return 0; 2966 } 2967 2968 /* must be called with rtnl_lock */ 2969 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) 2970 { 2971 int i; 2972 bool global = false; 2973 2974 DP(NETIF_MSG_IFUP, "Starting NIC unload\n"); 2975 2976 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) 2977 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); 2978 2979 /* mark driver is unloaded in shmem2 */ 2980 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 2981 u32 val; 2982 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); 2983 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], 2984 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); 2985 } 2986 2987 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && 2988 (bp->state == BNX2X_STATE_CLOSED || 2989 bp->state == BNX2X_STATE_ERROR)) { 2990 /* We can get here if the driver has been unloaded 2991 * during parity error recovery and is either waiting for a 2992 * leader to complete or for other functions to unload and 2993 * then ifdown has been issued. In this case we want to 2994 * unload and let other functions complete the recovery 2995 * process. 2996 */ 2997 bp->recovery_state = BNX2X_RECOVERY_DONE; 2998 bp->is_leader = 0; 2999 bnx2x_release_leader_lock(bp); 3000 smp_mb(); 3001 3002 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n"); 3003 BNX2X_ERR("Can't unload in closed or error state\n"); 3004 return -EINVAL; 3005 } 3006 3007 /* Nothing to do during unload if the previous bnx2x_nic_load() 3008 * has not completed successfully - all resources are released. 3009 * 3010 * We can get here only after an unsuccessful ndo_* callback, during which 3011 * dev->IFF_UP flag is still on. 
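* (For example, if bnx2x_nic_load() failed inside ndo_open(), the stack may later still call ndo_stop(); bp->state is then already BNX2X_STATE_CLOSED or BNX2X_STATE_ERROR, and running the teardown below would release resources a second time.)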
3012 */ 3013 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) 3014 return 0; 3015 3016 /* It's important to set the bp->state to a value different from 3017 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() 3018 * may restart the Tx from the NAPI context (see bnx2x_tx_int()). 3019 */ 3020 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 3021 smp_mb(); 3022 3023 /* indicate to VFs that the PF is going down */ 3024 bnx2x_iov_channel_down(bp); 3025 3026 if (CNIC_LOADED(bp)) 3027 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 3028 3029 /* Stop Tx */ 3030 bnx2x_tx_disable(bp); 3031 netdev_reset_tc(bp->dev); 3032 3033 bp->rx_mode = BNX2X_RX_MODE_NONE; 3034 3035 del_timer_sync(&bp->timer); 3036 3037 if (IS_PF(bp)) { 3038 /* Set ALWAYS_ALIVE bit in shmem */ 3039 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 3040 bnx2x_drv_pulse(bp); 3041 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 3042 bnx2x_save_statistics(bp); 3043 } 3044 3045 /* wait till consumers catch up with producers in all queues */ 3046 bnx2x_drain_tx_queues(bp); 3047 3048 /* if VF, indicate to PF that this function is going down (PF will 3049 * delete sp elements and clear initializations) 3050 */ 3051 if (IS_VF(bp)) 3052 bnx2x_vfpf_close_vf(bp); 3053 else if (unload_mode != UNLOAD_RECOVERY) 3054 /* if this is a normal/close unload need to clean up the chip */ 3055 bnx2x_chip_cleanup(bp, unload_mode, keep_link); 3056 else { 3057 /* Send the UNLOAD_REQUEST to the MCP */ 3058 bnx2x_send_unload_req(bp, unload_mode); 3059 3060 /* Prevent transactions to host from the functions on the 3061 * engine that doesn't reset global blocks in case of global 3062 * attention once global blocks are reset and gates are opened 3063 * (the engine whose leader will perform the recovery 3064 * last). 3065 */ 3066 if (!CHIP_IS_E1x(bp)) 3067 bnx2x_pf_disable(bp); 3068 3069 /* Disable HW interrupts, NAPI */ 3070 bnx2x_netif_stop(bp, 1); 3071 /* Delete all NAPI objects */ 3072 bnx2x_del_all_napi(bp); 3073 if (CNIC_LOADED(bp)) 3074 bnx2x_del_all_napi_cnic(bp); 3075 /* Release IRQs */ 3076 bnx2x_free_irq(bp); 3077 3078 /* Report UNLOAD_DONE to MCP */ 3079 bnx2x_send_unload_done(bp, false); 3080 } 3081 3082 /* 3083 * At this stage no more interrupts will arrive so we may safely clean 3084 * the queueable objects here in case they failed to get cleaned so far. 3085 */ 3086 if (IS_PF(bp)) 3087 bnx2x_squeeze_objects(bp); 3088 3089 /* There should be no more pending SP commands at this stage */ 3090 bp->sp_state = 0; 3091 3092 bp->port.pmf = 0; 3093 3094 /* clear pending work in rtnl task */ 3095 bp->sp_rtnl_state = 0; 3096 smp_mb(); 3097 3098 /* Free SKBs, SGEs, TPA pool and driver internals */ 3099 bnx2x_free_skbs(bp); 3100 if (CNIC_LOADED(bp)) 3101 bnx2x_free_skbs_cnic(bp); 3102 for_each_rx_queue(bp, i) 3103 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 3104 3105 bnx2x_free_fp_mem(bp); 3106 if (CNIC_LOADED(bp)) 3107 bnx2x_free_fp_mem_cnic(bp); 3108 3109 if (IS_PF(bp)) { 3110 if (CNIC_LOADED(bp)) 3111 bnx2x_free_mem_cnic(bp); 3112 } 3113 bnx2x_free_mem(bp); 3114 3115 bp->state = BNX2X_STATE_CLOSED; 3116 bp->cnic_loaded = false; 3117 3118 /* Clear driver version indication in shmem */ 3119 if (IS_PF(bp)) 3120 bnx2x_update_mng_version(bp); 3121 3122 /* Check if there are pending parity attentions. If there are - set 3123 * RECOVERY_IN_PROGRESS. 
3124 */ 3125 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) { 3126 bnx2x_set_reset_in_progress(bp); 3127 3128 /* Set RESET_IS_GLOBAL if needed */ 3129 if (global) 3130 bnx2x_set_reset_global(bp); 3131 } 3132 3133 /* The last driver must disable a "close the gate" if there is no 3134 * parity attention or "process kill" pending. 3135 */ 3136 if (IS_PF(bp) && 3137 !bnx2x_clear_pf_load(bp) && 3138 bnx2x_reset_is_done(bp, BP_PATH(bp))) 3139 bnx2x_disable_close_the_gate(bp); 3140 3141 DP(NETIF_MSG_IFUP, "Ending NIC unload\n"); 3142 3143 return 0; 3144 } 3145 3146 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) 3147 { 3148 u16 pmcsr; 3149 3150 /* If there is no power capability, silently succeed */ 3151 if (!bp->pdev->pm_cap) { 3152 BNX2X_DEV_INFO("No power capability. Breaking.\n"); 3153 return 0; 3154 } 3155 3156 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr); 3157 3158 switch (state) { 3159 case PCI_D0: 3160 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, 3161 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | 3162 PCI_PM_CTRL_PME_STATUS)); 3163 3164 if (pmcsr & PCI_PM_CTRL_STATE_MASK) 3165 /* delay required during transition out of D3hot */ 3166 msleep(20); 3167 break; 3168 3169 case PCI_D3hot: 3170 /* If there are other clients above don't 3171 shut down the power */ 3172 if (atomic_read(&bp->pdev->enable_cnt) != 1) 3173 return 0; 3174 /* Don't shut down the power for emulation and FPGA */ 3175 if (CHIP_REV_IS_SLOW(bp)) 3176 return 0; 3177 3178 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3179 pmcsr |= 3; 3180 3181 if (bp->wol) 3182 pmcsr |= PCI_PM_CTRL_PME_ENABLE; 3183 3184 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, 3185 pmcsr); 3186 3187 /* No more memory access after this point until 3188 * device is brought back to D0. 3189 */ 3190 break; 3191 3192 default: 3193 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state); 3194 return -EINVAL; 3195 } 3196 return 0; 3197 } 3198 3199 /* 3200 * net_device service functions 3201 */ 3202 static int bnx2x_poll(struct napi_struct *napi, int budget) 3203 { 3204 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, 3205 napi); 3206 struct bnx2x *bp = fp->bp; 3207 int rx_work_done; 3208 u8 cos; 3209 3210 #ifdef BNX2X_STOP_ON_ERROR 3211 if (unlikely(bp->panic)) { 3212 napi_complete(napi); 3213 return 0; 3214 } 3215 #endif 3216 for_each_cos_in_tx_queue(fp, cos) 3217 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) 3218 bnx2x_tx_int(bp, fp->txdata_ptr[cos]); 3219 3220 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0; 3221 3222 if (rx_work_done < budget) { 3223 /* No need to update SB for FCoE L2 ring as long as 3224 * it's connected to the default SB and the SB 3225 * has been updated when NAPI was scheduled. 3226 */ 3227 if (IS_FCOE_FP(fp)) { 3228 napi_complete(napi); 3229 } else { 3230 bnx2x_update_fpsb_idx(fp); 3231 /* bnx2x_has_rx_work() reads the status block, 3232 * thus we need to ensure that status block indices 3233 * have been actually read (bnx2x_update_fpsb_idx) 3234 * prior to this check (bnx2x_has_rx_work) so that 3235 * we won't write the "newer" value of the status block 3236 * to IGU (if there was a DMA right after 3237 * bnx2x_has_rx_work and if there is no rmb, the memory 3238 * reading (bnx2x_update_fpsb_idx) may be postponed 3239 * to right before bnx2x_ack_sb). In this case there 3240 * will never be another interrupt until there is 3241 * another update of the status block, while there 3242 * is still unhandled work. 
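* To summarize, the required order is: bnx2x_update_fpsb_idx() (read the status block indices), then rmb(), then the bnx2x_has_rx_work()/bnx2x_has_tx_work() re-check, and only then bnx2x_ack_sb() (write the index back to the IGU).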
3243 */ 3244 rmb(); 3245 3246 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 3247 napi_complete(napi); 3248 /* Re-enable interrupts */ 3249 DP(NETIF_MSG_RX_STATUS, 3250 "Update index to %d\n", fp->fp_hc_idx); 3251 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 3252 le16_to_cpu(fp->fp_hc_idx), 3253 IGU_INT_ENABLE, 1); 3254 } else { 3255 rx_work_done = budget; 3256 } 3257 } 3258 } 3259 3260 return rx_work_done; 3261 } 3262 3263 /* we split the first BD into headers and data BDs 3264 * to ease the pain of our fellow microcode engineers 3265 * we use one mapping for both BDs 3266 */ 3267 static u16 bnx2x_tx_split(struct bnx2x *bp, 3268 struct bnx2x_fp_txdata *txdata, 3269 struct sw_tx_bd *tx_buf, 3270 struct eth_tx_start_bd **tx_bd, u16 hlen, 3271 u16 bd_prod) 3272 { 3273 struct eth_tx_start_bd *h_tx_bd = *tx_bd; 3274 struct eth_tx_bd *d_tx_bd; 3275 dma_addr_t mapping; 3276 int old_len = le16_to_cpu(h_tx_bd->nbytes); 3277 3278 /* first fix first BD */ 3279 h_tx_bd->nbytes = cpu_to_le16(hlen); 3280 3281 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n", 3282 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo); 3283 3284 /* now get a new data BD 3285 * (after the pbd) and fill it */ 3286 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 3287 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; 3288 3289 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), 3290 le32_to_cpu(h_tx_bd->addr_lo)) + hlen; 3291 3292 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 3293 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 3294 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); 3295 3296 /* this marks the BD as one that has no individual mapping */ 3297 tx_buf->flags |= BNX2X_TSO_SPLIT_BD; 3298 3299 DP(NETIF_MSG_TX_QUEUED, 3300 "TSO split data size is %d (%x:%x)\n", 3301 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); 3302 3303 /* update tx_bd */ 3304 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd; 3305 3306 return bd_prod; 3307 } 3308 3309 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32))) 3310 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16))) 3311 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) 3312 { 3313 __sum16 tsum = (__force __sum16) csum; 3314 3315 if (fix > 0) 3316 tsum = ~csum_fold(csum_sub((__force __wsum) csum, 3317 csum_partial(t_header - fix, fix, 0))); 3318 3319 else if (fix < 0) 3320 tsum = ~csum_fold(csum_add((__force __wsum) csum, 3321 csum_partial(t_header, -fix, 0))); 3322 3323 return bswab16(tsum); 3324 } 3325 3326 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) 3327 { 3328 u32 rc; 3329 __u8 prot = 0; 3330 __be16 protocol; 3331 3332 if (skb->ip_summed != CHECKSUM_PARTIAL) 3333 return XMIT_PLAIN; 3334 3335 protocol = vlan_get_protocol(skb); 3336 if (protocol == htons(ETH_P_IPV6)) { 3337 rc = XMIT_CSUM_V6; 3338 prot = ipv6_hdr(skb)->nexthdr; 3339 } else { 3340 rc = XMIT_CSUM_V4; 3341 prot = ip_hdr(skb)->protocol; 3342 } 3343 3344 if (!CHIP_IS_E1x(bp) && skb->encapsulation) { 3345 if (inner_ip_hdr(skb)->version == 6) { 3346 rc |= XMIT_CSUM_ENC_V6; 3347 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 3348 rc |= XMIT_CSUM_TCP; 3349 } else { 3350 rc |= XMIT_CSUM_ENC_V4; 3351 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP) 3352 rc |= XMIT_CSUM_TCP; 3353 } 3354 } 3355 if (prot == IPPROTO_TCP) 3356 rc |= XMIT_CSUM_TCP; 3357 3358 if (skb_is_gso(skb)) { 3359 if (skb_is_gso_v6(skb)) { 3360 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP); 3361 if (rc & XMIT_CSUM_ENC) 3362 rc |= XMIT_GSO_ENC_V6; 3363 } else { 3364 rc |= (XMIT_GSO_V4 
| XMIT_CSUM_TCP); 3365 if (rc & XMIT_CSUM_ENC) 3366 rc |= XMIT_GSO_ENC_V4; 3367 } 3368 } 3369 3370 return rc; 3371 } 3372 3373 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */ 3374 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4 3375 3376 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ 3377 #define BNX2X_NUM_TSO_WIN_SUB_BDS 3 3378 3379 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) 3380 /* check if a packet requires linearization (the packet is too fragmented); 3381 no need to check fragmentation if page size > 8K (there will be no 3382 violation of FW restrictions) */ 3383 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, 3384 u32 xmit_type) 3385 { 3386 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS; 3387 int to_copy = 0, hlen = 0; 3388 3389 if (xmit_type & XMIT_GSO_ENC) 3390 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS; 3391 3392 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) { 3393 if (xmit_type & XMIT_GSO) { 3394 unsigned short lso_mss = skb_shinfo(skb)->gso_size; 3395 int wnd_size = MAX_FETCH_BD - num_tso_win_sub; 3396 /* Number of windows to check */ 3397 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; 3398 int wnd_idx = 0; 3399 int frag_idx = 0; 3400 u32 wnd_sum = 0; 3401 3402 /* Headers length */ 3403 if (xmit_type & XMIT_GSO_ENC) 3404 hlen = (int)(skb_inner_transport_header(skb) - 3405 skb->data) + 3406 inner_tcp_hdrlen(skb); 3407 else 3408 hlen = (int)(skb_transport_header(skb) - 3409 skb->data) + tcp_hdrlen(skb); 3410 3411 /* Amount of data (w/o headers) in the linear part of the SKB */ 3412 first_bd_sz = skb_headlen(skb) - hlen; 3413 3414 wnd_sum = first_bd_sz; 3415 3416 /* Calculate the first sum - it's special */ 3417 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) 3418 wnd_sum += 3419 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]); 3420 3421 /* If there was data in the linear part - check it */ 3422 if (first_bd_sz > 0) { 3423 if (unlikely(wnd_sum < lso_mss)) { 3424 to_copy = 1; 3425 goto exit_lbl; 3426 } 3427 3428 wnd_sum -= first_bd_sz; 3429 } 3430 3431 /* Others are easier: run through the frag list and 3432 check all windows */ 3433 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { 3434 wnd_sum += 3435 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]); 3436 3437 if (unlikely(wnd_sum < lso_mss)) { 3438 to_copy = 1; 3439 break; 3440 } 3441 wnd_sum -= 3442 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]); 3443 } 3444 } else { 3445 /* in the non-LSO case a too fragmented packet should 3446 always be linearized */ 3447 to_copy = 1; 3448 } 3449 } 3450 3451 exit_lbl: 3452 if (unlikely(to_copy)) 3453 DP(NETIF_MSG_TX_QUEUED, 3454 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n", 3455 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", 3456 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); 3457 3458 return to_copy; 3459 } 3460 #endif 3461 3462 /** 3463 * bnx2x_set_pbd_gso - update PBD in GSO case. 
3464 * 3465 * @skb: packet skb 3466 * @pbd: parse BD 3467 * @xmit_type: xmit flags 3468 */ 3469 static void bnx2x_set_pbd_gso(struct sk_buff *skb, 3470 struct eth_tx_parse_bd_e1x *pbd, 3471 u32 xmit_type) 3472 { 3473 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 3474 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq); 3475 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb)); 3476 3477 if (xmit_type & XMIT_GSO_V4) { 3478 pbd->ip_id = bswab16(ip_hdr(skb)->id); 3479 pbd->tcp_pseudo_csum = 3480 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, 3481 ip_hdr(skb)->daddr, 3482 0, IPPROTO_TCP, 0)); 3483 } else { 3484 pbd->tcp_pseudo_csum = 3485 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3486 &ipv6_hdr(skb)->daddr, 3487 0, IPPROTO_TCP, 0)); 3488 } 3489 3490 pbd->global_data |= 3491 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); 3492 } 3493 3494 /** 3495 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length 3496 * 3497 * @bp: driver handle 3498 * @skb: packet skb 3499 * @parsing_data: data to be updated 3500 * @xmit_type: xmit flags 3501 * 3502 * 57712/578xx related, when skb has encapsulation 3503 */ 3504 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb, 3505 u32 *parsing_data, u32 xmit_type) 3506 { 3507 *parsing_data |= 3508 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) << 3509 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & 3510 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; 3511 3512 if (xmit_type & XMIT_CSUM_TCP) { 3513 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) << 3514 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 3515 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; 3516 3517 return skb_inner_transport_header(skb) + 3518 inner_tcp_hdrlen(skb) - skb->data; 3519 } 3520 3521 /* We support checksum offload for TCP and UDP only. 3522 * No need to pass the UDP header length - it's a constant. 3523 */ 3524 return skb_inner_transport_header(skb) + 3525 sizeof(struct udphdr) - skb->data; 3526 } 3527 3528 /** 3529 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length 3530 * 3531 * @bp: driver handle 3532 * @skb: packet skb 3533 * @parsing_data: data to be updated 3534 * @xmit_type: xmit flags 3535 * 3536 * 57712/578xx related 3537 */ 3538 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, 3539 u32 *parsing_data, u32 xmit_type) 3540 { 3541 *parsing_data |= 3542 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << 3543 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & 3544 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; 3545 3546 if (xmit_type & XMIT_CSUM_TCP) { 3547 *parsing_data |= ((tcp_hdrlen(skb) / 4) << 3548 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 3549 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; 3550 3551 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; 3552 } 3553 /* We support checksum offload for TCP and UDP only. 3554 * No need to pass the UDP header length - it's a constant. 
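* (The value returned below is the L4 payload offset in bytes from the start of the packet, e.g. 14 (Ethernet) + 20 (IPv4) + 8 (UDP) = 42 for a plain, untagged UDP frame.)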
3555 */ 3556 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data; 3557 } 3558 3559 /* set FW indication according to inner or outer protocols if tunneled */ 3560 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, 3561 struct eth_tx_start_bd *tx_start_bd, 3562 u32 xmit_type) 3563 { 3564 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 3565 3566 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6)) 3567 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6; 3568 3569 if (!(xmit_type & XMIT_CSUM_TCP)) 3570 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; 3571 } 3572 3573 /** 3574 * bnx2x_set_pbd_csum - update PBD with checksum and return header length 3575 * 3576 * @bp: driver handle 3577 * @skb: packet skb 3578 * @pbd: parse BD to be updated 3579 * @xmit_type: xmit flags 3580 */ 3581 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, 3582 struct eth_tx_parse_bd_e1x *pbd, 3583 u32 xmit_type) 3584 { 3585 u8 hlen = (skb_network_header(skb) - skb->data) >> 1; 3586 3587 /* for now NS flag is not used in Linux */ 3588 pbd->global_data = 3589 cpu_to_le16(hlen | 3590 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << 3591 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 3592 3593 pbd->ip_hlen_w = (skb_transport_header(skb) - 3594 skb_network_header(skb)) >> 1; 3595 3596 hlen += pbd->ip_hlen_w; 3597 3598 /* We support checksum offload for TCP and UDP only */ 3599 if (xmit_type & XMIT_CSUM_TCP) 3600 hlen += tcp_hdrlen(skb) / 2; 3601 else 3602 hlen += sizeof(struct udphdr) / 2; 3603 3604 pbd->total_hlen_w = cpu_to_le16(hlen); 3605 hlen = hlen*2; 3606 3607 if (xmit_type & XMIT_CSUM_TCP) { 3608 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check); 3609 3610 } else { 3611 s8 fix = SKB_CS_OFF(skb); /* signed! 
*/ 3612 3613 DP(NETIF_MSG_TX_QUEUED, 3614 "hlen %d fix %d csum before fix %x\n", 3615 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb)); 3616 3617 /* HW bug: fixup the CSUM */ 3618 pbd->tcp_pseudo_csum = 3619 bnx2x_csum_fix(skb_transport_header(skb), 3620 SKB_CS(skb), fix); 3621 3622 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", 3623 pbd->tcp_pseudo_csum); 3624 } 3625 3626 return hlen; 3627 } 3628 3629 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, 3630 struct eth_tx_parse_bd_e2 *pbd_e2, 3631 struct eth_tx_parse_2nd_bd *pbd2, 3632 u16 *global_data, 3633 u32 xmit_type) 3634 { 3635 u16 hlen_w = 0; 3636 u8 outerip_off, outerip_len = 0; 3637 3638 /* from outer IP to transport */ 3639 hlen_w = (skb_inner_transport_header(skb) - 3640 skb_network_header(skb)) >> 1; 3641 3642 /* transport len */ 3643 hlen_w += inner_tcp_hdrlen(skb) >> 1; 3644 3645 pbd2->fw_ip_hdr_to_payload_w = hlen_w; 3646 3647 /* outer IP header info */ 3648 if (xmit_type & XMIT_CSUM_V4) { 3649 struct iphdr *iph = ip_hdr(skb); 3650 u32 csum = (__force u32)(~iph->check) - 3651 (__force u32)iph->tot_len - 3652 (__force u32)iph->frag_off; 3653 3654 outerip_len = iph->ihl << 1; 3655 3656 pbd2->fw_ip_csum_wo_len_flags_frag = 3657 bswab16(csum_fold((__force __wsum)csum)); 3658 } else { 3659 pbd2->fw_ip_hdr_to_payload_w = 3660 hlen_w - ((sizeof(struct ipv6hdr)) >> 1); 3661 pbd_e2->data.tunnel_data.flags |= 3662 ETH_TUNNEL_DATA_IPV6_OUTER; 3663 } 3664 3665 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); 3666 3667 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); 3668 3669 /* inner IP header info */ 3670 if (xmit_type & XMIT_CSUM_ENC_V4) { 3671 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); 3672 3673 pbd_e2->data.tunnel_data.pseudo_csum = 3674 bswab16(~csum_tcpudp_magic( 3675 inner_ip_hdr(skb)->saddr, 3676 inner_ip_hdr(skb)->daddr, 3677 0, IPPROTO_TCP, 0)); 3678 } else { 3679 pbd_e2->data.tunnel_data.pseudo_csum = 3680 bswab16(~csum_ipv6_magic( 3681 &inner_ipv6_hdr(skb)->saddr, 3682 &inner_ipv6_hdr(skb)->daddr, 3683 0, IPPROTO_TCP, 0)); 3684 } 3685 3686 outerip_off = (skb_network_header(skb) - skb->data) >> 1; 3687 3688 *global_data |= 3689 outerip_off | 3690 (outerip_len << 3691 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) | 3692 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << 3693 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT); 3694 3695 if (ip_hdr(skb)->protocol == IPPROTO_UDP) { 3696 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1); 3697 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1; 3698 } 3699 } 3700 3701 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data, 3702 u32 xmit_type) 3703 { 3704 struct ipv6hdr *ipv6; 3705 3706 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6))) 3707 return; 3708 3709 if (xmit_type & XMIT_GSO_ENC_V6) 3710 ipv6 = inner_ipv6_hdr(skb); 3711 else /* XMIT_GSO_V6 */ 3712 ipv6 = ipv6_hdr(skb); 3713 3714 if (ipv6->nexthdr == NEXTHDR_IPV6) 3715 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 3716 } 3717 3718 /* called with netif_tx_lock 3719 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call 3720 * netif_wake_queue() 3721 */ 3722 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) 3723 { 3724 struct bnx2x *bp = netdev_priv(dev); 3725 3726 struct netdev_queue *txq; 3727 struct bnx2x_fp_txdata *txdata; 3728 struct sw_tx_bd *tx_buf; 3729 struct eth_tx_start_bd *tx_start_bd, *first_bd; 3730 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; 3731 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 3732 
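/* Sketch of how these parsing BDs are used (see the CHIP_IS_E1x() branches below): pbd_e1x serves the 57710/57711 (E1x) chips, while pbd_e2 - and, for encapsulated packets, pbd2 - serves 57712 and newer; only one family is filled in for any given packet. */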
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 3733 struct eth_tx_parse_2nd_bd *pbd2 = NULL; 3734 u32 pbd_e2_parsing_data = 0; 3735 u16 pkt_prod, bd_prod; 3736 int nbd, txq_index; 3737 dma_addr_t mapping; 3738 u32 xmit_type = bnx2x_xmit_type(bp, skb); 3739 int i; 3740 u8 hlen = 0; 3741 __le16 pkt_size = 0; 3742 struct ethhdr *eth; 3743 u8 mac_type = UNICAST_ADDRESS; 3744 3745 #ifdef BNX2X_STOP_ON_ERROR 3746 if (unlikely(bp->panic)) 3747 return NETDEV_TX_BUSY; 3748 #endif 3749 3750 txq_index = skb_get_queue_mapping(skb); 3751 txq = netdev_get_tx_queue(dev, txq_index); 3752 3753 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0)); 3754 3755 txdata = &bp->bnx2x_txq[txq_index]; 3756 3757 /* enable this debug print to view the transmission queue being used 3758 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", 3759 txq_index, fp_index, txdata_index); */ 3760 3761 /* enable this debug print to view the transmission details 3762 DP(NETIF_MSG_TX_QUEUED, 3763 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", 3764 txdata->cid, fp_index, txdata_index, txdata, fp); */ 3765 3766 if (unlikely(bnx2x_tx_avail(bp, txdata) < 3767 skb_shinfo(skb)->nr_frags + 3768 BDS_PER_TX_PKT + 3769 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) { 3770 /* Handle special storage cases separately */ 3771 if (txdata->tx_ring_size == 0) { 3772 struct bnx2x_eth_q_stats *q_stats = 3773 bnx2x_fp_qstats(bp, txdata->parent_fp); 3774 q_stats->driver_filtered_tx_pkt++; 3775 dev_kfree_skb(skb); 3776 return NETDEV_TX_OK; 3777 } 3778 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; 3779 netif_tx_stop_queue(txq); 3780 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 3781 3782 return NETDEV_TX_BUSY; 3783 } 3784 3785 DP(NETIF_MSG_TX_QUEUED, 3786 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n", 3787 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 3788 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type, 3789 skb->len); 3790 3791 eth = (struct ethhdr *)skb->data; 3792 3793 /* set flag according to packet type (UNICAST_ADDRESS is default)*/ 3794 if (unlikely(is_multicast_ether_addr(eth->h_dest))) { 3795 if (is_broadcast_ether_addr(eth->h_dest)) 3796 mac_type = BROADCAST_ADDRESS; 3797 else 3798 mac_type = MULTICAST_ADDRESS; 3799 } 3800 3801 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) 3802 /* First, check if we need to linearize the skb (due to FW 3803 restrictions). No need to check fragmentation if page size > 8K 3804 (there will be no violation to FW restrictions) */ 3805 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { 3806 /* Statistics of linearization */ 3807 bp->lin_cnt++; 3808 if (skb_linearize(skb) != 0) { 3809 DP(NETIF_MSG_TX_QUEUED, 3810 "SKB linearization failed - silently dropping this SKB\n"); 3811 dev_kfree_skb_any(skb); 3812 return NETDEV_TX_OK; 3813 } 3814 } 3815 #endif 3816 /* Map skb linear data for DMA */ 3817 mapping = dma_map_single(&bp->pdev->dev, skb->data, 3818 skb_headlen(skb), DMA_TO_DEVICE); 3819 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 3820 DP(NETIF_MSG_TX_QUEUED, 3821 "SKB mapping failed - silently dropping this SKB\n"); 3822 dev_kfree_skb_any(skb); 3823 return NETDEV_TX_OK; 3824 } 3825 /* 3826 Please read carefully. First we use one BD which we mark as start, 3827 then we have a parsing info BD (used for TSO or xsum), 3828 and only then we have the rest of the TSO BDs. 
3829 (don't forget to mark the last one as last, 3830 and to unmap only AFTER you write to the BD ...) 3831 And above all, all pbd sizes are in words - NOT DWORDS! 3832 */ 3833 3834 /* get current pkt produced now - advance it just before sending packet 3835 * since mapping of pages may fail and cause packet to be dropped 3836 */ 3837 pkt_prod = txdata->tx_pkt_prod; 3838 bd_prod = TX_BD(txdata->tx_bd_prod); 3839 3840 /* get a tx_buf and first BD 3841 * tx_start_bd may be changed during SPLIT, 3842 * but first_bd will always stay first 3843 */ 3844 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; 3845 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; 3846 first_bd = tx_start_bd; 3847 3848 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 3849 3850 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 3851 if (!(bp->flags & TX_TIMESTAMPING_EN)) { 3852 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n"); 3853 } else if (bp->ptp_tx_skb) { 3854 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n"); 3855 } else { 3856 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3857 /* schedule check for Tx timestamp */ 3858 bp->ptp_tx_skb = skb_get(skb); 3859 bp->ptp_tx_start = jiffies; 3860 schedule_work(&bp->ptp_task); 3861 } 3862 } 3863 3864 /* header nbd: indirectly zero other flags! */ 3865 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT; 3866 3867 /* remember the first BD of the packet */ 3868 tx_buf->first_bd = txdata->tx_bd_prod; 3869 tx_buf->skb = skb; 3870 tx_buf->flags = 0; 3871 3872 DP(NETIF_MSG_TX_QUEUED, 3873 "sending pkt %u @%p next_idx %u bd %u @%p\n", 3874 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); 3875 3876 if (skb_vlan_tag_present(skb)) { 3877 tx_start_bd->vlan_or_ethertype = 3878 cpu_to_le16(skb_vlan_tag_get(skb)); 3879 tx_start_bd->bd_flags.as_bitfield |= 3880 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 3881 } else { 3882 /* when transmitting in a VF, the start BD must hold the ethertype 3883 * for the FW to enforce it 3884 */ 3885 #ifndef BNX2X_STOP_ON_ERROR 3886 if (IS_VF(bp)) 3887 #endif 3888 tx_start_bd->vlan_or_ethertype = 3889 cpu_to_le16(ntohs(eth->h_proto)); 3890 #ifndef BNX2X_STOP_ON_ERROR 3891 else 3892 /* used by FW for packet accounting */ 3893 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); 3894 #endif 3895 } 3896 3897 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ 3898 3899 /* turn on parsing and get a BD */ 3900 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 3901 3902 if (xmit_type & XMIT_CSUM) 3903 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); 3904 3905 if (!CHIP_IS_E1x(bp)) { 3906 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; 3907 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 3908 3909 if (xmit_type & XMIT_CSUM_ENC) { 3910 u16 global_data = 0; 3911 3912 /* Set PBD in enc checksum offload case */ 3913 hlen = bnx2x_set_pbd_csum_enc(bp, skb, 3914 &pbd_e2_parsing_data, 3915 xmit_type); 3916 3917 /* turn on 2nd parsing and get a BD */ 3918 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 3919 3920 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd; 3921 3922 memset(pbd2, 0, sizeof(*pbd2)); 3923 3924 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w = 3925 (skb_inner_network_header(skb) - 3926 skb->data) >> 1; 3927 3928 if (xmit_type & XMIT_GSO_ENC) 3929 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2, 3930 &global_data, 3931 xmit_type); 3932 3933 pbd2->global_data = cpu_to_le16(global_data); 
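/* At this point an encapsulated packet carries three non-data BDs - the start BD, the e2 parsing BD and the 2nd parsing BD; nbd started at 2, so it is incremented below to account for the extra parsing BD. */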
3934 3935 /* add an additional parsing BD indication to the start BD */ 3936 SET_FLAG(tx_start_bd->general_data, 3937 ETH_TX_START_BD_PARSE_NBDS, 1); 3938 /* set encapsulation flag in start BD */ 3939 SET_FLAG(tx_start_bd->general_data, 3940 ETH_TX_START_BD_TUNNEL_EXIST, 1); 3941 3942 tx_buf->flags |= BNX2X_HAS_SECOND_PBD; 3943 3944 nbd++; 3945 } else if (xmit_type & XMIT_CSUM) { 3946 /* Set PBD in checksum offload case w/o encapsulation */ 3947 hlen = bnx2x_set_pbd_csum_e2(bp, skb, 3948 &pbd_e2_parsing_data, 3949 xmit_type); 3950 } 3951 3952 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type); 3953 /* Add the MACs to the parsing BD if this is a VF or if 3954 * Tx Switching is enabled. 3955 */ 3956 if (IS_VF(bp)) { 3957 /* override GRE parameters in BD */ 3958 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, 3959 &pbd_e2->data.mac_addr.src_mid, 3960 &pbd_e2->data.mac_addr.src_lo, 3961 eth->h_source); 3962 3963 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, 3964 &pbd_e2->data.mac_addr.dst_mid, 3965 &pbd_e2->data.mac_addr.dst_lo, 3966 eth->h_dest); 3967 } else { 3968 if (bp->flags & TX_SWITCHING) 3969 bnx2x_set_fw_mac_addr( 3970 &pbd_e2->data.mac_addr.dst_hi, 3971 &pbd_e2->data.mac_addr.dst_mid, 3972 &pbd_e2->data.mac_addr.dst_lo, 3973 eth->h_dest); 3974 #ifdef BNX2X_STOP_ON_ERROR 3975 /* Enforce security is always set in Stop on Error - 3976 * the source MAC should be present in the parsing BD 3977 */ 3978 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, 3979 &pbd_e2->data.mac_addr.src_mid, 3980 &pbd_e2->data.mac_addr.src_lo, 3981 eth->h_source); 3982 #endif 3983 } 3984 3985 SET_FLAG(pbd_e2_parsing_data, 3986 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type); 3987 } else { 3988 u16 global_data = 0; 3989 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; 3990 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 3991 /* Set PBD in checksum offload case */ 3992 if (xmit_type & XMIT_CSUM) 3993 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); 3994 3995 SET_FLAG(global_data, 3996 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); 3997 pbd_e1x->global_data |= cpu_to_le16(global_data); 3998 } 3999 4000 /* Setup the data pointer of the first BD of the packet */ 4001 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 4002 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 4003 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); 4004 pkt_size = tx_start_bd->nbytes; 4005 4006 DP(NETIF_MSG_TX_QUEUED, 4007 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n", 4008 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, 4009 le16_to_cpu(tx_start_bd->nbytes), 4010 tx_start_bd->bd_flags.as_bitfield, 4011 le16_to_cpu(tx_start_bd->vlan_or_ethertype)); 4012 4013 if (xmit_type & XMIT_GSO) { 4014 4015 DP(NETIF_MSG_TX_QUEUED, 4016 "TSO packet len %d hlen %d total len %d tso size %d\n", 4017 skb->len, hlen, skb_headlen(skb), 4018 skb_shinfo(skb)->gso_size); 4019 4020 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; 4021 4022 if (unlikely(skb_headlen(skb) > hlen)) { 4023 nbd++; 4024 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, 4025 &tx_start_bd, hlen, 4026 bd_prod); 4027 } 4028 if (!CHIP_IS_E1x(bp)) 4029 pbd_e2_parsing_data |= 4030 (skb_shinfo(skb)->gso_size << 4031 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & 4032 ETH_TX_PARSE_BD_E2_LSO_MSS; 4033 else 4034 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); 4035 } 4036 4037 /* Set the PBD's parsing_data field if not zero 4038 * (for the chips newer than 57711). 4039 */
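/* Illustrative sketch (added): for LSO, the MSS is packed into the accumulated parsing data above, e.g. with a 1460-byte MSS: pbd_e2_parsing_data |= (1460 << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & ETH_TX_PARSE_BD_E2_LSO_MSS; the accumulated word is written to the descriptor only once, just below. */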
4040 if (pbd_e2_parsing_data) 4041 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data); 4042 4043 tx_data_bd = (struct eth_tx_bd *)tx_start_bd; 4044 4045 /* Handle fragmented skb */ 4046 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 4047 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4048 4049 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, 4050 skb_frag_size(frag), DMA_TO_DEVICE); 4051 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 4052 unsigned int pkts_compl = 0, bytes_compl = 0; 4053 4054 DP(NETIF_MSG_TX_QUEUED, 4055 "Unable to map page - dropping packet...\n"); 4056 4057 /* we need to unmap all buffers already mapped 4058 * for this SKB; 4059 * first_bd->nbd needs to be properly updated 4060 * before the call to bnx2x_free_tx_pkt 4061 */ 4062 first_bd->nbd = cpu_to_le16(nbd); 4063 bnx2x_free_tx_pkt(bp, txdata, 4064 TX_BD(txdata->tx_pkt_prod), 4065 &pkts_compl, &bytes_compl); 4066 return NETDEV_TX_OK; 4067 } 4068 4069 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 4070 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; 4071 if (total_pkt_bd == NULL) 4072 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; 4073 4074 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 4075 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 4076 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag)); 4077 le16_add_cpu(&pkt_size, skb_frag_size(frag)); 4078 nbd++; 4079 4080 DP(NETIF_MSG_TX_QUEUED, 4081 "frag %d bd @%p addr (%x:%x) nbytes %d\n", 4082 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, 4083 le16_to_cpu(tx_data_bd->nbytes)); 4084 } 4085 4086 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); 4087 4088 /* update with actual num BDs */ 4089 first_bd->nbd = cpu_to_le16(nbd); 4090 4091 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 4092 4093 /* now send a tx doorbell, counting the next BD 4094 * if the packet contains or ends with it 4095 */ 4096 if (TX_BD_POFF(bd_prod) < nbd) 4097 nbd++; 4098 4099 /* total_pkt_bytes should be set on the first data BD if 4100 * it's not an LSO packet and there is more than one 4101 * data BD. In this case pkt_size is limited by an MTU value. 4102 * However we prefer to set it for an LSO packet (while we don't 4103 * have to) in order to save some CPU cycles in the non-LSO 4104 * case, where we care much more about them. 4105 */ 4106 if (total_pkt_bd != NULL) 4107 total_pkt_bd->total_pkt_bytes = pkt_size; 4108 4109 if (pbd_e1x) 4110 DP(NETIF_MSG_TX_QUEUED, 4111 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n", 4112 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, 4113 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, 4114 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, 4115 le16_to_cpu(pbd_e1x->total_hlen_w)); 4116 if (pbd_e2) 4117 DP(NETIF_MSG_TX_QUEUED, 4118 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n", 4119 pbd_e2, 4120 pbd_e2->data.mac_addr.dst_hi, 4121 pbd_e2->data.mac_addr.dst_mid, 4122 pbd_e2->data.mac_addr.dst_lo, 4123 pbd_e2->data.mac_addr.src_hi, 4124 pbd_e2->data.mac_addr.src_mid, 4125 pbd_e2->data.mac_addr.src_lo, 4126 pbd_e2->parsing_data); 4127 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); 4128 4129 netdev_tx_sent_queue(txq, skb->len); 4130 4131 skb_tx_timestamp(skb); 4132 4133 txdata->tx_pkt_prod++; 4134 /* 4135 * Make sure that the BD data is updated before updating the producer 4136 * since FW might read the BD right after the producer is updated. 4137 * This is only applicable for weak-ordered memory model archs such 4138 * as IA-64. The following barrier is also mandatory since the FW 4139 * assumes packets must have BDs. 4140 */
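/* Illustrative ordering sketch (added): 1) write BDs (stores above); 2) wmb() - BDs visible before the producer update; 3) tx_db.data.prod += nbd; 4) barrier() - producer visible before the doorbell I/O write; 5) DOORBELL() - from this point FW may fetch the BDs. */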
4141 wmb(); 4142 4143 txdata->tx_db.data.prod += nbd; 4144 barrier(); 4145 4146 DOORBELL(bp, txdata->cid, txdata->tx_db.raw); 4147 4148 mmiowb(); 4149 4150 txdata->tx_bd_prod += nbd; 4151 4152 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) { 4153 netif_tx_stop_queue(txq); 4154 4155 /* paired memory barrier is in bnx2x_tx_int(), we have to keep 4156 * ordering of set_bit() in netif_tx_stop_queue() and read of 4157 * fp->bd_tx_cons */ 4158 smp_mb(); 4159 4160 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; 4161 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT) 4162 netif_tx_wake_queue(txq); 4163 } 4164 txdata->tx_pkt++; 4165 4166 return NETDEV_TX_OK; 4167 } 4168 4169 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default) 4170 { 4171 int mfw_vn = BP_FW_MB_IDX(bp); 4172 u32 tmp; 4173 4174 /* If the shmem shouldn't affect configuration, reflect a 1:1 mapping */ 4175 if (!IS_MF_BD(bp)) { 4176 int i; 4177 4178 for (i = 0; i < BNX2X_MAX_PRIORITY; i++) 4179 c2s_map[i] = i; 4180 *c2s_default = 0; 4181 4182 return; 4183 } 4184 4185 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]); 4186 tmp = (__force u32)be32_to_cpu((__force __be32)tmp); 4187 c2s_map[0] = tmp & 0xff; 4188 c2s_map[1] = (tmp >> 8) & 0xff; 4189 c2s_map[2] = (tmp >> 16) & 0xff; 4190 c2s_map[3] = (tmp >> 24) & 0xff; 4191 4192 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]); 4193 tmp = (__force u32)be32_to_cpu((__force __be32)tmp); 4194 c2s_map[4] = tmp & 0xff; 4195 c2s_map[5] = (tmp >> 8) & 0xff; 4196 c2s_map[6] = (tmp >> 16) & 0xff; 4197 c2s_map[7] = (tmp >> 24) & 0xff; 4198 4199 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]); 4200 tmp = (__force u32)be32_to_cpu((__force __be32)tmp); 4201 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff; 4202 } 4203
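/* Illustrative example (added): if c2s_pcp_map_lower reads back as 0x03020100 after the endianness fixup above, then c2s_map[0..3] = {0x00, 0x01, 0x02, 0x03} - one outer PCP value per byte, least significant byte first. */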
4204 /** 4205 * bnx2x_setup_tc - routine to configure net_device for multi tc 4206 * 4207 * @dev: net device to configure 4208 * @num_tc: number of traffic classes to enable 4209 * 4210 * callback connected to the ndo_setup_tc function pointer 4211 */ 4212 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) 4213 { 4214 struct bnx2x *bp = netdev_priv(dev); 4215 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def; 4216 int cos, prio, count, offset; 4217 4218 /* setup tc must be called under rtnl lock */ 4219 ASSERT_RTNL(); 4220 4221 /* no traffic classes requested. Aborting */ 4222 if (!num_tc) { 4223 netdev_reset_tc(dev); 4224 return 0; 4225 } 4226 4227 /* requested to support too many traffic classes */ 4228 if (num_tc > bp->max_cos) { 4229 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n", 4230 num_tc, bp->max_cos); 4231 return -EINVAL; 4232 } 4233 4234 /* declare the number of supported traffic classes */ 4235 if (netdev_set_num_tc(dev, num_tc)) { 4236 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc); 4237 return -EINVAL; 4238 } 4239 4240 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def); 4241 4242 /* configure priority to traffic class mapping */ 4243 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { 4244 int outer_prio = c2s_map[prio]; 4245 4246 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]); 4247 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 4248 "mapping priority %d to tc %d\n", 4249 outer_prio, bp->prio_to_cos[outer_prio]); 4250 } 4251 4252 /* Use this configuration to differentiate tc0 from other COSes. 4253 This can be used for ETS or PFC, and saves the effort of setting 4254 up a multi-class queueing discipline or negotiating DCBX with a switch: 4255 netdev_set_prio_tc_map(dev, 0, 0); 4256 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0); 4257 for (prio = 1; prio < 16; prio++) { 4258 netdev_set_prio_tc_map(dev, prio, 1); 4259 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1); 4260 } */ 4261 4262 /* configure traffic class to transmission queue mapping */ 4263 for (cos = 0; cos < bp->max_cos; cos++) { 4264 count = BNX2X_NUM_ETH_QUEUES(bp); 4265 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp); 4266 netdev_set_tc_queue(dev, cos, count, offset); 4267 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 4268 "mapping tc %d to offset %d count %d\n", 4269 cos, offset, count); 4270 } 4271 4272 return 0; 4273 } 4274 4275 /* called with rtnl_lock */ 4276 int bnx2x_change_mac_addr(struct net_device *dev, void *p) 4277 { 4278 struct sockaddr *addr = p; 4279 struct bnx2x *bp = netdev_priv(dev); 4280 int rc = 0; 4281 4282 if (!is_valid_ether_addr(addr->sa_data)) { 4283 BNX2X_ERR("Requested MAC address is not valid\n"); 4284 return -EINVAL; 4285 } 4286 4287 if (IS_MF_STORAGE_ONLY(bp)) { 4288 BNX2X_ERR("Can't change address on STORAGE ONLY function\n"); 4289 return -EINVAL; 4290 } 4291 4292 if (netif_running(dev)) { 4293 rc = bnx2x_set_eth_mac(bp, false); 4294 if (rc) 4295 return rc; 4296 } 4297 4298 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 4299 4300 if (netif_running(dev)) 4301 rc = bnx2x_set_eth_mac(bp, true); 4302 4303 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) 4304 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); 4305 4306 return rc; 4307 } 4308 4309 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) 4310 { 4311 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); 4312 struct bnx2x_fastpath *fp = &bp->fp[fp_index]; 4313 u8 cos; 4314 4315 /* Common */ 4316 4317 if (IS_FCOE_IDX(fp_index)) { 4318 memset(sb, 0, sizeof(union host_hc_status_block)); 4319 fp->status_blk_mapping = 0; 4320 } else { 4321 /* status blocks */ 4322 if (!CHIP_IS_E1x(bp)) 4323 BNX2X_PCI_FREE(sb->e2_sb, 4324 bnx2x_fp(bp, fp_index, 4325 status_blk_mapping), 4326 sizeof(struct host_hc_status_block_e2)); 4327 else 4328 BNX2X_PCI_FREE(sb->e1x_sb, 4329 bnx2x_fp(bp, fp_index, 4330 status_blk_mapping), 4331 sizeof(struct host_hc_status_block_e1x)); 4332 } 4333 4334 /* Rx */ 4335 if (!skip_rx_queue(bp, fp_index)) { 4336 bnx2x_free_rx_bds(fp); 4337 4338 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 4339 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); 4340 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), 4341 bnx2x_fp(bp, fp_index, rx_desc_mapping), 4342 sizeof(struct eth_rx_bd) * NUM_RX_BD); 4343 4344 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), 4345
bnx2x_fp(bp, fp_index, rx_comp_mapping), 4346 sizeof(struct eth_fast_path_rx_cqe) * 4347 NUM_RCQ_BD); 4348 4349 /* SGE ring */ 4350 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); 4351 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), 4352 bnx2x_fp(bp, fp_index, rx_sge_mapping), 4353 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 4354 } 4355 4356 /* Tx */ 4357 if (!skip_tx_queue(bp, fp_index)) { 4358 /* fastpath tx rings: tx_buf tx_desc */ 4359 for_each_cos_in_tx_queue(fp, cos) { 4360 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; 4361 4362 DP(NETIF_MSG_IFDOWN, 4363 "freeing tx memory of fp %d cos %d cid %d\n", 4364 fp_index, cos, txdata->cid); 4365 4366 BNX2X_FREE(txdata->tx_buf_ring); 4367 BNX2X_PCI_FREE(txdata->tx_desc_ring, 4368 txdata->tx_desc_mapping, 4369 sizeof(union eth_tx_bd_types) * NUM_TX_BD); 4370 } 4371 } 4372 /* end of fastpath */ 4373 } 4374 4375 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp) 4376 { 4377 int i; 4378 for_each_cnic_queue(bp, i) 4379 bnx2x_free_fp_mem_at(bp, i); 4380 } 4381 4382 void bnx2x_free_fp_mem(struct bnx2x *bp) 4383 { 4384 int i; 4385 for_each_eth_queue(bp, i) 4386 bnx2x_free_fp_mem_at(bp, i); 4387 } 4388 4389 static void set_sb_shortcuts(struct bnx2x *bp, int index) 4390 { 4391 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); 4392 if (!CHIP_IS_E1x(bp)) { 4393 bnx2x_fp(bp, index, sb_index_values) = 4394 (__le16 *)status_blk.e2_sb->sb.index_values; 4395 bnx2x_fp(bp, index, sb_running_index) = 4396 (__le16 *)status_blk.e2_sb->sb.running_index; 4397 } else { 4398 bnx2x_fp(bp, index, sb_index_values) = 4399 (__le16 *)status_blk.e1x_sb->sb.index_values; 4400 bnx2x_fp(bp, index, sb_running_index) = 4401 (__le16 *)status_blk.e1x_sb->sb.running_index; 4402 } 4403 } 4404 4405 /* Returns the number of actually allocated BDs */ 4406 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, 4407 int rx_ring_size) 4408 { 4409 struct bnx2x *bp = fp->bp; 4410 u16 ring_prod, cqe_ring_prod; 4411 int i, failure_cnt = 0; 4412 4413 fp->rx_comp_cons = 0; 4414 cqe_ring_prod = ring_prod = 0; 4415 4416 /* This routine is called only during init, so 4417 * fp->eth_q_stats.rx_skb_alloc_failed = 0 4418 */ 4419 for (i = 0; i < rx_ring_size; i++) { 4420 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) { 4421 failure_cnt++; 4422 continue; 4423 } 4424 ring_prod = NEXT_RX_IDX(ring_prod); 4425 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); 4426 WARN_ON(ring_prod <= (i - failure_cnt)); 4427 } 4428 4429 if (failure_cnt) 4430 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n", 4431 i - failure_cnt, fp->index); 4432 4433 fp->rx_bd_prod = ring_prod; 4434 /* Limit the CQE producer by the CQE ring size */ 4435 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, 4436 cqe_ring_prod); 4437 4438 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; 4439 4440 return i - failure_cnt; 4441 } 4442 4443 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) 4444 { 4445 int i; 4446 4447 for (i = 1; i <= NUM_RCQ_RINGS; i++) { 4448 struct eth_rx_cqe_next_page *nextpg; 4449 4450 nextpg = (struct eth_rx_cqe_next_page *) 4451 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; 4452 nextpg->addr_hi = 4453 cpu_to_le32(U64_HI(fp->rx_comp_mapping + 4454 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); 4455 nextpg->addr_lo = 4456 cpu_to_le32(U64_LO(fp->rx_comp_mapping + 4457 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); 4458 } 4459 } 4460
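/* Illustrative note (added): the RCQ above is NUM_RCQ_RINGS contiguous pages, and the last CQE of each page (index RCQ_DESC_CNT * i - 1) is consumed as a "next page" pointer; the (i % NUM_RCQ_RINGS) term makes the final page point back to page 0, closing the ring. */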
4461 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) 4462 { 4463 union host_hc_status_block *sb; 4464 struct bnx2x_fastpath *fp = &bp->fp[index]; 4465 int ring_size = 0; 4466 u8 cos; 4467 int rx_ring_size = 0; 4468 4469 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) { 4470 rx_ring_size = MIN_RX_SIZE_NONTPA; 4471 bp->rx_ring_size = rx_ring_size; 4472 } else if (!bp->rx_ring_size) { 4473 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); 4474 4475 if (CHIP_IS_E3(bp)) { 4476 u32 cfg = SHMEM_RD(bp, 4477 dev_info.port_hw_config[BP_PORT(bp)]. 4478 default_cfg); 4479 4480 /* Decrease ring size for 1G functions */ 4481 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) == 4482 PORT_HW_CFG_NET_SERDES_IF_SGMII) 4483 rx_ring_size /= 10; 4484 } 4485 4486 /* allocate at least the number of buffers required by FW */ 4487 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : 4488 MIN_RX_SIZE_TPA, rx_ring_size); 4489 4490 bp->rx_ring_size = rx_ring_size; 4491 } else /* if rx_ring_size specified - use it */ 4492 rx_ring_size = bp->rx_ring_size; 4493 4494 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size); 4495 4496 /* Common */ 4497 sb = &bnx2x_fp(bp, index, status_blk); 4498 4499 if (!IS_FCOE_IDX(index)) { 4500 /* status blocks */ 4501 if (!CHIP_IS_E1x(bp)) { 4502 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), 4503 sizeof(struct host_hc_status_block_e2)); 4504 if (!sb->e2_sb) 4505 goto alloc_mem_err; 4506 } else { 4507 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), 4508 sizeof(struct host_hc_status_block_e1x)); 4509 if (!sb->e1x_sb) 4510 goto alloc_mem_err; 4511 } 4512 } 4513 4514 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to 4515 * set shortcuts for it. 4516 */ 4517 if (!IS_FCOE_IDX(index)) 4518 set_sb_shortcuts(bp, index); 4519 4520 /* Tx */ 4521 if (!skip_tx_queue(bp, index)) { 4522 /* fastpath tx rings: tx_buf tx_desc */ 4523 for_each_cos_in_tx_queue(fp, cos) { 4524 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; 4525 4526 DP(NETIF_MSG_IFUP, 4527 "allocating tx memory of fp %d cos %d\n", 4528 index, cos); 4529 4530 txdata->tx_buf_ring = kcalloc(NUM_TX_BD, 4531 sizeof(struct sw_tx_bd), 4532 GFP_KERNEL); 4533 if (!txdata->tx_buf_ring) 4534 goto alloc_mem_err; 4535 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping, 4536 sizeof(union eth_tx_bd_types) * NUM_TX_BD); 4537 if (!txdata->tx_desc_ring) 4538 goto alloc_mem_err; 4539 } 4540 } 4541 4542 /* Rx */ 4543 if (!skip_rx_queue(bp, index)) { 4544 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 4545 bnx2x_fp(bp, index, rx_buf_ring) = 4546 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL); 4547 if (!bnx2x_fp(bp, index, rx_buf_ring)) 4548 goto alloc_mem_err; 4549 bnx2x_fp(bp, index, rx_desc_ring) = 4550 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping), 4551 sizeof(struct eth_rx_bd) * NUM_RX_BD); 4552 if (!bnx2x_fp(bp, index, rx_desc_ring)) 4553 goto alloc_mem_err; 4554 4555 /* Seed all CQEs with 1s */ 4556 bnx2x_fp(bp, index, rx_comp_ring) = 4557 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping), 4558 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD); 4559 if (!bnx2x_fp(bp, index, rx_comp_ring)) 4560 goto alloc_mem_err; 4561 4562 /* SGE ring */ 4563 bnx2x_fp(bp, index, rx_page_ring) = 4564 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page), 4565 GFP_KERNEL); 4566 if (!bnx2x_fp(bp, index, rx_page_ring)) 4567 goto alloc_mem_err; 4568 bnx2x_fp(bp, index, rx_sge_ring) = 4569 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping), 4570 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 4571 if (!bnx2x_fp(bp, index, rx_sge_ring)) 4572 goto alloc_mem_err; 4573 /* RX BD ring */
4574 bnx2x_set_next_page_rx_bd(fp); 4575 4576 /* CQ ring */ 4577 bnx2x_set_next_page_rx_cq(fp); 4578 4579 /* BDs */ 4580 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size); 4581 if (ring_size < rx_ring_size) 4582 goto alloc_mem_err; 4583 } 4584 4585 return 0; 4586 4587 /* handles low memory cases */ 4588 alloc_mem_err: 4589 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n", 4590 index, ring_size); 4591 /* FW will drop all packets if the queue is not big enough. 4592 * In these cases we disable the queue. 4593 * Min size is different for OOO, TPA and non-TPA queues. 4594 */ 4595 if (ring_size < (fp->mode == TPA_MODE_DISABLED ? 4596 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { 4597 /* release memory allocated for this queue */ 4598 bnx2x_free_fp_mem_at(bp, index); 4599 return -ENOMEM; 4600 } 4601 return 0; 4602 } 4603 4604 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp) 4605 { 4606 if (!NO_FCOE(bp)) 4607 /* FCoE */ 4608 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp))) 4609 /* we will fail the load process instead of marking 4610 * NO_FCOE_FLAG 4611 */ 4612 return -ENOMEM; 4613 4614 return 0; 4615 } 4616 4617 static int bnx2x_alloc_fp_mem(struct bnx2x *bp) 4618 { 4619 int i; 4620 4621 /* 1. Allocate FP for leading - fatal if error 4622 * 2. Allocate RSS - fix number of queues if error 4623 */ 4624 4625 /* leading */ 4626 if (bnx2x_alloc_fp_mem_at(bp, 0)) 4627 return -ENOMEM; 4628 4629 /* RSS */ 4630 for_each_nondefault_eth_queue(bp, i) 4631 if (bnx2x_alloc_fp_mem_at(bp, i)) 4632 break; 4633 4634 /* handle memory failures */ 4635 if (i != BNX2X_NUM_ETH_QUEUES(bp)) { 4636 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; 4637 4638 WARN_ON(delta < 0); 4639 bnx2x_shrink_eth_fp(bp, delta); 4640 if (CNIC_SUPPORT(bp)) 4641 /* move non-eth FPs next to the last eth FP; 4642 * must be done in this order: 4643 * FCOE_IDX < FWD_IDX < OOO_IDX 4644 */ 4645 4646 /* move the FCoE fp even if NO_FCOE_FLAG is on */ 4647 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); 4648 bp->num_ethernet_queues -= delta; 4649 bp->num_queues = bp->num_ethernet_queues + 4650 bp->num_cnic_queues; 4651 BNX2X_ERR("Adjusted num of queues from %d to %d\n", 4652 bp->num_queues + delta, bp->num_queues); 4653 } 4654 4655 return 0; 4656 } 4657 4658 void bnx2x_free_mem_bp(struct bnx2x *bp) 4659 { 4660 int i; 4661 4662 for (i = 0; i < bp->fp_array_size; i++) 4663 kfree(bp->fp[i].tpa_info); 4664 kfree(bp->fp); 4665 kfree(bp->sp_objs); 4666 kfree(bp->fp_stats); 4667 kfree(bp->bnx2x_txq); 4668 kfree(bp->msix_table); 4669 kfree(bp->ilt); 4670 } 4671 4672 int bnx2x_alloc_mem_bp(struct bnx2x *bp) 4673 { 4674 struct bnx2x_fastpath *fp; 4675 struct msix_entry *tbl; 4676 struct bnx2x_ilt *ilt; 4677 int msix_table_size = 0; 4678 int fp_array_size, txq_array_size; 4679 int i; 4680 4681 /* 4682 * The biggest MSI-X table we might need is the maximum number of fast 4683 * path IGU SBs plus the default SB (for PF only). 4684 */
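/* Illustrative sizing example (added): a PF whose function owns 16 fastpath IGU status blocks would need 16 + 1 = 17 MSI-X entries; a VF has no default-SB entry of its own here. */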
4685 msix_table_size = bp->igu_sb_cnt; 4686 if (IS_PF(bp)) 4687 msix_table_size++; 4688 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size); 4689 4690 /* fp array: RSS plus CNIC related L2 queues */ 4691 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp); 4692 bp->fp_array_size = fp_array_size; 4693 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size); 4694 4695 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); 4696 if (!fp) 4697 goto alloc_err; 4698 for (i = 0; i < bp->fp_array_size; i++) { 4699 fp[i].tpa_info = 4700 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2, 4701 sizeof(struct bnx2x_agg_info), GFP_KERNEL); 4702 if (!(fp[i].tpa_info)) 4703 goto alloc_err; 4704 } 4705 4706 bp->fp = fp; 4707 4708 /* allocate sp objs */ 4709 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs), 4710 GFP_KERNEL); 4711 if (!bp->sp_objs) 4712 goto alloc_err; 4713 4714 /* allocate fp_stats */ 4715 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats), 4716 GFP_KERNEL); 4717 if (!bp->fp_stats) 4718 goto alloc_err; 4719 4720 /* Allocate memory for the transmission queues array */ 4721 txq_array_size = 4722 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp); 4723 BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size); 4724 4725 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata), 4726 GFP_KERNEL); 4727 if (!bp->bnx2x_txq) 4728 goto alloc_err; 4729 4730 /* msix table */ 4731 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL); 4732 if (!tbl) 4733 goto alloc_err; 4734 bp->msix_table = tbl; 4735 4736 /* ilt */ 4737 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL); 4738 if (!ilt) 4739 goto alloc_err; 4740 bp->ilt = ilt; 4741 4742 return 0; 4743 alloc_err: 4744 bnx2x_free_mem_bp(bp); 4745 return -ENOMEM; 4746 } 4747 4748 int bnx2x_reload_if_running(struct net_device *dev) 4749 { 4750 struct bnx2x *bp = netdev_priv(dev); 4751 4752 if (unlikely(!netif_running(dev))) 4753 return 0; 4754 4755 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); 4756 return bnx2x_nic_load(bp, LOAD_NORMAL); 4757 } 4758 4759 int bnx2x_get_cur_phy_idx(struct bnx2x *bp) 4760 { 4761 u32 sel_phy_idx = 0; 4762 if (bp->link_params.num_phys <= 1) 4763 return INT_PHY; 4764 4765 if (bp->link_vars.link_up) { 4766 sel_phy_idx = EXT_PHY1; 4767 /* In case link is SERDES, check if the EXT_PHY2 is the one */ 4768 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 4769 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) 4770 sel_phy_idx = EXT_PHY2; 4771 } else { 4772 4773 switch (bnx2x_phy_selection(&bp->link_params)) { 4774 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 4775 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 4776 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 4777 sel_phy_idx = EXT_PHY1; 4778 break; 4779 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 4780 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 4781 sel_phy_idx = EXT_PHY2; 4782 break; 4783 } 4784 } 4785 4786 return sel_phy_idx; 4787 } 4788 int bnx2x_get_link_cfg_idx(struct bnx2x *bp) 4789 { 4790 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); 4791 /* 4792 * The selected active PHY is always after swapping (in case PHY 4793 * swapping is enabled).
So when swapping is enabled, we need to reverse 4794 * the configuration. 4795 */ 4796 4797 if (bp->link_params.multi_phy_config & 4798 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 4799 if (sel_phy_idx == EXT_PHY1) 4800 sel_phy_idx = EXT_PHY2; 4801 else if (sel_phy_idx == EXT_PHY2) 4802 sel_phy_idx = EXT_PHY1; 4803 } 4804 return LINK_CONFIG_IDX(sel_phy_idx); 4805 } 4806 4807 #ifdef NETDEV_FCOE_WWNN 4808 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) 4809 { 4810 struct bnx2x *bp = netdev_priv(dev); 4811 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 4812 4813 switch (type) { 4814 case NETDEV_FCOE_WWNN: 4815 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi, 4816 cp->fcoe_wwn_node_name_lo); 4817 break; 4818 case NETDEV_FCOE_WWPN: 4819 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi, 4820 cp->fcoe_wwn_port_name_lo); 4821 break; 4822 default: 4823 BNX2X_ERR("Wrong WWN type requested - %d\n", type); 4824 return -EINVAL; 4825 } 4826 4827 return 0; 4828 } 4829 #endif 4830 4831 /* called with rtnl_lock */ 4832 int bnx2x_change_mtu(struct net_device *dev, int new_mtu) 4833 { 4834 struct bnx2x *bp = netdev_priv(dev); 4835 4836 if (pci_num_vf(bp->pdev)) { 4837 DP(BNX2X_MSG_IOV, "VFs are enabled, cannot change MTU\n"); 4838 return -EPERM; 4839 } 4840 4841 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 4842 BNX2X_ERR("Can't change MTU during parity recovery\n"); 4843 return -EAGAIN; 4844 } 4845 4846 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || 4847 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) { 4848 BNX2X_ERR("Can't support requested MTU size\n"); 4849 return -EINVAL; 4850 } 4851 4852 /* This does not race with packet allocation 4853 * because the actual alloc size is 4854 * only updated as part of load 4855 */ 4856 dev->mtu = new_mtu; 4857 4858 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) 4859 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); 4860 4861 return bnx2x_reload_if_running(dev); 4862 } 4863 4864 netdev_features_t bnx2x_fix_features(struct net_device *dev, 4865 netdev_features_t features) 4866 { 4867 struct bnx2x *bp = netdev_priv(dev); 4868 4869 if (pci_num_vf(bp->pdev)) { 4870 netdev_features_t changed = dev->features ^ features; 4871 4872 /* Revert the requested changes in features if they 4873 * would require internal reload of PF in bnx2x_set_features().
4874 */ 4875 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) { 4876 features &= ~NETIF_F_RXCSUM; 4877 features |= dev->features & NETIF_F_RXCSUM; 4878 } 4879 4880 if (changed & NETIF_F_LOOPBACK) { 4881 features &= ~NETIF_F_LOOPBACK; 4882 features |= dev->features & NETIF_F_LOOPBACK; 4883 } 4884 } 4885 4886 /* TPA requires Rx CSUM offloading */ 4887 if (!(features & NETIF_F_RXCSUM)) { 4888 features &= ~NETIF_F_LRO; 4889 features &= ~NETIF_F_GRO; 4890 } 4891 4892 return features; 4893 } 4894 4895 int bnx2x_set_features(struct net_device *dev, netdev_features_t features) 4896 { 4897 struct bnx2x *bp = netdev_priv(dev); 4898 netdev_features_t changes = features ^ dev->features; 4899 bool bnx2x_reload = false; 4900 int rc; 4901 4902 /* VFs or non SRIOV PFs should be able to change loopback feature */ 4903 if (!pci_num_vf(bp->pdev)) { 4904 if (features & NETIF_F_LOOPBACK) { 4905 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { 4906 bp->link_params.loopback_mode = LOOPBACK_BMAC; 4907 bnx2x_reload = true; 4908 } 4909 } else { 4910 if (bp->link_params.loopback_mode != LOOPBACK_NONE) { 4911 bp->link_params.loopback_mode = LOOPBACK_NONE; 4912 bnx2x_reload = true; 4913 } 4914 } 4915 } 4916 4917 /* if GRO is changed while LRO is enabled, don't force a reload */ 4918 if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO)) 4919 changes &= ~NETIF_F_GRO; 4920 4921 /* if GRO is changed while HW TPA is off, don't force a reload */ 4922 if ((changes & NETIF_F_GRO) && bp->disable_tpa) 4923 changes &= ~NETIF_F_GRO; 4924 4925 if (changes) 4926 bnx2x_reload = true; 4927 4928 if (bnx2x_reload) { 4929 if (bp->recovery_state == BNX2X_RECOVERY_DONE) { 4930 dev->features = features; 4931 rc = bnx2x_reload_if_running(dev); 4932 return rc ? rc : 1; 4933 } 4934 /* else: bnx2x_nic_load() will be called at end of recovery */ 4935 } 4936 4937 return 0; 4938 } 4939 4940 void bnx2x_tx_timeout(struct net_device *dev) 4941 { 4942 struct bnx2x *bp = netdev_priv(dev); 4943 4944 #ifdef BNX2X_STOP_ON_ERROR 4945 if (!bp->panic) 4946 bnx2x_panic(); 4947 #endif 4948 4949 /* This allows the netif to be shutdown gracefully before resetting */ 4950 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0); 4951 } 4952 4953 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) 4954 { 4955 struct net_device *dev = pci_get_drvdata(pdev); 4956 struct bnx2x *bp; 4957 4958 if (!dev) { 4959 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); 4960 return -ENODEV; 4961 } 4962 bp = netdev_priv(dev); 4963 4964 rtnl_lock(); 4965 4966 pci_save_state(pdev); 4967 4968 if (!netif_running(dev)) { 4969 rtnl_unlock(); 4970 return 0; 4971 } 4972 4973 netif_device_detach(dev); 4974 4975 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); 4976 4977 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); 4978 4979 rtnl_unlock(); 4980 4981 return 0; 4982 } 4983 4984 int bnx2x_resume(struct pci_dev *pdev) 4985 { 4986 struct net_device *dev = pci_get_drvdata(pdev); 4987 struct bnx2x *bp; 4988 int rc; 4989 4990 if (!dev) { 4991 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); 4992 return -ENODEV; 4993 } 4994 bp = netdev_priv(dev); 4995 4996 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 4997 BNX2X_ERR("Handling parity error recovery. 
Try again later\n"); 4998 return -EAGAIN; 4999 } 5000 5001 rtnl_lock(); 5002 5003 pci_restore_state(pdev); 5004 5005 if (!netif_running(dev)) { 5006 rtnl_unlock(); 5007 return 0; 5008 } 5009 5010 bnx2x_set_power_state(bp, PCI_D0); 5011 netif_device_attach(dev); 5012 5013 rc = bnx2x_nic_load(bp, LOAD_OPEN); 5014 5015 rtnl_unlock(); 5016 5017 return rc; 5018 } 5019 5020 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, 5021 u32 cid) 5022 { 5023 if (!cxt) { 5024 BNX2X_ERR("bad context pointer %p\n", cxt); 5025 return; 5026 } 5027 5028 /* ustorm cxt validation */ 5029 cxt->ustorm_ag_context.cdu_usage = 5030 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), 5031 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); 5032 /* xcontext validation */ 5033 cxt->xstorm_ag_context.cdu_reserved = 5034 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), 5035 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); 5036 } 5037 5038 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, 5039 u8 fw_sb_id, u8 sb_index, 5040 u8 ticks) 5041 { 5042 u32 addr = BAR_CSTRORM_INTMEM + 5043 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); 5044 REG_WR8(bp, addr, ticks); 5045 DP(NETIF_MSG_IFUP, 5046 "port %x fw_sb_id %d sb_index %d ticks %d\n", 5047 port, fw_sb_id, sb_index, ticks); 5048 } 5049 5050 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, 5051 u16 fw_sb_id, u8 sb_index, 5052 u8 disable) 5053 { 5054 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); 5055 u32 addr = BAR_CSTRORM_INTMEM + 5056 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); 5057 u8 flags = REG_RD8(bp, addr); 5058 /* clear and set */ 5059 flags &= ~HC_INDEX_DATA_HC_ENABLED; 5060 flags |= enable_flag; 5061 REG_WR8(bp, addr, flags); 5062 DP(NETIF_MSG_IFUP, 5063 "port %x fw_sb_id %d sb_index %d disable %d\n", 5064 port, fw_sb_id, sb_index, disable); 5065 } 5066 5067 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, 5068 u8 sb_index, u8 disable, u16 usec) 5069 { 5070 int port = BP_PORT(bp); 5071 u8 ticks = usec / BNX2X_BTR; 5072 5073 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); 5074 5075 disable = disable ? 1 : (usec ? 0 : 1); 5076 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); 5077 } 5078 5079 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, 5080 u32 verbose) 5081 { 5082 smp_mb__before_atomic(); 5083 set_bit(flag, &bp->sp_rtnl_state); 5084 smp_mb__after_atomic(); 5085 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n", 5086 flag); 5087 schedule_delayed_work(&bp->sp_rtnl_task, 0); 5088 } 5089 EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl); 5090
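/* Illustrative usage note (added): bnx2x_update_coalesce_sb_index() converts the requested interval to HC ticks as ticks = usec / BNX2X_BTR, and treats usec == 0 as an implicit disable, per disable = disable ? 1 : (usec ? 0 : 1). */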