/* bnx2x_cmn.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/gro.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, sizeof(phy_fw_ver));
		/* This may become truncated. */
		scnprintf(buf, buf_len,
			  "%sbc %d.%d.%d%s%s",
			  bp->fw_ver,
			  (bp->common.bc_ver & 0xff0000) >> 16,
			  (bp->common.bc_ver & 0xff00) >> 8,
			  (bp->common.bc_ver & 0xff),
			  ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
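/* Layout note (derived from the unwinding below, not from firmware docs):
 * each packet on the TX ring occupies a start BD, one parse BD (plus an
 * optional second parse BD), an optional TSO "split" BD whose length is
 * folded into the first unmap, and one BD per page fragment.
 * bnx2x_free_tx_pkt() walks these in order, unmapping the DMA as it goes.
 */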
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		dev_kfree_skb_any(skb);
	}

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	/* Ensure subsequent loads occur after hw_cons */
	smp_rmb();

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

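/* The SGE mask is a bit vector with BIT_VEC64_ELEM_SZ (64) bits per element,
 * one bit per SGE ring entry. bnx2x_update_sge_prod() clears the bits of the
 * pages consumed by an aggregation and then advances rx_sge_prod only across
 * elements that are completely consumed, re-arming each such element to all
 * ones as it goes (this is a summary of the code below, not of hardware
 * behaviour).
 */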
static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

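/* TPA start, in short: the pre-allocated empty buffer kept in the
 * aggregation bin is mapped and placed in the producer slot, while the
 * buffer holding the first segment of the aggregation is parked in the bin
 * (still mapped) until the matching TPA_STOP CQE arrives. This mirrors the
 * steps performed below.
 */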
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);

		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
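
/* For illustration (assuming plain 20-byte IPv4 and TCP headers): with a
 * timestamp option present, hdrs_len = 14 (ETH_HLEN) + 20 + 20 + 12 = 66,
 * so bnx2x_set_gro_params() ends up with gso_size = len_on_bd - 66.
 */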
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 * @num_of_coalesced_segs: count of segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

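/* SGE buffers are carved out of a higher-order page kept in fp->page_pool:
 * each call hands out one SGE_PAGE_SIZE chunk at pool->offset, taking an
 * extra page reference while more chunks remain and dropping the pool's
 * pointer once the page is exhausted (a summary of the helper below).
 */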
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	struct bnx2x_alloc_pool *pool = &fp->page_pool;
	dma_addr_t mapping;

	if (!pool->page) {
		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
		if (unlikely(!pool->page))
			return -ENOMEM;

		pool->offset = 0;
	}

	mapping = dma_map_page(&bp->pdev->dev, pool->page,
			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = pool->page;
	sw_buf->offset = pool->offset;

	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	pool->offset += SGE_PAGE_SIZE;
	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
		get_page(pool->page);
	else
		pool->page = NULL;
	return 0;
}

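/* In GRO mode each SGE page is split into gro_size-sized frags so that no
 * skb fragment spans an MSS boundary; tpa_info->full_page is the largest
 * multiple of gro_size that fits in SGE_PAGES (see bnx2x_tpa_start()).
 * This is how the code below behaves; the rationale is an interpretation.
 */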
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page,
					   old_rx_pg.offset, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;

			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;

				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page,
						   old_rx_pg.offset + offset,
						   len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static struct sk_buff *
bnx2x_build_skb(const struct bnx2x_fastpath *fp, void *data)
{
	struct sk_buff *skb;

	if (fp->rx_frag_size)
		skb = build_skb(data, fp->rx_frag_size);
	else
		skb = slab_build_skb(data);
	return skb;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		skb_free_frag(data);
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
			return (void *)__get_free_page(gfp_mask);

		return napi_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

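/* The helpers below prepare a firmware-aggregated packet for
 * tcp_gro_complete(): bnx2x_gro_csum() resets the network header and lets
 * the protocol-specific callback seed th->check with the TCP pseudo-header
 * checksum, so the aggregated skb carries the checksum state the stack
 * expects for GSO packets (the latter part is an interpretation, not a
 * statement from the original code).
 */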
#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_reset_network_header(skb);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			netdev_WARN_ONCE(bp->dev,
					 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
					 be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = bnx2x_build_skb(fp, data);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			bnx2x_frag_free(fp, new_data);
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

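/* Main RX completion loop: each CQE is either a slowpath event, a TPA
 * start/stop marker, or a regular fast-path completion that becomes an skb
 * handed to napi_gro_receive() (an overview of the loop below).
 */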
bnx2x_rx_int(struct bnx2x_fastpath * fp,int budget)894a8f47eb7Sstephen hemminger static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
895adfc5217SJeff Kirsher {
896adfc5217SJeff Kirsher struct bnx2x *bp = fp->bp;
897adfc5217SJeff Kirsher u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
89875b29459SDmitry Kravkov u16 sw_comp_cons, sw_comp_prod;
899adfc5217SJeff Kirsher int rx_pkt = 0;
90075b29459SDmitry Kravkov union eth_rx_cqe *cqe;
90175b29459SDmitry Kravkov struct eth_fast_path_rx_cqe *cqe_fp;
902adfc5217SJeff Kirsher
903adfc5217SJeff Kirsher #ifdef BNX2X_STOP_ON_ERROR
904adfc5217SJeff Kirsher if (unlikely(bp->panic))
905adfc5217SJeff Kirsher return 0;
906adfc5217SJeff Kirsher #endif
907b3529744SEric W. Biederman if (budget <= 0)
908b3529744SEric W. Biederman return rx_pkt;
909adfc5217SJeff Kirsher
910adfc5217SJeff Kirsher bd_cons = fp->rx_bd_cons;
911adfc5217SJeff Kirsher bd_prod = fp->rx_bd_prod;
912adfc5217SJeff Kirsher bd_prod_fw = bd_prod;
913adfc5217SJeff Kirsher sw_comp_cons = fp->rx_comp_cons;
914adfc5217SJeff Kirsher sw_comp_prod = fp->rx_comp_prod;
915adfc5217SJeff Kirsher
91675b29459SDmitry Kravkov comp_ring_cons = RCQ_BD(sw_comp_cons);
91775b29459SDmitry Kravkov cqe = &fp->rx_comp_ring[comp_ring_cons];
91875b29459SDmitry Kravkov cqe_fp = &cqe->fast_path_cqe;
919adfc5217SJeff Kirsher
920adfc5217SJeff Kirsher DP(NETIF_MSG_RX_STATUS,
92175b29459SDmitry Kravkov "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
922adfc5217SJeff Kirsher
92375b29459SDmitry Kravkov while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
924adfc5217SJeff Kirsher struct sw_rx_bd *rx_buf = NULL;
925adfc5217SJeff Kirsher struct sk_buff *skb;
926adfc5217SJeff Kirsher u8 cqe_fp_flags;
927adfc5217SJeff Kirsher enum eth_rx_cqe_type cqe_fp_type;
928621b4d66SDmitry Kravkov u16 len, pad, queue;
929e52fcb24SEric Dumazet u8 *data;
930bd5cef03STom Herbert u32 rxhash;
9315495ab75STom Herbert enum pkt_hash_types rxhash_type;
932adfc5217SJeff Kirsher
933adfc5217SJeff Kirsher #ifdef BNX2X_STOP_ON_ERROR
934adfc5217SJeff Kirsher if (unlikely(bp->panic))
935adfc5217SJeff Kirsher return 0;
936adfc5217SJeff Kirsher #endif
937adfc5217SJeff Kirsher
938adfc5217SJeff Kirsher bd_prod = RX_BD(bd_prod);
939adfc5217SJeff Kirsher bd_cons = RX_BD(bd_cons);
940adfc5217SJeff Kirsher
9419aaae044Swenxiong@linux.vnet.ibm.com /* A rmb() is required to ensure that the CQE is not read
9429aaae044Swenxiong@linux.vnet.ibm.com * before it is written by the adapter DMA. PCI ordering
9439aaae044Swenxiong@linux.vnet.ibm.com * rules will make sure the other fields are written before
9449aaae044Swenxiong@linux.vnet.ibm.com * the marker at the end of struct eth_fast_path_rx_cqe
9459aaae044Swenxiong@linux.vnet.ibm.com * but without rmb() a weakly ordered processor can process
9469aaae044Swenxiong@linux.vnet.ibm.com * stale data. Without the barrier TPA state-machine might
9479aaae044Swenxiong@linux.vnet.ibm.com * enter inconsistent state and kernel stack might be
9489aaae044Swenxiong@linux.vnet.ibm.com * provided with incorrect packet description - these lead
9499aaae044Swenxiong@linux.vnet.ibm.com * to various kernel crashed.
9509aaae044Swenxiong@linux.vnet.ibm.com */
9519aaae044Swenxiong@linux.vnet.ibm.com rmb();
9529aaae044Swenxiong@linux.vnet.ibm.com
953adfc5217SJeff Kirsher cqe_fp_flags = cqe_fp->type_error_flags;
954adfc5217SJeff Kirsher cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
955adfc5217SJeff Kirsher
95651c1a580SMerav Sicron DP(NETIF_MSG_RX_STATUS,
95751c1a580SMerav Sicron "CQE type %x err %x status %x queue %x vlan %x len %u\n",
95851c1a580SMerav Sicron CQE_TYPE(cqe_fp_flags),
959adfc5217SJeff Kirsher cqe_fp_flags, cqe_fp->status_flags,
960adfc5217SJeff Kirsher le32_to_cpu(cqe_fp->rss_hash_result),
961621b4d66SDmitry Kravkov le16_to_cpu(cqe_fp->vlan_tag),
962621b4d66SDmitry Kravkov le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
963adfc5217SJeff Kirsher
964adfc5217SJeff Kirsher /* is this a slowpath msg? */
965adfc5217SJeff Kirsher if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
966adfc5217SJeff Kirsher bnx2x_sp_event(fp, cqe);
967adfc5217SJeff Kirsher goto next_cqe;
968e52fcb24SEric Dumazet }
969621b4d66SDmitry Kravkov
970adfc5217SJeff Kirsher rx_buf = &fp->rx_buf_ring[bd_cons];
971e52fcb24SEric Dumazet data = rx_buf->data;
972adfc5217SJeff Kirsher
973adfc5217SJeff Kirsher if (!CQE_TYPE_FAST(cqe_fp_type)) {
974621b4d66SDmitry Kravkov struct bnx2x_agg_info *tpa_info;
975621b4d66SDmitry Kravkov u16 frag_size, pages;
976adfc5217SJeff Kirsher #ifdef BNX2X_STOP_ON_ERROR
977adfc5217SJeff Kirsher /* sanity check */
9787e6b4d44SMichal Schmidt if (fp->mode == TPA_MODE_DISABLED &&
979adfc5217SJeff Kirsher (CQE_TYPE_START(cqe_fp_type) ||
980adfc5217SJeff Kirsher CQE_TYPE_STOP(cqe_fp_type)))
9817e6b4d44SMichal Schmidt BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
982adfc5217SJeff Kirsher CQE_TYPE(cqe_fp_type));
983adfc5217SJeff Kirsher #endif
984adfc5217SJeff Kirsher
985adfc5217SJeff Kirsher if (CQE_TYPE_START(cqe_fp_type)) {
986adfc5217SJeff Kirsher u16 queue = cqe_fp->queue_index;
987adfc5217SJeff Kirsher DP(NETIF_MSG_RX_STATUS,
988adfc5217SJeff Kirsher "calling tpa_start on queue %d\n",
989adfc5217SJeff Kirsher queue);
990adfc5217SJeff Kirsher
991e52fcb24SEric Dumazet bnx2x_tpa_start(fp, queue,
992adfc5217SJeff Kirsher bd_cons, bd_prod,
993adfc5217SJeff Kirsher cqe_fp);
994621b4d66SDmitry Kravkov
995adfc5217SJeff Kirsher goto next_rx;
996621b4d66SDmitry Kravkov }
997621b4d66SDmitry Kravkov queue = cqe->end_agg_cqe.queue_index;
998621b4d66SDmitry Kravkov tpa_info = &fp->tpa_info[queue];
999adfc5217SJeff Kirsher DP(NETIF_MSG_RX_STATUS,
1000adfc5217SJeff Kirsher "calling tpa_stop on queue %d\n",
1001adfc5217SJeff Kirsher queue);
1002adfc5217SJeff Kirsher
1003621b4d66SDmitry Kravkov frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
1004621b4d66SDmitry Kravkov tpa_info->len_on_bd;
1005621b4d66SDmitry Kravkov
1006621b4d66SDmitry Kravkov if (fp->mode == TPA_MODE_GRO)
1007621b4d66SDmitry Kravkov pages = (frag_size + tpa_info->full_page - 1) /
1008621b4d66SDmitry Kravkov tpa_info->full_page;
1009621b4d66SDmitry Kravkov else
1010621b4d66SDmitry Kravkov pages = SGE_PAGE_ALIGN(frag_size) >>
1011621b4d66SDmitry Kravkov SGE_PAGE_SHIFT;
1012621b4d66SDmitry Kravkov
1013621b4d66SDmitry Kravkov bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1014621b4d66SDmitry Kravkov &cqe->end_agg_cqe, comp_ring_cons);
1015adfc5217SJeff Kirsher #ifdef BNX2X_STOP_ON_ERROR
1016adfc5217SJeff Kirsher if (bp->panic)
1017adfc5217SJeff Kirsher return 0;
1018adfc5217SJeff Kirsher #endif
1019adfc5217SJeff Kirsher
1020621b4d66SDmitry Kravkov bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1021adfc5217SJeff Kirsher goto next_cqe;
1022adfc5217SJeff Kirsher }
1023adfc5217SJeff Kirsher /* non TPA */
1024621b4d66SDmitry Kravkov len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1025adfc5217SJeff Kirsher pad = cqe_fp->placement_offset;
1026adfc5217SJeff Kirsher dma_sync_single_for_cpu(&bp->pdev->dev,
1027adfc5217SJeff Kirsher dma_unmap_addr(rx_buf, mapping),
1028adfc5217SJeff Kirsher pad + RX_COPY_THRESH,
1029adfc5217SJeff Kirsher DMA_FROM_DEVICE);
1030e52fcb24SEric Dumazet pad += NET_SKB_PAD;
1031e52fcb24SEric Dumazet prefetch(data + pad); /* speedup eth_type_trans() */
1032adfc5217SJeff Kirsher /* is this an error packet? */
1033adfc5217SJeff Kirsher if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
103451c1a580SMerav Sicron DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1035adfc5217SJeff Kirsher "ERROR flags %x rx packet %u\n",
1036adfc5217SJeff Kirsher cqe_fp_flags, sw_comp_cons);
103715192a8cSBarak Witkowski bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1038adfc5217SJeff Kirsher goto reuse_rx;
1039adfc5217SJeff Kirsher }
1040adfc5217SJeff Kirsher
1041adfc5217SJeff Kirsher /* Since we don't have a jumbo ring
1042adfc5217SJeff Kirsher * copy small packets if mtu > 1500
1043adfc5217SJeff Kirsher */
1044adfc5217SJeff Kirsher if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1045adfc5217SJeff Kirsher (len <= RX_COPY_THRESH)) {
104645abfb10SAlexander Duyck skb = napi_alloc_skb(&fp->napi, len);
1047e52fcb24SEric Dumazet if (skb == NULL) {
104851c1a580SMerav Sicron DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1049e52fcb24SEric Dumazet "ERROR packet dropped because of alloc failure\n");
105015192a8cSBarak Witkowski bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1051adfc5217SJeff Kirsher goto reuse_rx;
1052adfc5217SJeff Kirsher }
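	/* Small frame: copy the payload into the new skb and put the original buffer straight back on the RX ring. */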
1053e52fcb24SEric Dumazet memcpy(skb->data, data + pad, len);
1054e52fcb24SEric Dumazet bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1055e52fcb24SEric Dumazet } else {
1056996dedbaSMichal Schmidt if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1057996dedbaSMichal Schmidt GFP_ATOMIC) == 0)) {
1058adfc5217SJeff Kirsher dma_unmap_single(&bp->pdev->dev,
1059adfc5217SJeff Kirsher dma_unmap_addr(rx_buf, mapping),
1060adfc5217SJeff Kirsher fp->rx_buf_size,
1061adfc5217SJeff Kirsher DMA_FROM_DEVICE);
10628c495270SJakub Kicinski skb = bnx2x_build_skb(fp, data);
1063e52fcb24SEric Dumazet if (unlikely(!skb)) {
1064d46d132cSEric Dumazet bnx2x_frag_free(fp, data);
106515192a8cSBarak Witkowski bnx2x_fp_qstats(bp, fp)->
106615192a8cSBarak Witkowski rx_skb_alloc_failed++;
1067e52fcb24SEric Dumazet goto next_rx;
1068e52fcb24SEric Dumazet }
1069adfc5217SJeff Kirsher skb_reserve(skb, pad);
1070adfc5217SJeff Kirsher } else {
107151c1a580SMerav Sicron DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
107251c1a580SMerav Sicron "ERROR packet dropped because of alloc failure\n");
107315192a8cSBarak Witkowski bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1074adfc5217SJeff Kirsher reuse_rx:
1075e52fcb24SEric Dumazet bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1076adfc5217SJeff Kirsher goto next_rx;
1077adfc5217SJeff Kirsher }
1078036d2df9SDmitry Kravkov }
1079adfc5217SJeff Kirsher
1080e52fcb24SEric Dumazet skb_put(skb, len);
1081adfc5217SJeff Kirsher skb->protocol = eth_type_trans(skb, bp->dev);
1082adfc5217SJeff Kirsher
1083adfc5217SJeff Kirsher /* Set Toeplitz hash for a non-LRO skb */
10845495ab75STom Herbert rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
10855495ab75STom Herbert skb_set_hash(skb, rxhash, rxhash_type);
1086adfc5217SJeff Kirsher
1087adfc5217SJeff Kirsher skb_checksum_none_assert(skb);
1088adfc5217SJeff Kirsher
1089d6cb3e41SEric Dumazet if (bp->dev->features & NETIF_F_RXCSUM)
109015192a8cSBarak Witkowski bnx2x_csum_validate(skb, cqe, fp,
109115192a8cSBarak Witkowski bnx2x_fp_qstats(bp, fp));
1092adfc5217SJeff Kirsher
1093f233cafeSDmitry Kravkov skb_record_rx_queue(skb, fp->rx_queue);
1094adfc5217SJeff Kirsher
1095eeed018cSMichal Kalderon /* Check if this packet was timestamped */
109656daf66dSYuval Mintz if (unlikely(cqe->fast_path_cqe.type_error_flags &
1097eeed018cSMichal Kalderon (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1098eeed018cSMichal Kalderon bnx2x_set_rx_ts(bp, skb);
1099eeed018cSMichal Kalderon
1100adfc5217SJeff Kirsher if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1101adfc5217SJeff Kirsher PARSING_FLAGS_VLAN)
110286a9bad3SPatrick McHardy __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1103adfc5217SJeff Kirsher le16_to_cpu(cqe_fp->vlan_tag));
1104adfc5217SJeff Kirsher
11058f20aa57SDmitry Kravkov napi_gro_receive(&fp->napi, skb);
1106adfc5217SJeff Kirsher next_rx:
1107e52fcb24SEric Dumazet rx_buf->data = NULL;
1108adfc5217SJeff Kirsher
1109adfc5217SJeff Kirsher bd_cons = NEXT_RX_IDX(bd_cons);
1110adfc5217SJeff Kirsher bd_prod = NEXT_RX_IDX(bd_prod);
1111adfc5217SJeff Kirsher bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1112adfc5217SJeff Kirsher rx_pkt++;
1113adfc5217SJeff Kirsher next_cqe:
1114adfc5217SJeff Kirsher sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1115adfc5217SJeff Kirsher sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1116adfc5217SJeff Kirsher
111775b29459SDmitry Kravkov /* mark CQE as free */
111875b29459SDmitry Kravkov BNX2X_SEED_CQE(cqe_fp);
111975b29459SDmitry Kravkov
1120adfc5217SJeff Kirsher if (rx_pkt == budget)
1121adfc5217SJeff Kirsher break;
112275b29459SDmitry Kravkov
112375b29459SDmitry Kravkov comp_ring_cons = RCQ_BD(sw_comp_cons);
112475b29459SDmitry Kravkov cqe = &fp->rx_comp_ring[comp_ring_cons];
112575b29459SDmitry Kravkov cqe_fp = &cqe->fast_path_cqe;
1126adfc5217SJeff Kirsher } /* while */
1127adfc5217SJeff Kirsher
1128adfc5217SJeff Kirsher fp->rx_bd_cons = bd_cons;
1129adfc5217SJeff Kirsher fp->rx_bd_prod = bd_prod_fw;
1130adfc5217SJeff Kirsher fp->rx_comp_cons = sw_comp_cons;
1131adfc5217SJeff Kirsher fp->rx_comp_prod = sw_comp_prod;
1132adfc5217SJeff Kirsher
1133adfc5217SJeff Kirsher /* Update producers */
1134adfc5217SJeff Kirsher bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1135adfc5217SJeff Kirsher fp->rx_sge_prod);
1136adfc5217SJeff Kirsher
1137adfc5217SJeff Kirsher return rx_pkt;
1138adfc5217SJeff Kirsher }
1139adfc5217SJeff Kirsher
1140adfc5217SJeff Kirsher static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1141adfc5217SJeff Kirsher {
1142adfc5217SJeff Kirsher struct bnx2x_fastpath *fp = fp_cookie;
1143adfc5217SJeff Kirsher struct bnx2x *bp = fp->bp;
1144adfc5217SJeff Kirsher u8 cos;
1145adfc5217SJeff Kirsher
114651c1a580SMerav Sicron DP(NETIF_MSG_INTR,
114751c1a580SMerav Sicron "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1148adfc5217SJeff Kirsher fp->index, fp->fw_sb_id, fp->igu_sb_id);
1149ecf01c22SYuval Mintz
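	/* Ack the status block and mask further IGU interrupts for this vector; they are re-armed once the NAPI poll completes. */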
1150adfc5217SJeff Kirsher bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1151adfc5217SJeff Kirsher
1152adfc5217SJeff Kirsher #ifdef BNX2X_STOP_ON_ERROR
1153adfc5217SJeff Kirsher if (unlikely(bp->panic))
1154adfc5217SJeff Kirsher return IRQ_HANDLED;
1155adfc5217SJeff Kirsher #endif
1156adfc5217SJeff Kirsher
1157adfc5217SJeff Kirsher /* Handle Rx and Tx according to MSI-X vector */
1158adfc5217SJeff Kirsher for_each_cos_in_tx_queue(fp, cos)
115965565884SMerav Sicron prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1160adfc5217SJeff Kirsher
1161adfc5217SJeff Kirsher prefetch(&fp->sb_running_index[SM_RX_ID]);
1162f5fbf115SEric Dumazet napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1163adfc5217SJeff Kirsher
1164adfc5217SJeff Kirsher return IRQ_HANDLED;
1165adfc5217SJeff Kirsher }
1166adfc5217SJeff Kirsher
1167adfc5217SJeff Kirsher /* HW Lock for shared dual port PHYs */
1168adfc5217SJeff Kirsher void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1169adfc5217SJeff Kirsher {
1170adfc5217SJeff Kirsher mutex_lock(&bp->port.phy_mutex);
1171adfc5217SJeff Kirsher
1172adfc5217SJeff Kirsher bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1173adfc5217SJeff Kirsher }
1174adfc5217SJeff Kirsher
1175adfc5217SJeff Kirsher void bnx2x_release_phy_lock(struct bnx2x *bp)
1176adfc5217SJeff Kirsher {
1177adfc5217SJeff Kirsher bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1178adfc5217SJeff Kirsher
1179adfc5217SJeff Kirsher mutex_unlock(&bp->port.phy_mutex);
1180adfc5217SJeff Kirsher }
1181adfc5217SJeff Kirsher
1182adfc5217SJeff Kirsher /* calculates MF speed according to current linespeed and MF configuration */
1183adfc5217SJeff Kirsher u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1184adfc5217SJeff Kirsher {
1185adfc5217SJeff Kirsher u16 line_speed = bp->link_vars.line_speed;
1186adfc5217SJeff Kirsher if (IS_MF(bp)) {
1187adfc5217SJeff Kirsher u16 maxCfg = bnx2x_extract_max_cfg(bp,
1188adfc5217SJeff Kirsher bp->mf_config[BP_VN(bp)]);
1189adfc5217SJeff Kirsher
1190adfc5217SJeff Kirsher /* Calculate the current MAX line speed limit for the MF
1191adfc5217SJeff Kirsher * devices
1192adfc5217SJeff Kirsher */
1193da3cc2daSYuval Mintz if (IS_MF_PERCENT_BW(bp))
1194adfc5217SJeff Kirsher line_speed = (line_speed * maxCfg) / 100;
1195adfc5217SJeff Kirsher else { /* SD mode */
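	/* In SD mode maxCfg is given in 100 Mbps units, e.g. a value of 25 caps the reported speed at 2500 Mbps. */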
1196adfc5217SJeff Kirsher u16 vn_max_rate = maxCfg * 100;
1197adfc5217SJeff Kirsher
1198adfc5217SJeff Kirsher if (vn_max_rate < line_speed)
1199adfc5217SJeff Kirsher line_speed = vn_max_rate;
1200adfc5217SJeff Kirsher }
1201adfc5217SJeff Kirsher }
1202adfc5217SJeff Kirsher
1203adfc5217SJeff Kirsher return line_speed;
1204adfc5217SJeff Kirsher }
1205adfc5217SJeff Kirsher
1206adfc5217SJeff Kirsher /**
1207adfc5217SJeff Kirsher * bnx2x_fill_report_data - fill link report data to report
1208adfc5217SJeff Kirsher *
1209adfc5217SJeff Kirsher * @bp: driver handle
1210adfc5217SJeff Kirsher * @data: link state to update
1211adfc5217SJeff Kirsher *
1212adfc5217SJeff Kirsher * Uses non-atomic bit operations because it is called under the mutex.
1213adfc5217SJeff Kirsher */
12141191cb83SEric Dumazet static void bnx2x_fill_report_data(struct bnx2x *bp,
1215adfc5217SJeff Kirsher struct bnx2x_link_report_data *data)
1216adfc5217SJeff Kirsher {
1217adfc5217SJeff Kirsher memset(data, 0, sizeof(*data));
1218adfc5217SJeff Kirsher
12196495d15aSDmitry Kravkov if (IS_PF(bp)) {
122016a5fd92SYuval Mintz /* Fill the report data: effective line speed */
12216495d15aSDmitry Kravkov data->line_speed = bnx2x_get_mf_speed(bp);
1222adfc5217SJeff Kirsher
1223adfc5217SJeff Kirsher /* Link is down */
1224adfc5217SJeff Kirsher if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1225adfc5217SJeff Kirsher __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1226adfc5217SJeff Kirsher &data->link_report_flags);
1227adfc5217SJeff Kirsher
12286495d15aSDmitry Kravkov if (!BNX2X_NUM_ETH_QUEUES(bp))
12296495d15aSDmitry Kravkov __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
12306495d15aSDmitry Kravkov &data->link_report_flags);
12316495d15aSDmitry Kravkov
1232adfc5217SJeff Kirsher /* Full DUPLEX */
1233adfc5217SJeff Kirsher if (bp->link_vars.duplex == DUPLEX_FULL)
12346495d15aSDmitry Kravkov __set_bit(BNX2X_LINK_REPORT_FD,
12356495d15aSDmitry Kravkov &data->link_report_flags);
1236adfc5217SJeff Kirsher
1237adfc5217SJeff Kirsher /* Rx Flow Control is ON */
1238adfc5217SJeff Kirsher if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
12396495d15aSDmitry Kravkov __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
12406495d15aSDmitry Kravkov &data->link_report_flags);
1241adfc5217SJeff Kirsher
1242adfc5217SJeff Kirsher /* Tx Flow Control is ON */
1243adfc5217SJeff Kirsher if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
12446495d15aSDmitry Kravkov __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
12456495d15aSDmitry Kravkov &data->link_report_flags);
12466495d15aSDmitry Kravkov } else { /* VF */
12476495d15aSDmitry Kravkov *data = bp->vf_link_vars;
12486495d15aSDmitry Kravkov }
1249adfc5217SJeff Kirsher }
1250adfc5217SJeff Kirsher
1251adfc5217SJeff Kirsher /**
1252adfc5217SJeff Kirsher * bnx2x_link_report - report link status to OS.
1253adfc5217SJeff Kirsher *
1254adfc5217SJeff Kirsher * @bp: driver handle
1255adfc5217SJeff Kirsher *
1256adfc5217SJeff Kirsher * Calls the __bnx2x_link_report() under the same locking scheme
1257adfc5217SJeff Kirsher * as the link/PHY state managing code to ensure consistent link
1258adfc5217SJeff Kirsher * reporting.
1259adfc5217SJeff Kirsher */
1260adfc5217SJeff Kirsher
1261adfc5217SJeff Kirsher void bnx2x_link_report(struct bnx2x *bp)
1262adfc5217SJeff Kirsher {
1263adfc5217SJeff Kirsher bnx2x_acquire_phy_lock(bp);
1264adfc5217SJeff Kirsher __bnx2x_link_report(bp);
1265adfc5217SJeff Kirsher bnx2x_release_phy_lock(bp);
1266adfc5217SJeff Kirsher }
1267adfc5217SJeff Kirsher
1268adfc5217SJeff Kirsher /**
1269adfc5217SJeff Kirsher * __bnx2x_link_report - report link status to OS.
1270adfc5217SJeff Kirsher *
1271adfc5217SJeff Kirsher * @bp: driver handle
1272adfc5217SJeff Kirsher *
127316a5fd92SYuval Mintz * Non-atomic implementation.
1274adfc5217SJeff Kirsher * Should be called under the phy_lock.
1275adfc5217SJeff Kirsher */
1276adfc5217SJeff Kirsher void __bnx2x_link_report(struct bnx2x *bp)
1277adfc5217SJeff Kirsher {
1278adfc5217SJeff Kirsher struct bnx2x_link_report_data cur_data;
1279adfc5217SJeff Kirsher
1280484c016dSSudarsana Reddy Kalluru if (bp->force_link_down) {
1281484c016dSSudarsana Reddy Kalluru bp->link_vars.link_up = 0;
1282484c016dSSudarsana Reddy Kalluru return;
1283484c016dSSudarsana Reddy Kalluru }
1284484c016dSSudarsana Reddy Kalluru
1285adfc5217SJeff Kirsher /* reread mf_cfg */
1286ad5afc89SAriel Elior if (IS_PF(bp) && !CHIP_IS_E1(bp))
1287adfc5217SJeff Kirsher bnx2x_read_mf_cfg(bp);
1288adfc5217SJeff Kirsher
1289adfc5217SJeff Kirsher /* Read the current link report info */
1290adfc5217SJeff Kirsher bnx2x_fill_report_data(bp, &cur_data);
1291adfc5217SJeff Kirsher
1292adfc5217SJeff Kirsher /* Don't report link down or exactly the same link status twice */
1293adfc5217SJeff Kirsher if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1294adfc5217SJeff Kirsher (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1295adfc5217SJeff Kirsher &bp->last_reported_link.link_report_flags) &&
1296adfc5217SJeff Kirsher test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1297adfc5217SJeff Kirsher &cur_data.link_report_flags)))
1298adfc5217SJeff Kirsher return;
1299adfc5217SJeff Kirsher
1300adfc5217SJeff Kirsher bp->link_cnt++;
1301adfc5217SJeff Kirsher
1302adfc5217SJeff Kirsher /* We are going to report new link parameters now -
1303adfc5217SJeff Kirsher * remember the current data for the next time.
1304adfc5217SJeff Kirsher */
1305adfc5217SJeff Kirsher memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1306adfc5217SJeff Kirsher
13076495d15aSDmitry Kravkov /* propagate status to VFs */
13086495d15aSDmitry Kravkov if (IS_PF(bp))
13096495d15aSDmitry Kravkov bnx2x_iov_link_update(bp);
13106495d15aSDmitry Kravkov
1311adfc5217SJeff Kirsher if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1312adfc5217SJeff Kirsher &cur_data.link_report_flags)) {
1313adfc5217SJeff Kirsher netif_carrier_off(bp->dev);
1314adfc5217SJeff Kirsher netdev_err(bp->dev, "NIC Link is Down\n");
1315adfc5217SJeff Kirsher return;
1316adfc5217SJeff Kirsher } else {
131794f05b0fSJoe Perches const char *duplex;
131894f05b0fSJoe Perches const char *flow;
131994f05b0fSJoe Perches
1320adfc5217SJeff Kirsher netif_carrier_on(bp->dev);
1321adfc5217SJeff Kirsher
1322adfc5217SJeff Kirsher if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1323adfc5217SJeff Kirsher &cur_data.link_report_flags))
132494f05b0fSJoe Perches duplex = "full";
1325adfc5217SJeff Kirsher else
132694f05b0fSJoe Perches duplex = "half";
1327adfc5217SJeff Kirsher
1328adfc5217SJeff Kirsher /* Handle the FC at the end so that only these flags would be
1329adfc5217SJeff Kirsher * possibly set. This way we may easily check if there is no FC
1330adfc5217SJeff Kirsher * enabled.
1331adfc5217SJeff Kirsher */
1332adfc5217SJeff Kirsher if (cur_data.link_report_flags) {
1333adfc5217SJeff Kirsher if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1334adfc5217SJeff Kirsher &cur_data.link_report_flags)) {
1335adfc5217SJeff Kirsher if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1336adfc5217SJeff Kirsher &cur_data.link_report_flags))
133794f05b0fSJoe Perches flow = "ON - receive & transmit";
133894f05b0fSJoe Perches else
133994f05b0fSJoe Perches flow = "ON - receive";
1340adfc5217SJeff Kirsher } else {
134194f05b0fSJoe Perches flow = "ON - transmit";
1342adfc5217SJeff Kirsher }
134394f05b0fSJoe Perches } else {
134494f05b0fSJoe Perches flow = "none";
1345adfc5217SJeff Kirsher }
134694f05b0fSJoe Perches netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
134794f05b0fSJoe Perches cur_data.line_speed, duplex, flow);
1348adfc5217SJeff Kirsher }
1349adfc5217SJeff Kirsher }
1350adfc5217SJeff Kirsher
13511191cb83SEric Dumazet static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
13521191cb83SEric Dumazet {
13531191cb83SEric Dumazet int i;
13541191cb83SEric Dumazet
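	/* Each SGE page ends with a link element; point it at the DMA address of the following page (the last page wraps back to the first) so the ring is chained across pages. */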
13551191cb83SEric Dumazet for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
13561191cb83SEric Dumazet struct eth_rx_sge *sge;
13571191cb83SEric Dumazet
13581191cb83SEric Dumazet sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
13591191cb83SEric Dumazet sge->addr_hi =
13601191cb83SEric Dumazet cpu_to_le32(U64_HI(fp->rx_sge_mapping +
13611191cb83SEric Dumazet BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
13621191cb83SEric Dumazet
13631191cb83SEric Dumazet sge->addr_lo =
13641191cb83SEric Dumazet cpu_to_le32(U64_LO(fp->rx_sge_mapping +
13651191cb83SEric Dumazet BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
13661191cb83SEric Dumazet }
13671191cb83SEric Dumazet }
13681191cb83SEric Dumazet
13691191cb83SEric Dumazet static void bnx2x_free_tpa_pool(struct bnx2x *bp,
13701191cb83SEric Dumazet struct bnx2x_fastpath *fp, int last)
13711191cb83SEric Dumazet {
13721191cb83SEric Dumazet int i;
13731191cb83SEric Dumazet
13741191cb83SEric Dumazet for (i = 0; i < last; i++) {
13751191cb83SEric Dumazet struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
13761191cb83SEric Dumazet struct sw_rx_bd *first_buf = &tpa_info->first_buf;
13771191cb83SEric Dumazet u8 *data = first_buf->data;
13781191cb83SEric Dumazet
13791191cb83SEric Dumazet if (data == NULL) {
13801191cb83SEric Dumazet DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
13811191cb83SEric Dumazet continue;
13821191cb83SEric Dumazet }
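	/* A buffer still in the START state is DMA-mapped for an aggregation in progress, so unmap it before freeing. */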
13831191cb83SEric Dumazet if (tpa_info->tpa_state == BNX2X_TPA_START)
13841191cb83SEric Dumazet dma_unmap_single(&bp->pdev->dev,
13851191cb83SEric Dumazet dma_unmap_addr(first_buf, mapping),
13861191cb83SEric Dumazet fp->rx_buf_size, DMA_FROM_DEVICE);
1387d46d132cSEric Dumazet bnx2x_frag_free(fp, data);
13881191cb83SEric Dumazet first_buf->data = NULL;
13891191cb83SEric Dumazet }
13901191cb83SEric Dumazet }
13911191cb83SEric Dumazet
139255c11941SMerav Sicron void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
139355c11941SMerav Sicron {
139455c11941SMerav Sicron int j;
139555c11941SMerav Sicron
139655c11941SMerav Sicron for_each_rx_queue_cnic(bp, j) {
139755c11941SMerav Sicron struct bnx2x_fastpath *fp = &bp->fp[j];
139855c11941SMerav Sicron
139955c11941SMerav Sicron fp->rx_bd_cons = 0;
140055c11941SMerav Sicron
140155c11941SMerav Sicron /* Activate BD ring */
140255c11941SMerav Sicron /* Warning!
140355c11941SMerav Sicron * this will generate an interrupt (to the TSTORM);
140455c11941SMerav Sicron * it must only be done after the chip is initialized
140555c11941SMerav Sicron */
140655c11941SMerav Sicron bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
140755c11941SMerav Sicron fp->rx_sge_prod);
140855c11941SMerav Sicron }
140955c11941SMerav Sicron }
141055c11941SMerav Sicron
1411adfc5217SJeff Kirsher void bnx2x_init_rx_rings(struct bnx2x *bp)
1412adfc5217SJeff Kirsher {
1413adfc5217SJeff Kirsher int func = BP_FUNC(bp);
1414adfc5217SJeff Kirsher u16 ring_prod;
1415adfc5217SJeff Kirsher int i, j;
1416adfc5217SJeff Kirsher
1417adfc5217SJeff Kirsher /* Allocate TPA resources */
141855c11941SMerav Sicron for_each_eth_queue(bp, j) {
1419adfc5217SJeff Kirsher struct bnx2x_fastpath *fp = &bp->fp[j];
1420adfc5217SJeff Kirsher
1421adfc5217SJeff Kirsher DP(NETIF_MSG_IFUP,
1422adfc5217SJeff Kirsher "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1423adfc5217SJeff Kirsher
14247e6b4d44SMichal Schmidt if (fp->mode != TPA_MODE_DISABLED) {
142516a5fd92SYuval Mintz /* Fill the per-aggregation pool */
14268decf868SDavid S. Miller for (i = 0; i < MAX_AGG_QS(bp); i++) {
1427adfc5217SJeff Kirsher struct bnx2x_agg_info *tpa_info =
1428adfc5217SJeff Kirsher &fp->tpa_info[i];
1429adfc5217SJeff Kirsher struct sw_rx_bd *first_buf =
1430adfc5217SJeff Kirsher &tpa_info->first_buf;
1431adfc5217SJeff Kirsher
1432996dedbaSMichal Schmidt first_buf->data =
1433996dedbaSMichal Schmidt bnx2x_frag_alloc(fp, GFP_KERNEL);
1434e52fcb24SEric Dumazet if (!first_buf->data) {
143551c1a580SMerav Sicron BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
143651c1a580SMerav Sicron j);
1437adfc5217SJeff Kirsher bnx2x_free_tpa_pool(bp, fp, i);
14387e6b4d44SMichal Schmidt fp->mode = TPA_MODE_DISABLED;
1439adfc5217SJeff Kirsher break;
1440adfc5217SJeff Kirsher }
1441adfc5217SJeff Kirsher dma_unmap_addr_set(first_buf, mapping, 0);
1442adfc5217SJeff Kirsher tpa_info->tpa_state = BNX2X_TPA_STOP;
1443adfc5217SJeff Kirsher }
1444adfc5217SJeff Kirsher
1445adfc5217SJeff Kirsher /* "next page" elements initialization */
1446adfc5217SJeff Kirsher bnx2x_set_next_page_sgl(fp);
1447adfc5217SJeff Kirsher
1448adfc5217SJeff Kirsher /* set SGEs bit mask */
1449adfc5217SJeff Kirsher bnx2x_init_sge_ring_bit_mask(fp);
1450adfc5217SJeff Kirsher
1451adfc5217SJeff Kirsher /* Allocate SGEs and initialize the ring elements */
1452adfc5217SJeff Kirsher for (i = 0, ring_prod = 0;
1453adfc5217SJeff Kirsher i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1454adfc5217SJeff Kirsher
1455996dedbaSMichal Schmidt if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1456996dedbaSMichal Schmidt GFP_KERNEL) < 0) {
145751c1a580SMerav Sicron BNX2X_ERR("was only able to allocate %d rx sges\n",
145851c1a580SMerav Sicron i);
145951c1a580SMerav Sicron BNX2X_ERR("disabling TPA for queue[%d]\n",
146051c1a580SMerav Sicron j);
1461adfc5217SJeff Kirsher /* Cleanup already allocated elements */
1462adfc5217SJeff Kirsher bnx2x_free_rx_sge_range(bp, fp,
1463adfc5217SJeff Kirsher ring_prod);
1464adfc5217SJeff Kirsher bnx2x_free_tpa_pool(bp, fp,
14658decf868SDavid S. Miller MAX_AGG_QS(bp));
14667e6b4d44SMichal Schmidt fp->mode = TPA_MODE_DISABLED;
1467adfc5217SJeff Kirsher ring_prod = 0;
1468adfc5217SJeff Kirsher break;
1469adfc5217SJeff Kirsher }
1470adfc5217SJeff Kirsher ring_prod = NEXT_SGE_IDX(ring_prod);
1471adfc5217SJeff Kirsher }
1472adfc5217SJeff Kirsher
1473adfc5217SJeff Kirsher fp->rx_sge_prod = ring_prod;
1474adfc5217SJeff Kirsher }
1475adfc5217SJeff Kirsher }
1476adfc5217SJeff Kirsher
147755c11941SMerav Sicron for_each_eth_queue(bp, j) {
1478adfc5217SJeff Kirsher struct bnx2x_fastpath *fp = &bp->fp[j];
1479adfc5217SJeff Kirsher
1480adfc5217SJeff Kirsher fp->rx_bd_cons = 0;
1481adfc5217SJeff Kirsher
1482adfc5217SJeff Kirsher /* Activate BD ring */
1483adfc5217SJeff Kirsher /* Warning!
1484adfc5217SJeff Kirsher * this will generate an interrupt (to the TSTORM);
1485adfc5217SJeff Kirsher * it must only be done after the chip is initialized
1486adfc5217SJeff Kirsher */
1487adfc5217SJeff Kirsher bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1488adfc5217SJeff Kirsher fp->rx_sge_prod);
1489adfc5217SJeff Kirsher
1490adfc5217SJeff Kirsher if (j != 0)
1491adfc5217SJeff Kirsher continue;
1492adfc5217SJeff Kirsher
1493adfc5217SJeff Kirsher if (CHIP_IS_E1(bp)) {
1494adfc5217SJeff Kirsher REG_WR(bp, BAR_USTRORM_INTMEM +
1495adfc5217SJeff Kirsher USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1496adfc5217SJeff Kirsher U64_LO(fp->rx_comp_mapping));
1497adfc5217SJeff Kirsher REG_WR(bp, BAR_USTRORM_INTMEM +
1498adfc5217SJeff Kirsher USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1499adfc5217SJeff Kirsher U64_HI(fp->rx_comp_mapping));
1500adfc5217SJeff Kirsher }
1501adfc5217SJeff Kirsher }
1502adfc5217SJeff Kirsher }
1503adfc5217SJeff Kirsher
150455c11941SMerav Sicron static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1505adfc5217SJeff Kirsher {
1506adfc5217SJeff Kirsher u8 cos;
150755c11941SMerav Sicron struct bnx2x *bp = fp->bp;
1508adfc5217SJeff Kirsher
1509adfc5217SJeff Kirsher for_each_cos_in_tx_queue(fp, cos) {
151065565884SMerav Sicron struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
15112df1a70aSTom Herbert unsigned pkts_compl = 0, bytes_compl = 0;
1512adfc5217SJeff Kirsher
1513adfc5217SJeff Kirsher u16 sw_prod = txdata->tx_pkt_prod;
1514adfc5217SJeff Kirsher u16 sw_cons = txdata->tx_pkt_cons;
1515adfc5217SJeff Kirsher
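	/* Walk from consumer to producer, freeing every packet still queued, then reset the BQL state for this Tx queue. */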
1516adfc5217SJeff Kirsher while (sw_cons != sw_prod) {
15172df1a70aSTom Herbert bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
15182df1a70aSTom Herbert &pkts_compl, &bytes_compl);
1519adfc5217SJeff Kirsher sw_cons++;
1520adfc5217SJeff Kirsher }
152155c11941SMerav Sicron
15222df1a70aSTom Herbert netdev_tx_reset_queue(
152365565884SMerav Sicron netdev_get_tx_queue(bp->dev,
152465565884SMerav Sicron txdata->txq_index));
1525adfc5217SJeff Kirsher }
1526adfc5217SJeff Kirsher }
152755c11941SMerav Sicron
152855c11941SMerav Sicron static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
152955c11941SMerav Sicron {
153055c11941SMerav Sicron int i;
153155c11941SMerav Sicron
153255c11941SMerav Sicron for_each_tx_queue_cnic(bp, i) {
153355c11941SMerav Sicron bnx2x_free_tx_skbs_queue(&bp->fp[i]);
153455c11941SMerav Sicron }
153555c11941SMerav Sicron }
153655c11941SMerav Sicron
153755c11941SMerav Sicron static void bnx2x_free_tx_skbs(struct bnx2x *bp)
153855c11941SMerav Sicron {
153955c11941SMerav Sicron int i;
154055c11941SMerav Sicron
154155c11941SMerav Sicron for_each_eth_queue(bp, i) {
154255c11941SMerav Sicron bnx2x_free_tx_skbs_queue(&bp->fp[i]);
154355c11941SMerav Sicron }
1544adfc5217SJeff Kirsher }
1545adfc5217SJeff Kirsher
1546adfc5217SJeff Kirsher static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1547adfc5217SJeff Kirsher {
1548adfc5217SJeff Kirsher struct bnx2x *bp = fp->bp;
1549adfc5217SJeff Kirsher int i;
1550adfc5217SJeff Kirsher
1551adfc5217SJeff Kirsher /* ring wasn't allocated */
1552adfc5217SJeff Kirsher if (fp->rx_buf_ring == NULL)
1553adfc5217SJeff Kirsher return;
1554adfc5217SJeff Kirsher
1555adfc5217SJeff Kirsher for (i = 0; i < NUM_RX_BD; i++) {
1556adfc5217SJeff Kirsher struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1557e52fcb24SEric Dumazet u8 *data = rx_buf->data;
1558adfc5217SJeff Kirsher
1559e52fcb24SEric Dumazet if (data == NULL)
1560adfc5217SJeff Kirsher continue;
1561adfc5217SJeff Kirsher dma_unmap_single(&bp->pdev->dev,
1562adfc5217SJeff Kirsher dma_unmap_addr(rx_buf, mapping),
1563adfc5217SJeff Kirsher fp->rx_buf_size, DMA_FROM_DEVICE);
1564adfc5217SJeff Kirsher
1565e52fcb24SEric Dumazet rx_buf->data = NULL;
1566d46d132cSEric Dumazet bnx2x_frag_free(fp, data);
1567adfc5217SJeff Kirsher }
1568adfc5217SJeff Kirsher }
1569adfc5217SJeff Kirsher
157055c11941SMerav Sicron static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
157155c11941SMerav Sicron {
157255c11941SMerav Sicron int j;
157355c11941SMerav Sicron
157455c11941SMerav Sicron for_each_rx_queue_cnic(bp, j) {
157555c11941SMerav Sicron bnx2x_free_rx_bds(&bp->fp[j]);
157655c11941SMerav Sicron }
157755c11941SMerav Sicron }
157855c11941SMerav Sicron
1579adfc5217SJeff Kirsher static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1580adfc5217SJeff Kirsher {
1581adfc5217SJeff Kirsher int j;
1582adfc5217SJeff Kirsher
158355c11941SMerav Sicron for_each_eth_queue(bp, j) {
1584adfc5217SJeff Kirsher struct bnx2x_fastpath *fp = &bp->fp[j];
1585adfc5217SJeff Kirsher
1586adfc5217SJeff Kirsher bnx2x_free_rx_bds(fp);
1587adfc5217SJeff Kirsher
15887e6b4d44SMichal Schmidt if (fp->mode != TPA_MODE_DISABLED)
15898decf868SDavid S. Miller bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1590adfc5217SJeff Kirsher }
1591adfc5217SJeff Kirsher }
1592adfc5217SJeff Kirsher
1593a8f47eb7Sstephen hemminger static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
159455c11941SMerav Sicron {
159555c11941SMerav Sicron bnx2x_free_tx_skbs_cnic(bp);
159655c11941SMerav Sicron bnx2x_free_rx_skbs_cnic(bp);
159755c11941SMerav Sicron }
159855c11941SMerav Sicron
1599adfc5217SJeff Kirsher void bnx2x_free_skbs(struct bnx2x *bp)
1600adfc5217SJeff Kirsher {
1601adfc5217SJeff Kirsher bnx2x_free_tx_skbs(bp);
1602adfc5217SJeff Kirsher bnx2x_free_rx_skbs(bp);
1603adfc5217SJeff Kirsher }
1604adfc5217SJeff Kirsher
1605adfc5217SJeff Kirsher void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1606adfc5217SJeff Kirsher {
1607adfc5217SJeff Kirsher /* load old values */
1608adfc5217SJeff Kirsher u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1609adfc5217SJeff Kirsher
1610adfc5217SJeff Kirsher if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1611adfc5217SJeff Kirsher /* leave all but MAX value */
1612adfc5217SJeff Kirsher mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1613adfc5217SJeff Kirsher
1614adfc5217SJeff Kirsher /* set new MAX value */
1615adfc5217SJeff Kirsher mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1616adfc5217SJeff Kirsher & FUNC_MF_CFG_MAX_BW_MASK;
1617adfc5217SJeff Kirsher
1618adfc5217SJeff Kirsher bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1619adfc5217SJeff Kirsher }
1620adfc5217SJeff Kirsher }
1621adfc5217SJeff Kirsher
1622adfc5217SJeff Kirsher /**
1623adfc5217SJeff Kirsher * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1624adfc5217SJeff Kirsher *
1625adfc5217SJeff Kirsher * @bp: driver handle
1626adfc5217SJeff Kirsher * @nvecs: number of vectors to be released
1627adfc5217SJeff Kirsher */
1628adfc5217SJeff Kirsher static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1629adfc5217SJeff Kirsher {
1630adfc5217SJeff Kirsher int i, offset = 0;
1631adfc5217SJeff Kirsher
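	/* Vectors were requested in the order: slowpath (PF only), an optional CNIC slot, then one per ETH queue; release them in the same order. */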
1632adfc5217SJeff Kirsher if (nvecs == offset)
1633adfc5217SJeff Kirsher return;
1634ad5afc89SAriel Elior
1635ad5afc89SAriel Elior /* VFs don't have a default SB */
1636ad5afc89SAriel Elior if (IS_PF(bp)) {
1637adfc5217SJeff Kirsher free_irq(bp->msix_table[offset].vector, bp->dev);
1638adfc5217SJeff Kirsher DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1639adfc5217SJeff Kirsher bp->msix_table[offset].vector);
1640adfc5217SJeff Kirsher offset++;
1641ad5afc89SAriel Elior }
164255c11941SMerav Sicron
164355c11941SMerav Sicron if (CNIC_SUPPORT(bp)) {
1644adfc5217SJeff Kirsher if (nvecs == offset)
1645adfc5217SJeff Kirsher return;
1646adfc5217SJeff Kirsher offset++;
164755c11941SMerav Sicron }
1648adfc5217SJeff Kirsher
1649adfc5217SJeff Kirsher for_each_eth_queue(bp, i) {
1650adfc5217SJeff Kirsher if (nvecs == offset)
1651adfc5217SJeff Kirsher return;
165251c1a580SMerav Sicron DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
165351c1a580SMerav Sicron i, bp->msix_table[offset].vector);
1654adfc5217SJeff Kirsher
1655adfc5217SJeff Kirsher free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1656adfc5217SJeff Kirsher }
1657adfc5217SJeff Kirsher }
1658adfc5217SJeff Kirsher
1659adfc5217SJeff Kirsher void bnx2x_free_irq(struct bnx2x *bp)
1660adfc5217SJeff Kirsher {
166130a5de77SDmitry Kravkov if (bp->flags & USING_MSIX_FLAG &&
1662ad5afc89SAriel Elior !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1663ad5afc89SAriel Elior int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1664ad5afc89SAriel Elior
1665ad5afc89SAriel Elior /* VFs don't have a default status block */
1666ad5afc89SAriel Elior if (IS_PF(bp))
1667ad5afc89SAriel Elior nvecs++;
1668ad5afc89SAriel Elior
1669ad5afc89SAriel Elior bnx2x_free_msix_irqs(bp, nvecs);
1670ad5afc89SAriel Elior } else {
167130a5de77SDmitry Kravkov free_irq(bp->dev->irq, bp->dev);
1672adfc5217SJeff Kirsher }
1673ad5afc89SAriel Elior }
1674adfc5217SJeff Kirsher
16750e8d2ec5SMerav Sicron int bnx2x_enable_msix(struct bnx2x *bp)
1676adfc5217SJeff Kirsher {
16771ab4434cSAriel Elior int msix_vec = 0, i, rc;
1678adfc5217SJeff Kirsher
16791ab4434cSAriel Elior /* VFs don't have a default status block */
16801ab4434cSAriel Elior if (IS_PF(bp)) {
1681adfc5217SJeff Kirsher bp->msix_table[msix_vec].entry = msix_vec;
168251c1a580SMerav Sicron BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1683adfc5217SJeff Kirsher bp->msix_table[0].entry);
1684adfc5217SJeff Kirsher msix_vec++;
16851ab4434cSAriel Elior }
1686adfc5217SJeff Kirsher
168755c11941SMerav Sicron /* CNIC requires an MSI-X vector for itself */
168855c11941SMerav Sicron if (CNIC_SUPPORT(bp)) {
1689adfc5217SJeff Kirsher bp->msix_table[msix_vec].entry = msix_vec;
169051c1a580SMerav Sicron BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
169155c11941SMerav Sicron msix_vec, bp->msix_table[msix_vec].entry);
1692adfc5217SJeff Kirsher msix_vec++;
169355c11941SMerav Sicron }
169455c11941SMerav Sicron
1695adfc5217SJeff Kirsher /* We need separate vectors for ETH queues only (not FCoE) */
1696adfc5217SJeff Kirsher for_each_eth_queue(bp, i) {
1697adfc5217SJeff Kirsher bp->msix_table[msix_vec].entry = msix_vec;
169851c1a580SMerav Sicron BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
169951c1a580SMerav Sicron msix_vec, msix_vec, i);
1700adfc5217SJeff Kirsher msix_vec++;
1701adfc5217SJeff Kirsher }
1702adfc5217SJeff Kirsher
17031ab4434cSAriel Elior DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
17041ab4434cSAriel Elior msix_vec);
1705adfc5217SJeff Kirsher
1706a5444b17SAlexander Gordeev rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1707a5444b17SAlexander Gordeev BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1708adfc5217SJeff Kirsher /*
1709adfc5217SJeff Kirsher * reconfigure number of tx/rx queues according to available
1710adfc5217SJeff Kirsher * MSI-X vectors
1711adfc5217SJeff Kirsher */
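	/* pci_enable_msix_range() returns the number of vectors granted, -ENOSPC when fewer than the requested minimum are available, or another negative errno on failure. */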
1712a5444b17SAlexander Gordeev if (rc == -ENOSPC) {
171330a5de77SDmitry Kravkov /* Get by with single vector */
1714a5444b17SAlexander Gordeev rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1715a5444b17SAlexander Gordeev if (rc < 0) {
171630a5de77SDmitry Kravkov BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
171730a5de77SDmitry Kravkov rc);
171830a5de77SDmitry Kravkov goto no_msix;
171930a5de77SDmitry Kravkov }
172030a5de77SDmitry Kravkov
172130a5de77SDmitry Kravkov BNX2X_DEV_INFO("Using single MSI-X vector\n");
172230a5de77SDmitry Kravkov bp->flags |= USING_SINGLE_MSIX_FLAG;
172330a5de77SDmitry Kravkov
172455c11941SMerav Sicron BNX2X_DEV_INFO("set number of queues to 1\n");
172555c11941SMerav Sicron bp->num_ethernet_queues = 1;
172655c11941SMerav Sicron bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
172730a5de77SDmitry Kravkov } else if (rc < 0) {
172851c1a580SMerav Sicron BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
172930a5de77SDmitry Kravkov goto no_msix;
1730a5444b17SAlexander Gordeev } else if (rc < msix_vec) {
1731a5444b17SAlexander Gordeev /* how many fewer vectors did we get? */
1732a5444b17SAlexander Gordeev int diff = msix_vec - rc;
1733a5444b17SAlexander Gordeev
1734a5444b17SAlexander Gordeev BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1735a5444b17SAlexander Gordeev
1736a5444b17SAlexander Gordeev /*
1737a5444b17SAlexander Gordeev * decrease number of queues by number of unallocated entries
1738a5444b17SAlexander Gordeev */
1739a5444b17SAlexander Gordeev bp->num_ethernet_queues -= diff;
1740a5444b17SAlexander Gordeev bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1741a5444b17SAlexander Gordeev
1742a5444b17SAlexander Gordeev BNX2X_DEV_INFO("New queue configuration set: %d\n",
1743a5444b17SAlexander Gordeev bp->num_queues);
1744adfc5217SJeff Kirsher }
1745adfc5217SJeff Kirsher
1746adfc5217SJeff Kirsher bp->flags |= USING_MSIX_FLAG;
1747adfc5217SJeff Kirsher
1748adfc5217SJeff Kirsher return 0;
174930a5de77SDmitry Kravkov
175030a5de77SDmitry Kravkov no_msix:
175130a5de77SDmitry Kravkov /* fall back to INTx if there is not enough memory */
175230a5de77SDmitry Kravkov if (rc == -ENOMEM)
175330a5de77SDmitry Kravkov bp->flags |= DISABLE_MSI_FLAG;
175430a5de77SDmitry Kravkov
175530a5de77SDmitry Kravkov return rc;
1756adfc5217SJeff Kirsher }
1757adfc5217SJeff Kirsher
1758adfc5217SJeff Kirsher static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1759adfc5217SJeff Kirsher {
1760adfc5217SJeff Kirsher int i, rc, offset = 0;
1761adfc5217SJeff Kirsher
1762ad5afc89SAriel Elior /* no default status block for vf */
1763ad5afc89SAriel Elior if (IS_PF(bp)) {
1764adfc5217SJeff Kirsher rc = request_irq(bp->msix_table[offset++].vector,
1765adfc5217SJeff Kirsher bnx2x_msix_sp_int, 0,
1766adfc5217SJeff Kirsher bp->dev->name, bp->dev);
1767adfc5217SJeff Kirsher if (rc) {
1768adfc5217SJeff Kirsher BNX2X_ERR("request sp irq failed\n");
1769adfc5217SJeff Kirsher return -EBUSY;
1770adfc5217SJeff Kirsher }
1771ad5afc89SAriel Elior }
1772adfc5217SJeff Kirsher
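	/* The CNIC slot only reserves a vector here; its IRQ is not requested by this function. */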
177355c11941SMerav Sicron if (CNIC_SUPPORT(bp))
1774adfc5217SJeff Kirsher offset++;
177555c11941SMerav Sicron
1776adfc5217SJeff Kirsher for_each_eth_queue(bp, i) {
1777adfc5217SJeff Kirsher struct bnx2x_fastpath *fp = &bp->fp[i];
1778adfc5217SJeff Kirsher snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1779adfc5217SJeff Kirsher bp->dev->name, i);
1780adfc5217SJeff Kirsher
1781adfc5217SJeff Kirsher rc = request_irq(bp->msix_table[offset].vector,
1782adfc5217SJeff Kirsher bnx2x_msix_fp_int, 0, fp->name, fp);
1783adfc5217SJeff Kirsher if (rc) {
1784adfc5217SJeff Kirsher BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1785adfc5217SJeff Kirsher bp->msix_table[offset].vector, rc);
1786adfc5217SJeff Kirsher bnx2x_free_msix_irqs(bp, offset);
1787adfc5217SJeff Kirsher return -EBUSY;
1788adfc5217SJeff Kirsher }
1789adfc5217SJeff Kirsher
1790adfc5217SJeff Kirsher offset++;
1791adfc5217SJeff Kirsher }
1792adfc5217SJeff Kirsher
1793adfc5217SJeff Kirsher i = BNX2X_NUM_ETH_QUEUES(bp);
1794ad5afc89SAriel Elior if (IS_PF(bp)) {
179555c11941SMerav Sicron offset = 1 + CNIC_SUPPORT(bp);
1796ad5afc89SAriel Elior netdev_info(bp->dev,
1797ad5afc89SAriel Elior "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1798adfc5217SJeff Kirsher bp->msix_table[0].vector,
1799adfc5217SJeff Kirsher 0, bp->msix_table[offset].vector,
1800adfc5217SJeff Kirsher i - 1, bp->msix_table[offset + i - 1].vector);
1801ad5afc89SAriel Elior } else {
1802ad5afc89SAriel Elior offset = CNIC_SUPPORT(bp);
1803ad5afc89SAriel Elior netdev_info(bp->dev,
1804ad5afc89SAriel Elior "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1805ad5afc89SAriel Elior 0, bp->msix_table[offset].vector,
1806ad5afc89SAriel Elior i - 1, bp->msix_table[offset + i - 1].vector);
1807ad5afc89SAriel Elior }
1808adfc5217SJeff Kirsher return 0;
1809adfc5217SJeff Kirsher }
1810adfc5217SJeff Kirsher
1811adfc5217SJeff Kirsher int bnx2x_enable_msi(struct bnx2x *bp)
1812adfc5217SJeff Kirsher {
1813adfc5217SJeff Kirsher int rc;
1814adfc5217SJeff Kirsher
1815adfc5217SJeff Kirsher rc = pci_enable_msi(bp->pdev);
1816adfc5217SJeff Kirsher if (rc) {
181751c1a580SMerav Sicron BNX2X_DEV_INFO("MSI is not attainable\n");
1818adfc5217SJeff Kirsher return -1;
1819adfc5217SJeff Kirsher }
1820adfc5217SJeff Kirsher bp->flags |= USING_MSI_FLAG;
1821adfc5217SJeff Kirsher
1822adfc5217SJeff Kirsher return 0;
1823adfc5217SJeff Kirsher }
1824adfc5217SJeff Kirsher
1825adfc5217SJeff Kirsher static int bnx2x_req_irq(struct bnx2x *bp)
1826adfc5217SJeff Kirsher {
1827adfc5217SJeff Kirsher unsigned long flags;
182830a5de77SDmitry Kravkov unsigned int irq;
1829adfc5217SJeff Kirsher
183030a5de77SDmitry Kravkov if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1831adfc5217SJeff Kirsher flags = 0;
1832adfc5217SJeff Kirsher else
1833adfc5217SJeff Kirsher flags = IRQF_SHARED;
1834adfc5217SJeff Kirsher
183530a5de77SDmitry Kravkov if (bp->flags & USING_MSIX_FLAG)
183630a5de77SDmitry Kravkov irq = bp->msix_table[0].vector;
183730a5de77SDmitry Kravkov else
183830a5de77SDmitry Kravkov irq = bp->pdev->irq;
183930a5de77SDmitry Kravkov
184030a5de77SDmitry Kravkov return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1841adfc5217SJeff Kirsher }
1842adfc5217SJeff Kirsher
1843c957d09fSYuval Mintz static int bnx2x_setup_irqs(struct bnx2x *bp)
1844adfc5217SJeff Kirsher {
1845adfc5217SJeff Kirsher int rc = 0;
184630a5de77SDmitry Kravkov if (bp->flags & USING_MSIX_FLAG &&
184730a5de77SDmitry Kravkov !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1848adfc5217SJeff Kirsher rc = bnx2x_req_msix_irqs(bp);
1849adfc5217SJeff Kirsher if (rc)
1850adfc5217SJeff Kirsher return rc;
1851adfc5217SJeff Kirsher } else {
1852adfc5217SJeff Kirsher rc = bnx2x_req_irq(bp);
1853adfc5217SJeff Kirsher if (rc) {
1854adfc5217SJeff Kirsher BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1855adfc5217SJeff Kirsher return rc;
1856adfc5217SJeff Kirsher }
1857adfc5217SJeff Kirsher if (bp->flags & USING_MSI_FLAG) {
1858adfc5217SJeff Kirsher bp->dev->irq = bp->pdev->irq;
1859adfc5217SJeff Kirsher netdev_info(bp->dev, "using MSI IRQ %d\n",
186030a5de77SDmitry Kravkov bp->dev->irq);
186130a5de77SDmitry Kravkov }
186230a5de77SDmitry Kravkov if (bp->flags & USING_MSIX_FLAG) {
186330a5de77SDmitry Kravkov bp->dev->irq = bp->msix_table[0].vector;
186430a5de77SDmitry Kravkov netdev_info(bp->dev, "using MSIX IRQ %d\n",
186530a5de77SDmitry Kravkov bp->dev->irq);
1866adfc5217SJeff Kirsher }
1867adfc5217SJeff Kirsher }
1868adfc5217SJeff Kirsher
1869adfc5217SJeff Kirsher return 0;
1870adfc5217SJeff Kirsher }
1871adfc5217SJeff Kirsher
187255c11941SMerav Sicron static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
187355c11941SMerav Sicron {
187455c11941SMerav Sicron int i;
187555c11941SMerav Sicron
18768f20aa57SDmitry Kravkov for_each_rx_queue_cnic(bp, i) {
187755c11941SMerav Sicron napi_enable(&bnx2x_fp(bp, i, napi));
187855c11941SMerav Sicron }
18798f20aa57SDmitry Kravkov }
188055c11941SMerav Sicron
18811191cb83SEric Dumazet static void bnx2x_napi_enable(struct bnx2x *bp)
1882adfc5217SJeff Kirsher {
1883adfc5217SJeff Kirsher int i;
1884adfc5217SJeff Kirsher
18858f20aa57SDmitry Kravkov for_each_eth_queue(bp, i) {
1886adfc5217SJeff Kirsher napi_enable(&bnx2x_fp(bp, i, napi));
1887adfc5217SJeff Kirsher }
18888f20aa57SDmitry Kravkov }
1889adfc5217SJeff Kirsher
189055c11941SMerav Sicron static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
189155c11941SMerav Sicron {
189255c11941SMerav Sicron int i;
189355c11941SMerav Sicron
18948f20aa57SDmitry Kravkov for_each_rx_queue_cnic(bp, i) {
189555c11941SMerav Sicron napi_disable(&bnx2x_fp(bp, i, napi));
18968f20aa57SDmitry Kravkov }
189755c11941SMerav Sicron }
189855c11941SMerav Sicron
18991191cb83SEric Dumazet static void bnx2x_napi_disable(struct bnx2x *bp)
1900adfc5217SJeff Kirsher {
1901adfc5217SJeff Kirsher int i;
1902adfc5217SJeff Kirsher
19038f20aa57SDmitry Kravkov for_each_eth_queue(bp, i) {
1904adfc5217SJeff Kirsher napi_disable(&bnx2x_fp(bp, i, napi));
19058f20aa57SDmitry Kravkov }
1906adfc5217SJeff Kirsher }
1907adfc5217SJeff Kirsher
1908adfc5217SJeff Kirsher void bnx2x_netif_start(struct bnx2x *bp)
1909adfc5217SJeff Kirsher {
1910adfc5217SJeff Kirsher if (netif_running(bp->dev)) {
1911adfc5217SJeff Kirsher bnx2x_napi_enable(bp);
191255c11941SMerav Sicron if (CNIC_LOADED(bp))
191355c11941SMerav Sicron bnx2x_napi_enable_cnic(bp);
1914adfc5217SJeff Kirsher bnx2x_int_enable(bp);
1915adfc5217SJeff Kirsher if (bp->state == BNX2X_STATE_OPEN)
1916adfc5217SJeff Kirsher netif_tx_wake_all_queues(bp->dev);
1917adfc5217SJeff Kirsher }
1918adfc5217SJeff Kirsher }
1919adfc5217SJeff Kirsher
1920adfc5217SJeff Kirsher void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1921adfc5217SJeff Kirsher {
1922adfc5217SJeff Kirsher bnx2x_int_disable_sync(bp, disable_hw);
1923adfc5217SJeff Kirsher bnx2x_napi_disable(bp);
192455c11941SMerav Sicron if (CNIC_LOADED(bp))
192555c11941SMerav Sicron bnx2x_napi_disable_cnic(bp);
1926adfc5217SJeff Kirsher }
1927adfc5217SJeff Kirsher
1928f663dd9aSJason Wang u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1929a350ecceSPaolo Abeni struct net_device *sb_dev)
1930adfc5217SJeff Kirsher {
1931adfc5217SJeff Kirsher struct bnx2x *bp = netdev_priv(dev);
1932823dcd25SDavid S. Miller
193355c11941SMerav Sicron if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1934adfc5217SJeff Kirsher struct ethhdr *hdr = (struct ethhdr *)skb->data;
1935adfc5217SJeff Kirsher u16 ether_type = ntohs(hdr->h_proto);
1936adfc5217SJeff Kirsher
1937adfc5217SJeff Kirsher /* Skip VLAN tag if present */
1938adfc5217SJeff Kirsher if (ether_type == ETH_P_8021Q) {
19391f5020acSVladimir Oltean struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
1940adfc5217SJeff Kirsher
1941adfc5217SJeff Kirsher ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1942adfc5217SJeff Kirsher }
1943adfc5217SJeff Kirsher
1944adfc5217SJeff Kirsher /* If ethertype is FCoE or FIP - use FCoE ring */
1945adfc5217SJeff Kirsher if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1946adfc5217SJeff Kirsher return bnx2x_fcoe_tx(bp, txq_index);
1947adfc5217SJeff Kirsher }
194855c11941SMerav Sicron
1949823dcd25SDavid S. Miller /* select a non-FCoE queue */
1950069e4782SSudarsana Reddy Kalluru return netdev_pick_tx(dev, skb, NULL) %
1951069e4782SSudarsana Reddy Kalluru (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
1952adfc5217SJeff Kirsher }
1953adfc5217SJeff Kirsher
1954adfc5217SJeff Kirsher void bnx2x_set_num_queues(struct bnx2x *bp)
1955adfc5217SJeff Kirsher {
195696305234SDmitry Kravkov /* RSS queues */
195755c11941SMerav Sicron bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1958adfc5217SJeff Kirsher
1959a3348722SBarak Witkowski /* override in STORAGE SD modes */
19602e98ffc2SDmitry Kravkov if (IS_MF_STORAGE_ONLY(bp))
196155c11941SMerav Sicron bp->num_ethernet_queues = 1;
196255c11941SMerav Sicron
1963adfc5217SJeff Kirsher /* Add special queues */
196455c11941SMerav Sicron bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
196555c11941SMerav Sicron bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
196665565884SMerav Sicron
196765565884SMerav Sicron BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1968adfc5217SJeff Kirsher }
1969adfc5217SJeff Kirsher
1970823dcd25SDavid S. Miller /**
1971823dcd25SDavid S. Miller * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1972823dcd25SDavid S. Miller *
1973823dcd25SDavid S. Miller * @bp: Driver handle
1974d0ea5cbdSJesse Brandeburg * @include_cnic: handle cnic case
1975823dcd25SDavid S. Miller *
1976823dcd25SDavid S. Miller * We currently support at most 16 Tx queues for each CoS, thus we will
1977823dcd25SDavid S. Miller * allocate a multiple of 16 for ETH L2 rings according to the value of the
1978823dcd25SDavid S. Miller * bp->max_cos.
1979823dcd25SDavid S. Miller *
1980823dcd25SDavid S. Miller * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1981823dcd25SDavid S. Miller * index after all ETH L2 indices.
1982823dcd25SDavid S. Miller *
1983823dcd25SDavid S. Miller * If the actual number of Tx queues (for each CoS) is less than 16 then there
1984823dcd25SDavid S. Miller * will be holes at the end of each group of 16 ETH L2 indices (0..15,
198516a5fd92SYuval Mintz * 16..31,...) with indices that are not coupled with any real Tx queue.
1986823dcd25SDavid S. Miller *
1987823dcd25SDavid S. Miller * The proper configuration of skb->queue_mapping is handled by
1988823dcd25SDavid S. Miller * bnx2x_select_queue() and __skb_tx_hash().
1989823dcd25SDavid S. Miller *
1990823dcd25SDavid S. Miller * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1991823dcd25SDavid S. Miller * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1992823dcd25SDavid S. Miller */
199355c11941SMerav Sicron static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1994adfc5217SJeff Kirsher {
1995adfc5217SJeff Kirsher int rc, tx, rx;
1996adfc5217SJeff Kirsher
199765565884SMerav Sicron tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
199855c11941SMerav Sicron rx = BNX2X_NUM_ETH_QUEUES(bp);
1999adfc5217SJeff Kirsher
2000adfc5217SJeff Kirsher /* account for fcoe queue */
200155c11941SMerav Sicron if (include_cnic && !NO_FCOE(bp)) {
200255c11941SMerav Sicron rx++;
200355c11941SMerav Sicron tx++;
2004adfc5217SJeff Kirsher }
2005adfc5217SJeff Kirsher
2006adfc5217SJeff Kirsher rc = netif_set_real_num_tx_queues(bp->dev, tx);
2007adfc5217SJeff Kirsher if (rc) {
2008adfc5217SJeff Kirsher BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2009adfc5217SJeff Kirsher return rc;
2010adfc5217SJeff Kirsher }
2011adfc5217SJeff Kirsher rc = netif_set_real_num_rx_queues(bp->dev, rx);
2012adfc5217SJeff Kirsher if (rc) {
2013adfc5217SJeff Kirsher BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2014adfc5217SJeff Kirsher return rc;
2015adfc5217SJeff Kirsher }
2016adfc5217SJeff Kirsher
201751c1a580SMerav Sicron DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2018adfc5217SJeff Kirsher tx, rx);
2019adfc5217SJeff Kirsher
2020adfc5217SJeff Kirsher return rc;
2021adfc5217SJeff Kirsher }
2022adfc5217SJeff Kirsher
20231191cb83SEric Dumazet static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2024adfc5217SJeff Kirsher {
2025adfc5217SJeff Kirsher int i;
2026adfc5217SJeff Kirsher
2027adfc5217SJeff Kirsher for_each_queue(bp, i) {
2028adfc5217SJeff Kirsher struct bnx2x_fastpath *fp = &bp->fp[i];
2029e52fcb24SEric Dumazet u32 mtu;
2030adfc5217SJeff Kirsher
2031adfc5217SJeff Kirsher /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2032adfc5217SJeff Kirsher if (IS_FCOE_IDX(i))
2033adfc5217SJeff Kirsher /*
2034adfc5217SJeff Kirsher * Although there are no IP frames expected to arrive on
2035adfc5217SJeff Kirsher * this ring, we still want to add an
2036adfc5217SJeff Kirsher * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2037adfc5217SJeff Kirsher * overrun attack.
2038adfc5217SJeff Kirsher */
2039e52fcb24SEric Dumazet mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2040adfc5217SJeff Kirsher else
2041e52fcb24SEric Dumazet mtu = bp->dev->mtu;
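	/* The buffer must hold the firmware alignment padding at both ends, the IP-header alignment pad, the Ethernet overhead and an MTU worth of payload. */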
2042e52fcb24SEric Dumazet fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2043e52fcb24SEric Dumazet IP_HEADER_ALIGNMENT_PADDING +
2044e1c6dccaSJarod Wilson ETH_OVERHEAD +
2045e52fcb24SEric Dumazet mtu +
2046e52fcb24SEric Dumazet BNX2X_FW_RX_ALIGN_END;
20479b70de6dSScott Wood fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
204816a5fd92SYuval Mintz /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
2049d46d132cSEric Dumazet if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2050d46d132cSEric Dumazet fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2051d46d132cSEric Dumazet else
2052d46d132cSEric Dumazet fp->rx_frag_size = 0;
2053adfc5217SJeff Kirsher }
2054adfc5217SJeff Kirsher }
2055adfc5217SJeff Kirsher
205660cad4e6SAriel Elior static int bnx2x_init_rss(struct bnx2x *bp)
2057adfc5217SJeff Kirsher {
2058adfc5217SJeff Kirsher int i;
2059adfc5217SJeff Kirsher u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2060adfc5217SJeff Kirsher
206116a5fd92SYuval Mintz /* Prepare the initial contents for the indirection table if RSS is
2062adfc5217SJeff Kirsher * enabled
2063adfc5217SJeff Kirsher */
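	/* ethtool_rxfh_indir_default(i, n) is simply i % n, so flows are spread evenly across the ETH queues, offset by the first client id. */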
20645d317c6aSMerav Sicron for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
20655d317c6aSMerav Sicron bp->rss_conf_obj.ind_table[i] =
2066278bc429SBen Hutchings bp->fp->cl_id +
2067278bc429SBen Hutchings ethtool_rxfh_indir_default(i, num_eth_queues);
2068adfc5217SJeff Kirsher
2069adfc5217SJeff Kirsher /*
2070adfc5217SJeff Kirsher * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2071adfc5217SJeff Kirsher * per-port, so if explicit configuration is needed, do it only
2072adfc5217SJeff Kirsher * for a PMF.
2073adfc5217SJeff Kirsher *
2074adfc5217SJeff Kirsher * For 57712 and newer on the other hand it's a per-function
2075adfc5217SJeff Kirsher * configuration.
2076adfc5217SJeff Kirsher */
20775d317c6aSMerav Sicron return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2078adfc5217SJeff Kirsher }
2079adfc5217SJeff Kirsher
208060cad4e6SAriel Elior int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
208160cad4e6SAriel Elior bool config_hash, bool enable)
2082adfc5217SJeff Kirsher {
20833b603066SYuval Mintz struct bnx2x_config_rss_params params = {NULL};
2084adfc5217SJeff Kirsher
2085adfc5217SJeff Kirsher /* Although RSS is meaningless when there is a single HW queue, we
2086adfc5217SJeff Kirsher * still need it enabled in order to have HW Rx hash generated.
2087adfc5217SJeff Kirsher *
2088adfc5217SJeff Kirsher * if (!is_eth_multi(bp))
2089adfc5217SJeff Kirsher * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2090adfc5217SJeff Kirsher */
2091adfc5217SJeff Kirsher
209296305234SDmitry Kravkov params.rss_obj = rss_obj;
2093adfc5217SJeff Kirsher
2094adfc5217SJeff Kirsher __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2095adfc5217SJeff Kirsher
209660cad4e6SAriel Elior if (enable) {
2097adfc5217SJeff Kirsher __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2098adfc5217SJeff Kirsher
2099adfc5217SJeff Kirsher /* RSS configuration */
2100adfc5217SJeff Kirsher __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2101adfc5217SJeff Kirsher __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2102adfc5217SJeff Kirsher __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2103adfc5217SJeff Kirsher __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
21045d317c6aSMerav Sicron if (rss_obj->udp_rss_v4)
21055d317c6aSMerav Sicron __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
21065d317c6aSMerav Sicron if (rss_obj->udp_rss_v6)
21075d317c6aSMerav Sicron __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2108e42780b6SDmitry Kravkov
210928311f8eSYuval Mintz if (!CHIP_IS_E1x(bp)) {
211028311f8eSYuval Mintz /* valid only for TUNN_MODE_VXLAN tunnel mode */
211128311f8eSYuval Mintz __set_bit(BNX2X_RSS_IPV4_VXLAN, ¶ms.rss_flags);
211228311f8eSYuval Mintz __set_bit(BNX2X_RSS_IPV6_VXLAN, ¶ms.rss_flags);
211328311f8eSYuval Mintz
2114e42780b6SDmitry Kravkov /* valid only for TUNN_MODE_GRE tunnel mode */
211528311f8eSYuval Mintz __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, ¶ms.rss_flags);
211628311f8eSYuval Mintz }
211760cad4e6SAriel Elior } else {
211860cad4e6SAriel Elior __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
211960cad4e6SAriel Elior }
2120adfc5217SJeff Kirsher
2121adfc5217SJeff Kirsher /* Hash bits */
2122adfc5217SJeff Kirsher params.rss_result_mask = MULTI_MASK;
2123adfc5217SJeff Kirsher
21245d317c6aSMerav Sicron memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2125adfc5217SJeff Kirsher
2126adfc5217SJeff Kirsher if (config_hash) {
2127adfc5217SJeff Kirsher /* RSS keys */
2128e3ec69caSEric Dumazet netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2129adfc5217SJeff Kirsher __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2130adfc5217SJeff Kirsher }
2131adfc5217SJeff Kirsher
213260cad4e6SAriel Elior if (IS_PF(bp))
2133adfc5217SJeff Kirsher return bnx2x_config_rss(bp, ¶ms);
213460cad4e6SAriel Elior else
213560cad4e6SAriel Elior return bnx2x_vfpf_config_rss(bp, ¶ms);
2136adfc5217SJeff Kirsher }
2137adfc5217SJeff Kirsher
21381191cb83SEric Dumazet static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2139adfc5217SJeff Kirsher {
21403b603066SYuval Mintz struct bnx2x_func_state_params func_params = {NULL};
2141adfc5217SJeff Kirsher
2142adfc5217SJeff Kirsher /* Prepare parameters for function state transitions */
2143adfc5217SJeff Kirsher __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2144adfc5217SJeff Kirsher
2145adfc5217SJeff Kirsher func_params.f_obj = &bp->func_obj;
2146adfc5217SJeff Kirsher func_params.cmd = BNX2X_F_CMD_HW_INIT;
2147adfc5217SJeff Kirsher
2148adfc5217SJeff Kirsher func_params.params.hw_init.load_phase = load_code;
2149adfc5217SJeff Kirsher
2150adfc5217SJeff Kirsher return bnx2x_func_state_change(bp, &func_params);
2151adfc5217SJeff Kirsher }
2152adfc5217SJeff Kirsher
2153adfc5217SJeff Kirsher /*
2154adfc5217SJeff Kirsher * Cleans the objects that have internal lists without sending
215516a5fd92SYuval Mintz * ramrods. Should be run when interrupts are disabled.
2156adfc5217SJeff Kirsher */
21577fa6f340SYuval Mintz void bnx2x_squeeze_objects(struct bnx2x *bp)
2158adfc5217SJeff Kirsher {
2159adfc5217SJeff Kirsher int rc;
2160adfc5217SJeff Kirsher unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
21613b603066SYuval Mintz struct bnx2x_mcast_ramrod_params rparam = {NULL};
216215192a8cSBarak Witkowski struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2163adfc5217SJeff Kirsher
2164adfc5217SJeff Kirsher /***************** Cleanup MACs' object first *************************/
2165adfc5217SJeff Kirsher
2166adfc5217SJeff Kirsher /* Wait for completion of requested */
2167adfc5217SJeff Kirsher __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2168adfc5217SJeff Kirsher /* Perform a dry cleanup */
2169adfc5217SJeff Kirsher __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2170adfc5217SJeff Kirsher
2171adfc5217SJeff Kirsher /* Clean ETH primary MAC */
2172adfc5217SJeff Kirsher __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
217315192a8cSBarak Witkowski rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2174adfc5217SJeff Kirsher &ramrod_flags);
2175adfc5217SJeff Kirsher if (rc != 0)
2176adfc5217SJeff Kirsher BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2177adfc5217SJeff Kirsher
2178adfc5217SJeff Kirsher /* Cleanup UC list */
2179adfc5217SJeff Kirsher vlan_mac_flags = 0;
2180adfc5217SJeff Kirsher __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2181adfc5217SJeff Kirsher rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2182adfc5217SJeff Kirsher &ramrod_flags);
2183adfc5217SJeff Kirsher if (rc != 0)
2184adfc5217SJeff Kirsher BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2185adfc5217SJeff Kirsher
2186adfc5217SJeff Kirsher /***************** Now clean mcast object *****************************/
2187adfc5217SJeff Kirsher rparam.mcast_obj = &bp->mcast_obj;
2188adfc5217SJeff Kirsher __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2189adfc5217SJeff Kirsher
21908b09be5fSYuval Mintz /* Add a DEL command... - Since we're doing a driver cleanup only,
21918b09be5fSYuval Mintz * we take a lock surrounding both the initial send and the CONTs,
21928b09be5fSYuval Mintz * as we don't want a true completion to disrupt us in the middle.
21938b09be5fSYuval Mintz */
21948b09be5fSYuval Mintz netif_addr_lock_bh(bp->dev);
2195adfc5217SJeff Kirsher rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2196adfc5217SJeff Kirsher if (rc < 0)
219751c1a580SMerav Sicron BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
219851c1a580SMerav Sicron rc);
2199adfc5217SJeff Kirsher
2200adfc5217SJeff Kirsher /* ...and wait until all pending commands are cleared */
2201adfc5217SJeff Kirsher rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2202adfc5217SJeff Kirsher while (rc != 0) {
2203adfc5217SJeff Kirsher if (rc < 0) {
2204adfc5217SJeff Kirsher BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2205adfc5217SJeff Kirsher rc);
22068b09be5fSYuval Mintz netif_addr_unlock_bh(bp->dev);
2207adfc5217SJeff Kirsher return;
2208adfc5217SJeff Kirsher }
2209adfc5217SJeff Kirsher
2210adfc5217SJeff Kirsher rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2211adfc5217SJeff Kirsher }
22128b09be5fSYuval Mintz netif_addr_unlock_bh(bp->dev);
2213adfc5217SJeff Kirsher }
2214adfc5217SJeff Kirsher
2215adfc5217SJeff Kirsher #ifndef BNX2X_STOP_ON_ERROR
2216adfc5217SJeff Kirsher #define LOAD_ERROR_EXIT(bp, label) \
2217adfc5217SJeff Kirsher do { \
2218adfc5217SJeff Kirsher (bp)->state = BNX2X_STATE_ERROR; \
2219adfc5217SJeff Kirsher goto label; \
2220adfc5217SJeff Kirsher } while (0)
222155c11941SMerav Sicron
222255c11941SMerav Sicron #define LOAD_ERROR_EXIT_CNIC(bp, label) \
222355c11941SMerav Sicron do { \
222455c11941SMerav Sicron bp->cnic_loaded = false; \
222555c11941SMerav Sicron goto label; \
222655c11941SMerav Sicron } while (0)
222755c11941SMerav Sicron #else /*BNX2X_STOP_ON_ERROR*/
2228adfc5217SJeff Kirsher #define LOAD_ERROR_EXIT(bp, label) \
2229adfc5217SJeff Kirsher do { \
2230adfc5217SJeff Kirsher (bp)->state = BNX2X_STATE_ERROR; \
2231adfc5217SJeff Kirsher (bp)->panic = 1; \
2232adfc5217SJeff Kirsher return -EBUSY; \
2233adfc5217SJeff Kirsher } while (0)
223455c11941SMerav Sicron #define LOAD_ERROR_EXIT_CNIC(bp, label) \
223555c11941SMerav Sicron do { \
223655c11941SMerav Sicron bp->cnic_loaded = false; \
223755c11941SMerav Sicron (bp)->panic = 1; \
223855c11941SMerav Sicron return -EBUSY; \
223955c11941SMerav Sicron } while (0)
224055c11941SMerav Sicron #endif /*BNX2X_STOP_ON_ERROR*/
2241adfc5217SJeff Kirsher
bnx2x_free_fw_stats_mem(struct bnx2x * bp)2242ad5afc89SAriel Elior static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2243452427b0SYuval Mintz {
2244ad5afc89SAriel Elior BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2245ad5afc89SAriel Elior bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2246ad5afc89SAriel Elior return;
2247ad5afc89SAriel Elior }
2248ad5afc89SAriel Elior
bnx2x_alloc_fw_stats_mem(struct bnx2x * bp)2249ad5afc89SAriel Elior static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2250ad5afc89SAriel Elior {
22518db573baSAriel Elior int num_groups, vf_headroom = 0;
2252ad5afc89SAriel Elior int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2253ad5afc89SAriel Elior
2254ad5afc89SAriel Elior /* number of queues for statistics is number of eth queues + FCoE */
2255ad5afc89SAriel Elior u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2256ad5afc89SAriel Elior
2257ad5afc89SAriel Elior /* Total number of FW statistics requests =
2258ad5afc89SAriel Elior * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2259ad5afc89SAriel Elior * and fcoe l2 queue) stats + num of queues (which includes another 1
2260ad5afc89SAriel Elior * for fcoe l2 queue if applicable)
2261ad5afc89SAriel Elior */
2262ad5afc89SAriel Elior bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
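	/* Illustrative example (numbers assumed, not from the source): with
	 * 8 ETH queues and FCoE enabled this gives 2 + 1 + (8 + 1) = 12 requests.
	 */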
2263ad5afc89SAriel Elior
22648db573baSAriel Elior /* vf stats appear in the request list, but their data is allocated by
22658db573baSAriel Elior * the VFs themselves. We don't include them in the bp->fw_stats_num as
22668db573baSAriel Elior * it is used to determine where to place the vf stats queries in the
22678db573baSAriel Elior * request struct
22688db573baSAriel Elior */
22698db573baSAriel Elior if (IS_SRIOV(bp))
22706411280aSAriel Elior vf_headroom = bnx2x_vf_headroom(bp);
22718db573baSAriel Elior
2272ad5afc89SAriel Elior /* Request is built from stats_query_header and an array of
2273ad5afc89SAriel Elior * stats_query_cmd_group each of which contains
2274ad5afc89SAriel Elior * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2275ad5afc89SAriel Elior * configured in the stats_query_header.
2276ad5afc89SAriel Elior */
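	/* Equivalent to DIV_ROUND_UP(bp->fw_stats_num + vf_headroom,
	 * STATS_QUERY_CMD_COUNT), written out explicitly below.
	 */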
2277ad5afc89SAriel Elior num_groups =
22788db573baSAriel Elior (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
22798db573baSAriel Elior (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2280ad5afc89SAriel Elior 1 : 0));
2281ad5afc89SAriel Elior
22828db573baSAriel Elior DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
22838db573baSAriel Elior bp->fw_stats_num, vf_headroom, num_groups);
2284ad5afc89SAriel Elior bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2285ad5afc89SAriel Elior num_groups * sizeof(struct stats_query_cmd_group);
2286ad5afc89SAriel Elior
2287ad5afc89SAriel Elior /* Data for statistics requests + stats_counter
2288ad5afc89SAriel Elior * stats_counter holds per-STORM counters that are incremented
2289ad5afc89SAriel Elior * when STORM has finished with the current request.
2290ad5afc89SAriel Elior * memory for FCoE offloaded statistics is counted anyway,
2291ad5afc89SAriel Elior * even if they will not be sent.
2292ad5afc89SAriel Elior * VF stats are not accounted for here as the data of VF stats is stored
2293ad5afc89SAriel Elior * in memory allocated by the VF, not here.
2294ad5afc89SAriel Elior */
2295ad5afc89SAriel Elior bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2296ad5afc89SAriel Elior sizeof(struct per_pf_stats) +
2297ad5afc89SAriel Elior sizeof(struct fcoe_statistics_params) +
2298ad5afc89SAriel Elior sizeof(struct per_queue_stats) * num_queue_stats +
2299ad5afc89SAriel Elior sizeof(struct stats_counter);
2300ad5afc89SAriel Elior
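	/* The request block and the data block share a single DMA allocation:
	 * the request starts at offset 0 and the data area follows it at
	 * offset fw_stats_req_sz (see the shortcut pointers set below).
	 */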
2301cd2b0389SJoe Perches bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2302ad5afc89SAriel Elior bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2303cd2b0389SJoe Perches if (!bp->fw_stats)
2304cd2b0389SJoe Perches goto alloc_mem_err;
2305ad5afc89SAriel Elior
2306ad5afc89SAriel Elior /* Set shortcuts */
2307ad5afc89SAriel Elior bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2308ad5afc89SAriel Elior bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2309ad5afc89SAriel Elior bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2310ad5afc89SAriel Elior ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2311ad5afc89SAriel Elior bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2312ad5afc89SAriel Elior bp->fw_stats_req_sz;
2313ad5afc89SAriel Elior
23146bf07b8eSYuval Mintz DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2315ad5afc89SAriel Elior U64_HI(bp->fw_stats_req_mapping),
2316ad5afc89SAriel Elior U64_LO(bp->fw_stats_req_mapping));
23176bf07b8eSYuval Mintz DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2318ad5afc89SAriel Elior U64_HI(bp->fw_stats_data_mapping),
2319ad5afc89SAriel Elior U64_LO(bp->fw_stats_data_mapping));
2320ad5afc89SAriel Elior return 0;
2321ad5afc89SAriel Elior
2322ad5afc89SAriel Elior alloc_mem_err:
2323ad5afc89SAriel Elior bnx2x_free_fw_stats_mem(bp);
2324ad5afc89SAriel Elior BNX2X_ERR("Can't allocate FW stats memory\n");
2325ad5afc89SAriel Elior return -ENOMEM;
2326ad5afc89SAriel Elior }
2327ad5afc89SAriel Elior
2328ad5afc89SAriel Elior /* send load request to mcp and analyze response */
bnx2x_nic_load_request(struct bnx2x * bp,u32 * load_code)2329ad5afc89SAriel Elior static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2330ad5afc89SAriel Elior {
2331178135c1SDmitry Kravkov u32 param;
2332178135c1SDmitry Kravkov
2333ad5afc89SAriel Elior /* init fw_seq */
2334ad5afc89SAriel Elior bp->fw_seq =
2335ad5afc89SAriel Elior (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2336ad5afc89SAriel Elior DRV_MSG_SEQ_NUMBER_MASK);
2337ad5afc89SAriel Elior BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2338ad5afc89SAriel Elior
2339ad5afc89SAriel Elior /* Get current FW pulse sequence */
2340ad5afc89SAriel Elior bp->fw_drv_pulse_wr_seq =
2341ad5afc89SAriel Elior (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2342ad5afc89SAriel Elior DRV_PULSE_SEQ_MASK);
2343ad5afc89SAriel Elior BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2344ad5afc89SAriel Elior
2345178135c1SDmitry Kravkov param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2346178135c1SDmitry Kravkov
2347178135c1SDmitry Kravkov if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2348178135c1SDmitry Kravkov param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2349178135c1SDmitry Kravkov
2350ad5afc89SAriel Elior /* load request */
2351178135c1SDmitry Kravkov (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2352ad5afc89SAriel Elior
2353ad5afc89SAriel Elior /* if mcp fails to respond we must abort */
2354ad5afc89SAriel Elior if (!(*load_code)) {
2355ad5afc89SAriel Elior BNX2X_ERR("MCP response failure, aborting\n");
2356ad5afc89SAriel Elior return -EBUSY;
2357ad5afc89SAriel Elior }
2358ad5afc89SAriel Elior
2359ad5afc89SAriel Elior /* If mcp refused (e.g. other port is in diagnostic mode) we
2360ad5afc89SAriel Elior * must abort
2361ad5afc89SAriel Elior */
2362ad5afc89SAriel Elior if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2363ad5afc89SAriel Elior BNX2X_ERR("MCP refused load request, aborting\n");
2364ad5afc89SAriel Elior return -EBUSY;
2365ad5afc89SAriel Elior }
2366ad5afc89SAriel Elior return 0;
2367ad5afc89SAriel Elior }
2368ad5afc89SAriel Elior
2369ad5afc89SAriel Elior /* check whether another PF has already loaded FW to chip. In
2370ad5afc89SAriel Elior * virtualized environments a pf from another VM may have already
2371ad5afc89SAriel Elior * initialized the device including loading FW
2372ad5afc89SAriel Elior */
bnx2x_compare_fw_ver(struct bnx2x * bp,u32 load_code,bool print_err)237391ebb929SYuval Mintz int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2374ad5afc89SAriel Elior {
2375ad5afc89SAriel Elior /* is another pf loaded on this engine? */
2376ad5afc89SAriel Elior if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2377ad5afc89SAriel Elior load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2378424e7834SManish Chopra u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng;
2379424e7834SManish Chopra u32 loaded_fw;
2380452427b0SYuval Mintz
2381452427b0SYuval Mintz /* read loaded FW from chip */
2382424e7834SManish Chopra loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2383452427b0SYuval Mintz
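		/* The word read above packs the running FW version:
		 * bits 7:0 major, 15:8 minor, 23:16 revision, 31:24 engineering.
		 */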
2384424e7834SManish Chopra loaded_fw_major = loaded_fw & 0xff;
2385424e7834SManish Chopra loaded_fw_minor = (loaded_fw >> 8) & 0xff;
2386424e7834SManish Chopra loaded_fw_rev = (loaded_fw >> 16) & 0xff;
2387424e7834SManish Chopra loaded_fw_eng = (loaded_fw >> 24) & 0xff;
2388424e7834SManish Chopra
2389424e7834SManish Chopra DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n",
2390424e7834SManish Chopra loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng);
2391ad5afc89SAriel Elior
2392ad5afc89SAriel Elior /* abort nic load if version mismatch */
2393424e7834SManish Chopra if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION ||
2394424e7834SManish Chopra loaded_fw_minor != BCM_5710_FW_MINOR_VERSION ||
2395424e7834SManish Chopra loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION ||
2396424e7834SManish Chopra loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) {
239791ebb929SYuval Mintz if (print_err)
2398424e7834SManish Chopra BNX2X_ERR("loaded FW incompatible. Aborting\n");
239991ebb929SYuval Mintz else
2400424e7834SManish Chopra BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n");
2401424e7834SManish Chopra
2402ad5afc89SAriel Elior return -EBUSY;
2403ad5afc89SAriel Elior }
2404ad5afc89SAriel Elior }
2405ad5afc89SAriel Elior return 0;
2406452427b0SYuval Mintz }
2407452427b0SYuval Mintz
2408ad5afc89SAriel Elior /* returns the "mcp load_code" according to global load_count array */
bnx2x_nic_load_no_mcp(struct bnx2x * bp,int port)2409ad5afc89SAriel Elior static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2410ad5afc89SAriel Elior {
2411ad5afc89SAriel Elior int path = BP_PATH(bp);
2412ad5afc89SAriel Elior
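	/* bnx2x_load_count[path][0] counts all loaded functions on this path,
	 * [1 + port] counts them per port; the first loader of the path/port
	 * gets the COMMON/PORT load code respectively.
	 */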
2413ad5afc89SAriel Elior DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2414a8f47eb7Sstephen hemminger path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2415a8f47eb7Sstephen hemminger bnx2x_load_count[path][2]);
2416a8f47eb7Sstephen hemminger bnx2x_load_count[path][0]++;
2417a8f47eb7Sstephen hemminger bnx2x_load_count[path][1 + port]++;
2418ad5afc89SAriel Elior DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2419a8f47eb7Sstephen hemminger path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2420a8f47eb7Sstephen hemminger bnx2x_load_count[path][2]);
2421a8f47eb7Sstephen hemminger if (bnx2x_load_count[path][0] == 1)
2422ad5afc89SAriel Elior return FW_MSG_CODE_DRV_LOAD_COMMON;
2423a8f47eb7Sstephen hemminger else if (bnx2x_load_count[path][1 + port] == 1)
2424ad5afc89SAriel Elior return FW_MSG_CODE_DRV_LOAD_PORT;
2425ad5afc89SAriel Elior else
2426ad5afc89SAriel Elior return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2427ad5afc89SAriel Elior }
2428ad5afc89SAriel Elior
2429ad5afc89SAriel Elior /* mark PMF if applicable */
bnx2x_nic_load_pmf(struct bnx2x * bp,u32 load_code)2430ad5afc89SAriel Elior static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2431ad5afc89SAriel Elior {
2432ad5afc89SAriel Elior if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2433ad5afc89SAriel Elior (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2434ad5afc89SAriel Elior (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2435ad5afc89SAriel Elior bp->port.pmf = 1;
2436ad5afc89SAriel Elior /* We need the barrier to ensure the ordering between the
2437ad5afc89SAriel Elior * writing to bp->port.pmf here and reading it from the
2438ad5afc89SAriel Elior * bnx2x_periodic_task().
2439ad5afc89SAriel Elior */
2440ad5afc89SAriel Elior smp_mb();
2441ad5afc89SAriel Elior } else {
2442ad5afc89SAriel Elior bp->port.pmf = 0;
2443ad5afc89SAriel Elior }
2444ad5afc89SAriel Elior
2445ad5afc89SAriel Elior DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2446ad5afc89SAriel Elior }
2447ad5afc89SAriel Elior
bnx2x_nic_load_afex_dcc(struct bnx2x * bp,int load_code)2448ad5afc89SAriel Elior static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2449ad5afc89SAriel Elior {
2450ad5afc89SAriel Elior if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2451ad5afc89SAriel Elior (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2452ad5afc89SAriel Elior (bp->common.shmem2_base)) {
2453ad5afc89SAriel Elior if (SHMEM2_HAS(bp, dcc_support))
2454ad5afc89SAriel Elior SHMEM2_WR(bp, dcc_support,
2455ad5afc89SAriel Elior (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2456ad5afc89SAriel Elior SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2457ad5afc89SAriel Elior if (SHMEM2_HAS(bp, afex_driver_support))
2458ad5afc89SAriel Elior SHMEM2_WR(bp, afex_driver_support,
2459ad5afc89SAriel Elior SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2460ad5afc89SAriel Elior }
2461ad5afc89SAriel Elior
2462ad5afc89SAriel Elior /* Set AFEX default VLAN tag to an invalid value */
2463ad5afc89SAriel Elior bp->afex_def_vlan_tag = -1;
2464452427b0SYuval Mintz }
2465452427b0SYuval Mintz
24661191cb83SEric Dumazet /**
24671191cb83SEric Dumazet * bnx2x_bz_fp - zero content of the fastpath structure.
24681191cb83SEric Dumazet *
24691191cb83SEric Dumazet * @bp: driver handle
24701191cb83SEric Dumazet * @index: fastpath index to be zeroed
24711191cb83SEric Dumazet *
24721191cb83SEric Dumazet * Makes sure the contents of bp->fp[index].napi are kept
24731191cb83SEric Dumazet * intact.
24741191cb83SEric Dumazet */
bnx2x_bz_fp(struct bnx2x * bp,int index)24751191cb83SEric Dumazet static void bnx2x_bz_fp(struct bnx2x *bp, int index)
24761191cb83SEric Dumazet {
24771191cb83SEric Dumazet struct bnx2x_fastpath *fp = &bp->fp[index];
247865565884SMerav Sicron int cos;
24791191cb83SEric Dumazet struct napi_struct orig_napi = fp->napi;
248015192a8cSBarak Witkowski struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2481d76a6111SYuval Mintz
24821191cb83SEric Dumazet /* bzero bnx2x_fastpath contents */
2483c3146eb6SDmitry Kravkov if (fp->tpa_info)
2484c3146eb6SDmitry Kravkov memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2485c3146eb6SDmitry Kravkov sizeof(struct bnx2x_agg_info));
24861191cb83SEric Dumazet memset(fp, 0, sizeof(*fp));
24871191cb83SEric Dumazet
24881191cb83SEric Dumazet /* Restore the NAPI object as it has been already initialized */
24891191cb83SEric Dumazet fp->napi = orig_napi;
249015192a8cSBarak Witkowski fp->tpa_info = orig_tpa_info;
24911191cb83SEric Dumazet fp->bp = bp;
24921191cb83SEric Dumazet fp->index = index;
24931191cb83SEric Dumazet if (IS_ETH_FP(fp))
24941191cb83SEric Dumazet fp->max_cos = bp->max_cos;
24951191cb83SEric Dumazet else
24961191cb83SEric Dumazet /* Special queues support only one CoS */
24971191cb83SEric Dumazet fp->max_cos = 1;
24981191cb83SEric Dumazet
249965565884SMerav Sicron /* Init txdata pointers */
250065565884SMerav Sicron if (IS_FCOE_FP(fp))
250165565884SMerav Sicron fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
250265565884SMerav Sicron if (IS_ETH_FP(fp))
250365565884SMerav Sicron for_each_cos_in_tx_queue(fp, cos)
250465565884SMerav Sicron fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
250565565884SMerav Sicron BNX2X_NUM_ETH_QUEUES(bp) + index];
250665565884SMerav Sicron
250716a5fd92SYuval Mintz /* set the tpa flag for each queue. The tpa flag determines the queue
25081191cb83SEric Dumazet * minimal size so it must be set prior to queue memory allocation
25091191cb83SEric Dumazet */
2510f8dcb5e3SMichal Schmidt if (bp->dev->features & NETIF_F_LRO)
25111191cb83SEric Dumazet fp->mode = TPA_MODE_LRO;
25123c3def5fSMichael Chan else if (bp->dev->features & NETIF_F_GRO_HW)
25131191cb83SEric Dumazet fp->mode = TPA_MODE_GRO;
25147e6b4d44SMichal Schmidt else
25157e6b4d44SMichal Schmidt fp->mode = TPA_MODE_DISABLED;
25161191cb83SEric Dumazet
251722a8f237SMichal Schmidt /* We don't want TPA if it's disabled in bp
251822a8f237SMichal Schmidt * or if this is an FCoE L2 ring.
251922a8f237SMichal Schmidt */
252022a8f237SMichal Schmidt if (bp->disable_tpa || IS_FCOE_FP(fp))
25217e6b4d44SMichal Schmidt fp->mode = TPA_MODE_DISABLED;
252255c11941SMerav Sicron }
252355c11941SMerav Sicron
bnx2x_set_os_driver_state(struct bnx2x * bp,u32 state)2524230d00ebSYuval Mintz void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2525230d00ebSYuval Mintz {
2526230d00ebSYuval Mintz u32 cur;
2527230d00ebSYuval Mintz
2528230d00ebSYuval Mintz if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2529230d00ebSYuval Mintz return;
2530230d00ebSYuval Mintz
2531230d00ebSYuval Mintz cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2532230d00ebSYuval Mintz DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2533230d00ebSYuval Mintz cur, state);
2534230d00ebSYuval Mintz
2535230d00ebSYuval Mintz SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2536230d00ebSYuval Mintz }
2537230d00ebSYuval Mintz
bnx2x_load_cnic(struct bnx2x * bp)253855c11941SMerav Sicron int bnx2x_load_cnic(struct bnx2x *bp)
253955c11941SMerav Sicron {
254055c11941SMerav Sicron int i, rc, port = BP_PORT(bp);
254155c11941SMerav Sicron
254255c11941SMerav Sicron DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
254355c11941SMerav Sicron
254455c11941SMerav Sicron mutex_init(&bp->cnic_mutex);
254555c11941SMerav Sicron
2546ad5afc89SAriel Elior if (IS_PF(bp)) {
254755c11941SMerav Sicron rc = bnx2x_alloc_mem_cnic(bp);
254855c11941SMerav Sicron if (rc) {
254955c11941SMerav Sicron BNX2X_ERR("Unable to allocate bp memory for cnic\n");
255055c11941SMerav Sicron LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
255155c11941SMerav Sicron }
2552ad5afc89SAriel Elior }
255355c11941SMerav Sicron
255455c11941SMerav Sicron rc = bnx2x_alloc_fp_mem_cnic(bp);
255555c11941SMerav Sicron if (rc) {
255655c11941SMerav Sicron BNX2X_ERR("Unable to allocate memory for cnic fps\n");
255755c11941SMerav Sicron LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
255855c11941SMerav Sicron }
255955c11941SMerav Sicron
256055c11941SMerav Sicron /* Update the number of queues with the cnic queues */
256155c11941SMerav Sicron rc = bnx2x_set_real_num_queues(bp, 1);
256255c11941SMerav Sicron if (rc) {
256355c11941SMerav Sicron BNX2X_ERR("Unable to set real_num_queues including cnic\n");
256455c11941SMerav Sicron LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
256555c11941SMerav Sicron }
256655c11941SMerav Sicron
256755c11941SMerav Sicron /* Add all CNIC NAPI objects */
256855c11941SMerav Sicron bnx2x_add_all_napi_cnic(bp);
256955c11941SMerav Sicron DP(NETIF_MSG_IFUP, "cnic napi added\n");
257055c11941SMerav Sicron bnx2x_napi_enable_cnic(bp);
257155c11941SMerav Sicron
257255c11941SMerav Sicron rc = bnx2x_init_hw_func_cnic(bp);
257355c11941SMerav Sicron if (rc)
257455c11941SMerav Sicron LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
257555c11941SMerav Sicron
257655c11941SMerav Sicron bnx2x_nic_init_cnic(bp);
257755c11941SMerav Sicron
2578ad5afc89SAriel Elior if (IS_PF(bp)) {
257955c11941SMerav Sicron /* Enable Timer scan */
258055c11941SMerav Sicron REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
258155c11941SMerav Sicron
2582ad5afc89SAriel Elior /* setup cnic queues */
258355c11941SMerav Sicron for_each_cnic_queue(bp, i) {
258455c11941SMerav Sicron rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
258555c11941SMerav Sicron if (rc) {
258655c11941SMerav Sicron BNX2X_ERR("Queue setup failed\n");
258755c11941SMerav Sicron LOAD_ERROR_EXIT(bp, load_error_cnic2);
258855c11941SMerav Sicron }
258955c11941SMerav Sicron }
2590ad5afc89SAriel Elior }
259155c11941SMerav Sicron
259255c11941SMerav Sicron /* Initialize Rx filter. */
25938b09be5fSYuval Mintz bnx2x_set_rx_mode_inner(bp);
259455c11941SMerav Sicron
259555c11941SMerav Sicron /* re-read iscsi info */
259655c11941SMerav Sicron bnx2x_get_iscsi_info(bp);
259755c11941SMerav Sicron bnx2x_setup_cnic_irq_info(bp);
259855c11941SMerav Sicron bnx2x_setup_cnic_info(bp);
259955c11941SMerav Sicron bp->cnic_loaded = true;
260055c11941SMerav Sicron if (bp->state == BNX2X_STATE_OPEN)
260155c11941SMerav Sicron bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
260255c11941SMerav Sicron
260355c11941SMerav Sicron DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
260455c11941SMerav Sicron
260555c11941SMerav Sicron return 0;
260655c11941SMerav Sicron
260755c11941SMerav Sicron #ifndef BNX2X_STOP_ON_ERROR
260855c11941SMerav Sicron load_error_cnic2:
260955c11941SMerav Sicron /* Disable Timer scan */
261055c11941SMerav Sicron REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
261155c11941SMerav Sicron
261255c11941SMerav Sicron load_error_cnic1:
261355c11941SMerav Sicron bnx2x_napi_disable_cnic(bp);
261455c11941SMerav Sicron /* Update the number of queues without the cnic queues */
2615d9d81862SYuval Mintz if (bnx2x_set_real_num_queues(bp, 0))
261655c11941SMerav Sicron BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
261755c11941SMerav Sicron load_error_cnic0:
261855c11941SMerav Sicron BNX2X_ERR("CNIC-related load failed\n");
261955c11941SMerav Sicron bnx2x_free_fp_mem_cnic(bp);
262055c11941SMerav Sicron bnx2x_free_mem_cnic(bp);
262155c11941SMerav Sicron return rc;
262255c11941SMerav Sicron #endif /* ! BNX2X_STOP_ON_ERROR */
26231191cb83SEric Dumazet }
26241191cb83SEric Dumazet
2625adfc5217SJeff Kirsher /* must be called with rtnl_lock */
bnx2x_nic_load(struct bnx2x * bp,int load_mode)2626adfc5217SJeff Kirsher int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2627adfc5217SJeff Kirsher {
2628adfc5217SJeff Kirsher int port = BP_PORT(bp);
2629ad5afc89SAriel Elior int i, rc = 0, load_code = 0;
2630adfc5217SJeff Kirsher
263155c11941SMerav Sicron DP(NETIF_MSG_IFUP, "Starting NIC load\n");
263255c11941SMerav Sicron DP(NETIF_MSG_IFUP,
263355c11941SMerav Sicron "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
263455c11941SMerav Sicron
2635adfc5217SJeff Kirsher #ifdef BNX2X_STOP_ON_ERROR
263651c1a580SMerav Sicron if (unlikely(bp->panic)) {
263751c1a580SMerav Sicron BNX2X_ERR("Can't load NIC when there is panic\n");
2638adfc5217SJeff Kirsher return -EPERM;
263951c1a580SMerav Sicron }
2640adfc5217SJeff Kirsher #endif
2641adfc5217SJeff Kirsher
2642adfc5217SJeff Kirsher bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2643adfc5217SJeff Kirsher
264416a5fd92SYuval Mintz /* zero the structure w/o any lock, before SP handler is initialized */
2645adfc5217SJeff Kirsher memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2646adfc5217SJeff Kirsher __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2647adfc5217SJeff Kirsher &bp->last_reported_link.link_report_flags);
2648adfc5217SJeff Kirsher
2649ad5afc89SAriel Elior if (IS_PF(bp))
2650adfc5217SJeff Kirsher /* must be called before memory allocation and HW init */
2651adfc5217SJeff Kirsher bnx2x_ilt_set_info(bp);
2652adfc5217SJeff Kirsher
2653adfc5217SJeff Kirsher /*
2654adfc5217SJeff Kirsher * Zero fastpath structures preserving invariants like napi, which are
2655adfc5217SJeff Kirsher * allocated only once, fp index, max_cos, bp pointer.
26567e6b4d44SMichal Schmidt * Also set fp->mode and txdata_ptr.
2657adfc5217SJeff Kirsher */
265851c1a580SMerav Sicron DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2659adfc5217SJeff Kirsher for_each_queue(bp, i)
2660adfc5217SJeff Kirsher bnx2x_bz_fp(bp, i);
266155c11941SMerav Sicron memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
266255c11941SMerav Sicron bp->num_cnic_queues) *
266365565884SMerav Sicron sizeof(struct bnx2x_fp_txdata));
2664adfc5217SJeff Kirsher
266555c11941SMerav Sicron bp->fcoe_init = false;
2666adfc5217SJeff Kirsher
2667adfc5217SJeff Kirsher /* Set the receive queues buffer size */
2668adfc5217SJeff Kirsher bnx2x_set_rx_buf_size(bp);
2669adfc5217SJeff Kirsher
2670ad5afc89SAriel Elior if (IS_PF(bp)) {
2671ad5afc89SAriel Elior rc = bnx2x_alloc_mem(bp);
2672ad5afc89SAriel Elior if (rc) {
2673ad5afc89SAriel Elior BNX2X_ERR("Unable to allocate bp memory\n");
2674ad5afc89SAriel Elior return rc;
2675ad5afc89SAriel Elior }
2676ad5afc89SAriel Elior }
2677ad5afc89SAriel Elior
2678ad5afc89SAriel Elior /* needs to be done after alloc mem, since it's self-adjusting to the
2679ad5afc89SAriel Elior * amount of memory available for RSS queues
2680ad5afc89SAriel Elior */
2681ad5afc89SAriel Elior rc = bnx2x_alloc_fp_mem(bp);
2682ad5afc89SAriel Elior if (rc) {
2683ad5afc89SAriel Elior BNX2X_ERR("Unable to allocate memory for fps\n");
2684ad5afc89SAriel Elior LOAD_ERROR_EXIT(bp, load_error0);
2685ad5afc89SAriel Elior }
2686adfc5217SJeff Kirsher
2687e3ed4eaeSDmitry Kravkov /* Allocate memory for FW statistics */
2688fb653827SDan Carpenter rc = bnx2x_alloc_fw_stats_mem(bp);
2689fb653827SDan Carpenter if (rc)
2690e3ed4eaeSDmitry Kravkov LOAD_ERROR_EXIT(bp, load_error0);
2691e3ed4eaeSDmitry Kravkov
26928d9ac297SAriel Elior /* request pf to initialize status blocks */
26938d9ac297SAriel Elior if (IS_VF(bp)) {
26948d9ac297SAriel Elior rc = bnx2x_vfpf_init(bp);
26958d9ac297SAriel Elior if (rc)
26968d9ac297SAriel Elior LOAD_ERROR_EXIT(bp, load_error0);
26978d9ac297SAriel Elior }
26988d9ac297SAriel Elior
2699adfc5217SJeff Kirsher /* Since bnx2x_alloc_mem() may update
2700adfc5217SJeff Kirsher * bp->num_queues, bnx2x_set_real_num_queues() should always
270155c11941SMerav Sicron * come after it. At this stage cnic queues are not counted.
2702adfc5217SJeff Kirsher */
270355c11941SMerav Sicron rc = bnx2x_set_real_num_queues(bp, 0);
2704adfc5217SJeff Kirsher if (rc) {
2705adfc5217SJeff Kirsher BNX2X_ERR("Unable to set real_num_queues\n");
2706adfc5217SJeff Kirsher LOAD_ERROR_EXIT(bp, load_error0);
2707adfc5217SJeff Kirsher }
2708adfc5217SJeff Kirsher
2709adfc5217SJeff Kirsher /* configure multi cos mappings in kernel.
271016a5fd92SYuval Mintz * this configuration may be overridden by a multi class queue
271116a5fd92SYuval Mintz * discipline or by a dcbx negotiation result.
2712adfc5217SJeff Kirsher */
2713adfc5217SJeff Kirsher bnx2x_setup_tc(bp->dev, bp->max_cos);
2714adfc5217SJeff Kirsher
271526614ba5SMerav Sicron /* Add all NAPI objects */
271626614ba5SMerav Sicron bnx2x_add_all_napi(bp);
271755c11941SMerav Sicron DP(NETIF_MSG_IFUP, "napi added\n");
2718adfc5217SJeff Kirsher bnx2x_napi_enable(bp);
2719bf23ffc8SThinh Tran bp->nic_stopped = false;
2720adfc5217SJeff Kirsher
2721ad5afc89SAriel Elior if (IS_PF(bp)) {
2722889b9af3SAriel Elior /* set pf load just before approaching the MCP */
2723889b9af3SAriel Elior bnx2x_set_pf_load(bp);
2724889b9af3SAriel Elior
2725ad5afc89SAriel Elior /* if mcp exists send load request and analyze response */
2726adfc5217SJeff Kirsher if (!BP_NOMCP(bp)) {
2727ad5afc89SAriel Elior /* attempt to load pf */
2728ad5afc89SAriel Elior rc = bnx2x_nic_load_request(bp, &load_code);
2729ad5afc89SAriel Elior if (rc)
2730adfc5217SJeff Kirsher LOAD_ERROR_EXIT(bp, load_error1);
2731ad5afc89SAriel Elior
2732ad5afc89SAriel Elior /* what did mcp say? */
273391ebb929SYuval Mintz rc = bnx2x_compare_fw_ver(bp, load_code, true);
2734ad5afc89SAriel Elior if (rc) {
2735ad5afc89SAriel Elior bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2736d1e2d966SAriel Elior LOAD_ERROR_EXIT(bp, load_error2);
2737d1e2d966SAriel Elior }
2738adfc5217SJeff Kirsher } else {
2739ad5afc89SAriel Elior load_code = bnx2x_nic_load_no_mcp(bp, port);
2740adfc5217SJeff Kirsher }
2741adfc5217SJeff Kirsher
2742ad5afc89SAriel Elior /* mark pmf if applicable */
2743ad5afc89SAriel Elior bnx2x_nic_load_pmf(bp, load_code);
2744adfc5217SJeff Kirsher
2745adfc5217SJeff Kirsher /* Init Function state controlling object */
2746adfc5217SJeff Kirsher bnx2x__init_func_obj(bp);
2747adfc5217SJeff Kirsher
2748adfc5217SJeff Kirsher /* Initialize HW */
2749adfc5217SJeff Kirsher rc = bnx2x_init_hw(bp, load_code);
2750adfc5217SJeff Kirsher if (rc) {
2751adfc5217SJeff Kirsher BNX2X_ERR("HW init failed, aborting\n");
2752adfc5217SJeff Kirsher bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2753adfc5217SJeff Kirsher LOAD_ERROR_EXIT(bp, load_error2);
2754adfc5217SJeff Kirsher }
2755ad5afc89SAriel Elior }
2756adfc5217SJeff Kirsher
2757ecf01c22SYuval Mintz bnx2x_pre_irq_nic_init(bp);
2758ecf01c22SYuval Mintz
2759adfc5217SJeff Kirsher /* Connect to IRQs */
2760adfc5217SJeff Kirsher rc = bnx2x_setup_irqs(bp);
2761adfc5217SJeff Kirsher if (rc) {
2762ad5afc89SAriel Elior BNX2X_ERR("setup irqs failed\n");
2763ad5afc89SAriel Elior if (IS_PF(bp))
2764adfc5217SJeff Kirsher bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2765adfc5217SJeff Kirsher LOAD_ERROR_EXIT(bp, load_error2);
2766adfc5217SJeff Kirsher }
2767adfc5217SJeff Kirsher
2768adfc5217SJeff Kirsher /* Init per-function objects */
2769ad5afc89SAriel Elior if (IS_PF(bp)) {
2770ecf01c22SYuval Mintz /* Setup NIC internals and enable interrupts */
2771ecf01c22SYuval Mintz bnx2x_post_irq_nic_init(bp, load_code);
2772ecf01c22SYuval Mintz
2773adfc5217SJeff Kirsher bnx2x_init_bp_objs(bp);
2774b56e9670SAriel Elior bnx2x_iov_nic_init(bp);
2775adfc5217SJeff Kirsher
2776a3348722SBarak Witkowski /* Set AFEX default VLAN tag to an invalid value */
2777a3348722SBarak Witkowski bp->afex_def_vlan_tag = -1;
2778ad5afc89SAriel Elior bnx2x_nic_load_afex_dcc(bp, load_code);
2779adfc5217SJeff Kirsher bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2780adfc5217SJeff Kirsher rc = bnx2x_func_start(bp);
2781adfc5217SJeff Kirsher if (rc) {
2782adfc5217SJeff Kirsher BNX2X_ERR("Function start failed!\n");
2783adfc5217SJeff Kirsher bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2784ad5afc89SAriel Elior
2785adfc5217SJeff Kirsher LOAD_ERROR_EXIT(bp, load_error3);
2786adfc5217SJeff Kirsher }
2787adfc5217SJeff Kirsher
2788adfc5217SJeff Kirsher /* Send LOAD_DONE command to MCP */
2789adfc5217SJeff Kirsher if (!BP_NOMCP(bp)) {
2790ad5afc89SAriel Elior load_code = bnx2x_fw_command(bp,
2791ad5afc89SAriel Elior DRV_MSG_CODE_LOAD_DONE, 0);
2792adfc5217SJeff Kirsher if (!load_code) {
2793adfc5217SJeff Kirsher BNX2X_ERR("MCP response failure, aborting\n");
2794adfc5217SJeff Kirsher rc = -EBUSY;
2795adfc5217SJeff Kirsher LOAD_ERROR_EXIT(bp, load_error3);
2796adfc5217SJeff Kirsher }
2797adfc5217SJeff Kirsher }
2798adfc5217SJeff Kirsher
27990c14e5ceSAriel Elior /* initialize FW coalescing state machines in RAM */
28000c14e5ceSAriel Elior bnx2x_update_coalesce(bp);
280160cad4e6SAriel Elior }
28020c14e5ceSAriel Elior
2803ad5afc89SAriel Elior /* setup the leading queue */
2804adfc5217SJeff Kirsher rc = bnx2x_setup_leading(bp);
2805adfc5217SJeff Kirsher if (rc) {
2806adfc5217SJeff Kirsher BNX2X_ERR("Setup leading failed!\n");
2807adfc5217SJeff Kirsher LOAD_ERROR_EXIT(bp, load_error3);
2808adfc5217SJeff Kirsher }
2809adfc5217SJeff Kirsher
2810ad5afc89SAriel Elior /* set up the rest of the queues */
281155c11941SMerav Sicron for_each_nondefault_eth_queue(bp, i) {
281260cad4e6SAriel Elior if (IS_PF(bp))
281360cad4e6SAriel Elior rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
281460cad4e6SAriel Elior else /* VF */
281560cad4e6SAriel Elior rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
281651c1a580SMerav Sicron if (rc) {
281760cad4e6SAriel Elior BNX2X_ERR("Queue %d setup failed\n", i);
281855c11941SMerav Sicron LOAD_ERROR_EXIT(bp, load_error3);
2819adfc5217SJeff Kirsher }
282051c1a580SMerav Sicron }
2821adfc5217SJeff Kirsher
2822ad5afc89SAriel Elior /* setup rss */
282360cad4e6SAriel Elior rc = bnx2x_init_rss(bp);
282451c1a580SMerav Sicron if (rc) {
282551c1a580SMerav Sicron BNX2X_ERR("PF RSS init failed\n");
282655c11941SMerav Sicron LOAD_ERROR_EXIT(bp, load_error3);
282751c1a580SMerav Sicron }
28288d9ac297SAriel Elior
2829adfc5217SJeff Kirsher /* Now when Clients are configured we are ready to work */
2830adfc5217SJeff Kirsher bp->state = BNX2X_STATE_OPEN;
2831adfc5217SJeff Kirsher
2832adfc5217SJeff Kirsher /* Configure a ucast MAC */
2833ad5afc89SAriel Elior if (IS_PF(bp))
2834adfc5217SJeff Kirsher rc = bnx2x_set_eth_mac(bp, true);
28358d9ac297SAriel Elior else /* vf */
2836f8f4f61aSDmitry Kravkov rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2837f8f4f61aSDmitry Kravkov true);
283851c1a580SMerav Sicron if (rc) {
283951c1a580SMerav Sicron BNX2X_ERR("Setting Ethernet MAC failed\n");
284055c11941SMerav Sicron LOAD_ERROR_EXIT(bp, load_error3);
284151c1a580SMerav Sicron }
2842adfc5217SJeff Kirsher
2843ad5afc89SAriel Elior if (IS_PF(bp) && bp->pending_max) {
2844adfc5217SJeff Kirsher bnx2x_update_max_mf_config(bp, bp->pending_max);
2845adfc5217SJeff Kirsher bp->pending_max = 0;
2846adfc5217SJeff Kirsher }
2847adfc5217SJeff Kirsher
2848484c016dSSudarsana Reddy Kalluru bp->force_link_down = false;
2849ad5afc89SAriel Elior if (bp->port.pmf) {
2850ad5afc89SAriel Elior rc = bnx2x_initial_phy_init(bp, load_mode);
2851ad5afc89SAriel Elior if (rc)
2852ad5afc89SAriel Elior LOAD_ERROR_EXIT(bp, load_error3);
2853ad5afc89SAriel Elior }
2854c63da990SBarak Witkowski bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2855adfc5217SJeff Kirsher
2856adfc5217SJeff Kirsher /* Start fast path */
2857adfc5217SJeff Kirsher
285805cc5a39SYuval Mintz /* Re-configure vlan filters */
285905cc5a39SYuval Mintz rc = bnx2x_vlan_reconfigure_vid(bp);
286005cc5a39SYuval Mintz if (rc)
286105cc5a39SYuval Mintz LOAD_ERROR_EXIT(bp, load_error3);
286205cc5a39SYuval Mintz
2863adfc5217SJeff Kirsher /* Initialize Rx filter. */
28648b09be5fSYuval Mintz bnx2x_set_rx_mode_inner(bp);
2865adfc5217SJeff Kirsher
2866eeed018cSMichal Kalderon if (bp->flags & PTP_SUPPORTED) {
286707f12622SSudarsana Reddy Kalluru bnx2x_register_phc(bp);
2868eeed018cSMichal Kalderon bnx2x_init_ptp(bp);
2869eeed018cSMichal Kalderon bnx2x_configure_ptp_filters(bp);
2870eeed018cSMichal Kalderon }
2871eeed018cSMichal Kalderon /* Start Tx */
2872adfc5217SJeff Kirsher switch (load_mode) {
2873adfc5217SJeff Kirsher case LOAD_NORMAL:
287416a5fd92SYuval Mintz /* Tx queue should be only re-enabled */
2875adfc5217SJeff Kirsher netif_tx_wake_all_queues(bp->dev);
2876adfc5217SJeff Kirsher break;
2877adfc5217SJeff Kirsher
2878adfc5217SJeff Kirsher case LOAD_OPEN:
2879adfc5217SJeff Kirsher netif_tx_start_all_queues(bp->dev);
28804e857c58SPeter Zijlstra smp_mb__after_atomic();
2881adfc5217SJeff Kirsher break;
2882adfc5217SJeff Kirsher
2883adfc5217SJeff Kirsher case LOAD_DIAG:
28848970b2e4SMerav Sicron case LOAD_LOOPBACK_EXT:
2885adfc5217SJeff Kirsher bp->state = BNX2X_STATE_DIAG;
2886adfc5217SJeff Kirsher break;
2887adfc5217SJeff Kirsher
2888adfc5217SJeff Kirsher default:
2889adfc5217SJeff Kirsher break;
2890adfc5217SJeff Kirsher }
2891adfc5217SJeff Kirsher
289200253a8cSDmitry Kravkov if (bp->port.pmf)
28934c704899SBarak Witkowski bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
289400253a8cSDmitry Kravkov else
2895adfc5217SJeff Kirsher bnx2x__link_status_update(bp);
2896adfc5217SJeff Kirsher
2897adfc5217SJeff Kirsher /* start the timer */
2898adfc5217SJeff Kirsher mod_timer(&bp->timer, jiffies + bp->current_interval);
2899adfc5217SJeff Kirsher
290055c11941SMerav Sicron if (CNIC_ENABLED(bp))
290155c11941SMerav Sicron bnx2x_load_cnic(bp);
2902adfc5217SJeff Kirsher
290342f8277fSYuval Mintz if (IS_PF(bp))
290442f8277fSYuval Mintz bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
290542f8277fSYuval Mintz
2906ad5afc89SAriel Elior if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
29079ce392d4SYuval Mintz /* mark driver is loaded in shmem2 */
29089ce392d4SYuval Mintz u32 val;
29099ce392d4SYuval Mintz val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2910230d00ebSYuval Mintz val &= ~DRV_FLAGS_MTU_MASK;
2911230d00ebSYuval Mintz val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
29129ce392d4SYuval Mintz SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
29139ce392d4SYuval Mintz val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
29149ce392d4SYuval Mintz DRV_FLAGS_CAPABILITIES_LOADED_L2);
29159ce392d4SYuval Mintz }
29169ce392d4SYuval Mintz
2917adfc5217SJeff Kirsher /* Wait for all pending SP commands to complete */
2918ad5afc89SAriel Elior if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2919adfc5217SJeff Kirsher BNX2X_ERR("Timeout waiting for SP elements to complete\n");
29205d07d868SYuval Mintz bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2921adfc5217SJeff Kirsher return -EBUSY;
2922adfc5217SJeff Kirsher }
2923adfc5217SJeff Kirsher
2924c48f350fSYuval Mintz /* Update driver data for On-Chip MFW dump. */
2925c48f350fSYuval Mintz if (IS_PF(bp))
2926c48f350fSYuval Mintz bnx2x_update_mfw_dump(bp);
2927c48f350fSYuval Mintz
29289876879fSBarak Witkowski /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
29299876879fSBarak Witkowski if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
29309876879fSBarak Witkowski bnx2x_dcbx_init(bp, false);
29319876879fSBarak Witkowski
2932230d00ebSYuval Mintz if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2933230d00ebSYuval Mintz bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2934230d00ebSYuval Mintz
293555c11941SMerav Sicron DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
293655c11941SMerav Sicron
2937adfc5217SJeff Kirsher return 0;
2938adfc5217SJeff Kirsher
2939adfc5217SJeff Kirsher #ifndef BNX2X_STOP_ON_ERROR
2940adfc5217SJeff Kirsher load_error3:
2941ad5afc89SAriel Elior if (IS_PF(bp)) {
2942adfc5217SJeff Kirsher bnx2x_int_disable_sync(bp, 1);
2943adfc5217SJeff Kirsher
2944adfc5217SJeff Kirsher /* Clean queueable objects */
2945adfc5217SJeff Kirsher bnx2x_squeeze_objects(bp);
2946ad5afc89SAriel Elior }
2947adfc5217SJeff Kirsher
2948adfc5217SJeff Kirsher /* Free SKBs, SGEs, TPA pool and driver internals */
2949adfc5217SJeff Kirsher bnx2x_free_skbs(bp);
2950adfc5217SJeff Kirsher for_each_rx_queue(bp, i)
2951adfc5217SJeff Kirsher bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2952adfc5217SJeff Kirsher
2953adfc5217SJeff Kirsher /* Release IRQs */
2954adfc5217SJeff Kirsher bnx2x_free_irq(bp);
2955adfc5217SJeff Kirsher load_error2:
2956ad5afc89SAriel Elior if (IS_PF(bp) && !BP_NOMCP(bp)) {
2957adfc5217SJeff Kirsher bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2958adfc5217SJeff Kirsher bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2959adfc5217SJeff Kirsher }
2960adfc5217SJeff Kirsher
2961adfc5217SJeff Kirsher bp->port.pmf = 0;
2962adfc5217SJeff Kirsher load_error1:
2963adfc5217SJeff Kirsher bnx2x_napi_disable(bp);
2964722c6f58SMichal Schmidt bnx2x_del_all_napi(bp);
2965bf23ffc8SThinh Tran bp->nic_stopped = true;
2966ad5afc89SAriel Elior
2967889b9af3SAriel Elior /* clear pf_load status, as it was already set */
2968ad5afc89SAriel Elior if (IS_PF(bp))
2969889b9af3SAriel Elior bnx2x_clear_pf_load(bp);
2970adfc5217SJeff Kirsher load_error0:
2971ad5afc89SAriel Elior bnx2x_free_fw_stats_mem(bp);
2972e3ed4eaeSDmitry Kravkov bnx2x_free_fp_mem(bp);
2973adfc5217SJeff Kirsher bnx2x_free_mem(bp);
2974adfc5217SJeff Kirsher
2975adfc5217SJeff Kirsher return rc;
2976adfc5217SJeff Kirsher #endif /* ! BNX2X_STOP_ON_ERROR */
2977adfc5217SJeff Kirsher }
2978adfc5217SJeff Kirsher
bnx2x_drain_tx_queues(struct bnx2x * bp)29797fa6f340SYuval Mintz int bnx2x_drain_tx_queues(struct bnx2x *bp)
2980ad5afc89SAriel Elior {
2981ad5afc89SAriel Elior u8 rc = 0, cos, i;
2982ad5afc89SAriel Elior
2983ad5afc89SAriel Elior /* Wait until tx fastpath tasks complete */
2984ad5afc89SAriel Elior for_each_tx_queue(bp, i) {
2985ad5afc89SAriel Elior struct bnx2x_fastpath *fp = &bp->fp[i];
2986ad5afc89SAriel Elior
2987ad5afc89SAriel Elior for_each_cos_in_tx_queue(fp, cos)
2988ad5afc89SAriel Elior rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2989ad5afc89SAriel Elior if (rc)
2990ad5afc89SAriel Elior return rc;
2991ad5afc89SAriel Elior }
2992ad5afc89SAriel Elior return 0;
2993ad5afc89SAriel Elior }
2994ad5afc89SAriel Elior
2995adfc5217SJeff Kirsher /* must be called with rtnl_lock */
bnx2x_nic_unload(struct bnx2x * bp,int unload_mode,bool keep_link)29965d07d868SYuval Mintz int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2997adfc5217SJeff Kirsher {
2998adfc5217SJeff Kirsher int i;
2999adfc5217SJeff Kirsher bool global = false;
3000adfc5217SJeff Kirsher
300155c11941SMerav Sicron DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
300255c11941SMerav Sicron
3003230d00ebSYuval Mintz if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
3004230d00ebSYuval Mintz bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
3005230d00ebSYuval Mintz
30069ce392d4SYuval Mintz /* mark driver is unloaded in shmem2 */
3007ad5afc89SAriel Elior if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
30089ce392d4SYuval Mintz u32 val;
30099ce392d4SYuval Mintz val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
30109ce392d4SYuval Mintz SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
30119ce392d4SYuval Mintz val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
30129ce392d4SYuval Mintz }
30139ce392d4SYuval Mintz
301480bfe5ccSYuval Mintz if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
3015ad5afc89SAriel Elior (bp->state == BNX2X_STATE_CLOSED ||
3016ad5afc89SAriel Elior bp->state == BNX2X_STATE_ERROR)) {
3017adfc5217SJeff Kirsher /* We can get here if the driver has been unloaded
3018adfc5217SJeff Kirsher * during parity error recovery and is either waiting for a
3019adfc5217SJeff Kirsher * leader to complete or for other functions to unload and
3020adfc5217SJeff Kirsher * then ifdown has been issued. In this case we want to
3021adfc5217SJeff Kirsher * unload and let other functions to complete a recovery
3022adfc5217SJeff Kirsher * process.
3023adfc5217SJeff Kirsher */
3024adfc5217SJeff Kirsher bp->recovery_state = BNX2X_RECOVERY_DONE;
3025adfc5217SJeff Kirsher bp->is_leader = 0;
3026adfc5217SJeff Kirsher bnx2x_release_leader_lock(bp);
3027adfc5217SJeff Kirsher smp_mb();
3028adfc5217SJeff Kirsher
302951c1a580SMerav Sicron DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
303051c1a580SMerav Sicron BNX2X_ERR("Can't unload in closed or error state\n");
3031adfc5217SJeff Kirsher return -EINVAL;
3032adfc5217SJeff Kirsher }
3033adfc5217SJeff Kirsher
303480bfe5ccSYuval Mintz /* Nothing to do during unload if previous bnx2x_nic_load()
303516a5fd92SYuval Mintz * has not completed successfully - all resources are released.
303680bfe5ccSYuval Mintz *
303780bfe5ccSYuval Mintz * we can get here only after unsuccessful ndo_* callback, during which
303880bfe5ccSYuval Mintz * dev->IFF_UP flag is still on.
303980bfe5ccSYuval Mintz */
304080bfe5ccSYuval Mintz if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
304180bfe5ccSYuval Mintz return 0;
304280bfe5ccSYuval Mintz
304380bfe5ccSYuval Mintz /* It's important to set bp->state to a value different from
3044adfc5217SJeff Kirsher * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3045adfc5217SJeff Kirsher * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3046adfc5217SJeff Kirsher */
3047adfc5217SJeff Kirsher bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3048adfc5217SJeff Kirsher smp_mb();
3049adfc5217SJeff Kirsher
305078c3bcc5SAriel Elior /* indicate to VFs that the PF is going down */
305178c3bcc5SAriel Elior bnx2x_iov_channel_down(bp);
305278c3bcc5SAriel Elior
305355c11941SMerav Sicron if (CNIC_LOADED(bp))
305455c11941SMerav Sicron bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
305555c11941SMerav Sicron
3056adfc5217SJeff Kirsher /* Stop Tx */
3057adfc5217SJeff Kirsher bnx2x_tx_disable(bp);
305865565884SMerav Sicron netdev_reset_tc(bp->dev);
3059adfc5217SJeff Kirsher
3060adfc5217SJeff Kirsher bp->rx_mode = BNX2X_RX_MODE_NONE;
3061adfc5217SJeff Kirsher
3062adfc5217SJeff Kirsher del_timer_sync(&bp->timer);
3063adfc5217SJeff Kirsher
3064f7084059SGuilherme G. Piccoli if (IS_PF(bp) && !BP_NOMCP(bp)) {
3065adfc5217SJeff Kirsher /* Set ALWAYS_ALIVE bit in shmem */
3066adfc5217SJeff Kirsher bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3067adfc5217SJeff Kirsher bnx2x_drv_pulse(bp);
3068adfc5217SJeff Kirsher bnx2x_stats_handle(bp, STATS_EVENT_STOP);
30691355b704SMintz Yuval bnx2x_save_statistics(bp);
3070ad5afc89SAriel Elior }
3071ad5afc89SAriel Elior
3072d78a1f08SYuval Mintz /* wait till consumers catch up with producers in all queues.
3073d78a1f08SYuval Mintz * If we're recovering, FW can't write to host so no reason
3074d78a1f08SYuval Mintz * to wait for the queues to complete all Tx.
3075d78a1f08SYuval Mintz */
3076d78a1f08SYuval Mintz if (unload_mode != UNLOAD_RECOVERY)
3077ad5afc89SAriel Elior bnx2x_drain_tx_queues(bp);
3078adfc5217SJeff Kirsher
30799b176b6bSAriel Elior /* if VF, indicate to the PF that this function is going down (the PF will
30809b176b6bSAriel Elior * delete sp elements and clear initializations)
30819b176b6bSAriel Elior */
30824a4d2d37SManish Chopra if (IS_VF(bp)) {
30834a4d2d37SManish Chopra bnx2x_clear_vlan_info(bp);
30849b176b6bSAriel Elior bnx2x_vfpf_close_vf(bp);
30854a4d2d37SManish Chopra } else if (unload_mode != UNLOAD_RECOVERY) {
30869b176b6bSAriel Elior /* if this is a normal/close unload, we need to clean up the chip */
30875d07d868SYuval Mintz bnx2x_chip_cleanup(bp, unload_mode, keep_link);
30884a4d2d37SManish Chopra } else {
3089adfc5217SJeff Kirsher /* Send the UNLOAD_REQUEST to the MCP */
3090adfc5217SJeff Kirsher bnx2x_send_unload_req(bp, unload_mode);
3091adfc5217SJeff Kirsher
309216a5fd92SYuval Mintz /* Prevent transactions to host from the functions on the
3093adfc5217SJeff Kirsher * engine that doesn't reset global blocks in case of global
309416a5fd92SYuval Mintz * attention once global blocks are reset and gates are opened
3095adfc5217SJeff Kirsher * (the engine whose leader will perform the recovery
3096adfc5217SJeff Kirsher * last).
3097adfc5217SJeff Kirsher */
3098adfc5217SJeff Kirsher if (!CHIP_IS_E1x(bp))
3099adfc5217SJeff Kirsher bnx2x_pf_disable(bp);
3100adfc5217SJeff Kirsher
3101bf23ffc8SThinh Tran if (!bp->nic_stopped) {
3102adfc5217SJeff Kirsher /* Disable HW interrupts, NAPI */
3103adfc5217SJeff Kirsher bnx2x_netif_stop(bp, 1);
310426614ba5SMerav Sicron /* Delete all NAPI objects */
310526614ba5SMerav Sicron bnx2x_del_all_napi(bp);
310655c11941SMerav Sicron if (CNIC_LOADED(bp))
310755c11941SMerav Sicron bnx2x_del_all_napi_cnic(bp);
3108adfc5217SJeff Kirsher /* Release IRQs */
3109adfc5217SJeff Kirsher bnx2x_free_irq(bp);
3110bf23ffc8SThinh Tran bp->nic_stopped = true;
3111bf23ffc8SThinh Tran }
3112adfc5217SJeff Kirsher
3113adfc5217SJeff Kirsher /* Report UNLOAD_DONE to MCP */
31145d07d868SYuval Mintz bnx2x_send_unload_done(bp, false);
3115adfc5217SJeff Kirsher }
3116adfc5217SJeff Kirsher
3117adfc5217SJeff Kirsher /*
311816a5fd92SYuval Mintz * At this stage no more interrupts will arrive so we may safely clean
3119adfc5217SJeff Kirsher * the queueable objects here in case they failed to get cleaned so far.
3120adfc5217SJeff Kirsher */
3121ad5afc89SAriel Elior if (IS_PF(bp))
3122adfc5217SJeff Kirsher bnx2x_squeeze_objects(bp);
3123adfc5217SJeff Kirsher
3124adfc5217SJeff Kirsher /* There should be no more pending SP commands at this stage */
3125adfc5217SJeff Kirsher bp->sp_state = 0;
3126adfc5217SJeff Kirsher
3127adfc5217SJeff Kirsher bp->port.pmf = 0;
3128adfc5217SJeff Kirsher
3129a0d307b2SDmitry Kravkov /* clear pending work in rtnl task */
3130a0d307b2SDmitry Kravkov bp->sp_rtnl_state = 0;
3131a0d307b2SDmitry Kravkov smp_mb();
3132a0d307b2SDmitry Kravkov
3133adfc5217SJeff Kirsher /* Free SKBs, SGEs, TPA pool and driver internals */
3134adfc5217SJeff Kirsher bnx2x_free_skbs(bp);
313555c11941SMerav Sicron if (CNIC_LOADED(bp))
313655c11941SMerav Sicron bnx2x_free_skbs_cnic(bp);
3137adfc5217SJeff Kirsher for_each_rx_queue(bp, i)
3138adfc5217SJeff Kirsher bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3139adfc5217SJeff Kirsher
3140ad5afc89SAriel Elior bnx2x_free_fp_mem(bp);
3141ad5afc89SAriel Elior if (CNIC_LOADED(bp))
314255c11941SMerav Sicron bnx2x_free_fp_mem_cnic(bp);
3143ad5afc89SAriel Elior
3144ad5afc89SAriel Elior if (IS_PF(bp)) {
3145ad5afc89SAriel Elior if (CNIC_LOADED(bp))
314655c11941SMerav Sicron bnx2x_free_mem_cnic(bp);
314755c11941SMerav Sicron }
3148b4cddbd6SAriel Elior bnx2x_free_mem(bp);
3149b4cddbd6SAriel Elior
3150adfc5217SJeff Kirsher bp->state = BNX2X_STATE_CLOSED;
315155c11941SMerav Sicron bp->cnic_loaded = false;
3152adfc5217SJeff Kirsher
315342f8277fSYuval Mintz /* Clear driver version indication in shmem */
3154f7084059SGuilherme G. Piccoli if (IS_PF(bp) && !BP_NOMCP(bp))
315542f8277fSYuval Mintz bnx2x_update_mng_version(bp);
315642f8277fSYuval Mintz
3157adfc5217SJeff Kirsher /* Check if there are pending parity attentions. If there are - set
3158adfc5217SJeff Kirsher * RECOVERY_IN_PROGRESS.
3159adfc5217SJeff Kirsher */
3160ad5afc89SAriel Elior if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3161adfc5217SJeff Kirsher bnx2x_set_reset_in_progress(bp);
3162adfc5217SJeff Kirsher
3163adfc5217SJeff Kirsher /* Set RESET_IS_GLOBAL if needed */
3164adfc5217SJeff Kirsher if (global)
3165adfc5217SJeff Kirsher bnx2x_set_reset_global(bp);
3166adfc5217SJeff Kirsher }
3167adfc5217SJeff Kirsher
3168adfc5217SJeff Kirsher /* The last driver must disable a "close the gate" if there is no
3169adfc5217SJeff Kirsher * parity attention or "process kill" pending.
3170adfc5217SJeff Kirsher */
3171ad5afc89SAriel Elior if (IS_PF(bp) &&
3172ad5afc89SAriel Elior !bnx2x_clear_pf_load(bp) &&
3173ad5afc89SAriel Elior bnx2x_reset_is_done(bp, BP_PATH(bp)))
3174adfc5217SJeff Kirsher bnx2x_disable_close_the_gate(bp);
3175adfc5217SJeff Kirsher
317655c11941SMerav Sicron DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
317755c11941SMerav Sicron
3178adfc5217SJeff Kirsher return 0;
3179adfc5217SJeff Kirsher }
3180adfc5217SJeff Kirsher
bnx2x_set_power_state(struct bnx2x * bp,pci_power_t state)3181adfc5217SJeff Kirsher int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3182adfc5217SJeff Kirsher {
3183adfc5217SJeff Kirsher u16 pmcsr;
3184adfc5217SJeff Kirsher
3185adfc5217SJeff Kirsher /* If there is no power capability, silently succeed */
318629ed74c3SJon Mason if (!bp->pdev->pm_cap) {
318751c1a580SMerav Sicron BNX2X_DEV_INFO("No power capability. Breaking.\n");
3188adfc5217SJeff Kirsher return 0;
3189adfc5217SJeff Kirsher }
3190adfc5217SJeff Kirsher
319129ed74c3SJon Mason pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3192adfc5217SJeff Kirsher
3193adfc5217SJeff Kirsher switch (state) {
3194adfc5217SJeff Kirsher case PCI_D0:
319529ed74c3SJon Mason pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3196adfc5217SJeff Kirsher ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3197adfc5217SJeff Kirsher PCI_PM_CTRL_PME_STATUS));
3198adfc5217SJeff Kirsher
3199adfc5217SJeff Kirsher if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3200adfc5217SJeff Kirsher /* delay required during transition out of D3hot */
3201adfc5217SJeff Kirsher msleep(20);
3202adfc5217SJeff Kirsher break;
3203adfc5217SJeff Kirsher
3204adfc5217SJeff Kirsher case PCI_D3hot:
3205adfc5217SJeff Kirsher /* If there are other clients above don't
3206adfc5217SJeff Kirsher shut down the power */
3207adfc5217SJeff Kirsher if (atomic_read(&bp->pdev->enable_cnt) != 1)
3208adfc5217SJeff Kirsher return 0;
3209adfc5217SJeff Kirsher /* Don't shut down the power for emulation and FPGA */
3210adfc5217SJeff Kirsher if (CHIP_REV_IS_SLOW(bp))
3211adfc5217SJeff Kirsher return 0;
3212adfc5217SJeff Kirsher
3213adfc5217SJeff Kirsher pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
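		/* Power-state field value 3 selects D3hot */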
3214adfc5217SJeff Kirsher pmcsr |= 3;
3215adfc5217SJeff Kirsher
3216adfc5217SJeff Kirsher if (bp->wol)
3217adfc5217SJeff Kirsher pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3218adfc5217SJeff Kirsher
321929ed74c3SJon Mason pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3220adfc5217SJeff Kirsher pmcsr);
3221adfc5217SJeff Kirsher
3222adfc5217SJeff Kirsher /* No more memory access after this point until
3223adfc5217SJeff Kirsher * device is brought back to D0.
3224adfc5217SJeff Kirsher */
3225adfc5217SJeff Kirsher break;
3226adfc5217SJeff Kirsher
3227adfc5217SJeff Kirsher default:
322851c1a580SMerav Sicron dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3229adfc5217SJeff Kirsher return -EINVAL;
3230adfc5217SJeff Kirsher }
3231adfc5217SJeff Kirsher return 0;
3232adfc5217SJeff Kirsher }
3233adfc5217SJeff Kirsher
3234adfc5217SJeff Kirsher /*
3235adfc5217SJeff Kirsher * net_device service functions
3236adfc5217SJeff Kirsher */
bnx2x_poll(struct napi_struct * napi,int budget)3237a8f47eb7Sstephen hemminger static int bnx2x_poll(struct napi_struct *napi, int budget)
3238adfc5217SJeff Kirsher {
3239adfc5217SJeff Kirsher struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3240adfc5217SJeff Kirsher napi);
3241adfc5217SJeff Kirsher struct bnx2x *bp = fp->bp;
32424d6acb62SEric Dumazet int rx_work_done;
32434d6acb62SEric Dumazet u8 cos;
3244adfc5217SJeff Kirsher
3245adfc5217SJeff Kirsher #ifdef BNX2X_STOP_ON_ERROR
3246adfc5217SJeff Kirsher if (unlikely(bp->panic)) {
3247adfc5217SJeff Kirsher napi_complete(napi);
3248adfc5217SJeff Kirsher return 0;
3249adfc5217SJeff Kirsher }
3250adfc5217SJeff Kirsher #endif
3251adfc5217SJeff Kirsher for_each_cos_in_tx_queue(fp, cos)
325265565884SMerav Sicron if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
325365565884SMerav Sicron bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3254adfc5217SJeff Kirsher
32554d6acb62SEric Dumazet rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3256adfc5217SJeff Kirsher
32574d6acb62SEric Dumazet if (rx_work_done < budget) {
3258adfc5217SJeff Kirsher /* No need to update SB for FCoE L2 ring as long as
3259adfc5217SJeff Kirsher * it's connected to the default SB and the SB
3260adfc5217SJeff Kirsher * has been updated when NAPI was scheduled.
3261adfc5217SJeff Kirsher */
3262adfc5217SJeff Kirsher if (IS_FCOE_FP(fp)) {
32636ad20165SEric Dumazet napi_complete_done(napi, rx_work_done);
32644d6acb62SEric Dumazet } else {
3265adfc5217SJeff Kirsher bnx2x_update_fpsb_idx(fp);
3266adfc5217SJeff Kirsher /* bnx2x_has_rx_work() reads the status block,
3267adfc5217SJeff Kirsher * thus we need to ensure that status block indices
3268adfc5217SJeff Kirsher * have been actually read (bnx2x_update_fpsb_idx)
3269adfc5217SJeff Kirsher * prior to this check (bnx2x_has_rx_work) so that
3270adfc5217SJeff Kirsher * we won't write the "newer" value of the status block
3271adfc5217SJeff Kirsher * to IGU (if there was a DMA right after
3272adfc5217SJeff Kirsher * bnx2x_has_rx_work and if there is no rmb, the memory
3273adfc5217SJeff Kirsher * reading (bnx2x_update_fpsb_idx) may be postponed
3274adfc5217SJeff Kirsher * to right before bnx2x_ack_sb). In this case there
3275adfc5217SJeff Kirsher * will never be another interrupt until there is
3276adfc5217SJeff Kirsher * another update of the status block, while there
3277adfc5217SJeff Kirsher * is still unhandled work.
3278adfc5217SJeff Kirsher */
3279adfc5217SJeff Kirsher rmb();
3280adfc5217SJeff Kirsher
3281adfc5217SJeff Kirsher if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
328280f1c21cSEric Dumazet if (napi_complete_done(napi, rx_work_done)) {
3283adfc5217SJeff Kirsher /* Re-enable interrupts */
328451c1a580SMerav Sicron DP(NETIF_MSG_RX_STATUS,
3285adfc5217SJeff Kirsher "Update index to %d\n", fp->fp_hc_idx);
3286adfc5217SJeff Kirsher bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3287adfc5217SJeff Kirsher le16_to_cpu(fp->fp_hc_idx),
3288adfc5217SJeff Kirsher IGU_INT_ENABLE, 1);
328980f1c21cSEric Dumazet }
32904d6acb62SEric Dumazet } else {
32914d6acb62SEric Dumazet rx_work_done = budget;
3292adfc5217SJeff Kirsher }
3293adfc5217SJeff Kirsher }
3294adfc5217SJeff Kirsher }
3295adfc5217SJeff Kirsher
32964d6acb62SEric Dumazet return rx_work_done;
3297adfc5217SJeff Kirsher }
3298adfc5217SJeff Kirsher
3299adfc5217SJeff Kirsher /* we split the first BD into headers and data BDs
3300adfc5217SJeff Kirsher * to ease the pain of our fellow microcode engineers
3301adfc5217SJeff Kirsher * we use one mapping for both BDs
3302adfc5217SJeff Kirsher */
bnx2x_tx_split(struct bnx2x * bp,struct bnx2x_fp_txdata * txdata,struct sw_tx_bd * tx_buf,struct eth_tx_start_bd ** tx_bd,u16 hlen,u16 bd_prod)330391226790SDmitry Kravkov static u16 bnx2x_tx_split(struct bnx2x *bp,
3304adfc5217SJeff Kirsher struct bnx2x_fp_txdata *txdata,
3305adfc5217SJeff Kirsher struct sw_tx_bd *tx_buf,
3306adfc5217SJeff Kirsher struct eth_tx_start_bd **tx_bd, u16 hlen,
330791226790SDmitry Kravkov u16 bd_prod)
3308adfc5217SJeff Kirsher {
3309adfc5217SJeff Kirsher struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3310adfc5217SJeff Kirsher struct eth_tx_bd *d_tx_bd;
3311adfc5217SJeff Kirsher dma_addr_t mapping;
3312adfc5217SJeff Kirsher int old_len = le16_to_cpu(h_tx_bd->nbytes);
3313adfc5217SJeff Kirsher
3314adfc5217SJeff Kirsher /* first fix first BD */
3315adfc5217SJeff Kirsher h_tx_bd->nbytes = cpu_to_le16(hlen);
3316adfc5217SJeff Kirsher
331791226790SDmitry Kravkov DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
331891226790SDmitry Kravkov h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3319adfc5217SJeff Kirsher
3320adfc5217SJeff Kirsher /* now get a new data BD
3321adfc5217SJeff Kirsher * (after the pbd) and fill it */
3322adfc5217SJeff Kirsher bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3323adfc5217SJeff Kirsher d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3324adfc5217SJeff Kirsher
3325adfc5217SJeff Kirsher mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3326adfc5217SJeff Kirsher le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3327adfc5217SJeff Kirsher
3328adfc5217SJeff Kirsher d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3329adfc5217SJeff Kirsher d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3330adfc5217SJeff Kirsher d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3331adfc5217SJeff Kirsher
3332adfc5217SJeff Kirsher /* this marks the BD as one that has no individual mapping */
3333adfc5217SJeff Kirsher tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3334adfc5217SJeff Kirsher
3335adfc5217SJeff Kirsher DP(NETIF_MSG_TX_QUEUED,
3336adfc5217SJeff Kirsher "TSO split data size is %d (%x:%x)\n",
3337adfc5217SJeff Kirsher d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3338adfc5217SJeff Kirsher
3339adfc5217SJeff Kirsher /* update tx_bd */
3340adfc5217SJeff Kirsher *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3341adfc5217SJeff Kirsher
3342adfc5217SJeff Kirsher return bd_prod;
3343adfc5217SJeff Kirsher }
3344adfc5217SJeff Kirsher
334586564c3fSYuval Mintz #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
334686564c3fSYuval Mintz #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
bnx2x_csum_fix(unsigned char * t_header,u16 csum,s8 fix)334791226790SDmitry Kravkov static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3348adfc5217SJeff Kirsher {
334986564c3fSYuval Mintz __sum16 tsum = (__force __sum16) csum;
335086564c3fSYuval Mintz
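	/* For a positive fix, back out the partial sum over the fix bytes
	 * just before the transport header; for a negative fix, fold in the
	 * sum over the first -fix bytes of it.
	 */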
3351adfc5217SJeff Kirsher if (fix > 0)
335286564c3fSYuval Mintz tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3353adfc5217SJeff Kirsher csum_partial(t_header - fix, fix, 0)));
3354adfc5217SJeff Kirsher
3355adfc5217SJeff Kirsher else if (fix < 0)
335686564c3fSYuval Mintz tsum = ~csum_fold(csum_add((__force __wsum) csum,
3357adfc5217SJeff Kirsher csum_partial(t_header, -fix, 0)));
3358adfc5217SJeff Kirsher
3359e2593fcdSDmitry Kravkov return bswab16(tsum);
3360adfc5217SJeff Kirsher }
3361adfc5217SJeff Kirsher
bnx2x_xmit_type(struct bnx2x * bp,struct sk_buff * skb)336291226790SDmitry Kravkov static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3363adfc5217SJeff Kirsher {
3364adfc5217SJeff Kirsher u32 rc;
3365a848ade4SDmitry Kravkov __u8 prot = 0;
3366a848ade4SDmitry Kravkov __be16 protocol;
3367adfc5217SJeff Kirsher
3368adfc5217SJeff Kirsher if (skb->ip_summed != CHECKSUM_PARTIAL)
3369a848ade4SDmitry Kravkov return XMIT_PLAIN;
3370adfc5217SJeff Kirsher
3371a848ade4SDmitry Kravkov protocol = vlan_get_protocol(skb);
3372a848ade4SDmitry Kravkov if (protocol == htons(ETH_P_IPV6)) {
3373adfc5217SJeff Kirsher rc = XMIT_CSUM_V6;
3374a848ade4SDmitry Kravkov prot = ipv6_hdr(skb)->nexthdr;
3375adfc5217SJeff Kirsher } else {
3376adfc5217SJeff Kirsher rc = XMIT_CSUM_V4;
3377a848ade4SDmitry Kravkov prot = ip_hdr(skb)->protocol;
3378a848ade4SDmitry Kravkov }
3379a848ade4SDmitry Kravkov
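	/* For encapsulated packets on E2 and newer chips, classify the
	 * inner headers as well.
	 */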
3380a848ade4SDmitry Kravkov if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3381a848ade4SDmitry Kravkov if (inner_ip_hdr(skb)->version == 6) {
3382a848ade4SDmitry Kravkov rc |= XMIT_CSUM_ENC_V6;
3383a848ade4SDmitry Kravkov if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3384a848ade4SDmitry Kravkov rc |= XMIT_CSUM_TCP;
3385a848ade4SDmitry Kravkov } else {
3386a848ade4SDmitry Kravkov rc |= XMIT_CSUM_ENC_V4;
3387a848ade4SDmitry Kravkov if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3388adfc5217SJeff Kirsher rc |= XMIT_CSUM_TCP;
3389adfc5217SJeff Kirsher }
3390adfc5217SJeff Kirsher }
3391a848ade4SDmitry Kravkov if (prot == IPPROTO_TCP)
3392a848ade4SDmitry Kravkov rc |= XMIT_CSUM_TCP;
3393adfc5217SJeff Kirsher
339436a8f39eSEric Dumazet if (skb_is_gso(skb)) {
3395a848ade4SDmitry Kravkov if (skb_is_gso_v6(skb)) {
3396e768fb29SDmitry Kravkov rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3397a848ade4SDmitry Kravkov if (rc & XMIT_CSUM_ENC)
3398a848ade4SDmitry Kravkov rc |= XMIT_GSO_ENC_V6;
339936a8f39eSEric Dumazet } else {
3400e768fb29SDmitry Kravkov rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3401a848ade4SDmitry Kravkov if (rc & XMIT_CSUM_ENC)
3402a848ade4SDmitry Kravkov rc |= XMIT_GSO_ENC_V4;
3403a848ade4SDmitry Kravkov }
340436a8f39eSEric Dumazet }
3405adfc5217SJeff Kirsher
3406adfc5217SJeff Kirsher return rc;
3407adfc5217SJeff Kirsher }
3408adfc5217SJeff Kirsher
3409ea2465afSYuval Mintz /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3410ea2465afSYuval Mintz #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3411ea2465afSYuval Mintz
3412ea2465afSYuval Mintz /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3413ea2465afSYuval Mintz #define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3414ea2465afSYuval Mintz
3415ea2465afSYuval Mintz #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3416adfc5217SJeff Kirsher /* check if packet requires linearization (packet is too fragmented);
3417adfc5217SJeff Kirsher no need to check fragmentation if page size > 8K (there will be no
3418adfc5217SJeff Kirsher violation of FW restrictions) */
bnx2x_pkt_req_lin(struct bnx2x * bp,struct sk_buff * skb,u32 xmit_type)3419adfc5217SJeff Kirsher static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3420adfc5217SJeff Kirsher u32 xmit_type)
3421adfc5217SJeff Kirsher {
3422ea2465afSYuval Mintz int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3423ea2465afSYuval Mintz int to_copy = 0, hlen = 0;
3424adfc5217SJeff Kirsher
3425ea2465afSYuval Mintz if (xmit_type & XMIT_GSO_ENC)
3426ea2465afSYuval Mintz num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3427adfc5217SJeff Kirsher
3428ea2465afSYuval Mintz if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3429adfc5217SJeff Kirsher if (xmit_type & XMIT_GSO) {
3430adfc5217SJeff Kirsher unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3431ea2465afSYuval Mintz int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3432adfc5217SJeff Kirsher /* Number of windows to check */
3433adfc5217SJeff Kirsher int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3434adfc5217SJeff Kirsher int wnd_idx = 0;
3435adfc5217SJeff Kirsher int frag_idx = 0;
3436adfc5217SJeff Kirsher u32 wnd_sum = 0;
3437adfc5217SJeff Kirsher
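		/* Slide a window of wnd_size BDs over the frags (the first
		 * window also counts the linear part) and request
		 * linearization if any window carries less than one full MSS,
		 * which would violate the FW fetch restriction.
		 */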
3438adfc5217SJeff Kirsher /* Headers length */
3439592b9b8dSYuval Mintz if (xmit_type & XMIT_GSO_ENC)
3440504148feSEric Dumazet hlen = skb_inner_tcp_all_headers(skb);
3441592b9b8dSYuval Mintz else
3442504148feSEric Dumazet hlen = skb_tcp_all_headers(skb);
3443adfc5217SJeff Kirsher
3444adfc5217SJeff Kirsher /* Amount of data (w/o headers) on the linear part of the SKB */
3445adfc5217SJeff Kirsher first_bd_sz = skb_headlen(skb) - hlen;
3446adfc5217SJeff Kirsher
3447adfc5217SJeff Kirsher wnd_sum = first_bd_sz;
3448adfc5217SJeff Kirsher
3449adfc5217SJeff Kirsher /* Calculate the first sum - it's special */
3450adfc5217SJeff Kirsher for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3451adfc5217SJeff Kirsher wnd_sum +=
34529e903e08SEric Dumazet skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3453adfc5217SJeff Kirsher
3454adfc5217SJeff Kirsher /* If there was data on linear skb data - check it */
3455adfc5217SJeff Kirsher if (first_bd_sz > 0) {
3456adfc5217SJeff Kirsher if (unlikely(wnd_sum < lso_mss)) {
3457adfc5217SJeff Kirsher to_copy = 1;
3458adfc5217SJeff Kirsher goto exit_lbl;
3459adfc5217SJeff Kirsher }
3460adfc5217SJeff Kirsher
3461adfc5217SJeff Kirsher wnd_sum -= first_bd_sz;
3462adfc5217SJeff Kirsher }
3463adfc5217SJeff Kirsher
3464adfc5217SJeff Kirsher /* Others are easier: run through the frag list and
3465adfc5217SJeff Kirsher check all windows */
3466adfc5217SJeff Kirsher for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3467adfc5217SJeff Kirsher wnd_sum +=
34689e903e08SEric Dumazet skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3469adfc5217SJeff Kirsher
3470adfc5217SJeff Kirsher if (unlikely(wnd_sum < lso_mss)) {
3471adfc5217SJeff Kirsher to_copy = 1;
3472adfc5217SJeff Kirsher break;
3473adfc5217SJeff Kirsher }
3474adfc5217SJeff Kirsher wnd_sum -=
34759e903e08SEric Dumazet skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3476adfc5217SJeff Kirsher }
3477adfc5217SJeff Kirsher } else {
3478adfc5217SJeff Kirsher /* in the non-LSO case a too fragmented packet should always
3479adfc5217SJeff Kirsher be linearized */
3480adfc5217SJeff Kirsher to_copy = 1;
3481adfc5217SJeff Kirsher }
3482adfc5217SJeff Kirsher }
3483adfc5217SJeff Kirsher
3484adfc5217SJeff Kirsher exit_lbl:
3485adfc5217SJeff Kirsher if (unlikely(to_copy))
3486adfc5217SJeff Kirsher DP(NETIF_MSG_TX_QUEUED,
348751c1a580SMerav Sicron "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3488adfc5217SJeff Kirsher (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3489adfc5217SJeff Kirsher skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3490adfc5217SJeff Kirsher
3491adfc5217SJeff Kirsher return to_copy;
3492adfc5217SJeff Kirsher }
3493adfc5217SJeff Kirsher #endif
3494adfc5217SJeff Kirsher
3495adfc5217SJeff Kirsher /**
3496adfc5217SJeff Kirsher * bnx2x_set_pbd_gso - update PBD in GSO case.
3497adfc5217SJeff Kirsher *
3498adfc5217SJeff Kirsher * @skb: packet skb
3499adfc5217SJeff Kirsher * @pbd: parse BD
3500adfc5217SJeff Kirsher * @xmit_type: xmit flags
3501adfc5217SJeff Kirsher */
bnx2x_set_pbd_gso(struct sk_buff * skb,struct eth_tx_parse_bd_e1x * pbd,u32 xmit_type)350291226790SDmitry Kravkov static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3503adfc5217SJeff Kirsher struct eth_tx_parse_bd_e1x *pbd,
3504adfc5217SJeff Kirsher u32 xmit_type)
3505adfc5217SJeff Kirsher {
3506adfc5217SJeff Kirsher pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
350786564c3fSYuval Mintz pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
350891226790SDmitry Kravkov pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3509adfc5217SJeff Kirsher
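	/* The pseudo-header checksum below is seeded with a zero length;
	 * the PSEUDO_CS_WITHOUT_LEN flag set at the end marks it as such
	 * for the FW.
	 */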
3510adfc5217SJeff Kirsher if (xmit_type & XMIT_GSO_V4) {
351186564c3fSYuval Mintz pbd->ip_id = bswab16(ip_hdr(skb)->id);
3512adfc5217SJeff Kirsher pbd->tcp_pseudo_csum =
351386564c3fSYuval Mintz bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3514adfc5217SJeff Kirsher ip_hdr(skb)->daddr,
3515adfc5217SJeff Kirsher 0, IPPROTO_TCP, 0));
3516057cf65eSYuval Mintz } else {
3517adfc5217SJeff Kirsher pbd->tcp_pseudo_csum =
351886564c3fSYuval Mintz bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3519adfc5217SJeff Kirsher &ipv6_hdr(skb)->daddr,
3520adfc5217SJeff Kirsher 0, IPPROTO_TCP, 0));
3521057cf65eSYuval Mintz }
3522adfc5217SJeff Kirsher
352386564c3fSYuval Mintz pbd->global_data |=
352486564c3fSYuval Mintz cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3525adfc5217SJeff Kirsher }
3526adfc5217SJeff Kirsher
3527adfc5217SJeff Kirsher /**
3528a848ade4SDmitry Kravkov * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3529a848ade4SDmitry Kravkov *
3530a848ade4SDmitry Kravkov * @bp: driver handle
3531a848ade4SDmitry Kravkov * @skb: packet skb
3532a848ade4SDmitry Kravkov * @parsing_data: data to be updated
3533a848ade4SDmitry Kravkov * @xmit_type: xmit flags
3534a848ade4SDmitry Kravkov *
3535a848ade4SDmitry Kravkov * 57712/578xx related, when skb has encapsulation
3536a848ade4SDmitry Kravkov */
bnx2x_set_pbd_csum_enc(struct bnx2x * bp,struct sk_buff * skb,u32 * parsing_data,u32 xmit_type)3537a848ade4SDmitry Kravkov static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3538a848ade4SDmitry Kravkov u32 *parsing_data, u32 xmit_type)
3539a848ade4SDmitry Kravkov {
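	/* The L4 header start offset is programmed in 16-bit words (hence
	 * the >> 1) and the TCP header length in dwords (hence the / 4).
	 */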
3540a848ade4SDmitry Kravkov *parsing_data |=
3541a848ade4SDmitry Kravkov ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3542a848ade4SDmitry Kravkov ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3543a848ade4SDmitry Kravkov ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3544a848ade4SDmitry Kravkov
3545a848ade4SDmitry Kravkov if (xmit_type & XMIT_CSUM_TCP) {
3546a848ade4SDmitry Kravkov *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3547a848ade4SDmitry Kravkov ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3548a848ade4SDmitry Kravkov ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3549a848ade4SDmitry Kravkov
3550504148feSEric Dumazet return skb_inner_tcp_all_headers(skb);
3551a848ade4SDmitry Kravkov }
3552a848ade4SDmitry Kravkov
3553a848ade4SDmitry Kravkov /* We support checksum offload for TCP and UDP only.
3554a848ade4SDmitry Kravkov * No need to pass the UDP header length - it's a constant.
3555a848ade4SDmitry Kravkov */
3556504148feSEric Dumazet return skb_inner_transport_offset(skb) + sizeof(struct udphdr);
3557a848ade4SDmitry Kravkov }
3558a848ade4SDmitry Kravkov
3559a848ade4SDmitry Kravkov /**
3560adfc5217SJeff Kirsher * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3561adfc5217SJeff Kirsher *
3562adfc5217SJeff Kirsher * @bp: driver handle
3563adfc5217SJeff Kirsher * @skb: packet skb
3564adfc5217SJeff Kirsher * @parsing_data: data to be updated
3565adfc5217SJeff Kirsher * @xmit_type: xmit flags
3566adfc5217SJeff Kirsher *
356791226790SDmitry Kravkov * 57712/578xx related
3568adfc5217SJeff Kirsher */
bnx2x_set_pbd_csum_e2(struct bnx2x * bp,struct sk_buff * skb,u32 * parsing_data,u32 xmit_type)356991226790SDmitry Kravkov static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3570adfc5217SJeff Kirsher u32 *parsing_data, u32 xmit_type)
3571adfc5217SJeff Kirsher {
3572adfc5217SJeff Kirsher *parsing_data |=
3573adfc5217SJeff Kirsher ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
357491226790SDmitry Kravkov ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
357591226790SDmitry Kravkov ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3576adfc5217SJeff Kirsher
3577adfc5217SJeff Kirsher if (xmit_type & XMIT_CSUM_TCP) {
3578adfc5217SJeff Kirsher *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3579adfc5217SJeff Kirsher ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3580adfc5217SJeff Kirsher ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3581adfc5217SJeff Kirsher
3582504148feSEric Dumazet return skb_tcp_all_headers(skb);
3583924d75abSYuval Mintz }
3584adfc5217SJeff Kirsher /* We support checksum offload for TCP and UDP only.
3585adfc5217SJeff Kirsher * No need to pass the UDP header length - it's a constant.
3586adfc5217SJeff Kirsher */
3587504148feSEric Dumazet return skb_transport_offset(skb) + sizeof(struct udphdr);
3588adfc5217SJeff Kirsher }
3589adfc5217SJeff Kirsher
3590a848ade4SDmitry Kravkov /* set FW indication according to inner or outer protocols if tunneled */
bnx2x_set_sbd_csum(struct bnx2x * bp,struct sk_buff * skb,struct eth_tx_start_bd * tx_start_bd,u32 xmit_type)359191226790SDmitry Kravkov static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
359291226790SDmitry Kravkov struct eth_tx_start_bd *tx_start_bd,
359391226790SDmitry Kravkov u32 xmit_type)
3594adfc5217SJeff Kirsher {
3595adfc5217SJeff Kirsher tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3596adfc5217SJeff Kirsher
3597a848ade4SDmitry Kravkov if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
359891226790SDmitry Kravkov tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3599adfc5217SJeff Kirsher
3600adfc5217SJeff Kirsher if (!(xmit_type & XMIT_CSUM_TCP))
3601adfc5217SJeff Kirsher tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3602adfc5217SJeff Kirsher }
3603adfc5217SJeff Kirsher
3604adfc5217SJeff Kirsher /**
3605adfc5217SJeff Kirsher * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3606adfc5217SJeff Kirsher *
3607adfc5217SJeff Kirsher * @bp: driver handle
3608adfc5217SJeff Kirsher * @skb: packet skb
3609adfc5217SJeff Kirsher * @pbd: parse BD to be updated
3610adfc5217SJeff Kirsher * @xmit_type: xmit flags
3611adfc5217SJeff Kirsher */
bnx2x_set_pbd_csum(struct bnx2x * bp,struct sk_buff * skb,struct eth_tx_parse_bd_e1x * pbd,u32 xmit_type)361291226790SDmitry Kravkov static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3613adfc5217SJeff Kirsher struct eth_tx_parse_bd_e1x *pbd,
3614adfc5217SJeff Kirsher u32 xmit_type)
3615adfc5217SJeff Kirsher {
3616adfc5217SJeff Kirsher u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3617adfc5217SJeff Kirsher
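	/* hlen is accumulated in 16-bit words - the E1x parse BD uses word
	 * units - and converted back to bytes before returning.
	 */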
3618adfc5217SJeff Kirsher /* for now NS flag is not used in Linux */
3619adfc5217SJeff Kirsher pbd->global_data =
362086564c3fSYuval Mintz cpu_to_le16(hlen |
362186564c3fSYuval Mintz ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3622adfc5217SJeff Kirsher ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3623adfc5217SJeff Kirsher
3624adfc5217SJeff Kirsher pbd->ip_hlen_w = (skb_transport_header(skb) -
3625adfc5217SJeff Kirsher skb_network_header(skb)) >> 1;
3626adfc5217SJeff Kirsher
3627adfc5217SJeff Kirsher hlen += pbd->ip_hlen_w;
3628adfc5217SJeff Kirsher
3629adfc5217SJeff Kirsher /* We support checksum offload for TCP and UDP only */
3630adfc5217SJeff Kirsher if (xmit_type & XMIT_CSUM_TCP)
3631adfc5217SJeff Kirsher hlen += tcp_hdrlen(skb) / 2;
3632adfc5217SJeff Kirsher else
3633adfc5217SJeff Kirsher hlen += sizeof(struct udphdr) / 2;
3634adfc5217SJeff Kirsher
3635adfc5217SJeff Kirsher pbd->total_hlen_w = cpu_to_le16(hlen);
3636adfc5217SJeff Kirsher hlen = hlen*2;
3637adfc5217SJeff Kirsher
3638adfc5217SJeff Kirsher if (xmit_type & XMIT_CSUM_TCP) {
363986564c3fSYuval Mintz pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3640adfc5217SJeff Kirsher
3641adfc5217SJeff Kirsher } else {
3642adfc5217SJeff Kirsher s8 fix = SKB_CS_OFF(skb); /* signed! */
3643adfc5217SJeff Kirsher
3644adfc5217SJeff Kirsher DP(NETIF_MSG_TX_QUEUED,
3645adfc5217SJeff Kirsher "hlen %d fix %d csum before fix %x\n",
3646adfc5217SJeff Kirsher le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3647adfc5217SJeff Kirsher
3648adfc5217SJeff Kirsher /* HW bug: fixup the CSUM */
3649adfc5217SJeff Kirsher pbd->tcp_pseudo_csum =
3650adfc5217SJeff Kirsher bnx2x_csum_fix(skb_transport_header(skb),
3651adfc5217SJeff Kirsher SKB_CS(skb), fix);
3652adfc5217SJeff Kirsher
3653adfc5217SJeff Kirsher DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3654adfc5217SJeff Kirsher pbd->tcp_pseudo_csum);
3655adfc5217SJeff Kirsher }
3656adfc5217SJeff Kirsher
3657adfc5217SJeff Kirsher return hlen;
3658adfc5217SJeff Kirsher }
3659adfc5217SJeff Kirsher
bnx2x_update_pbds_gso_enc(struct sk_buff * skb,struct eth_tx_parse_bd_e2 * pbd_e2,struct eth_tx_parse_2nd_bd * pbd2,u16 * global_data,u32 xmit_type)3660a848ade4SDmitry Kravkov static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3661a848ade4SDmitry Kravkov struct eth_tx_parse_bd_e2 *pbd_e2,
3662a848ade4SDmitry Kravkov struct eth_tx_parse_2nd_bd *pbd2,
3663a848ade4SDmitry Kravkov u16 *global_data,
3664a848ade4SDmitry Kravkov u32 xmit_type)
3665a848ade4SDmitry Kravkov {
3666e287a75cSDmitry Kravkov u16 hlen_w = 0;
3667a848ade4SDmitry Kravkov u8 outerip_off, outerip_len = 0;
3668e768fb29SDmitry Kravkov
3669e287a75cSDmitry Kravkov /* from outer IP to transport */
3670e287a75cSDmitry Kravkov hlen_w = (skb_inner_transport_header(skb) -
3671e287a75cSDmitry Kravkov skb_network_header(skb)) >> 1;
3672a848ade4SDmitry Kravkov
3673a848ade4SDmitry Kravkov /* transport len */
3674e287a75cSDmitry Kravkov hlen_w += inner_tcp_hdrlen(skb) >> 1;
3675a848ade4SDmitry Kravkov
3676e287a75cSDmitry Kravkov pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3677a848ade4SDmitry Kravkov
3678e768fb29SDmitry Kravkov /* outer IP header info */
3679e768fb29SDmitry Kravkov if (xmit_type & XMIT_CSUM_V4) {
3680e287a75cSDmitry Kravkov struct iphdr *iph = ip_hdr(skb);
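		/* Hand FW the outer IP header checksum with the tot_len and
		 * frag_off words backed out (fw_ip_csum_wo_len_flags_frag).
		 */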
36811b4fc0e2SDmitry Kravkov u32 csum = (__force u32)(~iph->check) -
36821b4fc0e2SDmitry Kravkov (__force u32)iph->tot_len -
36831b4fc0e2SDmitry Kravkov (__force u32)iph->frag_off;
3684c957d09fSYuval Mintz
3685e42780b6SDmitry Kravkov outerip_len = iph->ihl << 1;
3686e42780b6SDmitry Kravkov
3687a848ade4SDmitry Kravkov pbd2->fw_ip_csum_wo_len_flags_frag =
3688c957d09fSYuval Mintz bswab16(csum_fold((__force __wsum)csum));
3689a848ade4SDmitry Kravkov } else {
3690a848ade4SDmitry Kravkov pbd2->fw_ip_hdr_to_payload_w =
3691e287a75cSDmitry Kravkov hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3692e42780b6SDmitry Kravkov pbd_e2->data.tunnel_data.flags |=
369328311f8eSYuval Mintz ETH_TUNNEL_DATA_IPV6_OUTER;
3694a848ade4SDmitry Kravkov }
3695a848ade4SDmitry Kravkov
3696a848ade4SDmitry Kravkov pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3697a848ade4SDmitry Kravkov
3698a848ade4SDmitry Kravkov pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3699a848ade4SDmitry Kravkov
3700e42780b6SDmitry Kravkov /* inner IP header info */
3701e42780b6SDmitry Kravkov if (xmit_type & XMIT_CSUM_ENC_V4) {
3702e287a75cSDmitry Kravkov pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3703a848ade4SDmitry Kravkov
3704a848ade4SDmitry Kravkov pbd_e2->data.tunnel_data.pseudo_csum =
3705a848ade4SDmitry Kravkov bswab16(~csum_tcpudp_magic(
3706a848ade4SDmitry Kravkov inner_ip_hdr(skb)->saddr,
3707a848ade4SDmitry Kravkov inner_ip_hdr(skb)->daddr,
3708a848ade4SDmitry Kravkov 0, IPPROTO_TCP, 0));
3709a848ade4SDmitry Kravkov } else {
3710a848ade4SDmitry Kravkov pbd_e2->data.tunnel_data.pseudo_csum =
3711a848ade4SDmitry Kravkov bswab16(~csum_ipv6_magic(
3712a848ade4SDmitry Kravkov &inner_ipv6_hdr(skb)->saddr,
3713a848ade4SDmitry Kravkov &inner_ipv6_hdr(skb)->daddr,
3714a848ade4SDmitry Kravkov 0, IPPROTO_TCP, 0));
3715a848ade4SDmitry Kravkov }
3716a848ade4SDmitry Kravkov
3717a848ade4SDmitry Kravkov outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3718a848ade4SDmitry Kravkov
3719a848ade4SDmitry Kravkov *global_data |=
3720a848ade4SDmitry Kravkov outerip_off |
3721a848ade4SDmitry Kravkov (outerip_len <<
3722a848ade4SDmitry Kravkov ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3723a848ade4SDmitry Kravkov ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3724a848ade4SDmitry Kravkov ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
372565bc0cfeSDmitry Kravkov
372665bc0cfeSDmitry Kravkov if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
372765bc0cfeSDmitry Kravkov SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
372865bc0cfeSDmitry Kravkov pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
372965bc0cfeSDmitry Kravkov }
3730a848ade4SDmitry Kravkov }
3731a848ade4SDmitry Kravkov
bnx2x_set_ipv6_ext_e2(struct sk_buff * skb,u32 * parsing_data,u32 xmit_type)3732e42780b6SDmitry Kravkov static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3733e42780b6SDmitry Kravkov u32 xmit_type)
3734e42780b6SDmitry Kravkov {
3735e42780b6SDmitry Kravkov struct ipv6hdr *ipv6;
3736e42780b6SDmitry Kravkov
3737e42780b6SDmitry Kravkov if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3738e42780b6SDmitry Kravkov return;
3739e42780b6SDmitry Kravkov
3740e42780b6SDmitry Kravkov if (xmit_type & XMIT_GSO_ENC_V6)
3741e42780b6SDmitry Kravkov ipv6 = inner_ipv6_hdr(skb);
3742e42780b6SDmitry Kravkov else /* XMIT_GSO_V6 */
3743e42780b6SDmitry Kravkov ipv6 = ipv6_hdr(skb);
3744e42780b6SDmitry Kravkov
3745e42780b6SDmitry Kravkov if (ipv6->nexthdr == NEXTHDR_IPV6)
3746e42780b6SDmitry Kravkov *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3747e42780b6SDmitry Kravkov }
3748e42780b6SDmitry Kravkov
3749adfc5217SJeff Kirsher /* called with netif_tx_lock
3750adfc5217SJeff Kirsher * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3751adfc5217SJeff Kirsher * netif_wake_queue()
3752adfc5217SJeff Kirsher */
bnx2x_start_xmit(struct sk_buff * skb,struct net_device * dev)3753adfc5217SJeff Kirsher netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3754adfc5217SJeff Kirsher {
3755adfc5217SJeff Kirsher struct bnx2x *bp = netdev_priv(dev);
3756adfc5217SJeff Kirsher
3757adfc5217SJeff Kirsher struct netdev_queue *txq;
3758adfc5217SJeff Kirsher struct bnx2x_fp_txdata *txdata;
3759adfc5217SJeff Kirsher struct sw_tx_bd *tx_buf;
3760adfc5217SJeff Kirsher struct eth_tx_start_bd *tx_start_bd, *first_bd;
3761adfc5217SJeff Kirsher struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3762adfc5217SJeff Kirsher struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3763adfc5217SJeff Kirsher struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3764a848ade4SDmitry Kravkov struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3765adfc5217SJeff Kirsher u32 pbd_e2_parsing_data = 0;
3766adfc5217SJeff Kirsher u16 pkt_prod, bd_prod;
376765565884SMerav Sicron int nbd, txq_index;
3768adfc5217SJeff Kirsher dma_addr_t mapping;
3769adfc5217SJeff Kirsher u32 xmit_type = bnx2x_xmit_type(bp, skb);
3770adfc5217SJeff Kirsher int i;
3771adfc5217SJeff Kirsher u8 hlen = 0;
3772adfc5217SJeff Kirsher __le16 pkt_size = 0;
3773adfc5217SJeff Kirsher struct ethhdr *eth;
3774adfc5217SJeff Kirsher u8 mac_type = UNICAST_ADDRESS;
3775adfc5217SJeff Kirsher
3776adfc5217SJeff Kirsher #ifdef BNX2X_STOP_ON_ERROR
3777adfc5217SJeff Kirsher if (unlikely(bp->panic))
3778adfc5217SJeff Kirsher return NETDEV_TX_BUSY;
3779adfc5217SJeff Kirsher #endif
3780adfc5217SJeff Kirsher
3781adfc5217SJeff Kirsher txq_index = skb_get_queue_mapping(skb);
3782adfc5217SJeff Kirsher txq = netdev_get_tx_queue(dev, txq_index);
3783adfc5217SJeff Kirsher
378455c11941SMerav Sicron BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3785adfc5217SJeff Kirsher
378665565884SMerav Sicron txdata = &bp->bnx2x_txq[txq_index];
3787adfc5217SJeff Kirsher
3788adfc5217SJeff Kirsher /* enable this debug print to view the transmission queue being used
378951c1a580SMerav Sicron DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3790adfc5217SJeff Kirsher txq_index, fp_index, txdata_index); */
3791adfc5217SJeff Kirsher
379216a5fd92SYuval Mintz /* enable this debug print to view the transmission details
379351c1a580SMerav Sicron DP(NETIF_MSG_TX_QUEUED,
379451c1a580SMerav Sicron "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3795adfc5217SJeff Kirsher txdata->cid, fp_index, txdata_index, txdata, fp); */
3796adfc5217SJeff Kirsher
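	/* Make sure the ring has room for the worst case: one BD per frag
	 * plus the start/parse BDs and any "next page" BD entries the
	 * packet may cross.
	 */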
3797adfc5217SJeff Kirsher if (unlikely(bnx2x_tx_avail(bp, txdata) <
37987df2dc6bSDmitry Kravkov skb_shinfo(skb)->nr_frags +
37997df2dc6bSDmitry Kravkov BDS_PER_TX_PKT +
38007df2dc6bSDmitry Kravkov NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
38012384d6aaSDmitry Kravkov /* Handle special storage cases separately */
3802c96bdc0cSDmitry Kravkov if (txdata->tx_ring_size == 0) {
3803c96bdc0cSDmitry Kravkov struct bnx2x_eth_q_stats *q_stats =
3804c96bdc0cSDmitry Kravkov bnx2x_fp_qstats(bp, txdata->parent_fp);
3805c96bdc0cSDmitry Kravkov q_stats->driver_filtered_tx_pkt++;
3806c96bdc0cSDmitry Kravkov dev_kfree_skb(skb);
3807c96bdc0cSDmitry Kravkov return NETDEV_TX_OK;
3808c96bdc0cSDmitry Kravkov }
380915192a8cSBarak Witkowski bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3810adfc5217SJeff Kirsher netif_tx_stop_queue(txq);
3811c96bdc0cSDmitry Kravkov BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
38122384d6aaSDmitry Kravkov
3813adfc5217SJeff Kirsher return NETDEV_TX_BUSY;
3814adfc5217SJeff Kirsher }
3815adfc5217SJeff Kirsher
381651c1a580SMerav Sicron DP(NETIF_MSG_TX_QUEUED,
381704c46736SYuval Mintz "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3818adfc5217SJeff Kirsher txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
381904c46736SYuval Mintz ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
382004c46736SYuval Mintz skb->len);
3821adfc5217SJeff Kirsher
3822adfc5217SJeff Kirsher eth = (struct ethhdr *)skb->data;
3823adfc5217SJeff Kirsher
3824adfc5217SJeff Kirsher /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3825adfc5217SJeff Kirsher if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3826adfc5217SJeff Kirsher if (is_broadcast_ether_addr(eth->h_dest))
3827adfc5217SJeff Kirsher mac_type = BROADCAST_ADDRESS;
3828adfc5217SJeff Kirsher else
3829adfc5217SJeff Kirsher mac_type = MULTICAST_ADDRESS;
3830adfc5217SJeff Kirsher }
3831adfc5217SJeff Kirsher
383291226790SDmitry Kravkov #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3833adfc5217SJeff Kirsher /* First, check if we need to linearize the skb (due to FW
3834adfc5217SJeff Kirsher restrictions). No need to check fragmentation if page size > 8K
3835adfc5217SJeff Kirsher (there will be no violation of FW restrictions) */
3836adfc5217SJeff Kirsher if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3837adfc5217SJeff Kirsher /* Statistics of linearization */
3838adfc5217SJeff Kirsher bp->lin_cnt++;
3839adfc5217SJeff Kirsher if (skb_linearize(skb) != 0) {
384051c1a580SMerav Sicron DP(NETIF_MSG_TX_QUEUED,
384151c1a580SMerav Sicron "SKB linearization failed - silently dropping this SKB\n");
3842adfc5217SJeff Kirsher dev_kfree_skb_any(skb);
3843adfc5217SJeff Kirsher return NETDEV_TX_OK;
3844adfc5217SJeff Kirsher }
3845adfc5217SJeff Kirsher }
3846adfc5217SJeff Kirsher #endif
3847adfc5217SJeff Kirsher /* Map skb linear data for DMA */
3848adfc5217SJeff Kirsher mapping = dma_map_single(&bp->pdev->dev, skb->data,
3849adfc5217SJeff Kirsher skb_headlen(skb), DMA_TO_DEVICE);
3850adfc5217SJeff Kirsher if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
385151c1a580SMerav Sicron DP(NETIF_MSG_TX_QUEUED,
385251c1a580SMerav Sicron "SKB mapping failed - silently dropping this SKB\n");
3853adfc5217SJeff Kirsher dev_kfree_skb_any(skb);
3854adfc5217SJeff Kirsher return NETDEV_TX_OK;
3855adfc5217SJeff Kirsher }
3856adfc5217SJeff Kirsher /*
3857adfc5217SJeff Kirsher Please read carefully. First we use one BD which we mark as start,
3858adfc5217SJeff Kirsher then we have a parsing info BD (used for TSO or xsum),
3859adfc5217SJeff Kirsher and only then we have the rest of the TSO BDs.
3860adfc5217SJeff Kirsher (don't forget to mark the last one as last,
3861adfc5217SJeff Kirsher and to unmap only AFTER you write to the BD ...)
3862adfc5217SJeff Kirsher And above all, all PBD sizes are in words - NOT DWORDS!
3863adfc5217SJeff Kirsher */
3864adfc5217SJeff Kirsher
3865adfc5217SJeff Kirsher /* get current pkt produced now - advance it just before sending packet
3866adfc5217SJeff Kirsher * since mapping of pages may fail and cause packet to be dropped
3867adfc5217SJeff Kirsher */
3868adfc5217SJeff Kirsher pkt_prod = txdata->tx_pkt_prod;
3869adfc5217SJeff Kirsher bd_prod = TX_BD(txdata->tx_bd_prod);
3870adfc5217SJeff Kirsher
3871adfc5217SJeff Kirsher /* get a tx_buf and first BD
3872adfc5217SJeff Kirsher * tx_start_bd may be changed during SPLIT,
3873adfc5217SJeff Kirsher * but first_bd will always stay first
3874adfc5217SJeff Kirsher */
3875adfc5217SJeff Kirsher tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3876adfc5217SJeff Kirsher tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3877adfc5217SJeff Kirsher first_bd = tx_start_bd;
3878adfc5217SJeff Kirsher
3879adfc5217SJeff Kirsher tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3880adfc5217SJeff Kirsher
3881eeed018cSMichal Kalderon if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3882eeed018cSMichal Kalderon if (!(bp->flags & TX_TIMESTAMPING_EN)) {
38833c91f25cSGuilherme G. Piccoli bp->eth_stats.ptp_skip_tx_ts++;
3884eeed018cSMichal Kalderon BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3885eeed018cSMichal Kalderon } else if (bp->ptp_tx_skb) {
38863c91f25cSGuilherme G. Piccoli bp->eth_stats.ptp_skip_tx_ts++;
38873c91f25cSGuilherme G. Piccoli netdev_err_once(bp->dev,
38883c91f25cSGuilherme G. Piccoli "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
3889eeed018cSMichal Kalderon } else {
3890eeed018cSMichal Kalderon skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3891eeed018cSMichal Kalderon /* schedule check for Tx timestamp */
3892eeed018cSMichal Kalderon bp->ptp_tx_skb = skb_get(skb);
3893eeed018cSMichal Kalderon bp->ptp_tx_start = jiffies;
3894eeed018cSMichal Kalderon schedule_work(&bp->ptp_task);
3895eeed018cSMichal Kalderon }
3896eeed018cSMichal Kalderon }
3897eeed018cSMichal Kalderon
389891226790SDmitry Kravkov /* header nbd: indirectly zero other flags! */
389991226790SDmitry Kravkov tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3900adfc5217SJeff Kirsher
3901adfc5217SJeff Kirsher /* remember the first BD of the packet */
3902adfc5217SJeff Kirsher tx_buf->first_bd = txdata->tx_bd_prod;
3903adfc5217SJeff Kirsher tx_buf->skb = skb;
3904adfc5217SJeff Kirsher tx_buf->flags = 0;
3905adfc5217SJeff Kirsher
3906adfc5217SJeff Kirsher DP(NETIF_MSG_TX_QUEUED,
3907adfc5217SJeff Kirsher "sending pkt %u @%p next_idx %u bd %u @%p\n",
3908adfc5217SJeff Kirsher pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3909adfc5217SJeff Kirsher
3910df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) {
3911adfc5217SJeff Kirsher tx_start_bd->vlan_or_ethertype =
3912df8a39deSJiri Pirko cpu_to_le16(skb_vlan_tag_get(skb));
3913adfc5217SJeff Kirsher tx_start_bd->bd_flags.as_bitfield |=
3914adfc5217SJeff Kirsher (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3915dc1ba591SAriel Elior } else {
3916dc1ba591SAriel Elior /* when transmitting in a vf, start bd must hold the ethertype
3917dc1ba591SAriel Elior * for fw to enforce it
3918dc1ba591SAriel Elior */
391992f85f05SMintz, Yuval u16 vlan_tci = 0;
3920ea36475aSYuval Mintz #ifndef BNX2X_STOP_ON_ERROR
392192f85f05SMintz, Yuval if (IS_VF(bp)) {
3922ea36475aSYuval Mintz #endif
392392f85f05SMintz, Yuval /* Still need to consider inband vlan for enforced */
392492f85f05SMintz, Yuval if (__vlan_get_tag(skb, &vlan_tci)) {
3925dc1ba591SAriel Elior tx_start_bd->vlan_or_ethertype =
3926dc1ba591SAriel Elior cpu_to_le16(ntohs(eth->h_proto));
392792f85f05SMintz, Yuval } else {
392892f85f05SMintz, Yuval tx_start_bd->bd_flags.as_bitfield |=
392992f85f05SMintz, Yuval (X_ETH_INBAND_VLAN <<
393092f85f05SMintz, Yuval ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
393192f85f05SMintz, Yuval tx_start_bd->vlan_or_ethertype =
393292f85f05SMintz, Yuval cpu_to_le16(vlan_tci);
393392f85f05SMintz, Yuval }
3934ea36475aSYuval Mintz #ifndef BNX2X_STOP_ON_ERROR
393592f85f05SMintz, Yuval } else {
3936dc1ba591SAriel Elior /* used by FW for packet accounting */
3937adfc5217SJeff Kirsher tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
393892f85f05SMintz, Yuval }
3939ea36475aSYuval Mintz #endif
3940dc1ba591SAriel Elior }
394191226790SDmitry Kravkov
394291226790SDmitry Kravkov nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3943adfc5217SJeff Kirsher
3944adfc5217SJeff Kirsher /* turn on parsing and get a BD */
3945adfc5217SJeff Kirsher bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3946adfc5217SJeff Kirsher
3947adfc5217SJeff Kirsher if (xmit_type & XMIT_CSUM)
3948adfc5217SJeff Kirsher bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3949adfc5217SJeff Kirsher
3950adfc5217SJeff Kirsher if (!CHIP_IS_E1x(bp)) {
3951adfc5217SJeff Kirsher pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3952adfc5217SJeff Kirsher memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3953a848ade4SDmitry Kravkov
3954a848ade4SDmitry Kravkov if (xmit_type & XMIT_CSUM_ENC) {
3955a848ade4SDmitry Kravkov u16 global_data = 0;
3956a848ade4SDmitry Kravkov
3957a848ade4SDmitry Kravkov /* Set PBD in enc checksum offload case */
3958a848ade4SDmitry Kravkov hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3959a848ade4SDmitry Kravkov &pbd_e2_parsing_data,
3960a848ade4SDmitry Kravkov xmit_type);
3961a848ade4SDmitry Kravkov
3962a848ade4SDmitry Kravkov /* turn on 2nd parsing and get a BD */
3963a848ade4SDmitry Kravkov bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3964a848ade4SDmitry Kravkov
3965a848ade4SDmitry Kravkov pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3966a848ade4SDmitry Kravkov
3967a848ade4SDmitry Kravkov memset(pbd2, 0, sizeof(*pbd2));
3968a848ade4SDmitry Kravkov
3969a848ade4SDmitry Kravkov pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3970a848ade4SDmitry Kravkov (skb_inner_network_header(skb) -
3971a848ade4SDmitry Kravkov skb->data) >> 1;
3972a848ade4SDmitry Kravkov
3973a848ade4SDmitry Kravkov if (xmit_type & XMIT_GSO_ENC)
3974a848ade4SDmitry Kravkov bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3975a848ade4SDmitry Kravkov &global_data,
3976a848ade4SDmitry Kravkov xmit_type);
3977a848ade4SDmitry Kravkov
3978a848ade4SDmitry Kravkov pbd2->global_data = cpu_to_le16(global_data);
3979a848ade4SDmitry Kravkov
3980a848ade4SDmitry Kravkov /* add additional parse BD indication to start BD */
3981a848ade4SDmitry Kravkov SET_FLAG(tx_start_bd->general_data,
3982a848ade4SDmitry Kravkov ETH_TX_START_BD_PARSE_NBDS, 1);
3983a848ade4SDmitry Kravkov /* set encapsulation flag in start BD */
3984a848ade4SDmitry Kravkov SET_FLAG(tx_start_bd->general_data,
3985a848ade4SDmitry Kravkov ETH_TX_START_BD_TUNNEL_EXIST, 1);
3986fe26566dSDmitry Kravkov
3987fe26566dSDmitry Kravkov tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3988fe26566dSDmitry Kravkov
3989a848ade4SDmitry Kravkov nbd++;
3990a848ade4SDmitry Kravkov } else if (xmit_type & XMIT_CSUM) {
399191226790SDmitry Kravkov /* Set PBD in checksum offload case w/o encapsulation */
3992adfc5217SJeff Kirsher hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3993adfc5217SJeff Kirsher &pbd_e2_parsing_data,
3994adfc5217SJeff Kirsher xmit_type);
3995a848ade4SDmitry Kravkov }
3996dc1ba591SAriel Elior
3997e42780b6SDmitry Kravkov bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3998babe723dSYuval Mintz /* Add the macs to the parsing BD if this is a vf or if
3999babe723dSYuval Mintz * Tx Switching is enabled.
4000babe723dSYuval Mintz */
400191226790SDmitry Kravkov if (IS_VF(bp)) {
400291226790SDmitry Kravkov /* override GRE parameters in BD */
400391226790SDmitry Kravkov bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
400491226790SDmitry Kravkov &pbd_e2->data.mac_addr.src_mid,
400591226790SDmitry Kravkov &pbd_e2->data.mac_addr.src_lo,
4006adfc5217SJeff Kirsher eth->h_source);
400791226790SDmitry Kravkov
400891226790SDmitry Kravkov bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
400991226790SDmitry Kravkov &pbd_e2->data.mac_addr.dst_mid,
401091226790SDmitry Kravkov &pbd_e2->data.mac_addr.dst_lo,
4011adfc5217SJeff Kirsher eth->h_dest);
4012ea36475aSYuval Mintz } else {
4013ea36475aSYuval Mintz if (bp->flags & TX_SWITCHING)
4014ea36475aSYuval Mintz bnx2x_set_fw_mac_addr(
4015ea36475aSYuval Mintz &pbd_e2->data.mac_addr.dst_hi,
4016babe723dSYuval Mintz &pbd_e2->data.mac_addr.dst_mid,
4017babe723dSYuval Mintz &pbd_e2->data.mac_addr.dst_lo,
4018babe723dSYuval Mintz eth->h_dest);
4019ea36475aSYuval Mintz #ifdef BNX2X_STOP_ON_ERROR
4020ea36475aSYuval Mintz /* Enforce security is always set in Stop on Error -
4021ea36475aSYuval Mintz * source mac should be present in the parsing BD
4022ea36475aSYuval Mintz */
4023ea36475aSYuval Mintz bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4024ea36475aSYuval Mintz &pbd_e2->data.mac_addr.src_mid,
4025ea36475aSYuval Mintz &pbd_e2->data.mac_addr.src_lo,
4026ea36475aSYuval Mintz eth->h_source);
4027ea36475aSYuval Mintz #endif
4028adfc5217SJeff Kirsher }
402996bed4b9SYuval Mintz
403096bed4b9SYuval Mintz SET_FLAG(pbd_e2_parsing_data,
403196bed4b9SYuval Mintz ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4032adfc5217SJeff Kirsher } else {
403396bed4b9SYuval Mintz u16 global_data = 0;
4034adfc5217SJeff Kirsher pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4035adfc5217SJeff Kirsher memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4036adfc5217SJeff Kirsher /* Set PBD in checksum offload case */
4037adfc5217SJeff Kirsher if (xmit_type & XMIT_CSUM)
4038adfc5217SJeff Kirsher hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4039adfc5217SJeff Kirsher
404096bed4b9SYuval Mintz SET_FLAG(global_data,
404196bed4b9SYuval Mintz ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
404296bed4b9SYuval Mintz pbd_e1x->global_data |= cpu_to_le16(global_data);
4043adfc5217SJeff Kirsher }
4044adfc5217SJeff Kirsher
4045adfc5217SJeff Kirsher /* Setup the data pointer of the first BD of the packet */
4046adfc5217SJeff Kirsher tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4047adfc5217SJeff Kirsher tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4048adfc5217SJeff Kirsher tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4049adfc5217SJeff Kirsher pkt_size = tx_start_bd->nbytes;
4050adfc5217SJeff Kirsher
405151c1a580SMerav Sicron DP(NETIF_MSG_TX_QUEUED,
405291226790SDmitry Kravkov "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4053adfc5217SJeff Kirsher tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
405491226790SDmitry Kravkov le16_to_cpu(tx_start_bd->nbytes),
4055adfc5217SJeff Kirsher tx_start_bd->bd_flags.as_bitfield,
4056adfc5217SJeff Kirsher le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4057adfc5217SJeff Kirsher
4058adfc5217SJeff Kirsher if (xmit_type & XMIT_GSO) {
4059adfc5217SJeff Kirsher
4060adfc5217SJeff Kirsher DP(NETIF_MSG_TX_QUEUED,
4061adfc5217SJeff Kirsher "TSO packet len %d hlen %d total len %d tso size %d\n",
4062adfc5217SJeff Kirsher skb->len, hlen, skb_headlen(skb),
4063adfc5217SJeff Kirsher skb_shinfo(skb)->gso_size);
4064adfc5217SJeff Kirsher
4065adfc5217SJeff Kirsher tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4066adfc5217SJeff Kirsher
406791226790SDmitry Kravkov if (unlikely(skb_headlen(skb) > hlen)) {
406891226790SDmitry Kravkov nbd++;
4069adfc5217SJeff Kirsher bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4070adfc5217SJeff Kirsher &tx_start_bd, hlen,
407191226790SDmitry Kravkov bd_prod);
407291226790SDmitry Kravkov }
4073adfc5217SJeff Kirsher if (!CHIP_IS_E1x(bp))
4074e42780b6SDmitry Kravkov pbd_e2_parsing_data |=
4075e42780b6SDmitry Kravkov (skb_shinfo(skb)->gso_size <<
4076e42780b6SDmitry Kravkov ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4077e42780b6SDmitry Kravkov ETH_TX_PARSE_BD_E2_LSO_MSS;
4078adfc5217SJeff Kirsher else
4079e42780b6SDmitry Kravkov bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4080adfc5217SJeff Kirsher }
4081adfc5217SJeff Kirsher
4082adfc5217SJeff Kirsher /* Set the PBD's parsing_data field if not zero
4083adfc5217SJeff Kirsher * (for the chips newer than 57711).
4084adfc5217SJeff Kirsher */
4085adfc5217SJeff Kirsher if (pbd_e2_parsing_data)
4086adfc5217SJeff Kirsher pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4087adfc5217SJeff Kirsher
4088adfc5217SJeff Kirsher tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4089adfc5217SJeff Kirsher
4090adfc5217SJeff Kirsher /* Handle fragmented skb */
4091adfc5217SJeff Kirsher for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4092adfc5217SJeff Kirsher skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4093adfc5217SJeff Kirsher
40949e903e08SEric Dumazet mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
40959e903e08SEric Dumazet skb_frag_size(frag), DMA_TO_DEVICE);
4096adfc5217SJeff Kirsher if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
40972df1a70aSTom Herbert unsigned int pkts_compl = 0, bytes_compl = 0;
4098adfc5217SJeff Kirsher
409951c1a580SMerav Sicron DP(NETIF_MSG_TX_QUEUED,
410051c1a580SMerav Sicron "Unable to map page - dropping packet...\n");
4101adfc5217SJeff Kirsher
4102adfc5217SJeff Kirsher /* we need to unmap all buffers already mapped
4103adfc5217SJeff Kirsher * for this SKB;
4104adfc5217SJeff Kirsher * first_bd->nbd needs to be properly updated
4105adfc5217SJeff Kirsher * before the call to bnx2x_free_tx_pkt
4106adfc5217SJeff Kirsher */
4107adfc5217SJeff Kirsher first_bd->nbd = cpu_to_le16(nbd);
4108adfc5217SJeff Kirsher bnx2x_free_tx_pkt(bp, txdata,
41092df1a70aSTom Herbert TX_BD(txdata->tx_pkt_prod),
41102df1a70aSTom Herbert &pkts_compl, &bytes_compl);
4111adfc5217SJeff Kirsher return NETDEV_TX_OK;
4112adfc5217SJeff Kirsher }
4113adfc5217SJeff Kirsher
4114adfc5217SJeff Kirsher bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4115adfc5217SJeff Kirsher tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4116adfc5217SJeff Kirsher if (total_pkt_bd == NULL)
4117adfc5217SJeff Kirsher total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4118adfc5217SJeff Kirsher
4119adfc5217SJeff Kirsher tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4120adfc5217SJeff Kirsher tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
41219e903e08SEric Dumazet tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
41229e903e08SEric Dumazet le16_add_cpu(&pkt_size, skb_frag_size(frag));
4123adfc5217SJeff Kirsher nbd++;
4124adfc5217SJeff Kirsher
4125adfc5217SJeff Kirsher DP(NETIF_MSG_TX_QUEUED,
4126adfc5217SJeff Kirsher "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4127adfc5217SJeff Kirsher i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4128adfc5217SJeff Kirsher le16_to_cpu(tx_data_bd->nbytes));
4129adfc5217SJeff Kirsher }
4130adfc5217SJeff Kirsher
4131adfc5217SJeff Kirsher DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4132adfc5217SJeff Kirsher
4133adfc5217SJeff Kirsher /* update with actual num BDs */
4134adfc5217SJeff Kirsher first_bd->nbd = cpu_to_le16(nbd);
4135adfc5217SJeff Kirsher
4136adfc5217SJeff Kirsher bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4137adfc5217SJeff Kirsher
4138adfc5217SJeff Kirsher /* now send a tx doorbell, counting the next BD
4139adfc5217SJeff Kirsher * if the packet contains or ends with it
4140adfc5217SJeff Kirsher */
4141adfc5217SJeff Kirsher if (TX_BD_POFF(bd_prod) < nbd)
4142adfc5217SJeff Kirsher nbd++;
4143adfc5217SJeff Kirsher
4144adfc5217SJeff Kirsher /* total_pkt_bytes should be set on the first data BD if
4145adfc5217SJeff Kirsher * it's not an LSO packet and there is more than one
4146adfc5217SJeff Kirsher * data BD. In this case pkt_size is limited by an MTU value.
4147adfc5217SJeff Kirsher * However we prefer to set it for an LSO packet (while we don't
4148adfc5217SJeff Kirsher * have to) in order to save some CPU cycles in the non-LSO
4149adfc5217SJeff Kirsher * case, where we care much more about them.
4150adfc5217SJeff Kirsher */
4151adfc5217SJeff Kirsher if (total_pkt_bd != NULL)
4152adfc5217SJeff Kirsher total_pkt_bd->total_pkt_bytes = pkt_size;
4153adfc5217SJeff Kirsher
4154adfc5217SJeff Kirsher if (pbd_e1x)
4155adfc5217SJeff Kirsher DP(NETIF_MSG_TX_QUEUED,
415651c1a580SMerav Sicron "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4157adfc5217SJeff Kirsher pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4158adfc5217SJeff Kirsher pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4159adfc5217SJeff Kirsher pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4160adfc5217SJeff Kirsher le16_to_cpu(pbd_e1x->total_hlen_w));
4161adfc5217SJeff Kirsher if (pbd_e2)
4162adfc5217SJeff Kirsher DP(NETIF_MSG_TX_QUEUED,
4163adfc5217SJeff Kirsher "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
416491226790SDmitry Kravkov pbd_e2,
416591226790SDmitry Kravkov pbd_e2->data.mac_addr.dst_hi,
416691226790SDmitry Kravkov pbd_e2->data.mac_addr.dst_mid,
416791226790SDmitry Kravkov pbd_e2->data.mac_addr.dst_lo,
416891226790SDmitry Kravkov pbd_e2->data.mac_addr.src_hi,
416991226790SDmitry Kravkov pbd_e2->data.mac_addr.src_mid,
417091226790SDmitry Kravkov pbd_e2->data.mac_addr.src_lo,
4171adfc5217SJeff Kirsher pbd_e2->parsing_data);
4172adfc5217SJeff Kirsher DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4173adfc5217SJeff Kirsher
41742df1a70aSTom Herbert netdev_tx_sent_queue(txq, skb->len);
41752df1a70aSTom Herbert
41768373c57dSWillem de Bruijn skb_tx_timestamp(skb);
41778373c57dSWillem de Bruijn
4178adfc5217SJeff Kirsher txdata->tx_pkt_prod++;
4179adfc5217SJeff Kirsher /*
4180adfc5217SJeff Kirsher * Make sure that the BD data is updated before updating the producer
4181adfc5217SJeff Kirsher * since FW might read the BD right after the producer is updated.
4182adfc5217SJeff Kirsher * This is only applicable for weak-ordered memory model archs such
4183adfc5217SJeff Kirsher * as IA-64. The following barrier is also mandatory since FW
4184adfc5217SJeff Kirsher * assumes packets must have BDs.
4185adfc5217SJeff Kirsher */
4186adfc5217SJeff Kirsher wmb();
4187adfc5217SJeff Kirsher
4188adfc5217SJeff Kirsher txdata->tx_db.data.prod += nbd;
4189edd87423SSinan Kaya /* make sure descriptor update is observed by HW */
4190edd87423SSinan Kaya wmb();
4191adfc5217SJeff Kirsher
41927f883c77SSinan Kaya DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4193adfc5217SJeff Kirsher
4194adfc5217SJeff Kirsher txdata->tx_bd_prod += nbd;
4195adfc5217SJeff Kirsher
41967df2dc6bSDmitry Kravkov if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4197adfc5217SJeff Kirsher netif_tx_stop_queue(txq);
4198adfc5217SJeff Kirsher
4199adfc5217SJeff Kirsher /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4200adfc5217SJeff Kirsher * ordering of set_bit() in netif_tx_stop_queue() and read of
4201adfc5217SJeff Kirsher * fp->bd_tx_cons */
4202adfc5217SJeff Kirsher smp_mb();
4203adfc5217SJeff Kirsher
420415192a8cSBarak Witkowski bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
42057df2dc6bSDmitry Kravkov if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4206adfc5217SJeff Kirsher netif_tx_wake_queue(txq);
4207adfc5217SJeff Kirsher }
4208adfc5217SJeff Kirsher txdata->tx_pkt++;
4209adfc5217SJeff Kirsher
4210adfc5217SJeff Kirsher return NETDEV_TX_OK;
4211adfc5217SJeff Kirsher }
4212adfc5217SJeff Kirsher
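/* Fill the c2s (CoS to PCP, per the shmem c2s_pcp_map fields) mapping and
 * its default value; outside of BD multi-function mode this is simply the
 * identity map.
 */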
bnx2x_get_c2s_mapping(struct bnx2x * bp,u8 * c2s_map,u8 * c2s_default)4213230d00ebSYuval Mintz void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4214230d00ebSYuval Mintz {
4215230d00ebSYuval Mintz int mfw_vn = BP_FW_MB_IDX(bp);
4216230d00ebSYuval Mintz u32 tmp;
4217230d00ebSYuval Mintz
4218230d00ebSYuval Mintz /* If the shmem shouldn't affect the configuration, return an identity map */
4219230d00ebSYuval Mintz if (!IS_MF_BD(bp)) {
4220230d00ebSYuval Mintz int i;
4221230d00ebSYuval Mintz
4222230d00ebSYuval Mintz for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4223230d00ebSYuval Mintz c2s_map[i] = i;
4224230d00ebSYuval Mintz *c2s_default = 0;
4225230d00ebSYuval Mintz
4226230d00ebSYuval Mintz return;
4227230d00ebSYuval Mintz }
4228230d00ebSYuval Mintz
4229230d00ebSYuval Mintz tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4230230d00ebSYuval Mintz tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4231230d00ebSYuval Mintz c2s_map[0] = tmp & 0xff;
4232230d00ebSYuval Mintz c2s_map[1] = (tmp >> 8) & 0xff;
4233230d00ebSYuval Mintz c2s_map[2] = (tmp >> 16) & 0xff;
4234230d00ebSYuval Mintz c2s_map[3] = (tmp >> 24) & 0xff;
4235230d00ebSYuval Mintz
4236230d00ebSYuval Mintz tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4237230d00ebSYuval Mintz tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4238230d00ebSYuval Mintz c2s_map[4] = tmp & 0xff;
4239230d00ebSYuval Mintz c2s_map[5] = (tmp >> 8) & 0xff;
4240230d00ebSYuval Mintz c2s_map[6] = (tmp >> 16) & 0xff;
4241230d00ebSYuval Mintz c2s_map[7] = (tmp >> 24) & 0xff;
4242230d00ebSYuval Mintz
4243230d00ebSYuval Mintz tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4244230d00ebSYuval Mintz tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4245230d00ebSYuval Mintz *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4246230d00ebSYuval Mintz }
4247230d00ebSYuval Mintz
4248adfc5217SJeff Kirsher /**
4249adfc5217SJeff Kirsher * bnx2x_setup_tc - routine to configure net_device for multi tc
4250adfc5217SJeff Kirsher *
4251525090b5SWang Hai * @dev: net device to configure
4252525090b5SWang Hai * @num_tc: number of traffic classes to enable
4253adfc5217SJeff Kirsher *
4254adfc5217SJeff Kirsher * callback connected to the ndo_setup_tc function pointer
4255adfc5217SJeff Kirsher */
bnx2x_setup_tc(struct net_device * dev,u8 num_tc)4256adfc5217SJeff Kirsher int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4257adfc5217SJeff Kirsher {
4258adfc5217SJeff Kirsher struct bnx2x *bp = netdev_priv(dev);
4259230d00ebSYuval Mintz u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4260230d00ebSYuval Mintz int cos, prio, count, offset;
4261adfc5217SJeff Kirsher
4262adfc5217SJeff Kirsher /* setup tc must be called under rtnl lock */
4263adfc5217SJeff Kirsher ASSERT_RTNL();
4264adfc5217SJeff Kirsher
426516a5fd92SYuval Mintz /* no traffic classes requested - reset the tc configuration and return */
4266adfc5217SJeff Kirsher if (!num_tc) {
4267adfc5217SJeff Kirsher netdev_reset_tc(dev);
4268adfc5217SJeff Kirsher return 0;
4269adfc5217SJeff Kirsher }
4270adfc5217SJeff Kirsher
4271adfc5217SJeff Kirsher /* requested to support too many traffic classes */
4272adfc5217SJeff Kirsher if (num_tc > bp->max_cos) {
42736bf07b8eSYuval Mintz BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4274adfc5217SJeff Kirsher num_tc, bp->max_cos);
4275adfc5217SJeff Kirsher return -EINVAL;
4276adfc5217SJeff Kirsher }
4277adfc5217SJeff Kirsher
4278adfc5217SJeff Kirsher /* declare amount of supported traffic classes */
4279adfc5217SJeff Kirsher if (netdev_set_num_tc(dev, num_tc)) {
428051c1a580SMerav Sicron BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4281adfc5217SJeff Kirsher return -EINVAL;
4282adfc5217SJeff Kirsher }
4283adfc5217SJeff Kirsher
4284230d00ebSYuval Mintz bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4285230d00ebSYuval Mintz
4286adfc5217SJeff Kirsher /* configure priority to traffic class mapping */
4287adfc5217SJeff Kirsher for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4288230d00ebSYuval Mintz int outer_prio = c2s_map[prio];
4289230d00ebSYuval Mintz
4290230d00ebSYuval Mintz netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
429151c1a580SMerav Sicron DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
429251c1a580SMerav Sicron "mapping priority %d to tc %d\n",
4293230d00ebSYuval Mintz outer_prio, bp->prio_to_cos[outer_prio]);
4294adfc5217SJeff Kirsher }
4295adfc5217SJeff Kirsher
429616a5fd92SYuval Mintz /* Use this configuration to differentiate tc0 from other COSes
4297adfc5217SJeff Kirsher This can be used for ets or pfc, and save the effort of setting
4298adfc5217SJeff Kirsher up a multi class queue disc or negotiating DCBX with a switch
4299adfc5217SJeff Kirsher netdev_set_prio_tc_map(dev, 0, 0);
430094f05b0fSJoe Perches DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4301adfc5217SJeff Kirsher for (prio = 1; prio < 16; prio++) {
4302adfc5217SJeff Kirsher netdev_set_prio_tc_map(dev, prio, 1);
430394f05b0fSJoe Perches DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4304adfc5217SJeff Kirsher } */
4305adfc5217SJeff Kirsher
4306adfc5217SJeff Kirsher /* configure traffic class to transmission queue mapping */
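	/* Each traffic class is backed by the full set of ETH queues,
	 * starting at an offset of cos * (number of non-CNIC queues), so
	 * every COS owns its own contiguous block of TX queues.
	 */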
4307adfc5217SJeff Kirsher for (cos = 0; cos < bp->max_cos; cos++) {
4308adfc5217SJeff Kirsher count = BNX2X_NUM_ETH_QUEUES(bp);
430965565884SMerav Sicron offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4310adfc5217SJeff Kirsher netdev_set_tc_queue(dev, cos, count, offset);
431151c1a580SMerav Sicron DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
431251c1a580SMerav Sicron "mapping tc %d to offset %d count %d\n",
4313adfc5217SJeff Kirsher cos, offset, count);
4314adfc5217SJeff Kirsher }
4315adfc5217SJeff Kirsher
4316adfc5217SJeff Kirsher return 0;
4317adfc5217SJeff Kirsher }
4318adfc5217SJeff Kirsher
__bnx2x_setup_tc(struct net_device * dev,enum tc_setup_type type,void * type_data)43192572ac53SJiri Pirko int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4320de4784caSJiri Pirko void *type_data)
4321e4c6734eSJohn Fastabend {
4322de4784caSJiri Pirko struct tc_mqprio_qopt *mqprio = type_data;
4323de4784caSJiri Pirko
4324575ed7d3SNogah Frankel if (type != TC_SETUP_QDISC_MQPRIO)
432538cf0426SJiri Pirko return -EOPNOTSUPP;
432656f36acdSAmritha Nambiar
4327de4784caSJiri Pirko mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
432856f36acdSAmritha Nambiar
4329de4784caSJiri Pirko return bnx2x_setup_tc(dev, mqprio->num_tc);
4330e4c6734eSJohn Fastabend }
4331e4c6734eSJohn Fastabend
4332adfc5217SJeff Kirsher /* called with rtnl_lock */
bnx2x_change_mac_addr(struct net_device * dev,void * p)4333adfc5217SJeff Kirsher int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4334adfc5217SJeff Kirsher {
4335adfc5217SJeff Kirsher struct sockaddr *addr = p;
4336adfc5217SJeff Kirsher struct bnx2x *bp = netdev_priv(dev);
4337adfc5217SJeff Kirsher int rc = 0;
4338adfc5217SJeff Kirsher
43392e98ffc2SDmitry Kravkov if (!is_valid_ether_addr(addr->sa_data)) {
434051c1a580SMerav Sicron BNX2X_ERR("Requested MAC address is not valid\n");
4341adfc5217SJeff Kirsher return -EINVAL;
434251c1a580SMerav Sicron }
4343adfc5217SJeff Kirsher
43442e98ffc2SDmitry Kravkov if (IS_MF_STORAGE_ONLY(bp)) {
43452e98ffc2SDmitry Kravkov BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4346614c76dfSDmitry Kravkov return -EINVAL;
434751c1a580SMerav Sicron }
4348614c76dfSDmitry Kravkov
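	/* If the interface is up, remove the currently configured MAC from
	 * the device before installing the new one.
	 */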
4349adfc5217SJeff Kirsher if (netif_running(dev)) {
4350adfc5217SJeff Kirsher rc = bnx2x_set_eth_mac(bp, false);
4351adfc5217SJeff Kirsher if (rc)
4352adfc5217SJeff Kirsher return rc;
4353adfc5217SJeff Kirsher }
4354adfc5217SJeff Kirsher
4355a05e4c0aSJakub Kicinski eth_hw_addr_set(dev, addr->sa_data);
4356adfc5217SJeff Kirsher
4357adfc5217SJeff Kirsher if (netif_running(dev))
4358adfc5217SJeff Kirsher rc = bnx2x_set_eth_mac(bp, true);
4359adfc5217SJeff Kirsher
4360230d00ebSYuval Mintz if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4361230d00ebSYuval Mintz SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4362230d00ebSYuval Mintz
4363adfc5217SJeff Kirsher return rc;
4364adfc5217SJeff Kirsher }
4365adfc5217SJeff Kirsher
bnx2x_free_fp_mem_at(struct bnx2x * bp,int fp_index)4366adfc5217SJeff Kirsher static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4367adfc5217SJeff Kirsher {
4368adfc5217SJeff Kirsher union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4369adfc5217SJeff Kirsher struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4370adfc5217SJeff Kirsher u8 cos;
4371adfc5217SJeff Kirsher
4372adfc5217SJeff Kirsher /* Common */
437355c11941SMerav Sicron
4374adfc5217SJeff Kirsher if (IS_FCOE_IDX(fp_index)) {
4375adfc5217SJeff Kirsher memset(sb, 0, sizeof(union host_hc_status_block));
4376adfc5217SJeff Kirsher fp->status_blk_mapping = 0;
4377adfc5217SJeff Kirsher } else {
4378adfc5217SJeff Kirsher /* status blocks */
4379adfc5217SJeff Kirsher if (!CHIP_IS_E1x(bp))
4380adfc5217SJeff Kirsher BNX2X_PCI_FREE(sb->e2_sb,
4381adfc5217SJeff Kirsher bnx2x_fp(bp, fp_index,
4382adfc5217SJeff Kirsher status_blk_mapping),
4383adfc5217SJeff Kirsher sizeof(struct host_hc_status_block_e2));
4384adfc5217SJeff Kirsher else
4385adfc5217SJeff Kirsher BNX2X_PCI_FREE(sb->e1x_sb,
4386adfc5217SJeff Kirsher bnx2x_fp(bp, fp_index,
4387adfc5217SJeff Kirsher status_blk_mapping),
4388adfc5217SJeff Kirsher sizeof(struct host_hc_status_block_e1x));
4389adfc5217SJeff Kirsher }
439055c11941SMerav Sicron
4391adfc5217SJeff Kirsher /* Rx */
4392adfc5217SJeff Kirsher if (!skip_rx_queue(bp, fp_index)) {
4393adfc5217SJeff Kirsher bnx2x_free_rx_bds(fp);
4394adfc5217SJeff Kirsher
4395adfc5217SJeff Kirsher /* fastpath rx rings: rx_buf rx_desc rx_comp */
4396adfc5217SJeff Kirsher BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4397adfc5217SJeff Kirsher BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4398adfc5217SJeff Kirsher bnx2x_fp(bp, fp_index, rx_desc_mapping),
4399adfc5217SJeff Kirsher sizeof(struct eth_rx_bd) * NUM_RX_BD);
4400adfc5217SJeff Kirsher
4401adfc5217SJeff Kirsher BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4402adfc5217SJeff Kirsher bnx2x_fp(bp, fp_index, rx_comp_mapping),
4403adfc5217SJeff Kirsher sizeof(struct eth_fast_path_rx_cqe) *
4404adfc5217SJeff Kirsher NUM_RCQ_BD);
4405adfc5217SJeff Kirsher
4406adfc5217SJeff Kirsher /* SGE ring */
4407adfc5217SJeff Kirsher BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4408adfc5217SJeff Kirsher BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4409adfc5217SJeff Kirsher bnx2x_fp(bp, fp_index, rx_sge_mapping),
4410adfc5217SJeff Kirsher BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4411adfc5217SJeff Kirsher }
4412adfc5217SJeff Kirsher
4413adfc5217SJeff Kirsher /* Tx */
4414adfc5217SJeff Kirsher if (!skip_tx_queue(bp, fp_index)) {
4415adfc5217SJeff Kirsher /* fastpath tx rings: tx_buf tx_desc */
4416adfc5217SJeff Kirsher for_each_cos_in_tx_queue(fp, cos) {
441765565884SMerav Sicron struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4418adfc5217SJeff Kirsher
441951c1a580SMerav Sicron DP(NETIF_MSG_IFDOWN,
442094f05b0fSJoe Perches "freeing tx memory of fp %d cos %d cid %d\n",
4421adfc5217SJeff Kirsher fp_index, cos, txdata->cid);
4422adfc5217SJeff Kirsher
4423adfc5217SJeff Kirsher BNX2X_FREE(txdata->tx_buf_ring);
4424adfc5217SJeff Kirsher BNX2X_PCI_FREE(txdata->tx_desc_ring,
4425adfc5217SJeff Kirsher txdata->tx_desc_mapping,
4426adfc5217SJeff Kirsher sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4427adfc5217SJeff Kirsher }
4428adfc5217SJeff Kirsher }
4429adfc5217SJeff Kirsher /* end of fastpath */
4430adfc5217SJeff Kirsher }
4431adfc5217SJeff Kirsher
bnx2x_free_fp_mem_cnic(struct bnx2x * bp)4432a8f47eb7Sstephen hemminger static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
443355c11941SMerav Sicron {
443455c11941SMerav Sicron int i;
443555c11941SMerav Sicron for_each_cnic_queue(bp, i)
443655c11941SMerav Sicron bnx2x_free_fp_mem_at(bp, i);
443755c11941SMerav Sicron }
443855c11941SMerav Sicron
bnx2x_free_fp_mem(struct bnx2x * bp)4439adfc5217SJeff Kirsher void bnx2x_free_fp_mem(struct bnx2x *bp)
4440adfc5217SJeff Kirsher {
4441adfc5217SJeff Kirsher int i;
444255c11941SMerav Sicron for_each_eth_queue(bp, i)
4443adfc5217SJeff Kirsher bnx2x_free_fp_mem_at(bp, i);
4444adfc5217SJeff Kirsher }
4445adfc5217SJeff Kirsher
set_sb_shortcuts(struct bnx2x * bp,int index)44461191cb83SEric Dumazet static void set_sb_shortcuts(struct bnx2x *bp, int index)
4447adfc5217SJeff Kirsher {
4448adfc5217SJeff Kirsher union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4449adfc5217SJeff Kirsher if (!CHIP_IS_E1x(bp)) {
4450adfc5217SJeff Kirsher bnx2x_fp(bp, index, sb_index_values) =
4451adfc5217SJeff Kirsher (__le16 *)status_blk.e2_sb->sb.index_values;
4452adfc5217SJeff Kirsher bnx2x_fp(bp, index, sb_running_index) =
4453adfc5217SJeff Kirsher (__le16 *)status_blk.e2_sb->sb.running_index;
4454adfc5217SJeff Kirsher } else {
4455adfc5217SJeff Kirsher bnx2x_fp(bp, index, sb_index_values) =
4456adfc5217SJeff Kirsher (__le16 *)status_blk.e1x_sb->sb.index_values;
4457adfc5217SJeff Kirsher bnx2x_fp(bp, index, sb_running_index) =
4458adfc5217SJeff Kirsher (__le16 *)status_blk.e1x_sb->sb.running_index;
4459adfc5217SJeff Kirsher }
4460adfc5217SJeff Kirsher }
4461adfc5217SJeff Kirsher
44621191cb83SEric Dumazet /* Returns the number of actually allocated BDs */
bnx2x_alloc_rx_bds(struct bnx2x_fastpath * fp,int rx_ring_size)44631191cb83SEric Dumazet static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
44641191cb83SEric Dumazet int rx_ring_size)
44651191cb83SEric Dumazet {
44661191cb83SEric Dumazet struct bnx2x *bp = fp->bp;
44671191cb83SEric Dumazet u16 ring_prod, cqe_ring_prod;
44681191cb83SEric Dumazet int i, failure_cnt = 0;
44691191cb83SEric Dumazet
44701191cb83SEric Dumazet fp->rx_comp_cons = 0;
44711191cb83SEric Dumazet cqe_ring_prod = ring_prod = 0;
44721191cb83SEric Dumazet
44731191cb83SEric Dumazet 	/* This routine is called only during fp init, so
44741191cb83SEric Dumazet * fp->eth_q_stats.rx_skb_alloc_failed = 0
44751191cb83SEric Dumazet */
44761191cb83SEric Dumazet for (i = 0; i < rx_ring_size; i++) {
4477996dedbaSMichal Schmidt if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
44781191cb83SEric Dumazet failure_cnt++;
44791191cb83SEric Dumazet continue;
44801191cb83SEric Dumazet }
44811191cb83SEric Dumazet ring_prod = NEXT_RX_IDX(ring_prod);
44821191cb83SEric Dumazet cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
44831191cb83SEric Dumazet WARN_ON(ring_prod <= (i - failure_cnt));
44841191cb83SEric Dumazet }
44851191cb83SEric Dumazet
44861191cb83SEric Dumazet if (failure_cnt)
44871191cb83SEric Dumazet BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
44881191cb83SEric Dumazet i - failure_cnt, fp->index);
44891191cb83SEric Dumazet
44901191cb83SEric Dumazet fp->rx_bd_prod = ring_prod;
44911191cb83SEric Dumazet /* Limit the CQE producer by the CQE ring size */
44921191cb83SEric Dumazet fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
44931191cb83SEric Dumazet cqe_ring_prod);
44941191cb83SEric Dumazet
449515192a8cSBarak Witkowski bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
44961191cb83SEric Dumazet
44971191cb83SEric Dumazet return i - failure_cnt;
44981191cb83SEric Dumazet }
44991191cb83SEric Dumazet
bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath * fp)45001191cb83SEric Dumazet static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
45011191cb83SEric Dumazet {
45021191cb83SEric Dumazet int i;
45031191cb83SEric Dumazet
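	/* The last CQE of each CQ page is used as a "next page" pointer:
	 * page i links to page (i % NUM_RCQ_RINGS), i.e. to the following
	 * page, with the last page wrapping back to the first one.
	 */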
45041191cb83SEric Dumazet for (i = 1; i <= NUM_RCQ_RINGS; i++) {
45051191cb83SEric Dumazet struct eth_rx_cqe_next_page *nextpg;
45061191cb83SEric Dumazet
45071191cb83SEric Dumazet nextpg = (struct eth_rx_cqe_next_page *)
45081191cb83SEric Dumazet &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
45091191cb83SEric Dumazet nextpg->addr_hi =
45101191cb83SEric Dumazet cpu_to_le32(U64_HI(fp->rx_comp_mapping +
45111191cb83SEric Dumazet BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
45121191cb83SEric Dumazet nextpg->addr_lo =
45131191cb83SEric Dumazet cpu_to_le32(U64_LO(fp->rx_comp_mapping +
45141191cb83SEric Dumazet BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
45151191cb83SEric Dumazet }
45161191cb83SEric Dumazet }
45171191cb83SEric Dumazet
bnx2x_alloc_fp_mem_at(struct bnx2x * bp,int index)4518adfc5217SJeff Kirsher static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4519adfc5217SJeff Kirsher {
4520adfc5217SJeff Kirsher union host_hc_status_block *sb;
4521adfc5217SJeff Kirsher struct bnx2x_fastpath *fp = &bp->fp[index];
4522adfc5217SJeff Kirsher int ring_size = 0;
4523adfc5217SJeff Kirsher u8 cos;
45248decf868SDavid S. Miller int rx_ring_size = 0;
4525adfc5217SJeff Kirsher
45262e98ffc2SDmitry Kravkov if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4527614c76dfSDmitry Kravkov rx_ring_size = MIN_RX_SIZE_NONTPA;
4528614c76dfSDmitry Kravkov bp->rx_ring_size = rx_ring_size;
452955c11941SMerav Sicron } else if (!bp->rx_ring_size) {
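		/* Spread the maximum number of available RX BDs evenly
		 * across the RX queues.
		 */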
45308decf868SDavid S. Miller rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4531adfc5217SJeff Kirsher
4532065f8b92SYuval Mintz if (CHIP_IS_E3(bp)) {
4533065f8b92SYuval Mintz u32 cfg = SHMEM_RD(bp,
4534065f8b92SYuval Mintz dev_info.port_hw_config[BP_PORT(bp)].
4535065f8b92SYuval Mintz default_cfg);
4536065f8b92SYuval Mintz
4537065f8b92SYuval Mintz /* Decrease ring size for 1G functions */
4538d760fc37SMintz Yuval if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4539d760fc37SMintz Yuval PORT_HW_CFG_NET_SERDES_IF_SGMII)
4540d760fc37SMintz Yuval rx_ring_size /= 10;
4541065f8b92SYuval Mintz }
4542d760fc37SMintz Yuval
4543adfc5217SJeff Kirsher 		/* allocate at least the number of buffers required by FW */
4544adfc5217SJeff Kirsher rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
45458decf868SDavid S. Miller MIN_RX_SIZE_TPA, rx_ring_size);
45468decf868SDavid S. Miller
45478decf868SDavid S. Miller bp->rx_ring_size = rx_ring_size;
4548614c76dfSDmitry Kravkov } else /* if rx_ring_size specified - use it */
45498decf868SDavid S. Miller rx_ring_size = bp->rx_ring_size;
4550adfc5217SJeff Kirsher
455104c46736SYuval Mintz DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
455204c46736SYuval Mintz
4553adfc5217SJeff Kirsher /* Common */
4554adfc5217SJeff Kirsher sb = &bnx2x_fp(bp, index, status_blk);
455555c11941SMerav Sicron
4556adfc5217SJeff Kirsher if (!IS_FCOE_IDX(index)) {
4557adfc5217SJeff Kirsher /* status blocks */
4558cd2b0389SJoe Perches if (!CHIP_IS_E1x(bp)) {
4559cd2b0389SJoe Perches sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4560adfc5217SJeff Kirsher sizeof(struct host_hc_status_block_e2));
4561cd2b0389SJoe Perches if (!sb->e2_sb)
4562cd2b0389SJoe Perches goto alloc_mem_err;
4563cd2b0389SJoe Perches } else {
4564cd2b0389SJoe Perches sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4565adfc5217SJeff Kirsher sizeof(struct host_hc_status_block_e1x));
4566cd2b0389SJoe Perches if (!sb->e1x_sb)
4567cd2b0389SJoe Perches goto alloc_mem_err;
4568cd2b0389SJoe Perches }
4569adfc5217SJeff Kirsher }
4570adfc5217SJeff Kirsher
4571adfc5217SJeff Kirsher /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4572adfc5217SJeff Kirsher * set shortcuts for it.
4573adfc5217SJeff Kirsher */
4574adfc5217SJeff Kirsher if (!IS_FCOE_IDX(index))
4575adfc5217SJeff Kirsher set_sb_shortcuts(bp, index);
4576adfc5217SJeff Kirsher
4577adfc5217SJeff Kirsher /* Tx */
4578adfc5217SJeff Kirsher if (!skip_tx_queue(bp, index)) {
4579adfc5217SJeff Kirsher /* fastpath tx rings: tx_buf tx_desc */
4580adfc5217SJeff Kirsher for_each_cos_in_tx_queue(fp, cos) {
458165565884SMerav Sicron struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4582adfc5217SJeff Kirsher
458351c1a580SMerav Sicron DP(NETIF_MSG_IFUP,
458451c1a580SMerav Sicron "allocating tx memory of fp %d cos %d\n",
4585adfc5217SJeff Kirsher index, cos);
4586adfc5217SJeff Kirsher
4587cd2b0389SJoe Perches txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4588cd2b0389SJoe Perches sizeof(struct sw_tx_bd),
4589cd2b0389SJoe Perches GFP_KERNEL);
4590cd2b0389SJoe Perches if (!txdata->tx_buf_ring)
4591cd2b0389SJoe Perches goto alloc_mem_err;
4592cd2b0389SJoe Perches txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4593adfc5217SJeff Kirsher sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4594cd2b0389SJoe Perches if (!txdata->tx_desc_ring)
4595cd2b0389SJoe Perches goto alloc_mem_err;
4596adfc5217SJeff Kirsher }
4597adfc5217SJeff Kirsher }
4598adfc5217SJeff Kirsher
4599adfc5217SJeff Kirsher /* Rx */
4600adfc5217SJeff Kirsher if (!skip_rx_queue(bp, index)) {
4601adfc5217SJeff Kirsher /* fastpath rx rings: rx_buf rx_desc rx_comp */
4602cd2b0389SJoe Perches bnx2x_fp(bp, index, rx_buf_ring) =
4603cd2b0389SJoe Perches kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4604cd2b0389SJoe Perches if (!bnx2x_fp(bp, index, rx_buf_ring))
4605cd2b0389SJoe Perches goto alloc_mem_err;
4606cd2b0389SJoe Perches bnx2x_fp(bp, index, rx_desc_ring) =
4607cd2b0389SJoe Perches BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4608adfc5217SJeff Kirsher sizeof(struct eth_rx_bd) * NUM_RX_BD);
4609cd2b0389SJoe Perches if (!bnx2x_fp(bp, index, rx_desc_ring))
4610cd2b0389SJoe Perches goto alloc_mem_err;
4611adfc5217SJeff Kirsher
461275b29459SDmitry Kravkov /* Seed all CQEs by 1s */
4613cd2b0389SJoe Perches bnx2x_fp(bp, index, rx_comp_ring) =
4614cd2b0389SJoe Perches BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4615cd2b0389SJoe Perches sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4616cd2b0389SJoe Perches if (!bnx2x_fp(bp, index, rx_comp_ring))
4617cd2b0389SJoe Perches goto alloc_mem_err;
4618adfc5217SJeff Kirsher
4619adfc5217SJeff Kirsher /* SGE ring */
4620cd2b0389SJoe Perches bnx2x_fp(bp, index, rx_page_ring) =
4621cd2b0389SJoe Perches kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4622cd2b0389SJoe Perches GFP_KERNEL);
4623cd2b0389SJoe Perches if (!bnx2x_fp(bp, index, rx_page_ring))
4624cd2b0389SJoe Perches goto alloc_mem_err;
4625cd2b0389SJoe Perches bnx2x_fp(bp, index, rx_sge_ring) =
4626cd2b0389SJoe Perches BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4627adfc5217SJeff Kirsher BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4628cd2b0389SJoe Perches if (!bnx2x_fp(bp, index, rx_sge_ring))
4629cd2b0389SJoe Perches goto alloc_mem_err;
4630adfc5217SJeff Kirsher /* RX BD ring */
4631adfc5217SJeff Kirsher bnx2x_set_next_page_rx_bd(fp);
4632adfc5217SJeff Kirsher
4633adfc5217SJeff Kirsher /* CQ ring */
4634adfc5217SJeff Kirsher bnx2x_set_next_page_rx_cq(fp);
4635adfc5217SJeff Kirsher
4636adfc5217SJeff Kirsher /* BDs */
4637adfc5217SJeff Kirsher ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4638adfc5217SJeff Kirsher if (ring_size < rx_ring_size)
4639adfc5217SJeff Kirsher goto alloc_mem_err;
4640adfc5217SJeff Kirsher }
4641adfc5217SJeff Kirsher
4642adfc5217SJeff Kirsher return 0;
4643adfc5217SJeff Kirsher
4644adfc5217SJeff Kirsher /* handles low memory cases */
4645adfc5217SJeff Kirsher alloc_mem_err:
4646adfc5217SJeff Kirsher BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4647adfc5217SJeff Kirsher index, ring_size);
4648adfc5217SJeff Kirsher 	/* FW will drop all packets if the queue is not big enough;
4649adfc5217SJeff Kirsher 	 * in these cases we disable the queue.
4650adfc5217SJeff Kirsher 	 * Min size is different for OOO, TPA and non-TPA queues.
4651adfc5217SJeff Kirsher */
46527e6b4d44SMichal Schmidt if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4653adfc5217SJeff Kirsher MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4654adfc5217SJeff Kirsher /* release memory allocated for this queue */
4655adfc5217SJeff Kirsher bnx2x_free_fp_mem_at(bp, index);
4656adfc5217SJeff Kirsher return -ENOMEM;
4657adfc5217SJeff Kirsher }
4658adfc5217SJeff Kirsher return 0;
4659adfc5217SJeff Kirsher }
4660adfc5217SJeff Kirsher
bnx2x_alloc_fp_mem_cnic(struct bnx2x * bp)4661a8f47eb7Sstephen hemminger static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4662adfc5217SJeff Kirsher {
4663adfc5217SJeff Kirsher if (!NO_FCOE(bp))
4664adfc5217SJeff Kirsher /* FCoE */
466565565884SMerav Sicron if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4666adfc5217SJeff Kirsher 			/* we will fail the load process instead of marking
4667adfc5217SJeff Kirsher * NO_FCOE_FLAG
4668adfc5217SJeff Kirsher */
4669adfc5217SJeff Kirsher return -ENOMEM;
467055c11941SMerav Sicron
467155c11941SMerav Sicron return 0;
467255c11941SMerav Sicron }
467355c11941SMerav Sicron
bnx2x_alloc_fp_mem(struct bnx2x * bp)4674a8f47eb7Sstephen hemminger static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
467555c11941SMerav Sicron {
467655c11941SMerav Sicron int i;
467755c11941SMerav Sicron
467855c11941SMerav Sicron /* 1. Allocate FP for leading - fatal if error
467955c11941SMerav Sicron * 2. Allocate RSS - fix number of queues if error
468055c11941SMerav Sicron */
468155c11941SMerav Sicron
468255c11941SMerav Sicron /* leading */
468355c11941SMerav Sicron if (bnx2x_alloc_fp_mem_at(bp, 0))
468455c11941SMerav Sicron return -ENOMEM;
4685adfc5217SJeff Kirsher
4686adfc5217SJeff Kirsher /* RSS */
4687adfc5217SJeff Kirsher for_each_nondefault_eth_queue(bp, i)
4688adfc5217SJeff Kirsher if (bnx2x_alloc_fp_mem_at(bp, i))
4689adfc5217SJeff Kirsher break;
4690adfc5217SJeff Kirsher
4691adfc5217SJeff Kirsher /* handle memory failures */
4692adfc5217SJeff Kirsher if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4693adfc5217SJeff Kirsher int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4694adfc5217SJeff Kirsher
4695adfc5217SJeff Kirsher WARN_ON(delta < 0);
46964864a16aSYuval Mintz bnx2x_shrink_eth_fp(bp, delta);
469755c11941SMerav Sicron if (CNIC_SUPPORT(bp))
469855c11941SMerav Sicron /* move non eth FPs next to last eth FP
4699adfc5217SJeff Kirsher * must be done in that order
4700adfc5217SJeff Kirsher * FCOE_IDX < FWD_IDX < OOO_IDX
4701adfc5217SJeff Kirsher */
4702adfc5217SJeff Kirsher
4703adfc5217SJeff Kirsher 			/* move FCoE fp even if NO_FCOE_FLAG is on */
470465565884SMerav Sicron bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
470555c11941SMerav Sicron bp->num_ethernet_queues -= delta;
470655c11941SMerav Sicron bp->num_queues = bp->num_ethernet_queues +
470755c11941SMerav Sicron bp->num_cnic_queues;
4708adfc5217SJeff Kirsher BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4709adfc5217SJeff Kirsher bp->num_queues + delta, bp->num_queues);
4710adfc5217SJeff Kirsher }
4711adfc5217SJeff Kirsher
4712adfc5217SJeff Kirsher return 0;
4713adfc5217SJeff Kirsher }
4714adfc5217SJeff Kirsher
bnx2x_free_mem_bp(struct bnx2x * bp)4715adfc5217SJeff Kirsher void bnx2x_free_mem_bp(struct bnx2x *bp)
4716adfc5217SJeff Kirsher {
4717c3146eb6SDmitry Kravkov int i;
4718c3146eb6SDmitry Kravkov
4719c3146eb6SDmitry Kravkov for (i = 0; i < bp->fp_array_size; i++)
4720c3146eb6SDmitry Kravkov kfree(bp->fp[i].tpa_info);
4721adfc5217SJeff Kirsher kfree(bp->fp);
472215192a8cSBarak Witkowski kfree(bp->sp_objs);
472315192a8cSBarak Witkowski kfree(bp->fp_stats);
472465565884SMerav Sicron kfree(bp->bnx2x_txq);
4725adfc5217SJeff Kirsher kfree(bp->msix_table);
4726adfc5217SJeff Kirsher kfree(bp->ilt);
4727adfc5217SJeff Kirsher }
4728adfc5217SJeff Kirsher
bnx2x_alloc_mem_bp(struct bnx2x * bp)47290329aba1SBill Pemberton int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4730adfc5217SJeff Kirsher {
4731adfc5217SJeff Kirsher struct bnx2x_fastpath *fp;
4732adfc5217SJeff Kirsher struct msix_entry *tbl;
4733adfc5217SJeff Kirsher struct bnx2x_ilt *ilt;
4734adfc5217SJeff Kirsher int msix_table_size = 0;
473555c11941SMerav Sicron int fp_array_size, txq_array_size;
473615192a8cSBarak Witkowski int i;
4737adfc5217SJeff Kirsher
4738adfc5217SJeff Kirsher /*
4739adfc5217SJeff Kirsher 	 * The biggest MSI-X table we might need is the maximum number of fast
47402de67439SYuval Mintz 	 * path IGU SBs plus the default SB (for PF only).
4741adfc5217SJeff Kirsher */
47421ab4434cSAriel Elior msix_table_size = bp->igu_sb_cnt;
47431ab4434cSAriel Elior if (IS_PF(bp))
47441ab4434cSAriel Elior msix_table_size++;
47451ab4434cSAriel Elior BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4746adfc5217SJeff Kirsher
4747adfc5217SJeff Kirsher /* fp array: RSS plus CNIC related L2 queues */
474855c11941SMerav Sicron fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4749c3146eb6SDmitry Kravkov bp->fp_array_size = fp_array_size;
4750c3146eb6SDmitry Kravkov BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
475115192a8cSBarak Witkowski
4752c3146eb6SDmitry Kravkov fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4753adfc5217SJeff Kirsher if (!fp)
4754adfc5217SJeff Kirsher goto alloc_err;
4755c3146eb6SDmitry Kravkov for (i = 0; i < bp->fp_array_size; i++) {
475615192a8cSBarak Witkowski fp[i].tpa_info =
475715192a8cSBarak Witkowski kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
475815192a8cSBarak Witkowski sizeof(struct bnx2x_agg_info), GFP_KERNEL);
475915192a8cSBarak Witkowski if (!(fp[i].tpa_info))
476015192a8cSBarak Witkowski goto alloc_err;
476115192a8cSBarak Witkowski }
476215192a8cSBarak Witkowski
4763adfc5217SJeff Kirsher bp->fp = fp;
4764adfc5217SJeff Kirsher
476515192a8cSBarak Witkowski /* allocate sp objs */
4766c3146eb6SDmitry Kravkov bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
476715192a8cSBarak Witkowski GFP_KERNEL);
476815192a8cSBarak Witkowski if (!bp->sp_objs)
476915192a8cSBarak Witkowski goto alloc_err;
477015192a8cSBarak Witkowski
477115192a8cSBarak Witkowski /* allocate fp_stats */
4772c3146eb6SDmitry Kravkov bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
477315192a8cSBarak Witkowski GFP_KERNEL);
477415192a8cSBarak Witkowski if (!bp->fp_stats)
477515192a8cSBarak Witkowski goto alloc_err;
477615192a8cSBarak Witkowski
477765565884SMerav Sicron /* Allocate memory for the transmission queues array */
477855c11941SMerav Sicron txq_array_size =
477955c11941SMerav Sicron BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
478055c11941SMerav Sicron BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
478155c11941SMerav Sicron
478255c11941SMerav Sicron bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
478355c11941SMerav Sicron GFP_KERNEL);
478465565884SMerav Sicron if (!bp->bnx2x_txq)
478565565884SMerav Sicron goto alloc_err;
478665565884SMerav Sicron
4787adfc5217SJeff Kirsher /* msix table */
478801e23742SThomas Meyer tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4789adfc5217SJeff Kirsher if (!tbl)
4790adfc5217SJeff Kirsher goto alloc_err;
4791adfc5217SJeff Kirsher bp->msix_table = tbl;
4792adfc5217SJeff Kirsher
4793adfc5217SJeff Kirsher /* ilt */
4794adfc5217SJeff Kirsher ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4795adfc5217SJeff Kirsher if (!ilt)
4796adfc5217SJeff Kirsher goto alloc_err;
4797adfc5217SJeff Kirsher bp->ilt = ilt;
4798adfc5217SJeff Kirsher
4799adfc5217SJeff Kirsher return 0;
4800adfc5217SJeff Kirsher alloc_err:
4801adfc5217SJeff Kirsher bnx2x_free_mem_bp(bp);
4802adfc5217SJeff Kirsher return -ENOMEM;
4803adfc5217SJeff Kirsher }
4804adfc5217SJeff Kirsher
bnx2x_reload_if_running(struct net_device * dev)4805adfc5217SJeff Kirsher int bnx2x_reload_if_running(struct net_device *dev)
4806adfc5217SJeff Kirsher {
4807adfc5217SJeff Kirsher struct bnx2x *bp = netdev_priv(dev);
4808adfc5217SJeff Kirsher
4809adfc5217SJeff Kirsher if (unlikely(!netif_running(dev)))
4810adfc5217SJeff Kirsher return 0;
4811adfc5217SJeff Kirsher
48125d07d868SYuval Mintz bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4813adfc5217SJeff Kirsher return bnx2x_nic_load(bp, LOAD_NORMAL);
4814adfc5217SJeff Kirsher }
4815adfc5217SJeff Kirsher
bnx2x_get_cur_phy_idx(struct bnx2x * bp)4816adfc5217SJeff Kirsher int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4817adfc5217SJeff Kirsher {
4818adfc5217SJeff Kirsher u32 sel_phy_idx = 0;
4819adfc5217SJeff Kirsher if (bp->link_params.num_phys <= 1)
4820adfc5217SJeff Kirsher return INT_PHY;
4821adfc5217SJeff Kirsher
4822adfc5217SJeff Kirsher if (bp->link_vars.link_up) {
4823adfc5217SJeff Kirsher sel_phy_idx = EXT_PHY1;
4824adfc5217SJeff Kirsher /* In case link is SERDES, check if the EXT_PHY2 is the one */
4825adfc5217SJeff Kirsher if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4826adfc5217SJeff Kirsher (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4827adfc5217SJeff Kirsher sel_phy_idx = EXT_PHY2;
4828adfc5217SJeff Kirsher } else {
4829adfc5217SJeff Kirsher
4830adfc5217SJeff Kirsher switch (bnx2x_phy_selection(&bp->link_params)) {
4831adfc5217SJeff Kirsher case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4832adfc5217SJeff Kirsher case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4833adfc5217SJeff Kirsher case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4834adfc5217SJeff Kirsher sel_phy_idx = EXT_PHY1;
4835adfc5217SJeff Kirsher break;
4836adfc5217SJeff Kirsher case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4837adfc5217SJeff Kirsher case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4838adfc5217SJeff Kirsher sel_phy_idx = EXT_PHY2;
4839adfc5217SJeff Kirsher break;
4840adfc5217SJeff Kirsher }
4841adfc5217SJeff Kirsher }
4842adfc5217SJeff Kirsher
4843adfc5217SJeff Kirsher return sel_phy_idx;
4844adfc5217SJeff Kirsher }
bnx2x_get_link_cfg_idx(struct bnx2x * bp)4845adfc5217SJeff Kirsher int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4846adfc5217SJeff Kirsher {
4847adfc5217SJeff Kirsher u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4848adfc5217SJeff Kirsher /*
48492de67439SYuval Mintz 	 * The selected active PHY is always the one after swapping (in case PHY
4850adfc5217SJeff Kirsher 	 * swapping is enabled), so when swapping is enabled we need to reverse
4851adfc5217SJeff Kirsher 	 * the configuration.
4852adfc5217SJeff Kirsher */
4853adfc5217SJeff Kirsher
4854adfc5217SJeff Kirsher if (bp->link_params.multi_phy_config &
4855adfc5217SJeff Kirsher PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4856adfc5217SJeff Kirsher if (sel_phy_idx == EXT_PHY1)
4857adfc5217SJeff Kirsher sel_phy_idx = EXT_PHY2;
4858adfc5217SJeff Kirsher else if (sel_phy_idx == EXT_PHY2)
4859adfc5217SJeff Kirsher sel_phy_idx = EXT_PHY1;
4860adfc5217SJeff Kirsher }
4861adfc5217SJeff Kirsher return LINK_CONFIG_IDX(sel_phy_idx);
4862adfc5217SJeff Kirsher }
4863adfc5217SJeff Kirsher
486455c11941SMerav Sicron #ifdef NETDEV_FCOE_WWNN
bnx2x_fcoe_get_wwn(struct net_device * dev,u64 * wwn,int type)4865adfc5217SJeff Kirsher int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4866adfc5217SJeff Kirsher {
4867adfc5217SJeff Kirsher struct bnx2x *bp = netdev_priv(dev);
4868adfc5217SJeff Kirsher struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4869adfc5217SJeff Kirsher
4870adfc5217SJeff Kirsher switch (type) {
4871adfc5217SJeff Kirsher case NETDEV_FCOE_WWNN:
4872adfc5217SJeff Kirsher *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4873adfc5217SJeff Kirsher cp->fcoe_wwn_node_name_lo);
4874adfc5217SJeff Kirsher break;
4875adfc5217SJeff Kirsher case NETDEV_FCOE_WWPN:
4876adfc5217SJeff Kirsher *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4877adfc5217SJeff Kirsher cp->fcoe_wwn_port_name_lo);
4878adfc5217SJeff Kirsher break;
4879adfc5217SJeff Kirsher default:
488051c1a580SMerav Sicron BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4881adfc5217SJeff Kirsher return -EINVAL;
4882adfc5217SJeff Kirsher }
4883adfc5217SJeff Kirsher
4884adfc5217SJeff Kirsher return 0;
4885adfc5217SJeff Kirsher }
4886adfc5217SJeff Kirsher #endif
4887adfc5217SJeff Kirsher
4888adfc5217SJeff Kirsher /* called with rtnl_lock */
bnx2x_change_mtu(struct net_device * dev,int new_mtu)4889adfc5217SJeff Kirsher int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4890adfc5217SJeff Kirsher {
4891adfc5217SJeff Kirsher struct bnx2x *bp = netdev_priv(dev);
4892adfc5217SJeff Kirsher
48930650c0b8SYuval Mintz if (pci_num_vf(bp->pdev)) {
48940650c0b8SYuval Mintz DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
48950650c0b8SYuval Mintz return -EPERM;
48960650c0b8SYuval Mintz }
48970650c0b8SYuval Mintz
4898adfc5217SJeff Kirsher if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
489951c1a580SMerav Sicron BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4900adfc5217SJeff Kirsher return -EAGAIN;
4901adfc5217SJeff Kirsher }
4902adfc5217SJeff Kirsher
4903adfc5217SJeff Kirsher /* This does not race with packet allocation
4904adfc5217SJeff Kirsher * because the actual alloc size is
4905adfc5217SJeff Kirsher * only updated as part of load
4906adfc5217SJeff Kirsher */
4907adfc5217SJeff Kirsher dev->mtu = new_mtu;
4908adfc5217SJeff Kirsher
49093c3def5fSMichael Chan if (!bnx2x_mtu_allows_gro(new_mtu))
49103c3def5fSMichael Chan dev->features &= ~NETIF_F_GRO_HW;
49113c3def5fSMichael Chan
4912230d00ebSYuval Mintz if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4913230d00ebSYuval Mintz SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4914230d00ebSYuval Mintz
4915adfc5217SJeff Kirsher return bnx2x_reload_if_running(dev);
4916adfc5217SJeff Kirsher }
4917adfc5217SJeff Kirsher
bnx2x_fix_features(struct net_device * dev,netdev_features_t features)4918c8f44affSMichał Mirosław netdev_features_t bnx2x_fix_features(struct net_device *dev,
4919c8f44affSMichał Mirosław netdev_features_t features)
4920adfc5217SJeff Kirsher {
4921adfc5217SJeff Kirsher struct bnx2x *bp = netdev_priv(dev);
4922adfc5217SJeff Kirsher
4923909d9faaSYuval Mintz if (pci_num_vf(bp->pdev)) {
4924909d9faaSYuval Mintz netdev_features_t changed = dev->features ^ features;
4925909d9faaSYuval Mintz
4926909d9faaSYuval Mintz /* Revert the requested changes in features if they
4927909d9faaSYuval Mintz * would require internal reload of PF in bnx2x_set_features().
4928909d9faaSYuval Mintz */
4929909d9faaSYuval Mintz if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4930909d9faaSYuval Mintz features &= ~NETIF_F_RXCSUM;
4931909d9faaSYuval Mintz features |= dev->features & NETIF_F_RXCSUM;
4932909d9faaSYuval Mintz }
4933909d9faaSYuval Mintz
4934909d9faaSYuval Mintz if (changed & NETIF_F_LOOPBACK) {
4935909d9faaSYuval Mintz features &= ~NETIF_F_LOOPBACK;
4936909d9faaSYuval Mintz features |= dev->features & NETIF_F_LOOPBACK;
4937909d9faaSYuval Mintz }
4938909d9faaSYuval Mintz }
4939909d9faaSYuval Mintz
4940adfc5217SJeff Kirsher /* TPA requires Rx CSUM offloading */
49413c3def5fSMichael Chan if (!(features & NETIF_F_RXCSUM))
4942adfc5217SJeff Kirsher features &= ~NETIF_F_LRO;
49433c3def5fSMichael Chan
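	/* HW GRO depends on SW GRO being enabled and on an MTU that still
	 * allows aggregation; when HW GRO remains enabled it takes precedence
	 * over LRO.
	 */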
49443c3def5fSMichael Chan if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
49453c3def5fSMichael Chan features &= ~NETIF_F_GRO_HW;
49463c3def5fSMichael Chan if (features & NETIF_F_GRO_HW)
49473c3def5fSMichael Chan features &= ~NETIF_F_LRO;
4948adfc5217SJeff Kirsher
4949adfc5217SJeff Kirsher return features;
4950adfc5217SJeff Kirsher }
4951adfc5217SJeff Kirsher
bnx2x_set_features(struct net_device * dev,netdev_features_t features)4952c8f44affSMichał Mirosław int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4953adfc5217SJeff Kirsher {
4954adfc5217SJeff Kirsher struct bnx2x *bp = netdev_priv(dev);
4955f8dcb5e3SMichal Schmidt netdev_features_t changes = features ^ dev->features;
4956adfc5217SJeff Kirsher bool bnx2x_reload = false;
4957f8dcb5e3SMichal Schmidt int rc;
4958621b4d66SDmitry Kravkov
4959909d9faaSYuval Mintz /* VFs or non SRIOV PFs should be able to change loopback feature */
4960909d9faaSYuval Mintz if (!pci_num_vf(bp->pdev)) {
4961adfc5217SJeff Kirsher if (features & NETIF_F_LOOPBACK) {
4962adfc5217SJeff Kirsher if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4963adfc5217SJeff Kirsher bp->link_params.loopback_mode = LOOPBACK_BMAC;
4964adfc5217SJeff Kirsher bnx2x_reload = true;
4965adfc5217SJeff Kirsher }
4966adfc5217SJeff Kirsher } else {
4967adfc5217SJeff Kirsher if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4968adfc5217SJeff Kirsher bp->link_params.loopback_mode = LOOPBACK_NONE;
4969adfc5217SJeff Kirsher bnx2x_reload = true;
4970adfc5217SJeff Kirsher }
4971adfc5217SJeff Kirsher }
4972909d9faaSYuval Mintz }
4973adfc5217SJeff Kirsher
49743c3def5fSMichael Chan /* Don't care about GRO changes */
4975f8dcb5e3SMichal Schmidt changes &= ~NETIF_F_GRO;
4976aebf6244SDmitry Kravkov
49778802f579SEric Dumazet if (changes)
4978adfc5217SJeff Kirsher bnx2x_reload = true;
49798802f579SEric Dumazet
4980adfc5217SJeff Kirsher if (bnx2x_reload) {
4981f8dcb5e3SMichal Schmidt if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4982f8dcb5e3SMichal Schmidt dev->features = features;
4983f8dcb5e3SMichal Schmidt rc = bnx2x_reload_if_running(dev);
4984f8dcb5e3SMichal Schmidt return rc ? rc : 1;
4985f8dcb5e3SMichal Schmidt }
4986adfc5217SJeff Kirsher /* else: bnx2x_nic_load() will be called at end of recovery */
4987adfc5217SJeff Kirsher }
4988adfc5217SJeff Kirsher
4989adfc5217SJeff Kirsher return 0;
4990adfc5217SJeff Kirsher }
4991adfc5217SJeff Kirsher
bnx2x_tx_timeout(struct net_device * dev,unsigned int txqueue)49920290bd29SMichael S. Tsirkin void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
4993adfc5217SJeff Kirsher {
4994adfc5217SJeff Kirsher struct bnx2x *bp = netdev_priv(dev);
4995adfc5217SJeff Kirsher
49961b40428cSSudarsana Reddy Kalluru 	/* We want the dump information to be logged,
49971b40428cSSudarsana Reddy Kalluru * but calling bnx2x_panic() would kill all chances of recovery.
49981b40428cSSudarsana Reddy Kalluru */
4999adfc5217SJeff Kirsher if (!bp->panic)
50001b40428cSSudarsana Reddy Kalluru #ifndef BNX2X_STOP_ON_ERROR
50011b40428cSSudarsana Reddy Kalluru bnx2x_panic_dump(bp, false);
50021b40428cSSudarsana Reddy Kalluru #else
5003adfc5217SJeff Kirsher bnx2x_panic();
5004adfc5217SJeff Kirsher #endif
5005adfc5217SJeff Kirsher
5006adfc5217SJeff Kirsher 	/* This allows the netif to be shut down gracefully before resetting */
5007230bb0f3SYuval Mintz bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
5008adfc5217SJeff Kirsher }
5009adfc5217SJeff Kirsher
bnx2x_suspend(struct device * dev_d)50104ced637bSVaibhav Gupta static int __maybe_unused bnx2x_suspend(struct device *dev_d)
5011adfc5217SJeff Kirsher {
50124ced637bSVaibhav Gupta struct pci_dev *pdev = to_pci_dev(dev_d);
5013adfc5217SJeff Kirsher struct net_device *dev = pci_get_drvdata(pdev);
5014adfc5217SJeff Kirsher struct bnx2x *bp;
5015adfc5217SJeff Kirsher
5016adfc5217SJeff Kirsher if (!dev) {
5017adfc5217SJeff Kirsher dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5018adfc5217SJeff Kirsher return -ENODEV;
5019adfc5217SJeff Kirsher }
5020adfc5217SJeff Kirsher bp = netdev_priv(dev);
5021adfc5217SJeff Kirsher
5022adfc5217SJeff Kirsher rtnl_lock();
5023adfc5217SJeff Kirsher
5024adfc5217SJeff Kirsher if (!netif_running(dev)) {
5025adfc5217SJeff Kirsher rtnl_unlock();
5026adfc5217SJeff Kirsher return 0;
5027adfc5217SJeff Kirsher }
5028adfc5217SJeff Kirsher
5029adfc5217SJeff Kirsher netif_device_detach(dev);
5030adfc5217SJeff Kirsher
50315d07d868SYuval Mintz bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5032adfc5217SJeff Kirsher
5033adfc5217SJeff Kirsher rtnl_unlock();
5034adfc5217SJeff Kirsher
5035adfc5217SJeff Kirsher return 0;
5036adfc5217SJeff Kirsher }
5037adfc5217SJeff Kirsher
bnx2x_resume(struct device * dev_d)50384ced637bSVaibhav Gupta static int __maybe_unused bnx2x_resume(struct device *dev_d)
5039adfc5217SJeff Kirsher {
50404ced637bSVaibhav Gupta struct pci_dev *pdev = to_pci_dev(dev_d);
5041adfc5217SJeff Kirsher struct net_device *dev = pci_get_drvdata(pdev);
5042adfc5217SJeff Kirsher struct bnx2x *bp;
5043adfc5217SJeff Kirsher int rc;
5044adfc5217SJeff Kirsher
5045adfc5217SJeff Kirsher if (!dev) {
5046adfc5217SJeff Kirsher dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5047adfc5217SJeff Kirsher return -ENODEV;
5048adfc5217SJeff Kirsher }
5049adfc5217SJeff Kirsher bp = netdev_priv(dev);
5050adfc5217SJeff Kirsher
5051adfc5217SJeff Kirsher if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
505251c1a580SMerav Sicron BNX2X_ERR("Handling parity error recovery. Try again later\n");
5053adfc5217SJeff Kirsher return -EAGAIN;
5054adfc5217SJeff Kirsher }
5055adfc5217SJeff Kirsher
5056adfc5217SJeff Kirsher rtnl_lock();
5057adfc5217SJeff Kirsher
5058adfc5217SJeff Kirsher if (!netif_running(dev)) {
5059adfc5217SJeff Kirsher rtnl_unlock();
5060adfc5217SJeff Kirsher return 0;
5061adfc5217SJeff Kirsher }
5062adfc5217SJeff Kirsher
5063adfc5217SJeff Kirsher netif_device_attach(dev);
5064adfc5217SJeff Kirsher
5065adfc5217SJeff Kirsher rc = bnx2x_nic_load(bp, LOAD_OPEN);
5066adfc5217SJeff Kirsher
5067adfc5217SJeff Kirsher rtnl_unlock();
5068adfc5217SJeff Kirsher
5069adfc5217SJeff Kirsher return rc;
5070adfc5217SJeff Kirsher }
5071adfc5217SJeff Kirsher
50724ced637bSVaibhav Gupta SIMPLE_DEV_PM_OPS(bnx2x_pm_ops, bnx2x_suspend, bnx2x_resume);
50734ced637bSVaibhav Gupta
bnx2x_set_ctx_validation(struct bnx2x * bp,struct eth_context * cxt,u32 cid)5074adfc5217SJeff Kirsher void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5075adfc5217SJeff Kirsher u32 cid)
5076adfc5217SJeff Kirsher {
5077b9871bcfSAriel Elior if (!cxt) {
5078b9871bcfSAriel Elior BNX2X_ERR("bad context pointer %p\n", cxt);
5079b9871bcfSAriel Elior return;
5080b9871bcfSAriel Elior }
5081b9871bcfSAriel Elior
5082adfc5217SJeff Kirsher /* ustorm cxt validation */
5083adfc5217SJeff Kirsher cxt->ustorm_ag_context.cdu_usage =
5084adfc5217SJeff Kirsher CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5085adfc5217SJeff Kirsher CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5086adfc5217SJeff Kirsher /* xcontext validation */
5087adfc5217SJeff Kirsher cxt->xstorm_ag_context.cdu_reserved =
5088adfc5217SJeff Kirsher CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5089adfc5217SJeff Kirsher CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5090adfc5217SJeff Kirsher }
5091adfc5217SJeff Kirsher
storm_memset_hc_timeout(struct bnx2x * bp,u8 port,u8 fw_sb_id,u8 sb_index,u8 ticks)50921191cb83SEric Dumazet static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5093adfc5217SJeff Kirsher u8 fw_sb_id, u8 sb_index,
5094adfc5217SJeff Kirsher u8 ticks)
5095adfc5217SJeff Kirsher {
5096adfc5217SJeff Kirsher u32 addr = BAR_CSTRORM_INTMEM +
5097adfc5217SJeff Kirsher CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5098adfc5217SJeff Kirsher REG_WR8(bp, addr, ticks);
509951c1a580SMerav Sicron DP(NETIF_MSG_IFUP,
510051c1a580SMerav Sicron "port %x fw_sb_id %d sb_index %d ticks %d\n",
5101adfc5217SJeff Kirsher port, fw_sb_id, sb_index, ticks);
5102adfc5217SJeff Kirsher }
5103adfc5217SJeff Kirsher
storm_memset_hc_disable(struct bnx2x * bp,u8 port,u16 fw_sb_id,u8 sb_index,u8 disable)51041191cb83SEric Dumazet static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5105adfc5217SJeff Kirsher u16 fw_sb_id, u8 sb_index,
5106adfc5217SJeff Kirsher u8 disable)
5107adfc5217SJeff Kirsher {
5108adfc5217SJeff Kirsher u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5109adfc5217SJeff Kirsher u32 addr = BAR_CSTRORM_INTMEM +
5110adfc5217SJeff Kirsher CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
51110c14e5ceSAriel Elior u8 flags = REG_RD8(bp, addr);
5112adfc5217SJeff Kirsher /* clear and set */
5113adfc5217SJeff Kirsher flags &= ~HC_INDEX_DATA_HC_ENABLED;
5114adfc5217SJeff Kirsher flags |= enable_flag;
51150c14e5ceSAriel Elior REG_WR8(bp, addr, flags);
511651c1a580SMerav Sicron DP(NETIF_MSG_IFUP,
511751c1a580SMerav Sicron "port %x fw_sb_id %d sb_index %d disable %d\n",
5118adfc5217SJeff Kirsher port, fw_sb_id, sb_index, disable);
5119adfc5217SJeff Kirsher }
5120adfc5217SJeff Kirsher
bnx2x_update_coalesce_sb_index(struct bnx2x * bp,u8 fw_sb_id,u8 sb_index,u8 disable,u16 usec)5121adfc5217SJeff Kirsher void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5122adfc5217SJeff Kirsher u8 sb_index, u8 disable, u16 usec)
5123adfc5217SJeff Kirsher {
5124adfc5217SJeff Kirsher int port = BP_PORT(bp);
5125adfc5217SJeff Kirsher u8 ticks = usec / BNX2X_BTR;
5126adfc5217SJeff Kirsher
5127adfc5217SJeff Kirsher storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5128adfc5217SJeff Kirsher
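	/* Coalescing is disabled either when explicitly requested or when a
	 * zero interval is passed.
	 */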
5129adfc5217SJeff Kirsher disable = disable ? 1 : (usec ? 0 : 1);
5130adfc5217SJeff Kirsher storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5131adfc5217SJeff Kirsher }
5132230bb0f3SYuval Mintz
bnx2x_schedule_sp_rtnl(struct bnx2x * bp,enum sp_rtnl_flag flag,u32 verbose)5133230bb0f3SYuval Mintz void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5134230bb0f3SYuval Mintz u32 verbose)
5135230bb0f3SYuval Mintz {
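	/* set_bit() does not imply a memory barrier, so order the flag
	 * update with respect to the surrounding accesses before the
	 * sp_rtnl task is scheduled.
	 */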
51364e857c58SPeter Zijlstra smp_mb__before_atomic();
5137230bb0f3SYuval Mintz set_bit(flag, &bp->sp_rtnl_state);
51384e857c58SPeter Zijlstra smp_mb__after_atomic();
5139230bb0f3SYuval Mintz DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5140230bb0f3SYuval Mintz flag);
5141230bb0f3SYuval Mintz schedule_delayed_work(&bp->sp_rtnl_task, 0);
5142230bb0f3SYuval Mintz }
5143