/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"

static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  dma_addr_t mapping, u32 len, u16 rx_prod)
{
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_bd_ext *txbd1;
	struct tx_bd *txbd;
	u32 flags;
	u16 prod;

	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	/* Stash the RX producer so the TX completion handler knows how far
	 * the RX ring can be advanced once this buffer has been transmitted.
	 */
	tx_buf->rx_prod = rx_prod;

	/* First BD: address and length of the packet, flagged as a
	 * two-BD long-format descriptor.
	 */
	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		(2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW |
		TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = prod;
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	/* Second BD of the long format: XDP packets need no checksum,
	 * TSO, or CFA state, so every field is zero.
	 */
	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = cpu_to_le32(0);
	txbd1->tx_bd_mss = cpu_to_le32(0);
	txbd1->tx_bd_cfa_action = cpu_to_le32(0);
	txbd1->tx_bd_cfa_meta = cpu_to_le32(0);

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;
}
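/* TX completion handler for XDP_TX packets.  Each such packet occupies two
 * TX BDs (see bnxt_xmit_xdp() above), so the consumer index advances twice
 * per completed packet.  Transmitted RX buffers are recycled by ringing the
 * RX doorbell: if the TX ring has drained completely, the current RX
 * producer can be published as-is; otherwise the producer stashed in the
 * last completed tx_buf is used, so that buffers still queued behind
 * in-flight XDP_TX BDs are not handed back to the hardware prematurely.
 */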
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	u16 rx_prod;
	int i;

	for (i = 0; i < nr_pkts; i++) {
		last_tx_cons = tx_cons;
		tx_cons = NEXT_TX(tx_cons);
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;
	if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
		rx_prod = rxr->rx_prod;
	} else {
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		rx_prod = tx_buf->rx_prod;
	}
	writel(DB_KEY_RX | rx_prod, rxr->rx_doorbell);
}

/* returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct page *page, u8 **data_ptr, unsigned int *len,
		 u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	struct xdp_buff xdp;
	dma_addr_t mapping;
	void *orig_data;
	u32 tx_avail;
	u32 offset;
	u32 act;

	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	txr = rxr->bnapi->tx_ring;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	/* Build the xdp_buff; the headroom starts bp->rx_offset bytes
	 * before the packet data.
	 */
	xdp.data_hard_start = *data_ptr - offset;
	xdp.data = *data_ptr;
	xdp.data_end = *data_ptr + *len;
	orig_data = xdp.data;
	mapping = rx_buf->mapping - bp->rx_dma_offset;

	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);

	rcu_read_lock();
	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	rcu_read_unlock();

	tx_avail = bnxt_tx_avail(bp, txr);
	/* If the tx ring is not full, we must not update the rx producer yet
	 * because we may still be transmitting on some BDs.
	 */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;

	if (orig_data != xdp.data) {
		/* The program moved the start of the packet, e.g. via
		 * bpf_xdp_adjust_head().
		 */
		offset = xdp.data - xdp.data_hard_start;
		*data_ptr = xdp.data_hard_start + offset;
		*len = xdp.data_end - xdp.data;
	}
	switch (act) {
	case XDP_PASS:
		return false;

	case XDP_TX:
		/* An XDP_TX packet needs two TX BDs; if the ring is too
		 * full, count it as an exception and recycle the buffer.
		 */
		if (tx_avail < 2) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		*event = BNXT_TX_EVENT;
		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);
		bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
			      NEXT_RX(rxr->rx_prod));
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* Fall thru */
	case XDP_ABORTED:
		trace_xdp_exception(bp->dev, xdp_prog, act);
		/* Fall thru */
	case XDP_DROP:
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}
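/* Install or tear down an XDP program.  Enabling XDP requires combined
 * rx/tx channels, an MTU that fits a single page-mode buffer, and one
 * dedicated XDP TX ring per RX ring; page-mode RX in turn disables
 * aggregation rings and LRO.  The device is closed and reopened around
 * the switch if it is running.
 */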
/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, rc, tc;
	struct bpf_prog *old;

	if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = netdev_get_num_tc(dev);
	if (!tc)
		tc = 1;
	rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
				tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}
	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (prog) {
		bnxt_set_rx_skb_mode(bp, true);
	} else {
		int rx, tx;

		/* Leaving XDP mode: aggregation rings and LRO are
		 * allowed again.
		 */
		bnxt_set_rx_skb_mode(bp, false);
		bnxt_get_max_rings(bp, &rx, &tx, true);
		if (rx > 1) {
			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features |= NETIF_F_LRO;
		}
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bp->num_stat_ctxs = bp->cp_nr_rings;
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}

/* ndo_xdp entry point: install/remove a program or report whether one
 * is attached.
 */
int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);
		break;
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!bp->xdp_prog;
		rc = 0;
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}
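/* A note on hookup (an assumption about code outside this file): bnxt_xdp()
 * is expected to be registered as the .ndo_xdp callback in bnxt.c's
 * net_device_ops, roughly:
 *
 *	static const struct net_device_ops bnxt_netdev_ops = {
 *		...
 *		.ndo_xdp	= bnxt_xdp,
 *	};
 */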