/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <net/page_pool.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"

DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);

/* Fill one TX BD for an XDP buffer and advance the TX producer.  Returns the
 * software tx_buf entry so the caller can record action-specific state
 * (rx_prod for XDP_TX, the xdp_frame and DMA unmap info for XDP_REDIRECT).
 */
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
                                   struct bnxt_tx_ring_info *txr,
                                   dma_addr_t mapping, u32 len)
{
        struct bnxt_sw_tx_bd *tx_buf;
        struct tx_bd *txbd;
        u32 flags;
        u16 prod;

        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
        flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
                TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        txbd->tx_bd_opaque = prod;
        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;
        return tx_buf;
}

static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                            dma_addr_t mapping, u32 len, u16 rx_prod)
{
        struct bnxt_sw_tx_bd *tx_buf;

        tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
        tx_buf->rx_prod = rx_prod;
        tx_buf->action = XDP_TX;
}

static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
                                     struct bnxt_tx_ring_info *txr,
                                     dma_addr_t mapping, u32 len,
                                     struct xdp_frame *xdpf)
{
        struct bnxt_sw_tx_bd *tx_buf;

        tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
        tx_buf->action = XDP_REDIRECT;
        tx_buf->xdpf = xdpf;
        dma_unmap_addr_set(tx_buf, mapping, mapping);
        dma_unmap_len_set(tx_buf, len, 0);
}

/* Reclaim completed XDP TX descriptors.  XDP_REDIRECT frames are DMA-unmapped
 * and returned; for XDP_TX completions, the recycled RX producer recorded at
 * transmit time is written to the RX doorbell once, after the loop.
 */
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        bool rx_doorbell_needed = false;
        struct bnxt_sw_tx_bd *tx_buf;
        u16 tx_cons = txr->tx_cons;
        u16 last_tx_cons = tx_cons;
        int i;

        for (i = 0; i < nr_pkts; i++) {
                tx_buf = &txr->tx_buf_ring[tx_cons];

                if (tx_buf->action == XDP_REDIRECT) {
                        struct pci_dev *pdev = bp->pdev;

                        dma_unmap_single(&pdev->dev,
                                         dma_unmap_addr(tx_buf, mapping),
                                         dma_unmap_len(tx_buf, len),
                                         DMA_TO_DEVICE);
                        xdp_return_frame(tx_buf->xdpf);
                        tx_buf->action = 0;
                        tx_buf->xdpf = NULL;
                } else if (tx_buf->action == XDP_TX) {
                        rx_doorbell_needed = true;
                        last_tx_cons = tx_cons;
                }
                tx_cons = NEXT_TX(tx_cons);
        }
        txr->tx_cons = tx_cons;
        if (rx_doorbell_needed) {
                tx_buf = &txr->tx_buf_ring[last_tx_cons];
                bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
        }
}

/* returns the following:
 * true  - packet consumed by XDP and new buffer is allocated.
 * false - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
                 struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
{
        struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_rx_bd *rx_buf;
        struct pci_dev *pdev;
        struct xdp_buff xdp;
        dma_addr_t mapping;
        void *orig_data;
        u32 tx_avail;
        u32 offset;
        u32 act;

        if (!xdp_prog)
                return false;

        pdev = bp->pdev;
        rx_buf = &rxr->rx_buf_ring[cons];
        offset = bp->rx_offset;

        mapping = rx_buf->mapping - bp->rx_dma_offset;
        dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);

        txr = rxr->bnapi->tx_ring;
        /* The ring is in BNXT_RX_PAGE_MODE() when XDP is enabled, so each
         * RX buffer is a full page.
         */
        xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
        xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
        orig_data = xdp.data;

        act = bpf_prog_run_xdp(xdp_prog, &xdp);

        tx_avail = bnxt_tx_avail(bp, txr);
        /* If not all TX BDs are free, we must not update the RX producer yet
         * because some of those BDs may still be transmitting recycled RX
         * buffers.
         */
        if (tx_avail != bp->tx_ring_size)
                *event &= ~BNXT_RX_EVENT;

        *len = xdp.data_end - xdp.data;
        if (orig_data != xdp.data) {
                offset = xdp.data - xdp.data_hard_start;
                *data_ptr = xdp.data_hard_start + offset;
        }
        switch (act) {
        case XDP_PASS:
                return false;

        case XDP_TX:
                if (tx_avail < 1) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
                        bnxt_reuse_rx_data(rxr, cons, page);
                        return true;
                }

                *event = BNXT_TX_EVENT;
                dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
                                           bp->rx_dir);
                __bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
                                NEXT_RX(rxr->rx_prod));
                bnxt_reuse_rx_data(rxr, cons, page);
                return true;
        case XDP_REDIRECT:
                /* if we are calling this here then we know that the
                 * redirect is coming from a frame received by the
                 * bnxt_en driver.
                 */
                dma_unmap_page_attrs(&pdev->dev, mapping,
                                     PAGE_SIZE, bp->rx_dir,
                                     DMA_ATTR_WEAK_ORDERING);

                /* if we are unable to allocate a new buffer, abort and reuse */
                if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
                        bnxt_reuse_rx_data(rxr, cons, page);
                        return true;
                }

                if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
                        page_pool_recycle_direct(rxr->page_pool, page);
                        return true;
                }

                *event |= BNXT_REDIRECT_EVENT;
                break;
        default:
                bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(bp->dev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                bnxt_reuse_rx_data(rxr, cons, page);
                break;
        }
        return true;
}
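
/* The RX handler above is consumed by the core receive path.  As an
 * illustration only (the actual call site is bnxt_rx_pkt() in bnxt.c and may
 * differ in detail), a caller is expected to use the return value like this:
 *
 *	if (bnxt_rx_xdp(bp, rxr, cons, page, &data_ptr, &len, &event))
 *		return 1;	// buffer consumed by XDP; skip the skb path
 *
 * i.e. an skb is only built and passed up the stack when this function
 * returns false.
 */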

/* ndo_xdp_xmit handler: map and queue up to @num_frames redirected frames on
 * an XDP TX ring selected by the current CPU.  The TX doorbell is written
 * only when the XDP_XMIT_FLUSH flag is set.
 */
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
                  struct xdp_frame **frames, u32 flags)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        dma_addr_t mapping;
        int nxmit = 0;
        int ring;
        int i;

        if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
            !bp->tx_nr_rings_xdp ||
            !xdp_prog)
                return -EINVAL;

        ring = smp_processor_id() % bp->tx_nr_rings_xdp;
        txr = &bp->tx_ring[ring];

        if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
                return -EINVAL;

        if (static_branch_unlikely(&bnxt_xdp_locking_key))
                spin_lock(&txr->xdp_tx_lock);

        for (i = 0; i < num_frames; i++) {
                struct xdp_frame *xdp = frames[i];

                if (!bnxt_tx_avail(bp, txr))
                        break;

                mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
                                         DMA_TO_DEVICE);

                if (dma_mapping_error(&pdev->dev, mapping))
                        break;

                __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
                nxmit++;
        }

        if (flags & XDP_XMIT_FLUSH) {
                /* Sync BD data before updating doorbell */
                wmb();
                bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
        }

        if (static_branch_unlikely(&bnxt_xdp_locking_key))
                spin_unlock(&txr->xdp_tx_lock);

        return nxmit;
}

/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
        struct net_device *dev = bp->dev;
        int tx_xdp = 0, rc, tc;
        struct bpf_prog *old;

        if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
                netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
                            bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
                return -EOPNOTSUPP;
        }
        if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
                netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
                return -EOPNOTSUPP;
        }
        if (prog)
                tx_xdp = bp->rx_nr_rings;

        tc = netdev_get_num_tc(dev);
        if (!tc)
                tc = 1;
        rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
                              true, tc, tx_xdp);
        if (rc) {
                netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
                return rc;
        }
        if (netif_running(dev))
                bnxt_close_nic(bp, true, false);

        old = xchg(&bp->xdp_prog, prog);
        if (old)
                bpf_prog_put(old);

        if (prog) {
                bnxt_set_rx_skb_mode(bp, true);
        } else {
                int rx, tx;

                bnxt_set_rx_skb_mode(bp, false);
                bnxt_get_max_rings(bp, &rx, &tx, true);
                if (rx > 1) {
                        bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
                        bp->dev->hw_features |= NETIF_F_LRO;
                }
        }
        bp->tx_nr_rings_xdp = tx_xdp;
        bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
        bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
        bnxt_set_tpa_flags(bp);
        bnxt_set_ring_params(bp);

        if (netif_running(dev))
                return bnxt_open_nic(bp, true, false);

        return 0;
}

/* ndo_bpf handler: dispatch XDP setup commands from the stack. */
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                rc = bnxt_xdp_set(bp, xdp->prog);
                break;
        default:
                rc = -EINVAL;
                break;
        }
        return rc;
}
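
/* The entry points in this file are hooked into the driver's net_device_ops
 * in bnxt.c.  Sketch of the relevant fields, shown here for illustration only
 * (the authoritative table is bnxt_netdev_ops in bnxt.c):
 *
 *	static const struct net_device_ops bnxt_netdev_ops = {
 *		...
 *		.ndo_bpf	= bnxt_xdp,
 *		.ndo_xdp_xmit	= bnxt_xdp_xmit,
 *		...
 *	};
 *
 * bnxt_xdp() is typically reached when userspace attaches or detaches a
 * program, e.g. "ip link set dev <ifname> xdp obj prog.o sec xdp".
 */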