/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <net/page_pool.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"

struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
				   struct bnxt_tx_ring_info *txr,
				   dma_addr_t mapping, u32 len)
{
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_bd *txbd;
	u32 flags;
	u16 prod;

	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
		TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = prod;
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;
	return tx_buf;
}

static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			    dma_addr_t mapping, u32 len, u16 rx_prod)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
	tx_buf->rx_prod = rx_prod;
	tx_buf->action = XDP_TX;
}

static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
				     struct bnxt_tx_ring_info *txr,
				     dma_addr_t mapping, u32 len,
				     struct xdp_frame *xdpf)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
	tx_buf->action = XDP_REDIRECT;
	tx_buf->xdpf = xdpf;
	dma_unmap_addr_set(tx_buf, mapping, mapping);
	dma_unmap_len_set(tx_buf, len, 0);
}

void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	bool rx_doorbell_needed = false;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	int i;

	for (i = 0; i < nr_pkts; i++) {
		tx_buf = &txr->tx_buf_ring[tx_cons];

		if (tx_buf->action == XDP_REDIRECT) {
			struct pci_dev *pdev = bp->pdev;

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
			xdp_return_frame(tx_buf->xdpf);
			tx_buf->action = 0;
			tx_buf->xdpf = NULL;
		} else if (tx_buf->action == XDP_TX) {
			rx_doorbell_needed = true;
			last_tx_cons = tx_cons;
		}
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;
	if (rx_doorbell_needed) {
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
	}
}
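/* Note on the recycle scheme implemented above (a summary of the code,
 * not new behavior): an XDP_TX completion does not free the rx page.
 * Instead, tx_buf->rx_prod remembers the rx producer index that was
 * current when the packet was queued, and bnxt_tx_int_xdp() rings the
 * rx doorbell with the last such index once the tx side has finished
 * with the buffers.  XDP_REDIRECT completions, by contrast, own a
 * dma_map_single() mapping and an xdp_frame, both of which are released
 * in the completion loop above.
 */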
/* returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct page *page, u8 **data_ptr, unsigned int *len,
		 u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	struct xdp_buff xdp;
	dma_addr_t mapping;
	void *orig_data;
	u32 tx_avail;
	u32 offset;
	u32 act;

	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	mapping = rx_buf->mapping - bp->rx_dma_offset;
	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);

	txr = rxr->bnapi->tx_ring;
	xdp.data_hard_start = *data_ptr - offset;
	xdp.data = *data_ptr;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = *data_ptr + *len;
	xdp.rxq = &rxr->xdp_rxq;
	xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
	orig_data = xdp.data;

	rcu_read_lock();
	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	rcu_read_unlock();

	tx_avail = bnxt_tx_avail(bp, txr);
	/* If the tx ring is not full, we must not update the rx producer yet
	 * because we may still be transmitting on some BDs.
	 */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;

	*len = xdp.data_end - xdp.data;
	if (orig_data != xdp.data) {
		offset = xdp.data - xdp.data_hard_start;
		*data_ptr = xdp.data_hard_start + offset;
	}
	switch (act) {
	case XDP_PASS:
		return false;

	case XDP_TX:
		if (tx_avail < 1) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		*event = BNXT_TX_EVENT;
		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);
		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
				NEXT_RX(rxr->rx_prod));
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;
	case XDP_REDIRECT:
		/* if we are calling this here then we know that the
		 * redirect is coming from a frame received by the
		 * bnxt_en driver.
		 */
		dma_unmap_page_attrs(&pdev->dev, mapping,
				     PAGE_SIZE, bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);

		/* if we are unable to allocate a new buffer, abort and reuse */
		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			page_pool_recycle_direct(rxr->page_pool, page);
			return true;
		}

		*event |= BNXT_REDIRECT_EVENT;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(bp->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}
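/* Buffer ownership per verdict in bnxt_rx_xdp() (a summary of the
 * switch above, for reference):
 *
 *   XDP_PASS     - returns false; the caller builds an skb from the page.
 *   XDP_TX       - page is queued on the XDP tx ring and the rx slot is
 *                  refilled with the same page via bnxt_reuse_rx_data();
 *                  on a full tx ring the packet is dropped instead.
 *   XDP_REDIRECT - page is unmapped and handed to xdp_do_redirect(); a
 *                  fresh rx buffer is allocated first so the redirect
 *                  can be aborted cleanly if allocation fails.
 *   XDP_ABORTED/
 *   XDP_DROP     - page is recycled back into the same rx slot.
 */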
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
		  struct xdp_frame **frames, u32 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	dma_addr_t mapping;
	int drops = 0;
	int ring;
	int i;

	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    !bp->tx_nr_rings_xdp ||
	    !xdp_prog)
		return -EINVAL;

	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
	txr = &bp->tx_ring[ring];

	for (i = 0; i < num_frames; i++) {
		struct xdp_frame *xdp = frames[i];

		if (!txr || !bnxt_tx_avail(bp, txr) ||
		    !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) {
			xdp_return_frame_rx_napi(xdp);
			drops++;
			continue;
		}

		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
					 DMA_TO_DEVICE);

		if (dma_mapping_error(&pdev->dev, mapping)) {
			xdp_return_frame_rx_napi(xdp);
			drops++;
			continue;
		}
		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
	}

	if (flags & XDP_XMIT_FLUSH) {
		/* Sync BD data before updating doorbell */
		wmb();
		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	}

	return num_frames - drops;
}
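/* Note: bnxt_xdp_xmit() reports the number of frames actually queued
 * (num_frames - drops); frames it could not queue have already been
 * freed with xdp_return_frame_rx_napi().  The tx doorbell is written
 * only when the caller passes XDP_XMIT_FLUSH, so a batch of redirected
 * frames costs a single MMIO write.
 */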
/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, rc, tc;
	struct bpf_prog *old;

	if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = netdev_get_num_tc(dev);
	if (!tc)
		tc = 1;
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      true, tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}
	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (prog) {
		bnxt_set_rx_skb_mode(bp, true);
	} else {
		int rx, tx;

		bnxt_set_rx_skb_mode(bp, false);
		bnxt_get_max_rings(bp, &rx, &tx, true);
		if (rx > 1) {
			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features |= NETIF_F_LRO;
		}
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}

int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}
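/* For reference, these entry points are hooked up through net_device_ops
 * in bnxt.c (sketch only; the full ops table lives there):
 *
 *	static const struct net_device_ops bnxt_netdev_ops = {
 *		...
 *		.ndo_bpf	= bnxt_xdp,
 *		.ndo_xdp_xmit	= bnxt_xdp_xmit,
 *	};
 */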