/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net.h
 * Declarations for Netronome network device driver.
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#ifndef _NFP_NET_H_
#define _NFP_NET_H_

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <net/xdp.h>

#include "nfp_net_ctrl.h"

#define nn_pr(nn, lvl, fmt, args...)					\
	({								\
		struct nfp_net *__nn = (nn);				\
									\
		if (__nn->dp.netdev)					\
			netdev_printk(lvl, __nn->dp.netdev, fmt, ## args); \
		else							\
			dev_printk(lvl, __nn->dp.dev, "ctrl: " fmt, ## args); \
	})

#define nn_err(nn, fmt, args...)	nn_pr(nn, KERN_ERR, fmt, ## args)
#define nn_warn(nn, fmt, args...)	nn_pr(nn, KERN_WARNING, fmt, ## args)
#define nn_info(nn, fmt, args...)	nn_pr(nn, KERN_INFO, fmt, ## args)
#define nn_dbg(nn, fmt, args...)	nn_pr(nn, KERN_DEBUG, fmt, ## args)

#define nn_dp_warn(dp, fmt, args...)					\
	({								\
		struct nfp_net_dp *__dp = (dp);				\
									\
		if (unlikely(net_ratelimit())) {		 	\
			if (__dp->netdev)				\
				netdev_warn(__dp->netdev, fmt, ## args); \
			else						\
				dev_warn(__dp->dev, fmt, ## args);	\
		}							\
	})

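/* Illustrative usage only (not part of the driver API): the helpers above
 * log via the netdev for data vNICs and via the PCI device for control
 * vNICs, so a hypothetical caller would simply do e.g.:
 *
 *	nn_warn(nn, "link configuration failed: %d\n", err);
 *	nn_dp_warn(&nn->dp, "TX ring %d is full\n", idx);
 */
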
/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT	5

/* Interval for reading offloaded filter stats */
#define NFP_NET_STAT_POLL_IVL	msecs_to_jiffies(100)

/* Bar allocation */
#define NFP_NET_CTRL_BAR	0
#define NFP_NET_Q0_BAR		2
#define NFP_NET_Q1_BAR		4	/* OBSOLETE */

/* Max bits in DMA address */
#define NFP_NET_MAX_DMA_BITS	40

/* Default size for MTU and freelist buffer sizes */
#define NFP_NET_DEFAULT_MTU	1500

/* Maximum number of bytes prepended to a packet */
#define NFP_NET_MAX_PREPEND	64

/* Interrupt definitions */
#define NFP_NET_NON_Q_VECTORS	2
#define NFP_NET_IRQ_LSC_IDX	0
#define NFP_NET_IRQ_EXN_IDX	1
#define NFP_NET_MIN_VNIC_IRQS	(NFP_NET_NON_Q_VECTORS + 1)

/* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS	64	/* Max. # of Tx rings per device */
#define NFP_NET_MAX_RX_RINGS	64	/* Max. # of Rx rings per device */
#define NFP_NET_MAX_R_VECS	(NFP_NET_MAX_TX_RINGS > NFP_NET_MAX_RX_RINGS ? \
				 NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
#define NFP_NET_MAX_IRQS	(NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)

#define NFP_NET_MIN_TX_DESCS	256	/* Min. # of Tx descs per ring */
#define NFP_NET_MIN_RX_DESCS	256	/* Min. # of Rx descs per ring */
#define NFP_NET_MAX_TX_DESCS	(256 * 1024) /* Max. # of Tx descs per ring */
#define NFP_NET_MAX_RX_DESCS	(256 * 1024) /* Max. # of Rx descs per ring */

#define NFP_NET_TX_DESCS_DEFAULT 4096	/* Default # of Tx descs per ring */
#define NFP_NET_RX_DESCS_DEFAULT 4096	/* Default # of Rx descs per ring */

#define NFP_NET_FL_BATCH	16	/* Add freelist in this Batch size */
#define NFP_NET_XDP_MAX_COMPLETE 2048	/* XDP bufs to reclaim in NAPI poll */

/* Offload definitions */
#define NFP_NET_N_VXLAN_PORTS	(NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))

#define NFP_NET_RX_BUF_HEADROOM	(NET_SKB_PAD + NET_IP_ALIGN)
#define NFP_NET_RX_BUF_NON_DATA	(NFP_NET_RX_BUF_HEADROOM +		\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* Forward declarations */
struct nfp_cpp;
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
struct nfp_port;

/* Convenience macro for wrapping descriptor index on ring size */
#define D_IDX(ring, idx)	((idx) & ((ring)->cnt - 1))

/* Convenience macro for writing dma address into RX/TX descriptors */
#define nfp_desc_set_dma_addr(desc, dma_addr)				\
	do {								\
		__typeof(desc) __d = (desc);				\
		dma_addr_t __addr = (dma_addr);				\
									\
		__d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));	\
		__d->dma_addr_hi = upper_32_bits(__addr) & 0xff;	\
	} while (0)

/* TX descriptor format */

#define PCIE_DESC_TX_EOP		BIT(7)
#define PCIE_DESC_TX_OFFSET_MASK	GENMASK(6, 0)
#define PCIE_DESC_TX_MSS_MASK		GENMASK(13, 0)

/* Flags in the host TX descriptor */
#define PCIE_DESC_TX_CSUM		BIT(7)
#define PCIE_DESC_TX_IP4_CSUM		BIT(6)
#define PCIE_DESC_TX_TCP_CSUM		BIT(5)
#define PCIE_DESC_TX_UDP_CSUM		BIT(4)
#define PCIE_DESC_TX_VLAN		BIT(3)
#define PCIE_DESC_TX_LSO		BIT(2)
#define PCIE_DESC_TX_ENCAP		BIT(1)
#define PCIE_DESC_TX_O_IP4_CSUM		BIT(0)

struct nfp_net_tx_desc {
	union {
		struct {
			u8 dma_addr_hi; /* High bits of host buf address */
			__le16 dma_len;	/* Length to DMA for this desc */
			u8 offset_eop;	/* Offset in buf where pkt starts +
					 * highest bit is eop flag.
					 */
			__le32 dma_addr_lo; /* Low 32bit of host buf addr */

			__le16 mss;	/* MSS to be used for LSO */
			u8 lso_hdrlen;	/* LSO, TCP payload offset */
			u8 flags;	/* TX Flags, see @PCIE_DESC_TX_* */
			union {
				struct {
					u8 l3_offset; /* L3 header offset */
					u8 l4_offset; /* L4 header offset */
				};
				__le16 vlan; /* VLAN tag to add if indicated */
			};
			__le16 data_len; /* Length of frame + meta data */
		} __packed;
		__le32 vals[4];
	};
};

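/* Illustrative sketch (not driver code): a TX descriptor is normally filled
 * at the slot selected by the free-running write pointer, so a hypothetical
 * xmit path could look roughly like (dma_addr, len, hdr_offset and last_frag
 * are placeholders):
 *
 *	struct nfp_net_tx_desc *txd;
 *
 *	txd = &tx_ring->txds[D_IDX(tx_ring, tx_ring->wr_p)];
 *	nfp_desc_set_dma_addr(txd, dma_addr);
 *	txd->dma_len = cpu_to_le16(len);
 *	txd->offset_eop = (last_frag ? PCIE_DESC_TX_EOP : 0) |
 *			  (hdr_offset & PCIE_DESC_TX_OFFSET_MASK);
 *	txd->data_len = cpu_to_le16(skb->len);
 *
 * Note that D_IDX() relies on the ring size being a power of two.
 */
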
/**
 * struct nfp_net_tx_buf - software TX buffer descriptor
 * @skb:	normal ring, sk_buff associated with this buffer
 * @frag:	XDP ring, page frag associated with this buffer
 * @dma_addr:	DMA mapping address of the buffer
 * @fidx:	Fragment index (-1 for the head and [0..nr_frags-1] for frags)
 * @pkt_cnt:	Number of packets to be produced out of the skb associated
 *		with this buffer (valid only on the head's buffer).
 *		Will be 1 for all non-TSO packets.
 * @real_len:	Number of bytes to be produced out of the skb (valid only
 *		on the head's buffer). Equal to skb->len for non-TSO packets.
 */
struct nfp_net_tx_buf {
	union {
		struct sk_buff *skb;
		void *frag;
	};
	dma_addr_t dma_addr;
	short int fidx;
	u16 pkt_cnt;
	u32 real_len;
};

/**
 * struct nfp_net_tx_ring - TX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @idx:        Ring index from Linux's perspective
 * @qcidx:      Queue Controller Peripheral (QCP) queue index for the TX queue
 * @qcp_q:      Pointer to base of the QCP TX queue
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       TX ring write pointer (free running)
 * @rd_p:       TX ring read pointer (free running)
 * @qcp_rd_p:   Local copy of QCP TX queue read pointer
 * @wr_ptr_add:	Accumulated number of buffers to add to QCP write pointer
 *		(used for .xmit_more delayed kick)
 * @txbufs:     Array of transmitted TX buffers, to free on transmit
 * @txds:       Virtual address of TX ring in host memory
 * @dma:        DMA address of the TX ring
 * @size:       Size, in bytes, of the TX ring (needed to free)
 * @is_xdp:     Is this an XDP TX ring?
 */
struct nfp_net_tx_ring {
	struct nfp_net_r_vector *r_vec;

	u32 idx;
	int qcidx;
	u8 __iomem *qcp_q;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;
	u32 qcp_rd_p;

	u32 wr_ptr_add;

	struct nfp_net_tx_buf *txbufs;
	struct nfp_net_tx_desc *txds;

	dma_addr_t dma;
	unsigned int size;
	bool is_xdp;
} ____cacheline_aligned;

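/* Illustrative note (not driver code): @wr_p and @rd_p are free running, so
 * the descriptor slot for a given pointer is D_IDX(ring, ptr), and the ring
 * is full when the two pointers are a full ring apart.  A hypothetical
 * space check could therefore be written as:
 *
 *	if (tx_ring->wr_p - tx_ring->rd_p >= tx_ring->cnt)
 *		return NETDEV_TX_BUSY;
 */
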
/* RX and freelist descriptor format */

#define PCIE_DESC_RX_DD			BIT(7)
#define PCIE_DESC_RX_META_LEN_MASK	GENMASK(6, 0)

/* Flags in the RX descriptor */
#define PCIE_DESC_RX_RSS		cpu_to_le16(BIT(15))
#define PCIE_DESC_RX_I_IP4_CSUM		cpu_to_le16(BIT(14))
#define PCIE_DESC_RX_I_IP4_CSUM_OK	cpu_to_le16(BIT(13))
#define PCIE_DESC_RX_I_TCP_CSUM		cpu_to_le16(BIT(12))
#define PCIE_DESC_RX_I_TCP_CSUM_OK	cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM		cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK	cpu_to_le16(BIT(9))
#define PCIE_DESC_RX_BPF		cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP		cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM		cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK	cpu_to_le16(BIT(5))
#define PCIE_DESC_RX_TCP_CSUM		cpu_to_le16(BIT(4))
#define PCIE_DESC_RX_TCP_CSUM_OK	cpu_to_le16(BIT(3))
#define PCIE_DESC_RX_UDP_CSUM		cpu_to_le16(BIT(2))
#define PCIE_DESC_RX_UDP_CSUM_OK	cpu_to_le16(BIT(1))
#define PCIE_DESC_RX_VLAN		cpu_to_le16(BIT(0))

#define PCIE_DESC_RX_CSUM_ALL		(PCIE_DESC_RX_IP4_CSUM |	\
					 PCIE_DESC_RX_TCP_CSUM |	\
					 PCIE_DESC_RX_UDP_CSUM |	\
					 PCIE_DESC_RX_I_IP4_CSUM |	\
					 PCIE_DESC_RX_I_TCP_CSUM |	\
					 PCIE_DESC_RX_I_UDP_CSUM)
#define PCIE_DESC_RX_CSUM_OK_SHIFT	1
#define __PCIE_DESC_RX_CSUM_ALL		le16_to_cpu(PCIE_DESC_RX_CSUM_ALL)
#define __PCIE_DESC_RX_CSUM_ALL_OK	(__PCIE_DESC_RX_CSUM_ALL >>	\
					 PCIE_DESC_RX_CSUM_OK_SHIFT)

struct nfp_net_rx_desc {
	union {
		struct {
			u8 dma_addr_hi;	/* High bits of the buf address */
			__le16 reserved; /* Must be zero */
			u8 meta_len_dd; /* Must be zero */

			__le32 dma_addr_lo; /* Low bits of the buffer address */
		} __packed fld;

		struct {
			__le16 data_len; /* Length of the frame + meta data */
			u8 reserved;
			u8 meta_len_dd;	/* Length of meta data prepended +
					 * descriptor done flag.
					 */

			__le16 flags;	/* RX flags. See @PCIE_DESC_RX_* */
			__le16 vlan;	/* VLAN if stripped */
		} __packed rxd;

		__le32 vals[2];
	};
};

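/* Illustrative note (not driver code): every "checksum requested" flag above
 * has its "checksum OK" counterpart one bit lower, which is what
 * PCIE_DESC_RX_CSUM_OK_SHIFT encodes.  A hypothetical RX checksum check can
 * therefore compare the two bit sets in one shot:
 *
 *	u16 flags = le16_to_cpu(rxd->rxd.flags);
 *	u16 csum_req = flags & __PCIE_DESC_RX_CSUM_ALL;
 *	u16 csum_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
 *
 *	if (csum_req == (csum_ok << PCIE_DESC_RX_CSUM_OK_SHIFT))
 *		;	(all requested checksums were verified)
 */
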
#define NFP_NET_META_FIELD_MASK	GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)

struct nfp_meta_parsed {
	u8 hash_type;
	u8 csum_type;
	u32 hash;
	u32 mark;
	u32 portid;
	__wsum csum;
};

struct nfp_net_rx_hash {
	__be32 hash_type;
	__be32 hash;
};

/**
 * struct nfp_net_rx_buf - software RX buffer descriptor
 * @frag:	page fragment buffer
 * @dma_addr:	DMA mapping address of the buffer
 */
struct nfp_net_rx_buf {
	void *frag;
	dma_addr_t dma_addr;
};

/**
 * struct nfp_net_rx_ring - RX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       FL/RX ring write pointer (free running)
 * @rd_p:       FL/RX ring read pointer (free running)
 * @idx:        Ring index from Linux's perspective
 * @fl_qcidx:   Queue Controller Peripheral (QCP) queue index for the freelist
 * @qcp_fl:     Pointer to base of the QCP freelist queue
 * @rxbufs:     Array of FL/RX buffers
 * @rxds:       Virtual address of FL/RX ring in host memory
 * @dma:        DMA address of the FL/RX ring
 * @size:       Size, in bytes, of the FL/RX ring (needed to free)
 * @xdp_rxq:    RX-ring info avail for XDP
 */
struct nfp_net_rx_ring {
	struct nfp_net_r_vector *r_vec;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;

	u32 idx;

	int fl_qcidx;
	unsigned int size;
	u8 __iomem *qcp_fl;

	struct nfp_net_rx_buf *rxbufs;
	struct nfp_net_rx_desc *rxds;

	dma_addr_t dma;
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned;

/**
 * struct nfp_net_r_vector - Per ring interrupt vector configuration
 * @nfp_net:        Backpointer to nfp_net structure
 * @napi:           NAPI structure for this ring vec
 * @tasklet:        ctrl vNIC, tasklet for servicing the r_vec
 * @queue:          ctrl vNIC, send queue
 * @lock:           ctrl vNIC, r_vec lock protects @queue
 * @tx_ring:        Pointer to TX ring
 * @rx_ring:        Pointer to RX ring
 * @xdp_ring:       Pointer to an extra TX ring for XDP
 * @irq_entry:      MSI-X table entry (use for talking to the device)
 * @rx_sync:        Seqlock for atomic updates of RX stats
 * @rx_pkts:        Number of received packets
 * @rx_bytes:       Number of received bytes
 * @rx_drops:       Number of packets dropped on RX due to lack of resources
 * @hw_csum_rx_ok:  Counter of packets where the HW checksum was OK
 * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
 * @hw_csum_rx_error:    Counter of packets with bad checksums
 * @tx_sync:        Seqlock for atomic updates of TX stats
 * @tx_pkts:        Number of Transmitted packets
 * @tx_bytes:       Number of Transmitted bytes
 * @hw_csum_tx:     Counter of packets with TX checksum offload requested
 * @hw_csum_tx_inner:    Counter of inner TX checksum offload requests
 * @tx_gather:      Counter of packets with Gather DMA
 * @tx_lso:         Counter of LSO packets sent
 * @tx_errors:      How many TX errors were encountered
 * @tx_busy:        How often was TX busy (no space)?
 * @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures
 * @irq_vector:     Interrupt vector number (use for talking to the OS)
 * @handler:        Interrupt handler for this ring vector
 * @name:           Name of the interrupt vector
 * @affinity_mask:  SMP affinity mask for this vector
 *
 * This structure ties RX and TX rings to interrupt vectors and a NAPI
 * context. This currently only supports one RX and TX ring per
 * interrupt vector but might be extended in the future to allow
 * association of multiple rings per vector.
 */
struct nfp_net_r_vector {
	struct nfp_net *nfp_net;
	union {
		struct napi_struct napi;
		struct {
			struct tasklet_struct tasklet;
			struct sk_buff_head queue;
			struct spinlock lock;
		};
	};

	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_rx_ring *rx_ring;

	u16 irq_entry;

	struct u64_stats_sync rx_sync;
	u64 rx_pkts;
	u64 rx_bytes;
	u64 rx_drops;
	u64 hw_csum_rx_ok;
	u64 hw_csum_rx_inner_ok;
	u64 hw_csum_rx_error;

	struct nfp_net_tx_ring *xdp_ring;

	struct u64_stats_sync tx_sync;
	u64 tx_pkts;
	u64 tx_bytes;
	u64 hw_csum_tx;
	u64 hw_csum_tx_inner;
	u64 tx_gather;
	u64 tx_lso;

	u64 rx_replace_buf_alloc_fail;
	u64 tx_errors;
	u64 tx_busy;

	u32 irq_vector;
	irq_handler_t handler;
	char name[IFNAMSIZ + 8];
	cpumask_t affinity_mask;
} ____cacheline_aligned;

/* Firmware version as it is written in the 32bit value in the BAR */
struct nfp_net_fw_version {
	u8 minor;
	u8 major;
	u8 class;
	u8 resv;
} __packed;

static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
				     u8 resv, u8 class, u8 major, u8 minor)
{
	return fw_ver->resv == resv &&
	       fw_ver->class == class &&
	       fw_ver->major == major &&
	       fw_ver->minor == minor;
}

struct nfp_stat_pair {
	u64 pkts;
	u64 bytes;
};

/**
 * struct nfp_net_dp - NFP network device datapath data structure
 * @dev:		Backpointer to struct device
 * @netdev:		Backpointer to net_device structure
 * @is_vf:		Is the driver attached to a VF?
 * @bpf_offload_xdp:	Offloaded BPF program is XDP
 * @chained_metadata_format:  Firmware will use new metadata format
 * @rx_dma_dir:		Mapping direction for RX buffers
 * @rx_dma_off:		Offset at which to DMA packets (for XDP headroom)
 * @rx_offset:		Offset in the RX buffers where packet data starts
 * @ctrl:		Local copy of the control register/word.
 * @fl_bufsz:		Currently configured size of the freelist buffers
 * @xdp_prog:		Installed XDP program
 * @tx_rings:		Array of pre-allocated TX ring structures
 * @rx_rings:		Array of pre-allocated RX ring structures
 * @ctrl_bar:		Pointer to mapped control BAR
 *
 * @txd_cnt:		Size of the TX ring in number of descriptors
 * @rxd_cnt:		Size of the RX ring in number of descriptors
 * @num_r_vecs:		Number of used ring vectors
 * @num_tx_rings:	Currently configured number of TX rings
 * @num_stack_tx_rings:	Number of TX rings used by the stack (not XDP)
 * @num_rx_rings:	Currently configured number of RX rings
 * @mtu:		Device MTU
 */
struct nfp_net_dp {
	struct device *dev;
	struct net_device *netdev;

	u8 is_vf:1;
	u8 bpf_offload_xdp:1;
	u8 chained_metadata_format:1;

	u8 rx_dma_dir;
	u8 rx_offset;

	u32 rx_dma_off;

	u32 ctrl;
	u32 fl_bufsz;

	struct bpf_prog *xdp_prog;

	struct nfp_net_tx_ring *tx_rings;
	struct nfp_net_rx_ring *rx_rings;

	u8 __iomem *ctrl_bar;

	/* Cold data follows */

	unsigned int txd_cnt;
	unsigned int rxd_cnt;

	unsigned int num_r_vecs;

	unsigned int num_tx_rings;
	unsigned int num_stack_tx_rings;
	unsigned int num_rx_rings;

	unsigned int mtu;
};

/**
 * struct nfp_net - NFP network device structure
 * @dp:			Datapath structure
 * @fw_ver:		Firmware version
 * @cap:		Capabilities advertised by the Firmware
 * @max_mtu:		Maximum supported MTU advertised by the Firmware
 * @rss_hfunc:		RSS selected hash function
 * @rss_cfg:		RSS configuration
 * @rss_key:		RSS secret key
 * @rss_itbl:		RSS indirection table
 * @xdp_flags:		Flags with which XDP prog was loaded
 * @xdp_prog:		XDP prog (for ctrl path, both DRV and HW modes)
 * @max_r_vecs:		Number of allocated interrupt vectors for RX/TX
 * @max_tx_rings:	Maximum number of TX rings supported by the Firmware
 * @max_rx_rings:	Maximum number of RX rings supported by the Firmware
 * @stride_rx:		Queue controller RX queue spacing
 * @stride_tx:		Queue controller TX queue spacing
 * @r_vecs:		Pre-allocated array of ring vectors
 * @irq_entries:	Pre-allocated array of MSI-X entries
 * @lsc_handler:	Handler for Link State Change interrupt
 * @lsc_name:		Name for Link State Change interrupt
 * @exn_handler:	Handler for Exception interrupt
 * @exn_name:		Name for Exception interrupt
 * @shared_handler:	Handler for shared interrupts
 * @shared_name:	Name for shared interrupt
 * @me_freq_mhz:	ME clock_freq (MHz)
 * @reconfig_lock:	Protects HW reconfiguration request regs/machinery
 * @reconfig_posted:	Pending reconfig bits coming from async sources
 * @reconfig_timer_active:  Timer for reading reconfiguration results is pending
 * @reconfig_sync_present:  Some thread is performing synchronous reconfig
 * @reconfig_timer:	Timer for async reading of reconfig results
 * @link_up:		Is the link up?
 * @link_status_lock:	Protects @link_* and ensures atomicity with BAR reading
 * @rx_coalesce_usecs:      RX interrupt moderation usecs delay parameter
 * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
 * @tx_coalesce_usecs:      TX interrupt moderation usecs delay parameter
 * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter
 * @vxlan_ports:	VXLAN ports for RX inner csum offload communicated to HW
 * @vxlan_usecnt:	IPv4/IPv6 VXLAN port use counts
 * @qcp_cfg:		Pointer to QCP queue used for configuration notification
 * @tx_bar:		Pointer to mapped TX queues
 * @rx_bar:		Pointer to mapped FL/RX queues
 * @tlv_caps:		Parsed TLV capabilities
 * @debugfs_dir:	Device directory in debugfs
 * @vnic_list:		Entry on device vNIC list
 * @pdev:		Backpointer to PCI device
 * @app:		APP handle if available
 * @port:		Pointer to nfp_port structure if vNIC is a port
 * @app_priv:		APP private data for this vNIC
 */
struct nfp_net {
	struct nfp_net_dp dp;

	struct nfp_net_fw_version fw_ver;

	u32 cap;
	u32 max_mtu;

	u8 rss_hfunc;
	u32 rss_cfg;
	u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
	u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];

	u32 xdp_flags;
	struct bpf_prog *xdp_prog;

	unsigned int max_tx_rings;
	unsigned int max_rx_rings;

	int stride_tx;
	int stride_rx;

	unsigned int max_r_vecs;
	struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
	struct msix_entry irq_entries[NFP_NET_MAX_IRQS];

	irq_handler_t lsc_handler;
	char lsc_name[IFNAMSIZ + 8];

	irq_handler_t exn_handler;
	char exn_name[IFNAMSIZ + 8];

	irq_handler_t shared_handler;
	char shared_name[IFNAMSIZ + 8];

	u32 me_freq_mhz;

	bool link_up;
	spinlock_t link_status_lock;

	spinlock_t reconfig_lock;
	u32 reconfig_posted;
	bool reconfig_timer_active;
	bool reconfig_sync_present;
	struct timer_list reconfig_timer;

	u32 rx_coalesce_usecs;
	u32 rx_coalesce_max_frames;
	u32 tx_coalesce_usecs;
	u32 tx_coalesce_max_frames;

	__be16 vxlan_ports[NFP_NET_N_VXLAN_PORTS];
	u8 vxlan_usecnt[NFP_NET_N_VXLAN_PORTS];

	u8 __iomem *qcp_cfg;

	u8 __iomem *tx_bar;
	u8 __iomem *rx_bar;

	struct nfp_net_tlv_caps tlv_caps;

	struct dentry *debugfs_dir;

	struct list_head vnic_list;

	struct pci_dev *pdev;
	struct nfp_app *app;

	struct nfp_port *port;

	void *app_priv;
};

/* Functions to read/write from/to a BAR
 * Perform any endian conversion necessary.
 */
static inline u16 nn_readb(struct nfp_net *nn, int off)
{
	return readb(nn->dp.ctrl_bar + off);
}

static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
	writeb(val, nn->dp.ctrl_bar + off);
}

static inline u16 nn_readw(struct nfp_net *nn, int off)
{
	return readw(nn->dp.ctrl_bar + off);
}

static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
{
	writew(val, nn->dp.ctrl_bar + off);
}

static inline u32 nn_readl(struct nfp_net *nn, int off)
{
	return readl(nn->dp.ctrl_bar + off);
}

static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
{
	writel(val, nn->dp.ctrl_bar + off);
}

static inline u64 nn_readq(struct nfp_net *nn, int off)
{
	return readq(nn->dp.ctrl_bar + off);
}

static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
{
	writeq(val, nn->dp.ctrl_bar + off);
}

/* Flush posted PCI writes by reading something without side effects */
static inline void nn_pci_flush(struct nfp_net *nn)
{
	nn_readl(nn, NFP_NET_CFG_VERSION);
}

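/* Illustrative usage (not new driver API): control BAR accesses go through
 * the helpers above so that the register offsets from nfp_net_ctrl.h are
 * always applied to the mapped ctrl_bar, e.g. a hypothetical caller might do:
 *
 *	u32 cap = nn_readl(nn, NFP_NET_CFG_CAP);
 *
 *	nn_writel(nn, NFP_NET_CFG_MTU, new_mtu);
 *	nn_pci_flush(nn);
 */
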
/* Queue Controller Peripheral access functions and definitions.
 *
 * Some of the BARs of the NFP are mapped to portions of the Queue
 * Controller Peripheral (QCP) address space on the NFP.  A QCP queue
 * has a read and a write pointer (as well as a size and flags,
 * indicating overflow etc).  The QCP offers a number of different
 * operations on queue pointers, but here we only offer functions to
 * either add to a pointer or to read the pointer value.
 */
#define NFP_QCP_QUEUE_ADDR_SZ			0x800
#define NFP_QCP_QUEUE_AREA_SZ			0x80000
#define NFP_QCP_QUEUE_OFF(_x)			((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR			0x0000
#define NFP_QCP_QUEUE_ADD_WPTR			0x0004
#define NFP_QCP_QUEUE_STS_LO			0x0008
#define NFP_QCP_QUEUE_STS_LO_READPTR_mask	0x3ffff
#define NFP_QCP_QUEUE_STS_HI			0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask	0x3ffff

/* The offset of QCP queues in the PCIe Target */
#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))

/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
	NFP_QCP_READ_PTR = 0,
	NFP_QCP_WRITE_PTR
};

/* There appears to be an *undocumented* upper limit on the value which
 * one can add to a queue and that value is either 0x3f or 0x7f.  We
 * go with 0x3f as a conservative measure.
 */
#define NFP_QCP_MAX_ADD				0x3f

static inline void _nfp_qcp_ptr_add(u8 __iomem *q,
				    enum nfp_qcp_ptr ptr, u32 val)
{
	u32 off;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_ADD_RPTR;
	else
		off = NFP_QCP_QUEUE_ADD_WPTR;

	while (val > NFP_QCP_MAX_ADD) {
		writel(NFP_QCP_MAX_ADD, q + off);
		val -= NFP_QCP_MAX_ADD;
	}

	writel(val, q + off);
}

/**
 * nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
 *
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
 */
static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
{
	_nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val);
}

/**
 * nfp_qcp_wr_ptr_add() - Add the value to the write pointer of a queue
 *
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
 */
static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
{
	_nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val);
}

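/* Illustrative usage (not new driver API): the add helpers above are what a
 * hypothetical TX kick would use to tell the QCP how many new descriptors
 * were posted, e.g.:
 *
 *	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
 *	tx_ring->wr_ptr_add = 0;
 *
 * Additions larger than NFP_QCP_MAX_ADD are split into multiple writes by
 * _nfp_qcp_ptr_add().
 */
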
static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
{
	u32 off;
	u32 val;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_STS_LO;
	else
		off = NFP_QCP_QUEUE_STS_HI;

	val = readl(q + off);

	if (ptr == NFP_QCP_READ_PTR)
		return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
	else
		return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
}

/**
 * nfp_qcp_rd_ptr_read() - Read the current read pointer value for a queue
 * @q:  Base address for queue structure
 *
 * Return: Value read.
 */
static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_READ_PTR);
}

/**
 * nfp_qcp_wr_ptr_read() - Read the current write pointer value for a queue
 * @q:  Base address for queue structure
 *
 * Return: Value read.
 */
static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}

static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
{
	WARN_ON_ONCE(!nn->dp.netdev && nn->port);
	return !!nn->dp.netdev;
}

static inline bool nfp_net_running(struct nfp_net *nn)
{
	return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
}

static inline const char *nfp_net_name(struct nfp_net *nn)
{
	return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
}

static inline void nfp_ctrl_lock(struct nfp_net *nn)
	__acquires(&nn->r_vecs[0].lock)
{
	spin_lock_bh(&nn->r_vecs[0].lock);
}

static inline void nfp_ctrl_unlock(struct nfp_net *nn)
	__releases(&nn->r_vecs[0].lock)
{
	spin_unlock_bh(&nn->r_vecs[0].lock);
}

/* Globals */
extern const char nfp_driver_version[];

extern const struct net_device_ops nfp_net_netdev_ops;

static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
{
	return netdev->netdev_ops == &nfp_net_netdev_ops;
}

/* Prototypes */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
			    void __iomem *ctrl_bar);

struct nfp_net *
nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
	      unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn);

int nfp_net_init(struct nfp_net *nn);
void nfp_net_clean(struct nfp_net *nn);

int nfp_ctrl_open(struct nfp_net *nn);
void nfp_ctrl_close(struct nfp_net *nn);

void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);

unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
		   unsigned int min_irqs, unsigned int want_irqs);
void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
		    unsigned int n);

struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
			  struct netlink_ext_ack *extack);

#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
static inline void nfp_net_debugfs_create(void)
{
}

static inline void nfp_net_debugfs_destroy(void)
{
}

static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
	return NULL;
}

static inline void
nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
{
}

static inline void nfp_net_debugfs_dir_clean(struct dentry **dir)
{
}
#endif /* CONFIG_NFP_DEBUG */

#endif /* _NFP_NET_H_ */