1 /*
2  * Copyright (C) 2015 Netronome Systems, Inc.
3  *
4  * This software is dual licensed under the GNU General License Version 2,
5  * June 1991 as shown in the file COPYING in the top-level directory of this
6  * source tree or the BSD 2-Clause License provided below.  You have the
7  * option to license this software under the complete terms of either license.
8  *
9  * The BSD 2-Clause License:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      1. Redistributions of source code must retain the above
16  *         copyright notice, this list of conditions and the following
17  *         disclaimer.
18  *
19  *      2. Redistributions in binary form must reproduce the above
20  *         copyright notice, this list of conditions and the following
21  *         disclaimer in the documentation and/or other materials
22  *         provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 /*
35  * nfp_net.h
36  * Declarations for Netronome network device driver.
37  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
38  *          Jason McMullan <jason.mcmullan@netronome.com>
39  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
40  */
41 
42 #ifndef _NFP_NET_H_
43 #define _NFP_NET_H_
44 
45 #include <linux/interrupt.h>
46 #include <linux/netdevice.h>
47 #include <linux/pci.h>
48 #include <linux/io-64-nonatomic-hi-lo.h>
49 
50 #include "nfp_net_ctrl.h"
51 
/* Per-device logging helpers; all take a struct nfp_net * and forward to
 * the corresponding netdev_*() printer on nn->netdev.
 */
#define nn_err(nn, fmt, args...)  netdev_err((nn)->netdev, fmt, ## args)
#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args)
#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args)
#define nn_dbg(nn, fmt, args...)  netdev_dbg((nn)->netdev, fmt, ## args)
/* Rate-limited warning, for conditions which may trigger on the fast path */
#define nn_warn_ratelimit(nn, fmt, args...)				\
	do {								\
		if (unlikely(net_ratelimit()))				\
			netdev_warn((nn)->netdev, fmt, ## args);	\
	} while (0)
61 
62 /* Max time to wait for NFP to respond on updates (in seconds) */
63 #define NFP_NET_POLL_TIMEOUT	5
64 
65 /* Interval for reading offloaded filter stats */
66 #define NFP_NET_STAT_POLL_IVL	msecs_to_jiffies(100)
67 
68 /* Bar allocation */
69 #define NFP_NET_CTRL_BAR	0
70 #define NFP_NET_Q0_BAR		2
71 #define NFP_NET_Q1_BAR		4	/* OBSOLETE */
72 
73 /* Max bits in DMA address */
74 #define NFP_NET_MAX_DMA_BITS	40
75 
76 /* Default size for MTU and freelist buffer sizes */
77 #define NFP_NET_DEFAULT_MTU		1500
78 #define NFP_NET_DEFAULT_RX_BUFSZ	2048
79 
80 /* Maximum number of bytes prepended to a packet */
81 #define NFP_NET_MAX_PREPEND		64
82 
83 /* Interrupt definitions */
84 #define NFP_NET_NON_Q_VECTORS		2
85 #define NFP_NET_IRQ_LSC_IDX		0
86 #define NFP_NET_IRQ_EXN_IDX		1
87 
88 /* Queue/Ring definitions */
89 #define NFP_NET_MAX_TX_RINGS	64	/* Max. # of Tx rings per device */
90 #define NFP_NET_MAX_RX_RINGS	64	/* Max. # of Rx rings per device */
91 
92 #define NFP_NET_MIN_TX_DESCS	256	/* Min. # of Tx descs per ring */
93 #define NFP_NET_MIN_RX_DESCS	256	/* Min. # of Rx descs per ring */
94 #define NFP_NET_MAX_TX_DESCS	(256 * 1024) /* Max. # of Tx descs per ring */
95 #define NFP_NET_MAX_RX_DESCS	(256 * 1024) /* Max. # of Rx descs per ring */
96 
97 #define NFP_NET_TX_DESCS_DEFAULT 4096	/* Default # of Tx descs per ring */
98 #define NFP_NET_RX_DESCS_DEFAULT 4096	/* Default # of Rx descs per ring */
99 
100 #define NFP_NET_FL_BATCH	16	/* Add freelist in this Batch size */
101 
102 /* Offload definitions */
103 #define NFP_NET_N_VXLAN_PORTS	(NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))
104 
105 /* Forward declarations */
106 struct nfp_net;
107 struct nfp_net_r_vector;
108 
/* Convenience macro for writing dma address into RX/TX descriptors
 *
 * Descriptors carry the low 32 bits plus an 8-bit high part of the
 * address, i.e. 40 bits total - matching NFP_NET_MAX_DMA_BITS.
 * Arguments are evaluated exactly once.
 */
#define nfp_desc_set_dma_addr(desc, dma_addr)				\
	do {								\
		__typeof(desc) __d = (desc);				\
		dma_addr_t __addr = (dma_addr);				\
									\
		__d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));	\
		__d->dma_addr_hi = upper_32_bits(__addr) & 0xff;	\
	} while (0)
118 
119 /* TX descriptor format */
120 
121 #define PCIE_DESC_TX_EOP		BIT(7)
122 #define PCIE_DESC_TX_OFFSET_MASK	GENMASK(6, 0)
123 #define PCIE_DESC_TX_MSS_MASK		GENMASK(13, 0)
124 
125 /* Flags in the host TX descriptor */
126 #define PCIE_DESC_TX_CSUM		BIT(7)
127 #define PCIE_DESC_TX_IP4_CSUM		BIT(6)
128 #define PCIE_DESC_TX_TCP_CSUM		BIT(5)
129 #define PCIE_DESC_TX_UDP_CSUM		BIT(4)
130 #define PCIE_DESC_TX_VLAN		BIT(3)
131 #define PCIE_DESC_TX_LSO		BIT(2)
132 #define PCIE_DESC_TX_ENCAP		BIT(1)
133 #define PCIE_DESC_TX_O_IP4_CSUM	BIT(0)
134 
/* Hardware TX descriptor.  The anonymous struct view is used when filling
 * in fields, vals[] allows whole-word access.  Field layout and widths are
 * part of the device ABI (note __packed) - do not reorder.
 */
struct nfp_net_tx_desc {
	union {
		struct {
			u8 dma_addr_hi; /* High bits of host buf address */
			__le16 dma_len;	/* Length to DMA for this desc */
			u8 offset_eop;	/* Offset in buf where pkt starts +
					 * highest bit is eop flag.
					 */
			__le32 dma_addr_lo; /* Low 32bit of host buf addr */

			__le16 mss;	/* MSS to be used for LSO */
			u8 l4_offset;	/* LSO, where the L4 data starts */
			u8 flags;	/* TX Flags, see @PCIE_DESC_TX_* */

			__le16 vlan;	/* VLAN tag to add if indicated */
			__le16 data_len; /* Length of frame + meta data */
		} __packed;
		__le32 vals[4];
	};
};
155 
156 /**
157  * struct nfp_net_tx_buf - software TX buffer descriptor
158  * @skb:	sk_buff associated with this buffer
159  * @dma_addr:	DMA mapping address of the buffer
160  * @fidx:	Fragment index (-1 for the head and [0..nr_frags-1] for frags)
161  * @pkt_cnt:	Number of packets to be produced out of the skb associated
162  *		with this buffer (valid only on the head's buffer).
163  *		Will be 1 for all non-TSO packets.
164  * @real_len:	Number of bytes which to be produced out of the skb (valid only
165  *		on the head's buffer). Equal to skb->len for non-TSO packets.
166  */
167 struct nfp_net_tx_buf {
168 	struct sk_buff *skb;
169 	dma_addr_t dma_addr;
170 	short int fidx;
171 	u16 pkt_cnt;
172 	u32 real_len;
173 };
174 
175 /**
176  * struct nfp_net_tx_ring - TX ring structure
177  * @r_vec:      Back pointer to ring vector structure
178  * @idx:        Ring index from Linux's perspective
179  * @qcidx:      Queue Controller Peripheral (QCP) queue index for the TX queue
180  * @qcp_q:      Pointer to base of the QCP TX queue
181  * @cnt:        Size of the queue in number of descriptors
182  * @wr_p:       TX ring write pointer (free running)
183  * @rd_p:       TX ring read pointer (free running)
184  * @qcp_rd_p:   Local copy of QCP TX queue read pointer
185  * @wr_ptr_add:	Accumulated number of buffers to add to QCP write pointer
186  *		(used for .xmit_more delayed kick)
187  * @txbufs:     Array of transmitted TX buffers, to free on transmit
188  * @txds:       Virtual address of TX ring in host memory
189  * @dma:        DMA address of the TX ring
190  * @size:       Size, in bytes, of the TX ring (needed to free)
191  */
192 struct nfp_net_tx_ring {
193 	struct nfp_net_r_vector *r_vec;
194 
195 	u32 idx;
196 	int qcidx;
197 	u8 __iomem *qcp_q;
198 
199 	u32 cnt;
200 	u32 wr_p;
201 	u32 rd_p;
202 	u32 qcp_rd_p;
203 
204 	u32 wr_ptr_add;
205 
206 	struct nfp_net_tx_buf *txbufs;
207 	struct nfp_net_tx_desc *txds;
208 
209 	dma_addr_t dma;
210 	unsigned int size;
211 } ____cacheline_aligned;
212 
213 /* RX and freelist descriptor format */
214 
215 #define PCIE_DESC_RX_DD			BIT(7)
216 #define PCIE_DESC_RX_META_LEN_MASK	GENMASK(6, 0)
217 
218 /* Flags in the RX descriptor */
219 #define PCIE_DESC_RX_RSS		cpu_to_le16(BIT(15))
220 #define PCIE_DESC_RX_I_IP4_CSUM		cpu_to_le16(BIT(14))
221 #define PCIE_DESC_RX_I_IP4_CSUM_OK	cpu_to_le16(BIT(13))
222 #define PCIE_DESC_RX_I_TCP_CSUM		cpu_to_le16(BIT(12))
223 #define PCIE_DESC_RX_I_TCP_CSUM_OK	cpu_to_le16(BIT(11))
224 #define PCIE_DESC_RX_I_UDP_CSUM		cpu_to_le16(BIT(10))
225 #define PCIE_DESC_RX_I_UDP_CSUM_OK	cpu_to_le16(BIT(9))
226 #define PCIE_DESC_RX_BPF		cpu_to_le16(BIT(8))
227 #define PCIE_DESC_RX_EOP		cpu_to_le16(BIT(7))
228 #define PCIE_DESC_RX_IP4_CSUM		cpu_to_le16(BIT(6))
229 #define PCIE_DESC_RX_IP4_CSUM_OK	cpu_to_le16(BIT(5))
230 #define PCIE_DESC_RX_TCP_CSUM		cpu_to_le16(BIT(4))
231 #define PCIE_DESC_RX_TCP_CSUM_OK	cpu_to_le16(BIT(3))
232 #define PCIE_DESC_RX_UDP_CSUM		cpu_to_le16(BIT(2))
233 #define PCIE_DESC_RX_UDP_CSUM_OK	cpu_to_le16(BIT(1))
234 #define PCIE_DESC_RX_VLAN		cpu_to_le16(BIT(0))
235 
236 #define PCIE_DESC_RX_CSUM_ALL		(PCIE_DESC_RX_IP4_CSUM |	\
237 					 PCIE_DESC_RX_TCP_CSUM |	\
238 					 PCIE_DESC_RX_UDP_CSUM |	\
239 					 PCIE_DESC_RX_I_IP4_CSUM |	\
240 					 PCIE_DESC_RX_I_TCP_CSUM |	\
241 					 PCIE_DESC_RX_I_UDP_CSUM)
242 #define PCIE_DESC_RX_CSUM_OK_SHIFT	1
243 #define __PCIE_DESC_RX_CSUM_ALL		le16_to_cpu(PCIE_DESC_RX_CSUM_ALL)
244 #define __PCIE_DESC_RX_CSUM_ALL_OK	(__PCIE_DESC_RX_CSUM_ALL >>	\
245 					 PCIE_DESC_RX_CSUM_OK_SHIFT)
246 
/* Hardware FL/RX descriptor.  The fld view is used when posting freelist
 * buffers, the rxd view when parsing completed RX descriptors; vals[]
 * allows whole-word access.  Layout is part of the device ABI (__packed).
 */
struct nfp_net_rx_desc {
	union {
		struct {
			u8 dma_addr_hi;	/* High bits of the buf address */
			__le16 reserved; /* Must be zero */
			u8 meta_len_dd; /* Must be zero */

			__le32 dma_addr_lo; /* Low bits of the buffer address */
		} __packed fld;

		struct {
			__le16 data_len; /* Length of the frame + meta data */
			u8 reserved;
			u8 meta_len_dd;	/* Length of meta data prepended +
					 * descriptor done flag.
					 */

			__le16 flags;	/* RX flags. See @PCIE_DESC_RX_* */
			__le16 vlan;	/* VLAN if stripped */
		} __packed rxd;

		__le32 vals[2];
	};
};
271 
272 #define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
273 
/* RSS hash metadata prepended to the packet data; note the big-endian
 * (__be32) fields - presumably written by the device, TODO confirm.
 */
struct nfp_net_rx_hash {
	__be32 hash_type;	/* type of the hash below */
	__be32 hash;		/* hash value */
};
278 
279 /**
280  * struct nfp_net_rx_buf - software RX buffer descriptor
281  * @skb:	sk_buff associated with this buffer
282  * @dma_addr:	DMA mapping address of the buffer
283  */
284 struct nfp_net_rx_buf {
285 	struct sk_buff *skb;
286 	dma_addr_t dma_addr;
287 };
288 
289 /**
290  * struct nfp_net_rx_ring - RX ring structure
291  * @r_vec:      Back pointer to ring vector structure
292  * @cnt:        Size of the queue in number of descriptors
293  * @wr_p:       FL/RX ring write pointer (free running)
294  * @rd_p:       FL/RX ring read pointer (free running)
295  * @idx:        Ring index from Linux's perspective
296  * @fl_qcidx:   Queue Controller Peripheral (QCP) queue index for the freelist
297  * @rx_qcidx:   Queue Controller Peripheral (QCP) queue index for the RX queue
298  * @qcp_fl:     Pointer to base of the QCP freelist queue
299  * @qcp_rx:     Pointer to base of the QCP RX queue
300  * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
301  *              (used for free list batching)
302  * @rxbufs:     Array of transmitted FL/RX buffers
303  * @rxds:       Virtual address of FL/RX ring in host memory
304  * @dma:        DMA address of the FL/RX ring
305  * @size:       Size, in bytes, of the FL/RX ring (needed to free)
306  * @bufsz:	Buffer allocation size for convenience of management routines
307  *		(NOTE: this is in second cache line, do not use on fast path!)
308  */
309 struct nfp_net_rx_ring {
310 	struct nfp_net_r_vector *r_vec;
311 
312 	u32 cnt;
313 	u32 wr_p;
314 	u32 rd_p;
315 
316 	u16 idx;
317 	u16 wr_ptr_add;
318 
319 	int fl_qcidx;
320 	int rx_qcidx;
321 	u8 __iomem *qcp_fl;
322 	u8 __iomem *qcp_rx;
323 
324 	struct nfp_net_rx_buf *rxbufs;
325 	struct nfp_net_rx_desc *rxds;
326 
327 	dma_addr_t dma;
328 	unsigned int size;
329 	unsigned int bufsz;
330 } ____cacheline_aligned;
331 
332 /**
333  * struct nfp_net_r_vector - Per ring interrupt vector configuration
334  * @nfp_net:        Backpointer to nfp_net structure
335  * @napi:           NAPI structure for this ring vec
336  * @tx_ring:        Pointer to TX ring
337  * @rx_ring:        Pointer to RX ring
338  * @irq_idx:        Index into MSI-X table
339  * @rx_sync:	    Seqlock for atomic updates of RX stats
340  * @rx_pkts:        Number of received packets
341  * @rx_bytes:	    Number of received bytes
342  * @rx_drops:	    Number of packets dropped on RX due to lack of resources
343  * @hw_csum_rx_ok:  Counter of packets where the HW checksum was OK
344  * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
345  * @hw_csum_rx_error:	 Counter of packets with bad checksums
346  * @tx_sync:	    Seqlock for atomic updates of TX stats
347  * @tx_pkts:	    Number of Transmitted packets
348  * @tx_bytes:	    Number of Transmitted bytes
349  * @hw_csum_tx:	    Counter of packets with TX checksum offload requested
350  * @hw_csum_tx_inner:	 Counter of inner TX checksum offload requests
351  * @tx_gather:	    Counter of packets with Gather DMA
352  * @tx_lso:	    Counter of LSO packets sent
353  * @tx_errors:	    How many TX errors were encountered
354  * @tx_busy:        How often was TX busy (no space)?
355  * @handler:        Interrupt handler for this ring vector
356  * @name:           Name of the interrupt vector
357  * @affinity_mask:  SMP affinity mask for this vector
358  *
359  * This structure ties RX and TX rings to interrupt vectors and a NAPI
360  * context. This currently only supports one RX and TX ring per
361  * interrupt vector but might be extended in the future to allow
362  * association of multiple rings per vector.
363  */
364 struct nfp_net_r_vector {
365 	struct nfp_net *nfp_net;
366 	struct napi_struct napi;
367 
368 	struct nfp_net_tx_ring *tx_ring;
369 	struct nfp_net_rx_ring *rx_ring;
370 
371 	int irq_idx;
372 
373 	struct u64_stats_sync rx_sync;
374 	u64 rx_pkts;
375 	u64 rx_bytes;
376 	u64 rx_drops;
377 	u64 hw_csum_rx_ok;
378 	u64 hw_csum_rx_inner_ok;
379 	u64 hw_csum_rx_error;
380 
381 	struct u64_stats_sync tx_sync;
382 	u64 tx_pkts;
383 	u64 tx_bytes;
384 	u64 hw_csum_tx;
385 	u64 hw_csum_tx_inner;
386 	u64 tx_gather;
387 	u64 tx_lso;
388 	u64 tx_errors;
389 	u64 tx_busy;
390 
391 	irq_handler_t handler;
392 	char name[IFNAMSIZ + 8];
393 	cpumask_t affinity_mask;
394 } ____cacheline_aligned;
395 
/* Firmware version as it is written in the 32bit value in the BAR.
 * __packed because the layout must match the device register exactly.
 */
struct nfp_net_fw_version {
	u8 minor;	/* firmware minor revision */
	u8 major;	/* firmware major revision */
	u8 class;	/* firmware class */
	u8 resv;	/* reserved byte */
} __packed;
403 
404 static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
405 				     u8 resv, u8 class, u8 major, u8 minor)
406 {
407 	return fw_ver->resv == resv &&
408 	       fw_ver->class == class &&
409 	       fw_ver->major == major &&
410 	       fw_ver->minor == minor;
411 }
412 
/* Packet/byte counter pair, used for the filter offload statistics */
struct nfp_stat_pair {
	u64 pkts;
	u64 bytes;
};
417 
418 /**
419  * struct nfp_net - NFP network device structure
420  * @pdev:               Backpointer to PCI device
421  * @netdev:             Backpointer to net_device structure
422  * @nfp_fallback:       Is the driver used in fallback mode?
423  * @is_vf:              Is the driver attached to a VF?
424  * @is_nfp3200:         Is the driver for a NFP-3200 card?
425  * @fw_loaded:          Is the firmware loaded?
426  * @bpf_offload_skip_sw:  Offloaded BPF program will not be rerun by cls_bpf
427  * @ctrl:               Local copy of the control register/word.
428  * @fl_bufsz:           Currently configured size of the freelist buffers
429  * @rx_offset:		Offset in the RX buffers where packet data starts
430  * @cpp:                Pointer to the CPP handle
431  * @nfp_dev_cpp:        Pointer to the NFP Device handle
432  * @ctrl_area:          Pointer to the CPP area for the control BAR
433  * @tx_area:            Pointer to the CPP area for the TX queues
434  * @rx_area:            Pointer to the CPP area for the FL/RX queues
435  * @fw_ver:             Firmware version
436  * @cap:                Capabilities advertised by the Firmware
437  * @max_mtu:            Maximum support MTU advertised by the Firmware
438  * @rss_cfg:            RSS configuration
439  * @rss_key:            RSS secret key
440  * @rss_itbl:           RSS indirection table
441  * @rx_filter:		Filter offload statistics - dropped packets/bytes
442  * @rx_filter_prev:	Filter offload statistics - values from previous update
443  * @rx_filter_change:	Jiffies when statistics last changed
444  * @rx_filter_stats_timer:  Timer for polling filter offload statistics
445  * @rx_filter_lock:	Lock protecting timer state changes (teardown)
446  * @max_tx_rings:       Maximum number of TX rings supported by the Firmware
447  * @max_rx_rings:       Maximum number of RX rings supported by the Firmware
448  * @num_tx_rings:       Currently configured number of TX rings
449  * @num_rx_rings:       Currently configured number of RX rings
450  * @txd_cnt:            Size of the TX ring in number of descriptors
451  * @rxd_cnt:            Size of the RX ring in number of descriptors
452  * @tx_rings:           Array of pre-allocated TX ring structures
453  * @rx_rings:           Array of pre-allocated RX ring structures
454  * @num_irqs:	        Number of allocated interrupt vectors
455  * @num_r_vecs:         Number of used ring vectors
456  * @r_vecs:             Pre-allocated array of ring vectors
457  * @irq_entries:        Pre-allocated array of MSI-X entries
458  * @lsc_handler:        Handler for Link State Change interrupt
459  * @lsc_name:           Name for Link State Change interrupt
460  * @exn_handler:        Handler for Exception interrupt
461  * @exn_name:           Name for Exception interrupt
462  * @shared_handler:     Handler for shared interrupts
463  * @shared_name:        Name for shared interrupt
464  * @me_freq_mhz:        ME clock_freq (MHz)
465  * @reconfig_lock:	Protects HW reconfiguration request regs/machinery
466  * @reconfig_posted:	Pending reconfig bits coming from async sources
467  * @reconfig_timer_active:  Timer for reading reconfiguration results is pending
468  * @reconfig_sync_present:  Some thread is performing synchronous reconfig
469  * @reconfig_timer:	Timer for async reading of reconfig results
470  * @link_up:            Is the link up?
471  * @link_status_lock:	Protects @link_up and ensures atomicity with BAR reading
472  * @rx_coalesce_usecs:      RX interrupt moderation usecs delay parameter
473  * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
474  * @tx_coalesce_usecs:      TX interrupt moderation usecs delay parameter
475  * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter
476  * @vxlan_ports:	VXLAN ports for RX inner csum offload communicated to HW
477  * @vxlan_usecnt:	IPv4/IPv6 VXLAN port use counts
478  * @qcp_cfg:            Pointer to QCP queue used for configuration notification
479  * @ctrl_bar:           Pointer to mapped control BAR
480  * @tx_bar:             Pointer to mapped TX queues
481  * @rx_bar:             Pointer to mapped FL/RX queues
482  * @debugfs_dir:	Device directory in debugfs
483  */
484 struct nfp_net {
485 	struct pci_dev *pdev;
486 	struct net_device *netdev;
487 
488 	unsigned nfp_fallback:1;
489 	unsigned is_vf:1;
490 	unsigned is_nfp3200:1;
491 	unsigned fw_loaded:1;
492 	unsigned bpf_offload_skip_sw:1;
493 
494 	u32 ctrl;
495 	u32 fl_bufsz;
496 
497 	u32 rx_offset;
498 
499 	struct nfp_net_tx_ring *tx_rings;
500 	struct nfp_net_rx_ring *rx_rings;
501 
502 #ifdef CONFIG_PCI_IOV
503 	unsigned int num_vfs;
504 	struct vf_data_storage *vfinfo;
505 	int vf_rate_link_speed;
506 #endif
507 
508 	struct nfp_cpp *cpp;
509 	struct platform_device *nfp_dev_cpp;
510 	struct nfp_cpp_area *ctrl_area;
511 	struct nfp_cpp_area *tx_area;
512 	struct nfp_cpp_area *rx_area;
513 
514 	struct nfp_net_fw_version fw_ver;
515 	u32 cap;
516 	u32 max_mtu;
517 
518 	u32 rss_cfg;
519 	u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
520 	u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
521 
522 	struct nfp_stat_pair rx_filter, rx_filter_prev;
523 	unsigned long rx_filter_change;
524 	struct timer_list rx_filter_stats_timer;
525 	spinlock_t rx_filter_lock;
526 
527 	int max_tx_rings;
528 	int max_rx_rings;
529 
530 	int num_tx_rings;
531 	int num_rx_rings;
532 
533 	int stride_tx;
534 	int stride_rx;
535 
536 	int txd_cnt;
537 	int rxd_cnt;
538 
539 	u8 num_irqs;
540 	u8 num_r_vecs;
541 	struct nfp_net_r_vector r_vecs[NFP_NET_MAX_TX_RINGS];
542 	struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS +
543 				      NFP_NET_MAX_TX_RINGS];
544 
545 	irq_handler_t lsc_handler;
546 	char lsc_name[IFNAMSIZ + 8];
547 
548 	irq_handler_t exn_handler;
549 	char exn_name[IFNAMSIZ + 8];
550 
551 	irq_handler_t shared_handler;
552 	char shared_name[IFNAMSIZ + 8];
553 
554 	u32 me_freq_mhz;
555 
556 	bool link_up;
557 	spinlock_t link_status_lock;
558 
559 	spinlock_t reconfig_lock;
560 	u32 reconfig_posted;
561 	bool reconfig_timer_active;
562 	bool reconfig_sync_present;
563 	struct timer_list reconfig_timer;
564 
565 	u32 rx_coalesce_usecs;
566 	u32 rx_coalesce_max_frames;
567 	u32 tx_coalesce_usecs;
568 	u32 tx_coalesce_max_frames;
569 
570 	__be16 vxlan_ports[NFP_NET_N_VXLAN_PORTS];
571 	u8 vxlan_usecnt[NFP_NET_N_VXLAN_PORTS];
572 
573 	u8 __iomem *qcp_cfg;
574 
575 	u8 __iomem *ctrl_bar;
576 	u8 __iomem *q_bar;
577 	u8 __iomem *tx_bar;
578 	u8 __iomem *rx_bar;
579 
580 	struct dentry *debugfs_dir;
581 };
582 
583 /* Functions to read/write from/to a BAR
584  * Performs any endian conversion necessary.
585  */
586 static inline u16 nn_readb(struct nfp_net *nn, int off)
587 {
588 	return readb(nn->ctrl_bar + off);
589 }
590 
591 static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
592 {
593 	writeb(val, nn->ctrl_bar + off);
594 }
595 
596 /* NFP-3200 can't handle 16-bit accesses too well */
597 static inline u16 nn_readw(struct nfp_net *nn, int off)
598 {
599 	WARN_ON_ONCE(nn->is_nfp3200);
600 	return readw(nn->ctrl_bar + off);
601 }
602 
603 static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
604 {
605 	WARN_ON_ONCE(nn->is_nfp3200);
606 	writew(val, nn->ctrl_bar + off);
607 }
608 
609 static inline u32 nn_readl(struct nfp_net *nn, int off)
610 {
611 	return readl(nn->ctrl_bar + off);
612 }
613 
614 static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
615 {
616 	writel(val, nn->ctrl_bar + off);
617 }
618 
619 static inline u64 nn_readq(struct nfp_net *nn, int off)
620 {
621 	return readq(nn->ctrl_bar + off);
622 }
623 
624 static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
625 {
626 	writeq(val, nn->ctrl_bar + off);
627 }
628 
/* Flush posted PCI writes by reading something without side effects */
static inline void nn_pci_flush(struct nfp_net *nn)
{
	/* Reading the version register forces completion of all preceding
	 * posted writes without changing any device state.
	 */
	nn_readl(nn, NFP_NET_CFG_VERSION);
}
634 
/* Queue Controller Peripheral access functions and definitions.
 *
 * Some of the BARs of the NFP are mapped to portions of the Queue
 * Controller Peripheral (QCP) address space on the NFP.  A QCP queue
 * has a read and a write pointer (as well as a size and flags,
 * indicating overflow etc).  The QCP offers a number of different
 * operations on queue pointers, but here we only offer functions to
 * either add to a pointer or to read the pointer value.
 */
644 #define NFP_QCP_QUEUE_ADDR_SZ			0x800
645 #define NFP_QCP_QUEUE_OFF(_x)			((_x) * NFP_QCP_QUEUE_ADDR_SZ)
646 #define NFP_QCP_QUEUE_ADD_RPTR			0x0000
647 #define NFP_QCP_QUEUE_ADD_WPTR			0x0004
648 #define NFP_QCP_QUEUE_STS_LO			0x0008
649 #define NFP_QCP_QUEUE_STS_LO_READPTR_mask	0x3ffff
650 #define NFP_QCP_QUEUE_STS_HI			0x000c
651 #define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask	0x3ffff
652 
/* The offset of a QCP queue in the PCIe Target (same on NFP3200 and NFP6000) */
654 #define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
655 
/**
 * enum nfp_qcp_ptr - Read or Write Pointer of a queue
 * @NFP_QCP_READ_PTR:	select the queue's read pointer
 * @NFP_QCP_WRITE_PTR:	select the queue's write pointer
 */
enum nfp_qcp_ptr {
	NFP_QCP_READ_PTR = 0,
	NFP_QCP_WRITE_PTR
};
661 
/* There appears to be an *undocumented* upper limit on the value which
 * one can add to a queue and that value is either 0x3f or 0x7f.  We
 * go with 0x3f as a conservative measure.
 */
666 #define NFP_QCP_MAX_ADD				0x3f
667 
668 static inline void _nfp_qcp_ptr_add(u8 __iomem *q,
669 				    enum nfp_qcp_ptr ptr, u32 val)
670 {
671 	u32 off;
672 
673 	if (ptr == NFP_QCP_READ_PTR)
674 		off = NFP_QCP_QUEUE_ADD_RPTR;
675 	else
676 		off = NFP_QCP_QUEUE_ADD_WPTR;
677 
678 	while (val > NFP_QCP_MAX_ADD) {
679 		writel(NFP_QCP_MAX_ADD, q + off);
680 		val -= NFP_QCP_MAX_ADD;
681 	}
682 
683 	writel(val, q + off);
684 }
685 
686 /**
687  * nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
688  *
689  * @q:   Base address for queue structure
690  * @val: Value to add to the queue pointer
691  *
692  * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
693  */
694 static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
695 {
696 	_nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val);
697 }
698 
699 /**
700  * nfp_qcp_wr_ptr_add() - Add the value to the write pointer of a queue
701  *
702  * @q:   Base address for queue structure
703  * @val: Value to add to the queue pointer
704  *
705  * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
706  */
707 static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
708 {
709 	_nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val);
710 }
711 
712 static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
713 {
714 	u32 off;
715 	u32 val;
716 
717 	if (ptr == NFP_QCP_READ_PTR)
718 		off = NFP_QCP_QUEUE_STS_LO;
719 	else
720 		off = NFP_QCP_QUEUE_STS_HI;
721 
722 	val = readl(q + off);
723 
724 	if (ptr == NFP_QCP_READ_PTR)
725 		return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
726 	else
727 		return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
728 }
729 
730 /**
731  * nfp_qcp_rd_ptr_read() - Read the current read pointer value for a queue
732  * @q:  Base address for queue structure
733  *
734  * Return: Value read.
735  */
736 static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
737 {
738 	return _nfp_qcp_read(q, NFP_QCP_READ_PTR);
739 }
740 
741 /**
742  * nfp_qcp_wr_ptr_read() - Read the current write pointer value for a queue
743  * @q:  Base address for queue structure
744  *
745  * Return: Value read.
746  */
747 static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
748 {
749 	return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
750 }
751 
752 /* Globals */
753 extern const char nfp_net_driver_name[];
754 extern const char nfp_net_driver_version[];
755 
756 /* Prototypes */
757 void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
758 			    void __iomem *ctrl_bar);
759 
760 struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
761 				     int max_tx_rings, int max_rx_rings);
762 void nfp_net_netdev_free(struct nfp_net *nn);
763 int nfp_net_netdev_init(struct net_device *netdev);
764 void nfp_net_netdev_clean(struct net_device *netdev);
765 void nfp_net_set_ethtool_ops(struct net_device *netdev);
766 void nfp_net_info(struct nfp_net *nn);
767 int nfp_net_reconfig(struct nfp_net *nn, u32 update);
768 void nfp_net_rss_write_itbl(struct nfp_net *nn);
769 void nfp_net_rss_write_key(struct nfp_net *nn);
770 void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
771 int nfp_net_irqs_alloc(struct nfp_net *nn);
772 void nfp_net_irqs_disable(struct nfp_net *nn);
773 int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt);
774 
775 #ifdef CONFIG_NFP_NET_DEBUG
776 void nfp_net_debugfs_create(void);
777 void nfp_net_debugfs_destroy(void);
778 void nfp_net_debugfs_adapter_add(struct nfp_net *nn);
779 void nfp_net_debugfs_adapter_del(struct nfp_net *nn);
780 #else
781 static inline void nfp_net_debugfs_create(void)
782 {
783 }
784 
785 static inline void nfp_net_debugfs_destroy(void)
786 {
787 }
788 
789 static inline void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
790 {
791 }
792 
793 static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn)
794 {
795 }
796 #endif /* CONFIG_NFP_NET_DEBUG */
797 
798 void nfp_net_filter_stats_timer(unsigned long data);
799 int
800 nfp_net_bpf_offload(struct nfp_net *nn, u32 handle, __be16 proto,
801 		    struct tc_cls_bpf_offload *cls_bpf);
802 
803 #endif /* _NFP_NET_H_ */
804