/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>

enum {
	MLX5_BOARD_ID_LEN = 64,
};

enum {
	/* one minute for the sake of bringup. Generally, commands must always
	 * complete and we may need to increase this timeout value
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,
};

enum {
	CMD_OWNER_SW		= 0x0,
	CMD_OWNER_HW		= 0x1,
	CMD_STATUS_SUCCESS	= 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_SMI		= 0,
	MLX5_SQP_GSI		= 1,
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SNIFFER	= 3,
	MLX5_SQP_SYNC_UMR	= 4,
};

enum {
	MLX5_MAX_PORTS	= 2,
};

enum {
	MLX5_ATOMIC_MODE_OFFSET = 16,
	MLX5_ATOMIC_MODE_IB_COMP = 1,
	MLX5_ATOMIC_MODE_CX = 2,
	MLX5_ATOMIC_MODE_8B = 3,
	MLX5_ATOMIC_MODE_16B = 4,
	MLX5_ATOMIC_MODE_32B = 5,
	MLX5_ATOMIC_MODE_64B = 6,
	MLX5_ATOMIC_MODE_128B = 7,
	MLX5_ATOMIC_MODE_256B = 8,
};
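
/*
 * Illustrative note (not part of this header): callers combine one of
 * the MLX5_ATOMIC_MODE_* values with MLX5_ATOMIC_MODE_OFFSET when
 * building the atomic-mode field of a QP/DCT context, roughly:
 *
 *	u32 atomic_flags = MLX5_ATOMIC_MODE_8B << MLX5_ATOMIC_MODE_OFFSET;
 */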

enum {
	MLX5_REG_QPTS            = 0x4002,
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_QPDPM           = 0x4013,
	MLX5_REG_QCAM            = 0x4019,
	MLX5_REG_DCBX_PARAM      = 0x4020,
	MLX5_REG_DCBX_APP        = 0x4021,
	MLX5_REG_FPGA_CAP	 = 0x4022,
	MLX5_REG_FPGA_CTRL	 = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC            = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PPTB            = 0x500b,
	MLX5_REG_PBMC            = 0x500c,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PCMR		 = 0x5041,
	MLX5_REG_PMLP		 = 0x5002,
	MLX5_REG_PPLM		 = 0x5023,
	MLX5_REG_PCAM		 = 0x507f,
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA		 = 0x9014,
	MLX5_REG_MLCR		 = 0x902b,
	MLX5_REG_MTRC_CAP	 = 0x9040,
	MLX5_REG_MTRC_CONF	 = 0x9041,
	MLX5_REG_MTRC_STDB	 = 0x9042,
	MLX5_REG_MTRC_CTRL	 = 0x9043,
	MLX5_REG_MPEIN		 = 0x9050,
	MLX5_REG_MPCNT		 = 0x9051,
	MLX5_REG_MTPPS		 = 0x9053,
	MLX5_REG_MTPPSE		 = 0x9054,
	MLX5_REG_MPEGC		 = 0x9056,
	MLX5_REG_MCQI		 = 0x9061,
	MLX5_REG_MCC		 = 0x9062,
	MLX5_REG_MCDA		 = 0x9063,
	MLX5_REG_MCAM		 = 0x907f,
};

enum mlx5_qpts_trust_state {
	MLX5_QPTS_TRUST_PCP  = 1,
	MLX5_QPTS_TRUST_DSCP = 2,
};

enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST  = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO  = 0x3,
};

enum {
	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
	MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
	MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
};

enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};

enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_UP		= 1,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};

struct mlx5_field_desc {
	struct dentry	       *dent;
	int			i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	void		       *object;
	enum dbg_rsc_type	type;
	struct dentry	       *root;
	struct mlx5_field_desc	fields[0];
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
	MLX5_DEV_EVENT_PORT_AFFINITY = 129,
};

enum mlx5_port_status {
	MLX5_PORT_UP        = 1,
	MLX5_PORT_DOWN      = 2,
};

struct mlx5_bfreg_info {
	u32		       *sys_pages;
	int			num_low_latency_bfregs;
	unsigned int	       *count;

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex		lock;
	u32			ver;
	bool			lib_uar_4k;
	u32			num_sys_pages;
	u32			num_static_sys_pages;
	u32			total_num_bfregs;
	u32			num_dyn_bfregs;
};

struct mlx5_cmd_first {
	__be32		data[4];
};

struct mlx5_cmd_msg {
	struct list_head		list;
	struct cmd_msg_cache	       *parent;
	u32				len;
	struct mlx5_cmd_first		first;
	struct mlx5_cmd_mailbox	       *next;
};

struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;
	struct dentry	       *dbg_in;
	struct dentry	       *dbg_out;
	struct dentry	       *dbg_outlen;
	struct dentry	       *dbg_status;
	struct dentry	       *dbg_run;
	void		       *in_msg;
	void		       *out_msg;
	u8			status;
	u16			inlen;
	u16			outlen;
};

struct cmd_msg_cache {
	/* protect block chain allocations
	 */
	spinlock_t		lock;
	struct list_head	head;
	unsigned int		max_inbox_size;
	unsigned int		num_ent;
};

enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};

struct mlx5_cmd_stats {
	u64		sum;
	u64		n;
	struct dentry  *root;
	struct dentry  *avg;
	struct dentry  *count;
	/* protect command average calculations */
	spinlock_t	lock;
};

struct mlx5_cmd {
	struct mlx5_nb    nb;

	void	       *cmd_alloc_buf;
	dma_addr_t	alloc_dma;
	int		alloc_size;
	void	       *cmd_buf;
	dma_addr_t	dma;
	u16		cmdif_rev;
	u8		log_sz;
	u8		log_stride;
	int		max_reg_cmds;
	int		events;
	u32 __iomem    *vector;

	/* protect command queue allocations
	 */
	spinlock_t	alloc_lock;

	/* protect token allocations
	 */
	spinlock_t	token_lock;
	u8		token;
	unsigned long	bitmask;
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	int	mode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};

struct mlx5_port_caps {
	int	gid_table_len;
	int	pkey_table_len;
	u8	ext_port_cap;
	bool	has_smi;
};

struct mlx5_cmd_mailbox {
	void	       *buf;
	dma_addr_t	dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list	*frags;
	int			npages;
	int			size;
	u8			page_shift;
};

struct mlx5_frag_buf_ctrl {
	struct mlx5_buf_list   *frags;
	u32			sz_m1;
	u16			frag_sz_m1;
	u16			strides_offset;
	u8			log_sz;
	u8			log_stride;
	u8			log_frag_strides;
};

struct mlx5_core_psv {
	u32	psv_idx;
	struct psv_layout {
		u32	pd;
		u16	syndrome;
		u16	reserved;
		u16	bg;
		u16	app_tag;
		u32	ref_tag;
	} psv;
};

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err       err_item;
	bool			sig_status_checked;
	bool			sig_err_exists;
	u32			sigerr_count;
};

enum {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
	MLX5_MKEY_INDIRECT_DEVX,
};

struct mlx5_core_mkey {
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
	u32			type;
};

#define MLX5_24BIT_MASK		((1 << 24) - 1)

enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ	= 3,
	MLX5_RES_XSRQ	= 4,
	MLX5_RES_XRQ	= 5,
	MLX5_RES_DCT	= MLX5_EVENT_QUEUE_TYPE_DCT,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	atomic_t		refcount;
	struct completion	free;
};

struct mlx5_uars_page {
	void __iomem	       *map;
	bool			wc;
	u32			index;
	struct list_head	list;
	unsigned int		bfregs;
	unsigned long	       *reg_bitmap; /* for non fast path bf regs */
	unsigned long	       *fp_bitmap;
	unsigned int		reg_avail;
	unsigned int		fp_avail;
	struct kref		ref_count;
	struct mlx5_core_dev   *mdev;
};

struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct mutex		lock;
	struct list_head	list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head	reg_head;
	struct mlx5_bfreg_head	wc_head;
};

struct mlx5_sq_bfreg {
	void __iomem	       *map;
	struct mlx5_uars_page  *up;
	bool			wc;
	u32			index;
	unsigned int		offset;
};

struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	u32				prev;
	int				miss_counter;
	bool				sick;
	/* wq spinlock to synchronize draining */
	spinlock_t			wq_lock;
	struct workqueue_struct	       *wq;
	unsigned long			flags;
	struct work_struct		work;
	struct delayed_work		recover_work;
};

struct mlx5_qp_table {
	struct notifier_block   nb;

	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_mkey_table {
	/* protect radix tree
	 */
	rwlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_vf_context {
	int	enabled;
	u64	port_guid;
	u64	node_guid;
	enum port_state_policy	policy;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context	*vfs_ctx;
	int			num_vfs;
	int			enabled_vfs;
};

struct mlx5_fc_stats {
	spinlock_t counters_idr_lock; /* protects counters_idr */
	struct idr counters_idr;
	struct list_head counters;
	struct llist_head addlist;
	struct llist_head dellist;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
};

struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_devcom;
struct mlx5_eq_table;

struct mlx5_rate_limit {
	u32			rate;
	u32			max_burst_sz;
	u16			typical_pkt_sz;
};

struct mlx5_rl_entry {
	struct mlx5_rate_limit	rl;
	u16                     index;
	u16                     refcount;
};

struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex            rl_lock;
	u16                     max_size;
	u32                     max_rate;
	u32                     min_rate;
	struct mlx5_rl_entry   *rl_entry;
};

struct mlx5_core_roce {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_handle *allow_rule;
};

struct mlx5_priv {
	struct mlx5_eq_table	*eq_table;

	/* pages stuff */
	struct mlx5_nb          pg_nb;
	struct workqueue_struct *pg_wq;
	struct rb_root		page_root;
	int			fw_pages;
	atomic_t		reg_pages;
	struct list_head	free_list;
	int			vfs_pages;
	int			peer_pf_pages;

	struct mlx5_core_health health;

	/* start: qp stuff */
	struct mlx5_qp_table	qp_table;
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp stuff */

	/* start: mkey stuff */
	struct mlx5_mkey_table	mkey_table;
	/* end: mkey stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex            alloc_mutex;
	int                     numa_node;

	struct mutex            pgdir_mutex;
	struct list_head        pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	/* protect mkey key part */
	spinlock_t		mkey_lock;
	u8			mkey_key;

	struct list_head        dev_list;
	struct list_head        ctx_list;
	spinlock_t              ctx_lock;
	struct mlx5_events      *events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs        *mpfs;
	struct mlx5_eswitch     *eswitch;
	struct mlx5_core_sriov	sriov;
	struct mlx5_lag		*lag;
	struct mlx5_devcom	*devcom;
	unsigned long		pci_dev_data;
	struct mlx5_core_roce	roce;
	struct mlx5_fc_stats		fc_stats;
	struct mlx5_rl_table            rl_table;

	struct mlx5_bfreg_data		bfregs;
	struct mlx5_uars_page	       *uar;
};

enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP = BIT(0),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};

struct mlx5_td {
	/* protects tirs list changes while tirs refresh */
	struct mutex     list_lock;
	struct list_head tirs_list;
	u32              tdn;
};

struct mlx5e_resources {
	u32                        pdn;
	struct mlx5_td             td;
	struct mlx5_core_mkey      mkey;
	struct mlx5_sq_bfreg       bfreg;
};

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {
	unsigned int start;
	unsigned int count;
	struct ida ida;
};

#define MAX_PIN_NUM	8
struct mlx5_pps {
	u8                         pin_caps[MAX_PIN_NUM];
	struct work_struct         out_work;
	u64                        start[MAX_PIN_NUM];
	u8                         enabled;
};

struct mlx5_clock {
	struct mlx5_core_dev      *mdev;
	struct mlx5_nb             pps_nb;
	seqlock_t                  lock;
	struct cyclecounter        cycles;
	struct timecounter         tc;
	struct hwtstamp_config     hwtstamp_config;
	u32                        nominal_c_mult;
	unsigned long              overflow_period;
	struct delayed_work        overflow_work;
	struct ptp_clock          *ptp;
	struct ptp_clock_info      ptp_info;
	struct mlx5_pps            pps_info;
};

struct mlx5_fw_tracer;
struct mlx5_vxlan;

struct mlx5_core_dev {
	struct device *device;
	struct pci_dev	       *pdev;
	/* sync pci state */
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
	struct {
		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
		u8  embedded_cpu;
	} caps;
	u64			sys_image_guid;
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	phys_addr_t             bar_addr;
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;
	struct mlx5_priv	priv;
	struct mlx5_profile	*profile;
	atomic_t		num_qps;
	u32			issi;
	struct mlx5e_resources  mlx5e_res;
	struct mlx5_vxlan       *vxlan;
	struct {
		struct mlx5_rsvd_gids	reserved_gids;
		u32			roce_en;
	} roce;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
	struct mlx5_clock        clock;
	struct mlx5_ib_clock_info  *clock_info;
	struct mlx5_fw_tracer   *tracer;
};

struct mlx5_db {
	__be32			*db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};

struct mlx5_cmd_work_ent {
	unsigned long		state;
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	void		       *uout;
	int			uout_size;
	mlx5_cmd_cbk_t		callback;
	struct delayed_work	cb_timeout_work;
	void		       *context;
	int			idx;
	struct completion	done;
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
	int			ret;
	int			page_queue;
	u8			status;
	u8			token;
	u64			ts1;
	u64			ts2;
	u16			op;
	bool			polling;
};

struct mlx5_pas {
	u64	pa;
	u8	log_sz;
};

enum phy_port_state {
	MLX5_AAA_111
};

struct mlx5_hca_vport_context {
	u32			field_select;
	bool			sm_virt_aware;
	bool			has_smi;
	bool			has_raw;
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	u64			sys_image_guid;
	u64			port_guid;
	u64			node_guid;
	u32			cap_mask1;
	u32			cap_mask1_perm;
	u16			cap_mask2;
	u16			cap_mask2_perm;
	u16			lid;
	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u8			lmc;
	u8			subnet_timeout;
	u16			sm_lid;
	u8			sm_sl;
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
	bool			grh_required;
};

static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
{
	return buf->frags->buf + offset;
}

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
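
/*
 * Hedged usage sketch: STRUCT_FIELD() is intended for ib_field tables
 * (struct ib_field and struct ib_unpacked_lrh come from
 * <rdma/ib_pack.h>); the table name and the extra initializers below
 * are illustrative only:
 *
 *	static const struct ib_field lrh_table[] = {
 *		{ STRUCT_FIELD(lrh, virtual_lane),
 *		  .offset_words = 0,
 *		  .offset_bits  = 0,
 *		  .size_bits    = 4 },
 *	};
 */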

static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
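
/*
 * Hedged example: the helpers above are typically combined into a
 * firmware version string ("fw_str" and its size are illustrative):
 *
 *	char fw_str[32];
 *
 *	snprintf(fw_str, sizeof(fw_str), "%d.%d.%04d",
 *		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
 */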

static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}

static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
					u8 log_stride, u8 log_sz,
					u16 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
{
	fbc->frags      = frags;
	fbc->log_stride = log_stride;
	fbc->log_sz     = log_sz;
	fbc->sz_m1	= (1 << fbc->log_sz) - 1;
	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
	fbc->frag_sz_m1	= (1 << fbc->log_frag_strides) - 1;
	fbc->strides_offset = strides_offset;
}

static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
				 u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
{
	mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
}

static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
					  u32 ix)
{
	unsigned int frag;

	ix  += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;

	return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}

static inline u32
mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
{
	u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;

	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}
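
/*
 * Worked example (illustrative): with log_stride = 6 (64-byte strides),
 * log_sz = 10 and 4K pages, each fragment holds 2^(12 - 6) = 64
 * strides, so stride 70 resolves to fragment 70 >> 6 = 1 at byte
 * offset (70 & 63) << 6 = 384 within that fragment ("buf" stands for a
 * previously allocated struct mlx5_frag_buf):
 *
 *	struct mlx5_frag_buf_ctrl fbc;
 *	void *wqe;
 *
 *	mlx5_init_fbc(buf.frags, 6, 10, &fbc);
 *	wqe = mlx5_frag_buf_get_wqe(&fbc, 70);
 */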

int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

struct mlx5_async_ctx {
	struct mlx5_core_dev *dev;
	atomic_t num_inflight;
	struct wait_queue_head wait;
};

struct mlx5_async_work;

typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);

struct mlx5_async_work {
	struct mlx5_async_ctx *ctx;
	mlx5_async_cbk_t user_callback;
};

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx);
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work);
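
/*
 * Hedged sketch of the async flow: embed struct mlx5_async_work in the
 * caller's state, issue the command with mlx5_cmd_exec_cb(), and the
 * callback fires on completion; cleanup waits for in-flight work.
 * "my_cb", "in"/"out" and the error handling are illustrative.
 *
 *	static void my_cb(int status, struct mlx5_async_work *work)
 *	{
 *		// check status, then wake up / free caller state
 *	}
 *
 *	struct mlx5_async_ctx ctx;
 *	struct mlx5_async_work work;
 *
 *	mlx5_cmd_init_async_ctx(dev, &ctx);
 *	err = mlx5_cmd_exec_cb(&ctx, in, sizeof(in), out, sizeof(out),
 *			       my_cb, &work);
 *	...
 *	mlx5_cmd_cleanup_async_ctx(&ctx);
 */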

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
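
/*
 * Hedged usage sketch for the synchronous path, following the common
 * mlx5 convention of sizing buffers with MLX5_ST_SZ_DW() over the
 * mlx5_ifc layouts (ENABLE_HCA is just an illustrative command):
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */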

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_frag_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
		   int size, struct mlx5_frag_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
			     struct mlx5_core_mkey *mkey,
			     struct mlx5_async_ctx *async_ctx, u32 *in,
			     int inlen, u32 *out, int outlen,
			     mlx5_async_cbk_t callback,
			     struct mlx5_async_work *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);

void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
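
/*
 * Hedged example: reading the PMTU register of local port 1 through
 * mlx5_core_access_reg(), with the buffer layout taken from the
 * mlx5_ifc pmtu_reg description:
 *
 *	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {};
 *	u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {};
 *	int err;
 *
 *	MLX5_SET(pmtu_reg, in, local_port, 1);
 *	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
 *				   MLX5_REG_PMTU, 0, 0);
 */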

int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);

int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1);
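
/*
 * Hedged sketch: rate-limit entries are refcounted, so every
 * successful mlx5_rl_add_rate() must eventually be balanced by
 * mlx5_rl_remove_rate() with an equal mlx5_rate_limit (the numeric
 * rate below is illustrative):
 *
 *	struct mlx5_rate_limit rl = { .rate = 1000 };
 *	u16 index;
 *
 *	if (!mlx5_rl_add_rate(dev, &index, &rl)) {
 *		// program "index" into an SQ/QP, then later:
 *		mlx5_rl_remove_rate(dev, &rl);
 *	}
 */
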
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);

static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}
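
/*
 * Hedged sketch: probe paths poll fw_initializing() until firmware
 * clears the bit or a deadline expires (the timeout below is
 * illustrative; the core driver uses its own constants):
 *
 *	unsigned long end = jiffies + msecs_to_jiffies(10000);
 *
 *	while (fw_initializing(dev)) {
 *		if (time_after(jiffies, end))
 *			return -EBUSY;
 *		msleep(20);
 *	}
 */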

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
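
/*
 * Worked example: an mkey is an 8-bit variant ("tag") in the low byte
 * with a 24-bit index above it, so for a hypothetical key 0x00abcd12:
 *
 *	mlx5_mkey_to_idx(0x00abcd12)  == 0x00abcd
 *	mlx5_mkey_variant(0x00abcd12) == 0x12
 *	mlx5_idx_to_mkey(0x00abcd)    == 0x00abcd00
 *	mlx5_base_mkey(0x00abcd12)    == 0x00abcd00
 */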

enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};

enum {
	MR_CACHE_LAST_STD_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MR_CACHE_ENTRIES
};

enum {
	MLX5_INTERFACE_PROTOCOL_IB  = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
};

struct mlx5_interface {
	void *			(*add)(struct mlx5_core_dev *dev);
	void			(*remove)(struct mlx5_core_dev *dev, void *context);
	int			(*attach)(struct mlx5_core_dev *dev, void *context);
	void			(*detach)(struct mlx5_core_dev *dev, void *context);
	int			protocol;
	struct list_head	list;
};

int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
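
/*
 * Hedged sketch: a ULP hooks into the core by registering a struct
 * mlx5_interface; add() is invoked for every existing and future
 * device and its return value is passed back as "context". All names
 * below are illustrative.
 *
 *	static void *my_add(struct mlx5_core_dev *dev)
 *	{
 *		return my_alloc_priv(dev);
 *	}
 *
 *	static void my_remove(struct mlx5_core_dev *dev, void *context)
 *	{
 *		my_free_priv(context);
 *	}
 *
 *	static struct mlx5_interface my_intf = {
 *		.add      = my_add,
 *		.remove   = my_remove,
 *		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
 *	};
 *
 *	err = mlx5_register_interface(&my_intf);
 */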

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);

#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params);

struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

enum {
	MLX5_PCI_DEV_IS_VF		= 1 << 0,
};

static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}

static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu;
}

static inline bool mlx5_core_is_ecpf_esw_manager(struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
}

static inline bool mlx5_ecpf_vport_exists(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
}

#define MLX5_HOST_PF_MAX_VFS	(127u)
static inline u16 mlx5_core_max_vfs(struct mlx5_core_dev *dev)
{
	if (mlx5_core_is_ecpf_esw_manager(dev))
		return MLX5_HOST_PF_MAX_VFS;
	else
		return pci_sriov_get_totalvfs(dev->pdev);
}

static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}

static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}

static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
	       MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
}

static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}

static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_mp_slave(dev) ||
	       mlx5_core_is_mp_master(dev);
}

static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_mp_enabled(dev))
		return 1;

	return MLX5_CAP_GEN(dev, native_port_num);
}

enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

#endif /* MLX5_DRIVER_H */
1159