xref: /openbmc/linux/include/linux/mlx5/device.h (revision fdd350fe)
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #ifndef MLX5_DEVICE_H
34 #define MLX5_DEVICE_H
35 
36 #include <linux/types.h>
37 #include <rdma/ib_verbs.h>
38 #include <linux/mlx5/mlx5_ifc.h>
39 #include <linux/bitfield.h>
40 
41 #if defined(__LITTLE_ENDIAN)
42 #define MLX5_SET_HOST_ENDIANNESS	0
43 #elif defined(__BIG_ENDIAN)
44 #define MLX5_SET_HOST_ENDIANNESS	0x80
45 #else
46 #error Host endianness not defined
47 #endif
48 
49 /* helper macros */
50 #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
51 #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
52 #define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
53 #define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
54 #define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
55 #define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
56 #define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
57 #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
58 #define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
59 #define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
60 #define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
61 #define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
62 #define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
63 
64 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
65 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
66 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
67 #define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
68 #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
69 #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
70 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
71 #define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))
72 
73 /* insert a value into a struct */
74 #define MLX5_SET(typ, p, fld, v) do { \
75 	u32 _v = v; \
76 	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
77 	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
78 	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
79 		     (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
80 		     << __mlx5_dw_bit_off(typ, fld))); \
81 } while (0)
82 
83 #define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
84 	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
85 	MLX5_SET(typ, p, fld[idx], v); \
86 } while (0)
87 
88 #define MLX5_SET_TO_ONES(typ, p, fld) do { \
89 	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
90 	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
91 	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
92 		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
93 		     << __mlx5_dw_bit_off(typ, fld))); \
94 } while (0)
95 
96 #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
97 __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
98 __mlx5_mask(typ, fld))
99 
100 #define MLX5_GET_PR(typ, p, fld) ({ \
101 	u32 ___t = MLX5_GET(typ, p, fld); \
102 	pr_debug(#fld " = 0x%x\n", ___t); \
103 	___t; \
104 })
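
/*
 * Usage sketch (illustrative only, not part of this header's API surface):
 * the MLX5_SET()/MLX5_GET() accessors above are normally applied to
 * firmware command buffers laid out by mlx5_ifc.h.  The struct name
 * "query_hca_cap_in", its "opcode" field and MLX5_CMD_OP_QUERY_HCA_CAP are
 * taken from mlx5_ifc.h and only serve as an example; adjust to the command
 * actually being built:
 *
 *	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
 *	u16 opcode;
 *
 *	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
 *	opcode = MLX5_GET(query_hca_cap_in, in, opcode);
 *
 * Each access is a read-modify-write of the big-endian dword holding the
 * field, so the buffer contents stay in device byte order throughout.
 */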
105 
106 #define __MLX5_SET64(typ, p, fld, v) do { \
107 	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
108 	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
109 } while (0)
110 
111 #define MLX5_SET64(typ, p, fld, v) do { \
112 	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
113 	__MLX5_SET64(typ, p, fld, v); \
114 } while (0)
115 
116 #define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
117 	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
118 	__MLX5_SET64(typ, p, fld[idx], v); \
119 } while (0)
120 
121 #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
122 
123 #define MLX5_GET64_PR(typ, p, fld) ({ \
124 	u64 ___t = MLX5_GET64(typ, p, fld); \
125 	pr_debug(#fld " = 0x%llx\n", ___t); \
126 	___t; \
127 })
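
/*
 * Sketch of the 64-bit accessors above, assuming the "mkc" (memory key
 * context) layout and its "start_addr" field from mlx5_ifc.h: the field is
 * written and read as one naturally aligned big-endian quadword, and
 * MLX5_SET64() rejects, at build time, fields that are not 64-bit aligned:
 *
 *	MLX5_SET64(mkc, mkc, start_addr, (u64)iova);
 *	iova = MLX5_GET64(mkc, mkc, start_addr);
 */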
128 
129 #define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
130 __mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
131 __mlx5_mask16(typ, fld))
132 
133 #define MLX5_SET16(typ, p, fld, v) do { \
134 	u16 _v = v; \
135 	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16);             \
136 	*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
137 	cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
138 		     (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
139 		     << __mlx5_16_bit_off(typ, fld))); \
140 } while (0)
141 
142 /* Big endian getters */
143 #define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
144 	__mlx5_64_off(typ, fld)))
145 
146 #define MLX5_GET_BE(type_t, typ, p, fld) ({				  \
147 		type_t tmp;						  \
148 		switch (sizeof(tmp)) {					  \
149 		case sizeof(u8):					  \
150 			tmp = (__force type_t)MLX5_GET(typ, p, fld);	  \
151 			break;						  \
152 		case sizeof(u16):					  \
153 			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
154 			break;						  \
155 		case sizeof(u32):					  \
156 			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
157 			break;						  \
158 		case sizeof(u64):					  \
159 			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
160 			break;						  \
161 			}						  \
162 		tmp;							  \
163 		})
164 
165 enum mlx5_inline_modes {
166 	MLX5_INLINE_MODE_NONE,
167 	MLX5_INLINE_MODE_L2,
168 	MLX5_INLINE_MODE_IP,
169 	MLX5_INLINE_MODE_TCP_UDP,
170 };
171 
172 enum {
173 	MLX5_MAX_COMMANDS		= 32,
174 	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
175 	MLX5_PCI_CMD_XPORT		= 7,
176 	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
177 	MLX5_MAX_PSVS			= 4,
178 };
179 
180 enum {
181 	MLX5_EXTENDED_UD_AV		= 0x80000000,
182 };
183 
184 enum {
185 	MLX5_CQ_STATE_ARMED		= 9,
186 	MLX5_CQ_STATE_ALWAYS_ARMED	= 0xb,
187 	MLX5_CQ_STATE_FIRED		= 0xa,
188 };
189 
190 enum {
191 	MLX5_STAT_RATE_OFFSET	= 5,
192 };
193 
194 enum {
195 	MLX5_INLINE_SEG = 0x80000000,
196 };
197 
198 enum {
199 	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
200 };
201 
202 enum {
203 	MLX5_MIN_PKEY_TABLE_SIZE = 128,
204 	MLX5_MAX_LOG_PKEY_TABLE  = 5,
205 };
206 
207 enum {
208 	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
209 };
210 
211 enum {
212 	MLX5_PFAULT_SUBTYPE_WQE = 0,
213 	MLX5_PFAULT_SUBTYPE_RDMA = 1,
214 };
215 
216 enum wqe_page_fault_type {
217 	MLX5_WQE_PF_TYPE_RMP = 0,
218 	MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
219 	MLX5_WQE_PF_TYPE_RESP = 2,
220 	MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
221 };
222 
223 enum {
224 	MLX5_PERM_LOCAL_READ	= 1 << 2,
225 	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
226 	MLX5_PERM_REMOTE_READ	= 1 << 4,
227 	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
228 	MLX5_PERM_ATOMIC	= 1 << 6,
229 	MLX5_PERM_UMR_EN	= 1 << 7,
230 };
231 
232 enum {
233 	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
234 	MLX5_PCIE_CTRL_RELAXED_ORDERING	= 1 << 2,
235 	MLX5_PCIE_CTRL_NO_SNOOP		= 1 << 3,
236 	MLX5_PCIE_CTRL_TLP_PROCE_EN	= 1 << 6,
237 	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
238 };
239 
240 enum {
241 	MLX5_EN_RD	= (u64)1,
242 	MLX5_EN_WR	= (u64)2
243 };
244 
245 enum {
246 	MLX5_ADAPTER_PAGE_SHIFT		= 12,
247 	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
248 };
249 
250 enum {
251 	MLX5_BFREGS_PER_UAR		= 4,
252 	MLX5_MAX_UARS			= 1 << 8,
253 	MLX5_NON_FP_BFREGS_PER_UAR	= 2,
254 	MLX5_FP_BFREGS_PER_UAR		= MLX5_BFREGS_PER_UAR -
255 					  MLX5_NON_FP_BFREGS_PER_UAR,
256 	MLX5_MAX_BFREGS			= MLX5_MAX_UARS *
257 					  MLX5_NON_FP_BFREGS_PER_UAR,
258 	MLX5_UARS_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
259 	MLX5_NON_FP_BFREGS_IN_PAGE	= MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
260 	MLX5_MIN_DYN_BFREGS		= 512,
261 	MLX5_MAX_DYN_BFREGS		= 1024,
262 };
263 
264 enum {
265 	MLX5_MKEY_MASK_LEN		= 1ull << 0,
266 	MLX5_MKEY_MASK_PAGE_SIZE	= 1ull << 1,
267 	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
268 	MLX5_MKEY_MASK_PD		= 1ull << 7,
269 	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
270 	MLX5_MKEY_MASK_EN_SIGERR	= 1ull << 9,
271 	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
272 	MLX5_MKEY_MASK_KEY		= 1ull << 13,
273 	MLX5_MKEY_MASK_QPN		= 1ull << 14,
274 	MLX5_MKEY_MASK_LR		= 1ull << 17,
275 	MLX5_MKEY_MASK_LW		= 1ull << 18,
276 	MLX5_MKEY_MASK_RR		= 1ull << 19,
277 	MLX5_MKEY_MASK_RW		= 1ull << 20,
278 	MLX5_MKEY_MASK_A		= 1ull << 21,
279 	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
280 	MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE	= 1ull << 25,
281 	MLX5_MKEY_MASK_FREE			= 1ull << 29,
282 	MLX5_MKEY_MASK_RELAXED_ORDERING_READ	= 1ull << 47,
283 };
284 
285 enum {
286 	MLX5_UMR_TRANSLATION_OFFSET_EN	= (1 << 4),
287 
288 	MLX5_UMR_CHECK_NOT_FREE		= (1 << 5),
289 	MLX5_UMR_CHECK_FREE		= (2 << 5),
290 
291 	MLX5_UMR_INLINE			= (1 << 7),
292 };
293 
294 #define MLX5_UMR_FLEX_ALIGNMENT 0x40
295 #define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
296 #define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))
297 
298 #define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
299 
300 enum {
301 	MLX5_EVENT_QUEUE_TYPE_QP = 0,
302 	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
303 	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
304 	MLX5_EVENT_QUEUE_TYPE_DCT = 6,
305 };
306 
307 /* mlx5 components can subscribe to any one of these events via the
308  * mlx5_eq_notifier_register() API.
309  */
310 enum mlx5_event {
311 	/* Special value to subscribe to any event */
312 	MLX5_EVENT_TYPE_NOTIFY_ANY	   = 0x0,
313 	/* HW events enum start: comp events are not subscribable */
314 	MLX5_EVENT_TYPE_COMP		   = 0x0,
315 	/* HW Async events enum start: subscribable events */
316 	MLX5_EVENT_TYPE_PATH_MIG	   = 0x01,
317 	MLX5_EVENT_TYPE_COMM_EST	   = 0x02,
318 	MLX5_EVENT_TYPE_SQ_DRAINED	   = 0x03,
319 	MLX5_EVENT_TYPE_SRQ_LAST_WQE	   = 0x13,
320 	MLX5_EVENT_TYPE_SRQ_RQ_LIMIT	   = 0x14,
321 
322 	MLX5_EVENT_TYPE_CQ_ERROR	   = 0x04,
323 	MLX5_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
324 	MLX5_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
325 	MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
326 	MLX5_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
327 	MLX5_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
328 	MLX5_EVENT_TYPE_OBJECT_CHANGE	   = 0x27,
329 
330 	MLX5_EVENT_TYPE_INTERNAL_ERROR	   = 0x08,
331 	MLX5_EVENT_TYPE_PORT_CHANGE	   = 0x09,
332 	MLX5_EVENT_TYPE_GPIO_EVENT	   = 0x15,
333 	MLX5_EVENT_TYPE_PORT_MODULE_EVENT  = 0x16,
334 	MLX5_EVENT_TYPE_TEMP_WARN_EVENT    = 0x17,
335 	MLX5_EVENT_TYPE_XRQ_ERROR	   = 0x18,
336 	MLX5_EVENT_TYPE_REMOTE_CONFIG	   = 0x19,
337 	MLX5_EVENT_TYPE_GENERAL_EVENT	   = 0x22,
338 	MLX5_EVENT_TYPE_MONITOR_COUNTER    = 0x24,
339 	MLX5_EVENT_TYPE_PPS_EVENT          = 0x25,
340 
341 	MLX5_EVENT_TYPE_DB_BF_CONGESTION   = 0x1a,
342 	MLX5_EVENT_TYPE_STALL_EVENT	   = 0x1b,
343 
344 	MLX5_EVENT_TYPE_CMD		   = 0x0a,
345 	MLX5_EVENT_TYPE_PAGE_REQUEST	   = 0xb,
346 
347 	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
348 	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,
349 
350 	MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
351 	MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf,
352 
353 	MLX5_EVENT_TYPE_DCT_DRAINED        = 0x1c,
354 	MLX5_EVENT_TYPE_DCT_KEY_VIOLATION  = 0x1d,
355 
356 	MLX5_EVENT_TYPE_FPGA_ERROR         = 0x20,
357 	MLX5_EVENT_TYPE_FPGA_QP_ERROR      = 0x21,
358 
359 	MLX5_EVENT_TYPE_DEVICE_TRACER      = 0x26,
360 
361 	MLX5_EVENT_TYPE_MAX                = 0x100,
362 };
363 
364 enum mlx5_driver_event {
365 	MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
366 	MLX5_DRIVER_EVENT_UPLINK_NETDEV,
367 	MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
368 	MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
369 	MLX5_DRIVER_EVENT_AFFILIATION_DONE,
370 	MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
371 };
372 
373 enum {
374 	MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
375 	MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
376 	MLX5_TRACER_SUBTYPE_STRINGS_DB_UPDATE = 0x2,
377 };
378 
379 enum {
380 	MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
381 	MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
382 	MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7,
383 	MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,
384 };
385 
386 enum {
387 	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
388 	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
389 	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
390 	MLX5_PORT_CHANGE_SUBTYPE_LID		= 6,
391 	MLX5_PORT_CHANGE_SUBTYPE_PKEY		= 7,
392 	MLX5_PORT_CHANGE_SUBTYPE_GUID		= 8,
393 	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG	= 9,
394 };
395 
396 enum {
397 	MLX5_ROCE_VERSION_1		= 0,
398 	MLX5_ROCE_VERSION_2		= 2,
399 };
400 
401 enum {
402 	MLX5_ROCE_VERSION_1_CAP		= 1 << MLX5_ROCE_VERSION_1,
403 	MLX5_ROCE_VERSION_2_CAP		= 1 << MLX5_ROCE_VERSION_2,
404 };
405 
406 enum {
407 	MLX5_ROCE_L3_TYPE_IPV4		= 0,
408 	MLX5_ROCE_L3_TYPE_IPV6		= 1,
409 };
410 
411 enum {
412 	MLX5_ROCE_L3_TYPE_IPV4_CAP	= 1 << 1,
413 	MLX5_ROCE_L3_TYPE_IPV6_CAP	= 1 << 2,
414 };
415 
416 enum {
417 	MLX5_OPCODE_NOP			= 0x00,
418 	MLX5_OPCODE_SEND_INVAL		= 0x01,
419 	MLX5_OPCODE_RDMA_WRITE		= 0x08,
420 	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
421 	MLX5_OPCODE_SEND		= 0x0a,
422 	MLX5_OPCODE_SEND_IMM		= 0x0b,
423 	MLX5_OPCODE_LSO			= 0x0e,
424 	MLX5_OPCODE_RDMA_READ		= 0x10,
425 	MLX5_OPCODE_ATOMIC_CS		= 0x11,
426 	MLX5_OPCODE_ATOMIC_FA		= 0x12,
427 	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
428 	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
429 	MLX5_OPCODE_BIND_MW		= 0x18,
430 	MLX5_OPCODE_CONFIG_CMD		= 0x1f,
431 	MLX5_OPCODE_ENHANCED_MPSW	= 0x29,
432 
433 	MLX5_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
434 	MLX5_RECV_OPCODE_SEND		= 0x01,
435 	MLX5_RECV_OPCODE_SEND_IMM	= 0x02,
436 	MLX5_RECV_OPCODE_SEND_INVAL	= 0x03,
437 
438 	MLX5_CQE_OPCODE_ERROR		= 0x1e,
439 	MLX5_CQE_OPCODE_RESIZE		= 0x16,
440 
441 	MLX5_OPCODE_SET_PSV		= 0x20,
442 	MLX5_OPCODE_GET_PSV		= 0x21,
443 	MLX5_OPCODE_CHECK_PSV		= 0x22,
444 	MLX5_OPCODE_DUMP		= 0x23,
445 	MLX5_OPCODE_RGET_PSV		= 0x26,
446 	MLX5_OPCODE_RCHECK_PSV		= 0x27,
447 
448 	MLX5_OPCODE_UMR			= 0x25,
449 
450 	MLX5_OPCODE_FLOW_TBL_ACCESS	= 0x2c,
451 
452 	MLX5_OPCODE_ACCESS_ASO		= 0x2d,
453 };
454 
455 enum {
456 	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
457 	MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2,
458 };
459 
460 enum {
461 	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
462 	MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
463 };
464 
465 struct mlx5_wqe_tls_static_params_seg {
466 	u8     ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
467 };
468 
469 struct mlx5_wqe_tls_progress_params_seg {
470 	__be32 tis_tir_num;
471 	u8     ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
472 };
473 
474 enum {
475 	MLX5_SET_PORT_RESET_QKEY	= 0,
476 	MLX5_SET_PORT_GUID0		= 16,
477 	MLX5_SET_PORT_NODE_GUID		= 17,
478 	MLX5_SET_PORT_SYS_GUID		= 18,
479 	MLX5_SET_PORT_GID_TABLE		= 19,
480 	MLX5_SET_PORT_PKEY_TABLE	= 20,
481 };
482 
483 enum {
484 	MLX5_BW_NO_LIMIT   = 0,
485 	MLX5_100_MBPS_UNIT = 3,
486 	MLX5_GBPS_UNIT	   = 4,
487 };
488 
489 enum {
490 	MLX5_MAX_PAGE_SHIFT		= 31
491 };
492 
493 enum {
494 	/*
495 	 * Max wqe size for rdma read is 512 bytes, so this
496 	 * limits our max_sge_rd as the wqe needs to fit:
497 	 * - ctrl segment (16 bytes)
498 	 * - rdma segment (16 bytes)
499 	 * - scatter elements (16 bytes each)
500 	 */
501 	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
502 };
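
/*
 * With the 512 byte WQE limit described above this works out to
 * (512 - 16 - 16) / 16 = 30 scatter entries per RDMA READ work request.
 */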
503 
504 enum mlx5_odp_transport_cap_bits {
505 	MLX5_ODP_SUPPORT_SEND	 = 1 << 31,
506 	MLX5_ODP_SUPPORT_RECV	 = 1 << 30,
507 	MLX5_ODP_SUPPORT_WRITE	 = 1 << 29,
508 	MLX5_ODP_SUPPORT_READ	 = 1 << 28,
509 };
510 
511 struct mlx5_odp_caps {
512 	char reserved[0x10];
513 	struct {
514 		__be32			rc_odp_caps;
515 		__be32			uc_odp_caps;
516 		__be32			ud_odp_caps;
517 	} per_transport_caps;
518 	char reserved2[0xe4];
519 };
520 
521 struct mlx5_cmd_layout {
522 	u8		type;
523 	u8		rsvd0[3];
524 	__be32		inlen;
525 	__be64		in_ptr;
526 	__be32		in[4];
527 	__be32		out[4];
528 	__be64		out_ptr;
529 	__be32		outlen;
530 	u8		token;
531 	u8		sig;
532 	u8		rsvd1;
533 	u8		status_own;
534 };
535 
536 enum mlx5_rfr_severity_bit_offsets {
537 	MLX5_RFR_BIT_OFFSET = 0x7,
538 };
539 
540 struct health_buffer {
541 	__be32		assert_var[6];
542 	__be32		rsvd0[2];
543 	__be32		assert_exit_ptr;
544 	__be32		assert_callra;
545 	__be32		rsvd1[1];
546 	__be32		time;
547 	__be32		fw_ver;
548 	__be32		hw_id;
549 	u8		rfr_severity;
550 	u8		rsvd2[3];
551 	u8		irisc_index;
552 	u8		synd;
553 	__be16		ext_synd;
554 };
555 
556 enum mlx5_initializing_bit_offsets {
557 	MLX5_FW_RESET_SUPPORTED_OFFSET = 30,
558 };
559 
560 enum mlx5_cmd_addr_l_sz_offset {
561 	MLX5_NIC_IFC_OFFSET = 8,
562 };
563 
564 struct mlx5_init_seg {
565 	__be32			fw_rev;
566 	__be32			cmdif_rev_fw_sub;
567 	__be32			rsvd0[2];
568 	__be32			cmdq_addr_h;
569 	__be32			cmdq_addr_l_sz;
570 	__be32			cmd_dbell;
571 	__be32			rsvd1[120];
572 	__be32			initializing;
573 	struct health_buffer	health;
574 	__be32			rsvd2[878];
575 	__be32			cmd_exec_to;
576 	__be32			cmd_q_init_to;
577 	__be32			internal_timer_h;
578 	__be32			internal_timer_l;
579 	__be32			rsvd3[2];
580 	__be32			health_counter;
581 	__be32			rsvd4[11];
582 	__be32			real_time_h;
583 	__be32			real_time_l;
584 	__be32			rsvd5[1006];
585 	__be64			ieee1588_clk;
586 	__be32			ieee1588_clk_type;
587 	__be32			clr_intx;
588 };
589 
590 struct mlx5_eqe_comp {
591 	__be32	reserved[6];
592 	__be32	cqn;
593 };
594 
595 struct mlx5_eqe_qp_srq {
596 	__be32	reserved1[5];
597 	u8	type;
598 	u8	reserved2[3];
599 	__be32	qp_srq_n;
600 };
601 
602 struct mlx5_eqe_cq_err {
603 	__be32	cqn;
604 	u8	reserved1[7];
605 	u8	syndrome;
606 };
607 
608 struct mlx5_eqe_xrq_err {
609 	__be32	reserved1[5];
610 	__be32	type_xrqn;
611 	__be32	reserved2;
612 };
613 
614 struct mlx5_eqe_port_state {
615 	u8	reserved0[8];
616 	u8	port;
617 };
618 
619 struct mlx5_eqe_gpio {
620 	__be32	reserved0[2];
621 	__be64	gpio_event;
622 };
623 
624 struct mlx5_eqe_congestion {
625 	u8	type;
626 	u8	rsvd0;
627 	u8	congestion_level;
628 };
629 
630 struct mlx5_eqe_stall_vl {
631 	u8	rsvd0[3];
632 	u8	port_vl;
633 };
634 
635 struct mlx5_eqe_cmd {
636 	__be32	vector;
637 	__be32	rsvd[6];
638 };
639 
640 struct mlx5_eqe_page_req {
641 	__be16		ec_function;
642 	__be16		func_id;
643 	__be32		num_pages;
644 	__be32		rsvd1[5];
645 };
646 
647 struct mlx5_eqe_page_fault {
648 	__be32 bytes_committed;
649 	union {
650 		struct {
651 			u16     reserved1;
652 			__be16  wqe_index;
653 			u16	reserved2;
654 			__be16  packet_length;
655 			__be32  token;
656 			u8	reserved4[8];
657 			__be32  pftype_wq;
658 		} __packed wqe;
659 		struct {
660 			__be32  r_key;
661 			u16	reserved1;
662 			__be16  packet_length;
663 			__be32  rdma_op_len;
664 			__be64  rdma_va;
665 			__be32  pftype_token;
666 		} __packed rdma;
667 	} __packed;
668 } __packed;
669 
670 struct mlx5_eqe_vport_change {
671 	u8		rsvd0[2];
672 	__be16		vport_num;
673 	__be32		rsvd1[6];
674 } __packed;
675 
676 struct mlx5_eqe_port_module {
677 	u8        reserved_at_0[1];
678 	u8        module;
679 	u8        reserved_at_2[1];
680 	u8        module_status;
681 	u8        reserved_at_4[2];
682 	u8        error_type;
683 } __packed;
684 
685 struct mlx5_eqe_pps {
686 	u8		rsvd0[3];
687 	u8		pin;
688 	u8		rsvd1[4];
689 	union {
690 		struct {
691 			__be32		time_sec;
692 			__be32		time_nsec;
693 		};
694 		struct {
695 			__be64		time_stamp;
696 		};
697 	};
698 	u8		rsvd2[12];
699 } __packed;
700 
701 struct mlx5_eqe_dct {
702 	__be32  reserved[6];
703 	__be32  dctn;
704 };
705 
706 struct mlx5_eqe_temp_warning {
707 	__be64 sensor_warning_msb;
708 	__be64 sensor_warning_lsb;
709 } __packed;
710 
711 struct mlx5_eqe_obj_change {
712 	u8      rsvd0[2];
713 	__be16  obj_type;
714 	__be32  obj_id;
715 } __packed;
716 
717 #define SYNC_RST_STATE_MASK    0xf
718 
719 enum sync_rst_state_type {
720 	MLX5_SYNC_RST_STATE_RESET_REQUEST	= 0x0,
721 	MLX5_SYNC_RST_STATE_RESET_NOW		= 0x1,
722 	MLX5_SYNC_RST_STATE_RESET_ABORT		= 0x2,
723 	MLX5_SYNC_RST_STATE_RESET_UNLOAD	= 0x3,
724 };
725 
726 struct mlx5_eqe_sync_fw_update {
727 	u8 reserved_at_0[3];
728 	u8 sync_rst_state;
729 };
730 
731 struct mlx5_eqe_vhca_state {
732 	__be16 ec_function;
733 	__be16 function_id;
734 } __packed;
735 
736 union ev_data {
737 	__be32				raw[7];
738 	struct mlx5_eqe_cmd		cmd;
739 	struct mlx5_eqe_comp		comp;
740 	struct mlx5_eqe_qp_srq		qp_srq;
741 	struct mlx5_eqe_cq_err		cq_err;
742 	struct mlx5_eqe_port_state	port;
743 	struct mlx5_eqe_gpio		gpio;
744 	struct mlx5_eqe_congestion	cong;
745 	struct mlx5_eqe_stall_vl	stall_vl;
746 	struct mlx5_eqe_page_req	req_pages;
747 	struct mlx5_eqe_page_fault	page_fault;
748 	struct mlx5_eqe_vport_change	vport_change;
749 	struct mlx5_eqe_port_module	port_module;
750 	struct mlx5_eqe_pps		pps;
751 	struct mlx5_eqe_dct             dct;
752 	struct mlx5_eqe_temp_warning	temp_warning;
753 	struct mlx5_eqe_xrq_err		xrq_err;
754 	struct mlx5_eqe_sync_fw_update	sync_fw_update;
755 	struct mlx5_eqe_vhca_state	vhca_state;
756 	struct mlx5_eqe_obj_change	obj_change;
757 } __packed;
758 
759 struct mlx5_eqe {
760 	u8		rsvd0;
761 	u8		type;
762 	u8		rsvd1;
763 	u8		sub_type;
764 	__be32		rsvd2[7];
765 	union ev_data	data;
766 	__be16		rsvd3;
767 	u8		signature;
768 	u8		owner;
769 } __packed;
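
/*
 * Illustrative sketch (driver-side code, not part of this header): an EQE
 * consumer typically switches on mlx5_eqe::type and then reads the matching
 * member of the "data" union, e.g.:
 *
 *	struct mlx5_eqe *eqe = ...;	// owned entry taken from the EQ
 *
 *	switch (eqe->type) {
 *	case MLX5_EVENT_TYPE_PORT_CHANGE:
 *		port = eqe->data.port.port;
 *		break;
 *	case MLX5_EVENT_TYPE_CMD:
 *		vector = be32_to_cpu(eqe->data.cmd.vector);
 *		break;
 *	}
 *
 * How ownership is checked and completions are credited back is left to the
 * EQ implementation.
 */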
770 
771 struct mlx5_cmd_prot_block {
772 	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
773 	u8		rsvd0[48];
774 	__be64		next;
775 	__be32		block_num;
776 	u8		rsvd1;
777 	u8		token;
778 	u8		ctrl_sig;
779 	u8		sig;
780 };
781 
782 enum {
783 	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
784 };
785 
786 struct mlx5_err_cqe {
787 	u8	rsvd0[32];
788 	__be32	srqn;
789 	u8	rsvd1[18];
790 	u8	vendor_err_synd;
791 	u8	syndrome;
792 	__be32	s_wqe_opcode_qpn;
793 	__be16	wqe_counter;
794 	u8	signature;
795 	u8	op_own;
796 };
797 
798 struct mlx5_cqe64 {
799 	u8		tls_outer_l3_tunneled;
800 	u8		rsvd0;
801 	__be16		wqe_id;
802 	union {
803 		struct {
804 			u8	tcppsh_abort_dupack;
805 			u8	min_ttl;
806 			__be16	tcp_win;
807 			__be32	ack_seq_num;
808 		} lro;
809 		struct {
810 			u8	reserved0:1;
811 			u8	match:1;
812 			u8	flush:1;
813 			u8	reserved3:5;
814 			u8	header_size;
815 			__be16	header_entry_index;
816 			__be32	data_offset;
817 		} shampo;
818 	};
819 	__be32		rss_hash_result;
820 	u8		rss_hash_type;
821 	u8		ml_path;
822 	u8		rsvd20[2];
823 	__be16		check_sum;
824 	__be16		slid;
825 	__be32		flags_rqpn;
826 	u8		hds_ip_ext;
827 	u8		l4_l3_hdr_type;
828 	__be16		vlan_info;
829 	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
830 	union {
831 		__be32 immediate;
832 		__be32 inval_rkey;
833 		__be32 pkey;
834 		__be32 ft_metadata;
835 	};
836 	u8		rsvd40[4];
837 	__be32		byte_cnt;
838 	__be32		timestamp_h;
839 	__be32		timestamp_l;
840 	__be32		sop_drop_qpn;
841 	__be16		wqe_counter;
842 	union {
843 		u8	signature;
844 		u8	validity_iteration_count;
845 	};
846 	u8		op_own;
847 };
848 
849 struct mlx5_mini_cqe8 {
850 	union {
851 		__be32 rx_hash_result;
852 		struct {
853 			__be16 checksum;
854 			__be16 stridx;
855 		};
856 		struct {
857 			__be16 wqe_counter;
858 			u8  s_wqe_opcode;
859 			u8  reserved;
860 		} s_wqe_info;
861 	};
862 	__be32 byte_cnt;
863 };
864 
865 enum {
866 	MLX5_NO_INLINE_DATA,
867 	MLX5_INLINE_DATA32_SEG,
868 	MLX5_INLINE_DATA64_SEG,
869 	MLX5_COMPRESSED,
870 };
871 
872 enum {
873 	MLX5_CQE_FORMAT_CSUM = 0x1,
874 	MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
875 };
876 
877 enum {
878 	MLX5_CQE_COMPRESS_LAYOUT_BASIC = 0,
879 	MLX5_CQE_COMPRESS_LAYOUT_ENHANCED = 1,
880 };
881 
882 #define MLX5_MINI_CQE_ARRAY_SIZE 8
883 
884 static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
885 {
886 	return (cqe->op_own >> 2) & 0x3;
887 }
888 
889 static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
890 {
891 	return cqe->op_own >> 4;
892 }
893 
894 static inline u8 get_cqe_enhanced_num_mini_cqes(struct mlx5_cqe64 *cqe)
895 {
896 	/* num_of_mini_cqes is zero based */
897 	return get_cqe_opcode(cqe) + 1;
898 }
899 
900 static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
901 {
902 	return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
903 }
904 
905 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
906 {
907 	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
908 }
909 
910 static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
911 {
912 	return cqe->tls_outer_l3_tunneled & 0x1;
913 }
914 
915 static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
916 {
917 	return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
918 }
919 
920 static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
921 {
922 	return cqe->l4_l3_hdr_type & 0x1;
923 }
924 
925 static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
926 {
927 	u32 hi, lo;
928 
929 	hi = be32_to_cpu(cqe->timestamp_h);
930 	lo = be32_to_cpu(cqe->timestamp_l);
931 
932 	return (u64)lo | ((u64)hi << 32);
933 }
934 
935 static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
936 {
937 	return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
938 }
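
/*
 * Putting the CQE helpers above together (illustrative only; the RX path,
 * ownership checks and error handling are driver code, not part of this
 * header):
 *
 *	struct mlx5_cqe64 *cqe = ...;	// validated, owned CQE
 *	u8  opcode = get_cqe_opcode(cqe);
 *	u64 hw_ts  = get_cqe_ts(cqe);
 *	u16 ftag   = get_cqe_flow_tag(cqe);
 *	u16 vlan_tci;
 *
 *	if (cqe_has_vlan(cqe))
 *		vlan_tci = be16_to_cpu(cqe->vlan_info);
 */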
939 
940 #define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE	3
941 #define MLX5_MPWQE_LOG_NUM_STRIDES_BASE		9
942 #define MLX5_MPWQE_LOG_NUM_STRIDES_MAX		16
943 #define MLX5_MPWQE_LOG_STRIDE_SZ_BASE		6
944 #define MLX5_MPWQE_LOG_STRIDE_SZ_MAX		13
945 
946 struct mpwrq_cqe_bc {
947 	__be16	filler_consumed_strides;
948 	__be16	byte_cnt;
949 };
950 
951 static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
952 {
953 	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
954 
955 	return be16_to_cpu(bc->byte_cnt);
956 }
957 
958 static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
959 {
960 	return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
961 }
962 
963 static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
964 {
965 	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
966 
967 	return mpwrq_get_cqe_bc_consumed_strides(bc);
968 }
969 
970 static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
971 {
972 	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
973 
974 	return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
975 }
976 
977 static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
978 {
979 	return be16_to_cpu(cqe->wqe_counter);
980 }
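
/*
 * Sketch of how the mpwrq_* helpers combine when completing a striding-RQ
 * (multi-packet WQE) receive; what the driver does with the numbers is its
 * own business:
 *
 *	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
 *
 *	if (mpwrq_is_filler_cqe(cqe)) {
 *		// no packet data, only strides to release
 *	} else {
 *		u16 byte_cnt = mpwrq_get_cqe_byte_cnt(cqe);
 *		u16 stridx   = mpwrq_get_cqe_stride_index(cqe);
 *		// packet starts at stride "stridx" and spans byte_cnt bytes
 *	}
 */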
981 
982 enum {
983 	CQE_L4_HDR_TYPE_NONE			= 0x0,
984 	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
985 	CQE_L4_HDR_TYPE_UDP			= 0x2,
986 	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
987 	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
988 };
989 
990 enum {
991 	CQE_RSS_HTYPE_IP	= GENMASK(3, 2),
992 	/* cqe->rss_hash_type[3:2] - IP destination selected for hash
993 	 * (00 = none,  01 = IPv4, 10 = IPv6, 11 = Reserved)
994 	 */
995 	CQE_RSS_IP_NONE		= 0x0,
996 	CQE_RSS_IPV4		= 0x1,
997 	CQE_RSS_IPV6		= 0x2,
998 	CQE_RSS_RESERVED	= 0x3,
999 
1000 	CQE_RSS_HTYPE_L4	= GENMASK(7, 6),
1001 	/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
1002 	 * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
1003 	 */
1004 	CQE_RSS_L4_NONE		= 0x0,
1005 	CQE_RSS_L4_TCP		= 0x1,
1006 	CQE_RSS_L4_UDP		= 0x2,
1007 	CQE_RSS_L4_IPSEC	= 0x3,
1008 };
1009 
1010 enum {
1011 	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
1012 	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
1013 	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
1014 };
1015 
1016 enum {
1017 	CQE_L2_OK	= 1 << 0,
1018 	CQE_L3_OK	= 1 << 1,
1019 	CQE_L4_OK	= 1 << 2,
1020 };
1021 
1022 enum {
1023 	CQE_TLS_OFFLOAD_NOT_DECRYPTED		= 0x0,
1024 	CQE_TLS_OFFLOAD_DECRYPTED		= 0x1,
1025 	CQE_TLS_OFFLOAD_RESYNC			= 0x2,
1026 	CQE_TLS_OFFLOAD_ERROR			= 0x3,
1027 };
1028 
1029 struct mlx5_sig_err_cqe {
1030 	u8		rsvd0[16];
1031 	__be32		expected_trans_sig;
1032 	__be32		actual_trans_sig;
1033 	__be32		expected_reftag;
1034 	__be32		actual_reftag;
1035 	__be16		syndrome;
1036 	u8		rsvd22[2];
1037 	__be32		mkey;
1038 	__be64		err_offset;
1039 	u8		rsvd30[8];
1040 	__be32		qpn;
1041 	u8		rsvd38[2];
1042 	u8		signature;
1043 	u8		op_own;
1044 };
1045 
1046 struct mlx5_wqe_srq_next_seg {
1047 	u8			rsvd0[2];
1048 	__be16			next_wqe_index;
1049 	u8			signature;
1050 	u8			rsvd1[11];
1051 };
1052 
1053 union mlx5_ext_cqe {
1054 	struct ib_grh	grh;
1055 	u8		inl[64];
1056 };
1057 
1058 struct mlx5_cqe128 {
1059 	union mlx5_ext_cqe	inl_grh;
1060 	struct mlx5_cqe64	cqe64;
1061 };
1062 
1063 enum {
1064 	MLX5_MKEY_STATUS_FREE = 1 << 6,
1065 };
1066 
1067 enum {
1068 	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
1069 	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
1070 	MLX5_MKEY_BSF_EN	= 1 << 30,
1071 };
1072 
1073 struct mlx5_mkey_seg {
1074 	/* This is a two-bit field occupying bits 31-30.
1075 	 * bit 31 is always 0,
1076 	 * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that do not have translation
1077 	 */
1078 	u8		status;
1079 	u8		pcie_control;
1080 	u8		flags;
1081 	u8		version;
1082 	__be32		qpn_mkey7_0;
1083 	u8		rsvd1[4];
1084 	__be32		flags_pd;
1085 	__be64		start_addr;
1086 	__be64		len;
1087 	__be32		bsfs_octo_size;
1088 	u8		rsvd2[16];
1089 	__be32		xlt_oct_size;
1090 	u8		rsvd3[3];
1091 	u8		log2_page_size;
1092 	u8		rsvd4[4];
1093 };
1094 
1095 #define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
1096 
1097 enum {
1098 	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 <<  0
1099 };
1100 
1101 enum {
1102 	VPORT_STATE_DOWN		= 0x0,
1103 	VPORT_STATE_UP			= 0x1,
1104 };
1105 
1106 enum {
1107 	MLX5_VPORT_ADMIN_STATE_DOWN  = 0x0,
1108 	MLX5_VPORT_ADMIN_STATE_UP    = 0x1,
1109 	MLX5_VPORT_ADMIN_STATE_AUTO  = 0x2,
1110 };
1111 
1112 enum {
1113 	MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN  = 0x1,
1114 	MLX5_VPORT_CVLAN_INSERT_ALWAYS         = 0x3,
1115 };
1116 
1117 enum {
1118 	MLX5_L3_PROT_TYPE_IPV4		= 0,
1119 	MLX5_L3_PROT_TYPE_IPV6		= 1,
1120 };
1121 
1122 enum {
1123 	MLX5_L4_PROT_TYPE_TCP		= 0,
1124 	MLX5_L4_PROT_TYPE_UDP		= 1,
1125 };
1126 
1127 enum {
1128 	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
1129 	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
1130 	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
1131 	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
1132 	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
1133 };
1134 
1135 enum {
1136 	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
1137 	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
1138 	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
1139 	MLX5_MATCH_MISC_PARAMETERS_2	= 1 << 3,
1140 	MLX5_MATCH_MISC_PARAMETERS_3	= 1 << 4,
1141 	MLX5_MATCH_MISC_PARAMETERS_4	= 1 << 5,
1142 	MLX5_MATCH_MISC_PARAMETERS_5	= 1 << 6,
1143 };
1144 
1145 enum {
1146 	MLX5_FLOW_TABLE_TYPE_NIC_RCV	= 0,
1147 	MLX5_FLOW_TABLE_TYPE_ESWITCH	= 4,
1148 };
1149 
1150 enum {
1151 	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT	= 0,
1152 	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE	= 1,
1153 	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR		= 2,
1154 };
1155 
1156 enum mlx5_list_type {
1157 	MLX5_NVPRT_LIST_TYPE_UC   = 0x0,
1158 	MLX5_NVPRT_LIST_TYPE_MC   = 0x1,
1159 	MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
1160 };
1161 
1162 enum {
1163 	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
1164 	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM    = 0x1,
1165 };
1166 
1167 enum mlx5_wol_mode {
1168 	MLX5_WOL_DISABLE        = 0,
1169 	MLX5_WOL_SECURED_MAGIC  = 1 << 1,
1170 	MLX5_WOL_MAGIC          = 1 << 2,
1171 	MLX5_WOL_ARP            = 1 << 3,
1172 	MLX5_WOL_BROADCAST      = 1 << 4,
1173 	MLX5_WOL_MULTICAST      = 1 << 5,
1174 	MLX5_WOL_UNICAST        = 1 << 6,
1175 	MLX5_WOL_PHY_ACTIVITY   = 1 << 7,
1176 };
1177 
1178 enum mlx5_mpls_supported_fields {
1179 	MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0,
1180 	MLX5_FIELD_SUPPORT_MPLS_EXP   = 1 << 1,
1181 	MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2,
1182 	MLX5_FIELD_SUPPORT_MPLS_TTL   = 1 << 3
1183 };
1184 
1185 enum mlx5_flex_parser_protos {
1186 	MLX5_FLEX_PROTO_GENEVE	      = 1 << 3,
1187 	MLX5_FLEX_PROTO_CW_MPLS_GRE   = 1 << 4,
1188 	MLX5_FLEX_PROTO_CW_MPLS_UDP   = 1 << 5,
1189 	MLX5_FLEX_PROTO_ICMP	      = 1 << 8,
1190 	MLX5_FLEX_PROTO_ICMPV6	      = 1 << 9,
1191 };
1192 
1193 /* MLX5 DEV CAPs */
1194 
1195 /* TODO: EAT.ME */
1196 enum mlx5_cap_mode {
1197 	HCA_CAP_OPMOD_GET_MAX	= 0,
1198 	HCA_CAP_OPMOD_GET_CUR	= 1,
1199 };
1200 
1201 /* Any new cap addition must update mlx5_hca_caps_alloc() to allocate
1202  * capability memory.
1203  */
1204 enum mlx5_cap_type {
1205 	MLX5_CAP_GENERAL = 0,
1206 	MLX5_CAP_ETHERNET_OFFLOADS,
1207 	MLX5_CAP_ODP,
1208 	MLX5_CAP_ATOMIC,
1209 	MLX5_CAP_ROCE,
1210 	MLX5_CAP_IPOIB_OFFLOADS,
1211 	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
1212 	MLX5_CAP_FLOW_TABLE,
1213 	MLX5_CAP_ESWITCH_FLOW_TABLE,
1214 	MLX5_CAP_ESWITCH,
1215 	MLX5_CAP_QOS = 0xc,
1216 	MLX5_CAP_DEBUG,
1217 	MLX5_CAP_RESERVED_14,
1218 	MLX5_CAP_DEV_MEM,
1219 	MLX5_CAP_RESERVED_16,
1220 	MLX5_CAP_TLS,
1221 	MLX5_CAP_VDPA_EMULATION = 0x13,
1222 	MLX5_CAP_DEV_EVENT = 0x14,
1223 	MLX5_CAP_IPSEC,
1224 	MLX5_CAP_CRYPTO = 0x1a,
1225 	MLX5_CAP_MACSEC = 0x1f,
1226 	MLX5_CAP_GENERAL_2 = 0x20,
1227 	MLX5_CAP_PORT_SELECTION = 0x25,
1228 	MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
1229 	/* NUM OF CAP Types */
1230 	MLX5_CAP_NUM
1231 };
1232 
1233 enum mlx5_pcam_reg_groups {
1234 	MLX5_PCAM_REGS_5000_TO_507F                 = 0x0,
1235 };
1236 
1237 enum mlx5_pcam_feature_groups {
1238 	MLX5_PCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
1239 };
1240 
1241 enum mlx5_mcam_reg_groups {
1242 	MLX5_MCAM_REGS_FIRST_128                    = 0x0,
1243 	MLX5_MCAM_REGS_0x9100_0x917F                = 0x2,
1244 	MLX5_MCAM_REGS_NUM                          = 0x3,
1245 };
1246 
1247 enum mlx5_mcam_feature_groups {
1248 	MLX5_MCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
1249 };
1250 
1251 enum mlx5_qcam_reg_groups {
1252 	MLX5_QCAM_REGS_FIRST_128                    = 0x0,
1253 };
1254 
1255 enum mlx5_qcam_feature_groups {
1256 	MLX5_QCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
1257 };
1258 
1259 /* GET Dev Caps macros */
1260 #define MLX5_CAP_GEN(mdev, cap) \
1261 	MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
1262 
1263 #define MLX5_CAP_GEN_64(mdev, cap) \
1264 	MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
1265 
1266 #define MLX5_CAP_GEN_MAX(mdev, cap) \
1267 	MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap)
1268 
1269 #define MLX5_CAP_GEN_2(mdev, cap) \
1270 	MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
1271 
1272 #define MLX5_CAP_GEN_2_64(mdev, cap) \
1273 	MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
1274 
1275 #define MLX5_CAP_GEN_2_MAX(mdev, cap) \
1276 	MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap)
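
/*
 * Usage sketch for the capability getters (the field names below,
 * "cqe_compression" and "log_max_qp", are cmd_hca_cap fields defined in
 * mlx5_ifc.h and only serve as examples): the *_MAX variants read the
 * device's advertised maximum capabilities, the plain variants read the
 * currently enabled ones.
 *
 *	if (MLX5_CAP_GEN(mdev, cqe_compression))
 *		// feature is usable right now
 *	max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
 */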
1277 
1278 #define MLX5_CAP_ETH(mdev, cap) \
1279 	MLX5_GET(per_protocol_networking_offload_caps,\
1280 		 mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)
1281 
1282 #define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
1283 	MLX5_GET(per_protocol_networking_offload_caps,\
1284 		 mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)
1285 
1286 #define MLX5_CAP_ROCE(mdev, cap) \
1287 	MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap)
1288 
1289 #define MLX5_CAP_ROCE_MAX(mdev, cap) \
1290 	MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap)
1291 
1292 #define MLX5_CAP_ATOMIC(mdev, cap) \
1293 	MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap)
1294 
1295 #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
1296 	MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap)
1297 
1298 #define MLX5_CAP_FLOWTABLE(mdev, cap) \
1299 	MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
1300 
1301 #define MLX5_CAP64_FLOWTABLE(mdev, cap) \
1302 	MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
1303 
1304 #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
1305 	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
1306 
1307 #define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
1308 		MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
1309 
1310 #define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
1311 	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
1312 
1313 #define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
1314 	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
1315 
1316 #define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
1317 	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
1318 
1319 #define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
1320 	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
1321 
1322 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
1323 	MLX5_GET(flow_table_eswitch_cap, \
1324 		 mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
1325 
1326 #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
1327 	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
1328 
1329 #define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
1330 	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
1331 
1332 #define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
1333 	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
1334 
1335 #define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
1336 	MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)
1337 
1338 #define MLX5_CAP_ESW(mdev, cap) \
1339 	MLX5_GET(e_switch_cap, \
1340 		 mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
1341 
1342 #define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
1343 	MLX5_GET64(flow_table_eswitch_cap, \
1344 		(mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
1345 
1346 #define MLX5_CAP_PORT_SELECTION(mdev, cap) \
1347 	MLX5_GET(port_selection_cap, \
1348 		 mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
1349 
1350 #define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
1351 	MLX5_GET(port_selection_cap, \
1352 		 mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)
1353 
1354 #define MLX5_CAP_ADV_VIRTUALIZATION(mdev, cap) \
1355 	MLX5_GET(adv_virtualization_cap, \
1356 		 mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)
1357 
1358 #define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
1359 	MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
1360 
1361 #define MLX5_CAP_ODP(mdev, cap)\
1362 	MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
1363 
1364 #define MLX5_CAP_ODP_MAX(mdev, cap)\
1365 	MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
1366 
1367 #define MLX5_CAP_QOS(mdev, cap)\
1368 	MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)
1369 
1370 #define MLX5_CAP_DEBUG(mdev, cap)\
1371 	MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap)
1372 
1373 #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
1374 	MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
1375 
1376 #define MLX5_CAP_PCAM_REG(mdev, reg) \
1377 	MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)
1378 
1379 #define MLX5_CAP_MCAM_REG(mdev, reg) \
1380 	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
1381 		 mng_access_reg_cap_mask.access_regs.reg)
1382 
1383 #define MLX5_CAP_MCAM_REG2(mdev, reg) \
1384 	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
1385 		 mng_access_reg_cap_mask.access_regs2.reg)
1386 
1387 #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
1388 	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
1389 
1390 #define MLX5_CAP_QCAM_REG(mdev, fld) \
1391 	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)
1392 
1393 #define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
1394 	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)
1395 
1396 #define MLX5_CAP_FPGA(mdev, cap) \
1397 	MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
1398 
1399 #define MLX5_CAP64_FPGA(mdev, cap) \
1400 	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
1401 
1402 #define MLX5_CAP_DEV_MEM(mdev, cap)\
1403 	MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
1404 
1405 #define MLX5_CAP64_DEV_MEM(mdev, cap)\
1406 	MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
1407 
1408 #define MLX5_CAP_TLS(mdev, cap) \
1409 	MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap)
1410 
1411 #define MLX5_CAP_DEV_EVENT(mdev, cap)\
1412 	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap)
1413 
1414 #define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
1415 	MLX5_GET(virtio_emulation_cap, \
1416 		(mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
1417 
1418 #define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
1419 	MLX5_GET64(virtio_emulation_cap, \
1420 		(mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
1421 
1422 #define MLX5_CAP_IPSEC(mdev, cap)\
1423 	MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
1424 
1425 #define MLX5_CAP_CRYPTO(mdev, cap)\
1426 	MLX5_GET(crypto_cap, (mdev)->caps.hca[MLX5_CAP_CRYPTO]->cur, cap)
1427 
1428 #define MLX5_CAP_MACSEC(mdev, cap)\
1429 	MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
1430 
1431 enum {
1432 	MLX5_CMD_STAT_OK			= 0x0,
1433 	MLX5_CMD_STAT_INT_ERR			= 0x1,
1434 	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
1435 	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
1436 	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
1437 	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
1438 	MLX5_CMD_STAT_RES_BUSY			= 0x6,
1439 	MLX5_CMD_STAT_LIM_ERR			= 0x8,
1440 	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
1441 	MLX5_CMD_STAT_IX_ERR			= 0xa,
1442 	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
1443 	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
1444 	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
1445 	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
1446 	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
1447 	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
1448 };
1449 
1450 enum {
1451 	MLX5_IEEE_802_3_COUNTERS_GROUP	      = 0x0,
1452 	MLX5_RFC_2863_COUNTERS_GROUP	      = 0x1,
1453 	MLX5_RFC_2819_COUNTERS_GROUP	      = 0x2,
1454 	MLX5_RFC_3635_COUNTERS_GROUP	      = 0x3,
1455 	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
1456 	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
1457 	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
1458 	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
1459 	MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13,
1460 	MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
1461 	MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
1462 };
1463 
1464 enum {
1465 	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
1466 };
1467 
1468 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1469 {
1470 	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
1471 		return 0;
1472 	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
1473 }
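
/*
 * Example: a device-reported pkey table size of 0 maps to the minimum of
 * 128 entries, and the largest legal value, MLX5_MAX_LOG_PKEY_TABLE (5),
 * maps to 128 << 5 = 4096 entries; anything larger returns 0.
 */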
1474 
1475 #define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
1476 #define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
1477 #define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
1478 #define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
1479 #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
1480 #define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
1481 				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
1482 				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
1483 
1484 #endif /* MLX5_DEVICE_H */
1485