/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_ABI_USER_H
#define MLX5_ABI_USER_H

#include <linux/types.h>
#include <linux/if_ether.h>	/* For ETH_ALEN. */
#include <rdma/ib_user_ioctl_verbs.h>

enum {
	MLX5_QP_FLAG_SIGNATURE		= 1 << 0,
	MLX5_QP_FLAG_SCATTER_CQE	= 1 << 1,
	MLX5_QP_FLAG_TUNNEL_OFFLOADS	= 1 << 2,
	MLX5_QP_FLAG_BFREG_INDEX	= 1 << 3,
	MLX5_QP_FLAG_TYPE_DCT		= 1 << 4,
	MLX5_QP_FLAG_TYPE_DCI		= 1 << 5,
	MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6,
	MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
	MLX5_QP_FLAG_ALLOW_SCATTER_CQE	= 1 << 8,
	MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE	= 1 << 9,
	MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10,
	MLX5_QP_FLAG_DCI_STREAM	= 1 << 11,
};

enum {
	MLX5_SRQ_FLAG_SIGNATURE		= 1 << 0,
};

enum {
	MLX5_WQ_FLAG_SIGNATURE		= 1 << 0,
};

/* Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define MLX5_IB_UVERBS_ABI_VERSION	1

/* Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */
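
/*
 * Illustrative sketch (not part of the ABI): a userspace caller passes a
 * buffer address through one of the __aligned_u64 fields below by casting
 * through uintptr_t rather than storing a pointer type. 'buf', 'buf_size'
 * and 'db_page' are assumed caller-side names, and mlx5_ib_create_cq is
 * defined further below in this file.
 *
 *	struct mlx5_ib_create_cq cmd = {};
 *	void *buf = aligned_alloc(sysconf(_SC_PAGESIZE), buf_size);
 *
 *	cmd.buf_addr = (__u64)(uintptr_t)buf;
 *	cmd.db_addr  = (__u64)(uintptr_t)db_page;
 */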

struct mlx5_ib_alloc_ucontext_req {
	__u32	total_num_bfregs;
	__u32	num_low_latency_bfregs;
};

enum mlx5_lib_caps {
	MLX5_LIB_CAP_4K_UAR	= (__u64)1 << 0,
	MLX5_LIB_CAP_DYN_UAR	= (__u64)1 << 1,
};

enum mlx5_ib_alloc_uctx_v2_flags {
	MLX5_IB_ALLOC_UCTX_DEVX	= 1 << 0,
};
struct mlx5_ib_alloc_ucontext_req_v2 {
	__u32	total_num_bfregs;
	__u32	num_low_latency_bfregs;
	__u32	flags;
	__u32	comp_mask;
	__u8	max_cqe_version;
	__u8	reserved0;
	__u16	reserved1;
	__u32	reserved2;
	__aligned_u64 lib_caps;
};

enum mlx5_ib_alloc_ucontext_resp_mask {
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY    = 1UL << 1,
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE               = 1UL << 2,
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS           = 1UL << 3,
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS      = 1UL << 4,
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG   = 1UL << 5,
};

enum mlx5_user_cmds_supp_uhw {
	MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
	MLX5_USER_CMDS_SUPP_UHW_CREATE_AH    = 1 << 1,
};

/* The eth_min_inline response value is reported off-by-one vs the
 * FW-returned value so that user-space can tell an older kernel (which
 * leaves the field zeroed) apart from a real answer; a decode sketch
 * follows the enum below.
 */
enum mlx5_user_inline_mode {
	MLX5_USER_INLINE_MODE_NA,
	MLX5_USER_INLINE_MODE_NONE,
	MLX5_USER_INLINE_MODE_L2,
	MLX5_USER_INLINE_MODE_IP,
	MLX5_USER_INLINE_MODE_TCP_UDP,
};
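
/*
 * Decode sketch (illustrative only): 'resp' is a struct
 * mlx5_ib_alloc_ucontext_resp filled in by the kernel (defined further
 * below), and 'fw_min_inline' is an assumed caller-side variable holding
 * the FW-level inline mode.
 *
 *	if (resp.eth_min_inline == MLX5_USER_INLINE_MODE_NA)
 *		fw_min_inline = -1;	(not reported by an older kernel)
 *	else
 *		fw_min_inline = resp.eth_min_inline - 1;
 */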
12578984898SOr Gerlitz 
126c03faa56SMatan Barak enum {
127c03faa56SMatan Barak 	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
128c03faa56SMatan Barak 	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
129c03faa56SMatan Barak 	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
130c03faa56SMatan Barak 	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
131c03faa56SMatan Barak 	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
132c03faa56SMatan Barak };
133c03faa56SMatan Barak 
1343085e29eSLeon Romanovsky struct mlx5_ib_alloc_ucontext_resp {
1353085e29eSLeon Romanovsky 	__u32	qp_tab_size;
1363085e29eSLeon Romanovsky 	__u32	bf_reg_size;
1372f5ff264SEli Cohen 	__u32	tot_bfregs;
1383085e29eSLeon Romanovsky 	__u32	cache_line_size;
1393085e29eSLeon Romanovsky 	__u16	max_sq_desc_sz;
1403085e29eSLeon Romanovsky 	__u16	max_rq_desc_sz;
1413085e29eSLeon Romanovsky 	__u32	max_send_wqebb;
1423085e29eSLeon Romanovsky 	__u32	max_recv_wr;
1433085e29eSLeon Romanovsky 	__u32	max_srq_recv_wr;
1443085e29eSLeon Romanovsky 	__u16	num_ports;
145c03faa56SMatan Barak 	__u16	flow_action_flags;
1463085e29eSLeon Romanovsky 	__u32	comp_mask;
1473085e29eSLeon Romanovsky 	__u32	response_length;
1483085e29eSLeon Romanovsky 	__u8	cqe_version;
1493085e29eSLeon Romanovsky 	__u8	cmds_supp_uhw;
15078984898SOr Gerlitz 	__u8	eth_min_inline;
1515c99eaecSFeras Daoud 	__u8	clock_info_versions;
15226b99066SJason Gunthorpe 	__aligned_u64 hca_core_clock_offset;
15330aa60b3SEli Cohen 	__u32	log_uar_size;
15430aa60b3SEli Cohen 	__u32	num_uars_per_page;
15531a78a5aSYishai Hadas 	__u32	num_dyn_bfregs;
15625bb36e7SYonatan Cohen 	__u32	dump_fill_mkey;
1573085e29eSLeon Romanovsky };
1583085e29eSLeon Romanovsky 
1593085e29eSLeon Romanovsky struct mlx5_ib_alloc_pd_resp {
1603085e29eSLeon Romanovsky 	__u32	pdn;
1613085e29eSLeon Romanovsky };
1623085e29eSLeon Romanovsky 
1633085e29eSLeon Romanovsky struct mlx5_ib_tso_caps {
1643085e29eSLeon Romanovsky 	__u32 max_tso; /* Maximum tso payload size in bytes */
1653085e29eSLeon Romanovsky 
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 * (see the check sketched after this struct)
	 */
	__u32 supported_qpts;
};
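
/*
 * Illustrative capability check (a sketch, not normative): 'resp' is a
 * struct mlx5_ib_query_device_resp, defined further below, returned in
 * the vendor-specific part of the extended query-device response;
 * 'max_tso_payload' is an assumed caller-side variable.
 *
 *	if (resp.tso_caps.supported_qpts & (1 << IB_QPT_RAW_PACKET))
 *		max_tso_payload = resp.tso_caps.max_tso;
 */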

struct mlx5_ib_rss_caps {
	__aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
	__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
	__u8 reserved[7];
};

enum mlx5_ib_cqe_comp_res_format {
	MLX5_IB_CQE_RES_FORMAT_HASH	= 1 << 0,
	MLX5_IB_CQE_RES_FORMAT_CSUM	= 1 << 1,
	MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
};

struct mlx5_ib_cqe_comp_caps {
	__u32 max_num;
	__u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
};

enum mlx5_ib_packet_pacing_cap_flags {
	MLX5_IB_PP_SUPPORT_BURST	= 1 << 0,
};

struct mlx5_packet_pacing_caps {
	__u32 qp_rate_limit_min;
	__u32 qp_rate_limit_max; /* In kbps */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
	__u8  cap_flags; /* enum mlx5_ib_packet_pacing_cap_flags */
	__u8  reserved[3];
};

enum mlx5_ib_mpw_caps {
	MPW_RESERVED		= 1 << 0,
	MLX5_IB_ALLOW_MPW	= 1 << 1,
	MLX5_IB_SUPPORT_EMPW	= 1 << 2,
};

enum mlx5_ib_sw_parsing_offloads {
	MLX5_IB_SW_PARSING = 1 << 0,
	MLX5_IB_SW_PARSING_CSUM = 1 << 1,
	MLX5_IB_SW_PARSING_LSO = 1 << 2,
};

struct mlx5_ib_sw_parsing_caps {
	__u32 sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
};

struct mlx5_ib_striding_rq_caps {
	__u32 min_single_stride_log_num_of_bytes;
	__u32 max_single_stride_log_num_of_bytes;
	__u32 min_single_wqe_log_num_of_strides;
	__u32 max_single_wqe_log_num_of_strides;

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
	__u32 reserved;
};

struct mlx5_ib_dci_streams_caps {
	__u8 max_log_num_concurent;
	__u8 max_log_num_errored;
};

enum mlx5_ib_query_dev_resp_flags {
	/* Support 128B CQE compression */
	MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
	MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD  = 1 << 1,
	MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
	MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
};

enum mlx5_ib_tunnel_offloads {
	MLX5_IB_TUNNELED_OFFLOADS_VXLAN  = 1 << 0,
	MLX5_IB_TUNNELED_OFFLOADS_GRE    = 1 << 1,
	MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2,
	MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3,
	MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4,
};

struct mlx5_ib_query_device_resp {
	__u32	comp_mask;
	__u32	response_length;
	struct	mlx5_ib_tso_caps tso_caps;
	struct	mlx5_ib_rss_caps rss_caps;
	struct	mlx5_ib_cqe_comp_caps cqe_comp_caps;
	struct	mlx5_packet_pacing_caps packet_pacing_caps;
	__u32	mlx5_ib_support_multi_pkt_send_wqes;
	__u32	flags; /* Use enum mlx5_ib_query_dev_resp_flags */
	struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
	struct mlx5_ib_striding_rq_caps striding_rq_caps;
	__u32	tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */
	struct  mlx5_ib_dci_streams_caps dci_streams_caps;
	__u16 reserved;
};

enum mlx5_ib_create_cq_flags {
	MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD	= 1 << 0,
	MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX  = 1 << 1,
	MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS	= 1 << 2,
};

struct mlx5_ib_create_cq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u32	cqe_size;
	__u8    cqe_comp_en;
	__u8    cqe_comp_res_format;
	__u16	flags;
	__u16	uar_page_index;
	__u16	reserved0;
	__u32	reserved1;
};

struct mlx5_ib_create_cq_resp {
	__u32	cqn;
	__u32	reserved;
};

struct mlx5_ib_resize_cq {
	__aligned_u64 buf_addr;
	__u16	cqe_size;
	__u16	reserved0;
	__u32	reserved1;
};

struct mlx5_ib_create_srq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u32	flags;
	__u32	reserved0; /* explicit padding (optional on i386) */
	__u32	uidx;
	__u32	reserved1;
};

struct mlx5_ib_create_srq_resp {
	__u32	srqn;
	__u32	reserved;
};

struct mlx5_ib_create_qp_dci_streams {
	__u8 log_num_concurent;
	__u8 log_num_errored;
};

struct mlx5_ib_create_qp {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u32	sq_wqe_count;
	__u32	rq_wqe_count;
	__u32	rq_wqe_shift;
	__u32	flags;
	__u32	uidx;
	__u32	bfreg_index;
	union {
		__aligned_u64 sq_buf_addr;
		__aligned_u64 access_key;
	};
	__u32  ece_options;
	struct  mlx5_ib_create_qp_dci_streams dci_streams;
	__u16 reserved;
};

/* RX Hash function flags */
enum mlx5_rx_hash_function_flags {
	MLX5_RX_HASH_FUNC_TOEPLITZ	= 1 << 0,
};

/*
 * RX Hash flags: these flags select which fields of an incoming packet
 * participate in the RX Hash. Each flag represents one packet field;
 * when a flag is set, that field is included in the RX Hash calculation.
 * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP,
 * and *TCP and *UDP flags can't be enabled together on the same QP.
 * (An example mask is sketched after the enum below.)
 */
enum mlx5_rx_hash_fields {
	MLX5_RX_HASH_SRC_IPV4	= 1 << 0,
	MLX5_RX_HASH_DST_IPV4	= 1 << 1,
	MLX5_RX_HASH_SRC_IPV6	= 1 << 2,
	MLX5_RX_HASH_DST_IPV6	= 1 << 3,
	MLX5_RX_HASH_SRC_PORT_TCP	= 1 << 4,
	MLX5_RX_HASH_DST_PORT_TCP	= 1 << 5,
	MLX5_RX_HASH_SRC_PORT_UDP	= 1 << 6,
	MLX5_RX_HASH_DST_PORT_UDP	= 1 << 7,
	MLX5_RX_HASH_IPSEC_SPI		= 1 << 8,
	/* Save bits for future fields */
	MLX5_RX_HASH_INNER		= (1UL << 31),
};
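
/*
 * Example of a valid rx_hash_fields_mask (illustrative only): hash IPv4
 * addresses plus TCP ports. Combining *IPV4 with *IPV6, or *TCP with
 * *UDP, on the same QP is not allowed, as noted above.
 *
 *	__u64 fields = MLX5_RX_HASH_SRC_IPV4 | MLX5_RX_HASH_DST_IPV4 |
 *		       MLX5_RX_HASH_SRC_PORT_TCP | MLX5_RX_HASH_DST_PORT_TCP;
 */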

struct mlx5_ib_create_qp_rss {
	__aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
	__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
	__u8 rx_key_len; /* valid only for Toeplitz */
	__u8 reserved[6];
	__u8 rx_hash_key[128]; /* valid only for Toeplitz */
	__u32   comp_mask;
	__u32	flags;
};

enum mlx5_ib_create_qp_resp_mask {
	MLX5_IB_CREATE_QP_RESP_MASK_TIRN = 1UL << 0,
	MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1,
	MLX5_IB_CREATE_QP_RESP_MASK_RQN  = 1UL << 2,
	MLX5_IB_CREATE_QP_RESP_MASK_SQN  = 1UL << 3,
	MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR  = 1UL << 4,
};

struct mlx5_ib_create_qp_resp {
	__u32	bfreg_index;
	__u32   ece_options;
	__u32	comp_mask;
	__u32	tirn;
	__u32	tisn;
	__u32	rqn;
	__u32	sqn;
	__u32   reserved1;
	__u64	tir_icm_addr;
};

struct mlx5_ib_alloc_mw {
	__u32	comp_mask;
	__u8	num_klms;
	__u8	reserved1;
	__u16	reserved2;
};

enum mlx5_ib_create_wq_mask {
	MLX5_IB_CREATE_WQ_STRIDING_RQ	= (1 << 0),
};

struct mlx5_ib_create_wq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u32   rq_wqe_count;
	__u32   rq_wqe_shift;
	__u32   user_index;
	__u32   flags;
	__u32   comp_mask;
	__u32	single_stride_log_num_of_bytes;
	__u32	single_wqe_log_num_of_strides;
	__u32	two_byte_shift_en;
};

struct mlx5_ib_create_ah_resp {
	__u32	response_length;
	__u8	dmac[ETH_ALEN];
	__u8	reserved[6];
};

struct mlx5_ib_burst_info {
	__u32       max_burst_sz;
	__u16       typical_pkt_sz;
	__u16       reserved;
};

struct mlx5_ib_modify_qp {
	__u32			   comp_mask;
	struct mlx5_ib_burst_info  burst_info;
	__u32			   ece_options;
};

struct mlx5_ib_modify_qp_resp {
	__u32	response_length;
	__u32	dctn;
	__u32   ece_options;
	__u32   reserved;
};

struct mlx5_ib_create_wq_resp {
	__u32	response_length;
	__u32	reserved;
};

struct mlx5_ib_create_rwq_ind_tbl_resp {
	__u32	response_length;
	__u32	reserved;
};

struct mlx5_ib_modify_wq {
	__u32	comp_mask;
	__u32	reserved;
};

struct mlx5_ib_clock_info {
	__u32 sign;
	__u32 resv;
	__aligned_u64 nsec;
	__aligned_u64 cycles;
	__aligned_u64 frac;
	__u32 mult;
	__u32 shift;
	__aligned_u64 mask;
	__aligned_u64 overflow_period;
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE               = 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES       = 1,
	MLX5_IB_MMAP_WC_PAGE                    = 2,
	MLX5_IB_MMAP_NC_PAGE                    = 3,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK                 = 5,
	MLX5_IB_MMAP_ALLOC_WC                   = 6,
	MLX5_IB_MMAP_CLOCK_INFO                 = 7,
	MLX5_IB_MMAP_DEVICE_MEM                 = 8,
};

enum {
	MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
};

/* Bit indexes for the mlx5_alloc_ucontext_resp.clock_info_versions bitmap */
enum {
	MLX5_IB_CLOCK_INFO_V1              = 0,
};
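
/*
 * Rough sketch (illustrative, not normative) of how a reader of the
 * MLX5_IB_MMAP_CLOCK_INFO page might convert a raw HCA timestamp to
 * nanoseconds with the v1 layout above. A real consumer must also re-read
 * every field if 'sign' changes during the copy and must handle
 * timestamps taken before the last kernel update. 'clock_info_page' (the
 * mmap()ed page) and 'hca_ts' (a device timestamp) are assumed
 * caller-side names.
 *
 *	const volatile struct mlx5_ib_clock_info *ci = clock_info_page;
 *	__u64 sign, delta, ns;
 *
 *	do {
 *		sign = ci->sign;
 *	} while (sign & MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
 *
 *	delta = (hca_ts - ci->cycles) & ci->mask;
 *	ns    = ci->nsec + ((delta * ci->mult + ci->frac) >> ci->shift);
 */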

struct mlx5_ib_flow_counters_desc {
	__u32	description;
	__u32	index;
};

struct mlx5_ib_flow_counters_data {
	RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data);
	__u32   ncounters;
	__u32   reserved;
};

struct mlx5_ib_create_flow {
	__u32   ncounters_data;
	__u32   reserved;
	/*
	 * ncounters_data counters-data entries follow; each entry in
	 * data[] must match the counter object that was pointed to by a
	 * counters spec at flow creation time (see the sizing sketch
	 * after this struct).
	 */
	struct mlx5_ib_flow_counters_data data[];
};
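
/*
 * Illustrative sizing of the trailing flexible array (a sketch; 'n',
 * 'descs' and 'ndescs' are assumed caller-side names). counters_data is
 * filled like any other user pointer in this file, i.e. cast through
 * uintptr_t into the __u64-backed field.
 *
 *	size_t len = sizeof(struct mlx5_ib_create_flow) +
 *		     n * sizeof(struct mlx5_ib_flow_counters_data);
 *	struct mlx5_ib_create_flow *cmd = calloc(1, len);
 *
 *	cmd->ncounters_data = n;
 *	cmd->data[0].counters_data = (__u64)(uintptr_t)descs;
 *	cmd->data[0].ncounters = ndescs;
 */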

#endif /* MLX5_ABI_USER_H */