/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_ABI_USER_H
#define MLX5_ABI_USER_H

#include <linux/types.h>
#include <linux/if_ether.h>	/* For ETH_ALEN. */

enum {
	MLX5_QP_FLAG_SIGNATURE		= 1 << 0,
	MLX5_QP_FLAG_SCATTER_CQE	= 1 << 1,
};

enum {
	MLX5_SRQ_FLAG_SIGNATURE		= 1 << 0,
};

enum {
	MLX5_WQ_FLAG_SIGNATURE		= 1 << 0,
};

/* Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define MLX5_IB_UVERBS_ABI_VERSION	1

/* Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */

struct mlx5_ib_alloc_ucontext_req {
	__u32	total_num_bfregs;
	__u32	num_low_latency_bfregs;
};

enum mlx5_lib_caps {
	MLX5_LIB_CAP_4K_UAR	= (__u64)1 << 0,
};

struct mlx5_ib_alloc_ucontext_req_v2 {
	__u32	total_num_bfregs;
	__u32	num_low_latency_bfregs;
	__u32	flags;
	__u32	comp_mask;
	__u8	max_cqe_version;
	__u8	reserved0;
	__u16	reserved1;
	__u32	reserved2;
	__u64	lib_caps;
};

enum mlx5_ib_alloc_ucontext_resp_mask {
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
};

enum mlx5_user_cmds_supp_uhw {
	MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE	= 1 << 0,
	MLX5_USER_CMDS_SUPP_UHW_CREATE_AH	= 1 << 1,
};

/* The eth_min_inline response value is set to off-by-one vs the FW
 * returned value to allow user-space to deal with older kernels.
 */
enum mlx5_user_inline_mode {
	MLX5_USER_INLINE_MODE_NA,
	MLX5_USER_INLINE_MODE_NONE,
	MLX5_USER_INLINE_MODE_L2,
	MLX5_USER_INLINE_MODE_IP,
	MLX5_USER_INLINE_MODE_TCP_UDP,
};
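
/*
 * Illustrative sketch only, not part of the ABI: a response buffer that an
 * older kernel did not fill is zeroed, so eth_min_inline reads back as
 * MLX5_USER_INLINE_MODE_NA and user-space knows it must determine the
 * minimal inline mode by other means; otherwise the FW value is recovered
 * by subtracting one.  The helper name below is hypothetical.
 *
 *	static int resolve_eth_min_inline(__u8 eth_min_inline)
 *	{
 *		if (eth_min_inline == MLX5_USER_INLINE_MODE_NA)
 *			return -1;
 *		return eth_min_inline - 1;
 *	}
 */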

struct mlx5_ib_alloc_ucontext_resp {
	__u32	qp_tab_size;
	__u32	bf_reg_size;
	__u32	tot_bfregs;
	__u32	cache_line_size;
	__u16	max_sq_desc_sz;
	__u16	max_rq_desc_sz;
	__u32	max_send_wqebb;
	__u32	max_recv_wr;
	__u32	max_srq_recv_wr;
	__u16	num_ports;
	__u16	reserved1;
	__u32	comp_mask;
	__u32	response_length;
	__u8	cqe_version;
	__u8	cmds_supp_uhw;
	__u8	eth_min_inline;
	__u8	reserved2;
	__u64	hca_core_clock_offset;
	__u32	log_uar_size;
	__u32	num_uars_per_page;
};

struct mlx5_ib_alloc_pd_resp {
	__u32	pdn;
};

struct mlx5_ib_tso_caps {
	__u32 max_tso; /* Maximum tso payload size in bytes */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	__u32 supported_qpts;
};

struct mlx5_ib_rss_caps {
	__u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
	__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
	__u8 reserved[7];
};

enum mlx5_ib_cqe_comp_res_format {
	MLX5_IB_CQE_RES_FORMAT_HASH	= 1 << 0,
	MLX5_IB_CQE_RES_FORMAT_CSUM	= 1 << 1,
	MLX5_IB_CQE_RES_RESERVED	= 1 << 2,
};

struct mlx5_ib_cqe_comp_caps {
	__u32 max_num;
	__u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
};

struct mlx5_packet_pacing_caps {
	__u32 qp_rate_limit_min;
	__u32 qp_rate_limit_max; /* In kbps */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
	__u32 reserved;
};

enum mlx5_ib_mpw_caps {
	MPW_RESERVED		= 1 << 0,
	MLX5_IB_ALLOW_MPW	= 1 << 1,
};

enum mlx5_ib_sw_parsing_offloads {
	MLX5_IB_SW_PARSING	= 1 << 0,
	MLX5_IB_SW_PARSING_CSUM	= 1 << 1,
	MLX5_IB_SW_PARSING_LSO	= 1 << 2,
};

struct mlx5_ib_sw_parsing_caps {
	__u32 sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
};
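
/*
 * Illustrative sketch only, not part of the ABI: each *_caps struct above
 * reports supported QP types as a bitmask over 'enum ib_qp_type', so a
 * consumer checks a given QP type with a shift-and-test.  The function name
 * is hypothetical; IB_QPT_RAW_PACKET comes from the verbs definitions the
 * caller already uses.
 *
 *	static int sw_parsing_lso_for_raw_packet(
 *			const struct mlx5_ib_sw_parsing_caps *caps)
 *	{
 *		if (!(caps->supported_qpts & (1 << IB_QPT_RAW_PACKET)))
 *			return 0;
 *		return !!(caps->sw_parsing_offloads & MLX5_IB_SW_PARSING_LSO);
 *	}
 */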

struct mlx5_ib_query_device_resp {
	__u32	comp_mask;
	__u32	response_length;
	struct	mlx5_ib_tso_caps tso_caps;
	struct	mlx5_ib_rss_caps rss_caps;
	struct	mlx5_ib_cqe_comp_caps cqe_comp_caps;
	struct	mlx5_packet_pacing_caps packet_pacing_caps;
	__u32	mlx5_ib_support_multi_pkt_send_wqes;
	__u32	reserved;
	struct	mlx5_ib_sw_parsing_caps sw_parsing_caps;
};

struct mlx5_ib_create_cq {
	__u64	buf_addr;
	__u64	db_addr;
	__u32	cqe_size;
	__u8	cqe_comp_en;
	__u8	cqe_comp_res_format;
	__u16	reserved; /* explicit padding (optional on i386) */
};

struct mlx5_ib_create_cq_resp {
	__u32	cqn;
	__u32	reserved;
};

struct mlx5_ib_resize_cq {
	__u64	buf_addr;
	__u16	cqe_size;
	__u16	reserved0;
	__u32	reserved1;
};

struct mlx5_ib_create_srq {
	__u64	buf_addr;
	__u64	db_addr;
	__u32	flags;
	__u32	reserved0; /* explicit padding (optional on i386) */
	__u32	uidx;
	__u32	reserved1;
};

struct mlx5_ib_create_srq_resp {
	__u32	srqn;
	__u32	reserved;
};

struct mlx5_ib_create_qp {
	__u64	buf_addr;
	__u64	db_addr;
	__u32	sq_wqe_count;
	__u32	rq_wqe_count;
	__u32	rq_wqe_shift;
	__u32	flags;
	__u32	uidx;
	__u32	reserved0;
	__u64	sq_buf_addr;
};

/* RX Hash function flags */
enum mlx5_rx_hash_function_flags {
	MLX5_RX_HASH_FUNC_TOEPLITZ	= 1 << 0,
};

/*
 * RX Hash flags, these flags select which fields of the incoming packet
 * participate in the RX hash. Each flag represents a certain packet field;
 * when the flag is set, that field takes part in the RX hash calculation.
 * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP,
 * and *TCP and *UDP flags can't be enabled together on the same QP.
 */
enum mlx5_rx_hash_fields {
	MLX5_RX_HASH_SRC_IPV4		= 1 << 0,
	MLX5_RX_HASH_DST_IPV4		= 1 << 1,
	MLX5_RX_HASH_SRC_IPV6		= 1 << 2,
	MLX5_RX_HASH_DST_IPV6		= 1 << 3,
	MLX5_RX_HASH_SRC_PORT_TCP	= 1 << 4,
	MLX5_RX_HASH_DST_PORT_TCP	= 1 << 5,
	MLX5_RX_HASH_SRC_PORT_UDP	= 1 << 6,
	MLX5_RX_HASH_DST_PORT_UDP	= 1 << 7
};

struct mlx5_ib_create_qp_rss {
	__u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
	__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
	__u8 rx_key_len; /* valid only for Toeplitz */
	__u8 reserved[6];
	__u8 rx_hash_key[128]; /* valid only for Toeplitz */
	__u32 comp_mask;
	__u32 reserved1;
};

struct mlx5_ib_create_qp_resp {
	__u32	bfreg_index;
};

struct mlx5_ib_alloc_mw {
	__u32	comp_mask;
	__u8	num_klms;
	__u8	reserved1;
	__u16	reserved2;
};

struct mlx5_ib_create_wq {
	__u64	buf_addr;
	__u64	db_addr;
	__u32	rq_wqe_count;
	__u32	rq_wqe_shift;
	__u32	user_index;
	__u32	flags;
	__u32	comp_mask;
	__u32	reserved;
};

struct mlx5_ib_create_ah_resp {
	__u32	response_length;
	__u8	dmac[ETH_ALEN];
	__u8	reserved[6];
};

struct mlx5_ib_create_wq_resp {
	__u32	response_length;
	__u32	reserved;
};

struct mlx5_ib_create_rwq_ind_tbl_resp {
	__u32	response_length;
	__u32	reserved;
};

struct mlx5_ib_modify_wq {
	__u32	comp_mask;
	__u32	reserved;
};
#endif /* MLX5_ABI_USER_H */