/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_ABI_USER_H
#define MLX5_ABI_USER_H

#include <linux/types.h>
#include <linux/if_ether.h>	/* For ETH_ALEN. */

enum {
	MLX5_QP_FLAG_SIGNATURE		= 1 << 0,
	MLX5_QP_FLAG_SCATTER_CQE	= 1 << 1,
};

enum {
	MLX5_SRQ_FLAG_SIGNATURE		= 1 << 0,
};

enum {
	MLX5_WQ_FLAG_SIGNATURE		= 1 << 0,
};

/* Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define MLX5_IB_UVERBS_ABI_VERSION	1

/* Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */
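/* Illustrative sketch, not part of the ABI: one common way for user-space
 * to honour the rule above is to carry buffer addresses in the __u64
 * fields defined later in this file (e.g. buf_addr/db_addr in
 * struct mlx5_ib_create_cq) by casting through uintptr_t, which stays
 * well defined on both 32-bit and 64-bit user-space:
 *
 *	struct mlx5_ib_create_cq cmd;
 *
 *	cmd.buf_addr = (__u64)(uintptr_t)cq_buf;	// cq_buf: hypothetical CQ buffer
 *	cmd.db_addr  = (__u64)(uintptr_t)db_record;	// db_record: hypothetical doorbell record
 */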
struct mlx5_ib_alloc_ucontext_req {
	__u32	total_num_bfregs;
	__u32	num_low_latency_bfregs;
};

enum mlx5_lib_caps {
	MLX5_LIB_CAP_4K_UAR	= (__u64)1 << 0,
};

struct mlx5_ib_alloc_ucontext_req_v2 {
	__u32	total_num_bfregs;
	__u32	num_low_latency_bfregs;
	__u32	flags;
	__u32	comp_mask;
	__u8	max_cqe_version;
	__u8	reserved0;
	__u16	reserved1;
	__u32	reserved2;
	__u64	lib_caps;
};

enum mlx5_ib_alloc_ucontext_resp_mask {
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
};

enum mlx5_user_cmds_supp_uhw {
	MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
	MLX5_USER_CMDS_SUPP_UHW_CREATE_AH    = 1 << 1,
};

/* The eth_min_inline response value is offset by one from the value
 * returned by the FW, so that user-space can tell a real report apart
 * from older kernels, which leave the field at 0 (MLX5_USER_INLINE_MODE_NA).
 */
enum mlx5_user_inline_mode {
	MLX5_USER_INLINE_MODE_NA,
	MLX5_USER_INLINE_MODE_NONE,
	MLX5_USER_INLINE_MODE_L2,
	MLX5_USER_INLINE_MODE_IP,
	MLX5_USER_INLINE_MODE_TCP_UDP,
};
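/* Non-normative sketch: given the off-by-one encoding above, user-space
 * could decode the value reported in struct mlx5_ib_alloc_ucontext_resp
 * (defined below) along these lines, where 'fw_inline_mode' is a
 * hypothetical local variable:
 *
 *	if (resp.eth_min_inline == MLX5_USER_INLINE_MODE_NA) {
 *		// Older kernel: value not reported; fall back to a safe default.
 *	} else {
 *		fw_inline_mode = resp.eth_min_inline - 1;
 *	}
 */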
struct mlx5_ib_alloc_ucontext_resp {
	__u32	qp_tab_size;
	__u32	bf_reg_size;
	__u32	tot_bfregs;
	__u32	cache_line_size;
	__u16	max_sq_desc_sz;
	__u16	max_rq_desc_sz;
	__u32	max_send_wqebb;
	__u32	max_recv_wr;
	__u32	max_srq_recv_wr;
	__u16	num_ports;
	__u16	reserved1;
	__u32	comp_mask;
	__u32	response_length;
	__u8	cqe_version;
	__u8	cmds_supp_uhw;
	__u8	eth_min_inline;
	__u8	reserved2;
	__u64	hca_core_clock_offset;
	__u32	log_uar_size;
	__u32	num_uars_per_page;
};

struct mlx5_ib_alloc_pd_resp {
	__u32	pdn;
};

struct mlx5_ib_tso_caps {
	__u32	max_tso; /* Maximum tso payload size in bytes */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	__u32	supported_qpts;
};

struct mlx5_ib_rss_caps {
	__u64	rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
	__u8	rx_hash_function; /* enum mlx5_rx_hash_function_flags */
	__u8	reserved[7];
};

enum mlx5_ib_cqe_comp_res_format {
	MLX5_IB_CQE_RES_FORMAT_HASH	= 1 << 0,
	MLX5_IB_CQE_RES_FORMAT_CSUM	= 1 << 1,
	MLX5_IB_CQE_RES_RESERVED	= 1 << 2,
};

struct mlx5_ib_cqe_comp_caps {
	__u32	max_num;
	__u32	supported_format; /* enum mlx5_ib_cqe_comp_res_format */
};

struct mlx5_packet_pacing_caps {
	__u32	qp_rate_limit_min;
	__u32	qp_rate_limit_max; /* In kbps */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32	supported_qpts;
	__u32	reserved;
};

enum mlx5_ib_mpw_caps {
	MPW_RESERVED		= 1 << 0,
	MLX5_IB_ALLOW_MPW	= 1 << 1,
	MLX5_IB_SUPPORT_EMPW	= 1 << 2,
};

enum mlx5_ib_sw_parsing_offloads {
	MLX5_IB_SW_PARSING	= 1 << 0,
	MLX5_IB_SW_PARSING_CSUM	= 1 << 1,
	MLX5_IB_SW_PARSING_LSO	= 1 << 2,
};

struct mlx5_ib_sw_parsing_caps {
	__u32	sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32	supported_qpts;
};

struct mlx5_ib_query_device_resp {
	__u32	comp_mask;
	__u32	response_length;
	struct	mlx5_ib_tso_caps tso_caps;
	struct	mlx5_ib_rss_caps rss_caps;
	struct	mlx5_ib_cqe_comp_caps cqe_comp_caps;
	struct	mlx5_packet_pacing_caps packet_pacing_caps;
	__u32	mlx5_ib_support_multi_pkt_send_wqes;
	__u32	reserved;
	struct	mlx5_ib_sw_parsing_caps sw_parsing_caps;
};

struct mlx5_ib_create_cq {
	__u64	buf_addr;
	__u64	db_addr;
	__u32	cqe_size;
	__u8	cqe_comp_en;
	__u8	cqe_comp_res_format;
	__u16	reserved; /* explicit padding (optional on i386) */
};

struct mlx5_ib_create_cq_resp {
	__u32	cqn;
	__u32	reserved;
};

struct mlx5_ib_resize_cq {
	__u64	buf_addr;
	__u16	cqe_size;
	__u16	reserved0;
	__u32	reserved1;
};

struct mlx5_ib_create_srq {
	__u64	buf_addr;
	__u64	db_addr;
	__u32	flags;
	__u32	reserved0; /* explicit padding (optional on i386) */
	__u32	uidx;
	__u32	reserved1;
};

struct mlx5_ib_create_srq_resp {
	__u32	srqn;
	__u32	reserved;
};

struct mlx5_ib_create_qp {
	__u64	buf_addr;
	__u64	db_addr;
	__u32	sq_wqe_count;
	__u32	rq_wqe_count;
	__u32	rq_wqe_shift;
	__u32	flags;
	__u32	uidx;
	__u32	reserved0;
	__u64	sq_buf_addr;
};

/* RX Hash function flags */
enum mlx5_rx_hash_function_flags {
	MLX5_RX_HASH_FUNC_TOEPLITZ	= 1 << 0,
};

/*
 * RX Hash flags. These flags select which fields of an incoming packet
 * participate in the RX hash: each flag represents one packet field, and
 * setting a flag includes that field in the RX hash calculation.
 * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP,
 * and *TCP and *UDP flags can't be enabled together on the same QP.
 */
enum mlx5_rx_hash_fields {
	MLX5_RX_HASH_SRC_IPV4		= 1 << 0,
	MLX5_RX_HASH_DST_IPV4		= 1 << 1,
	MLX5_RX_HASH_SRC_IPV6		= 1 << 2,
	MLX5_RX_HASH_DST_IPV6		= 1 << 3,
	MLX5_RX_HASH_SRC_PORT_TCP	= 1 << 4,
	MLX5_RX_HASH_DST_PORT_TCP	= 1 << 5,
	MLX5_RX_HASH_SRC_PORT_UDP	= 1 << 6,
	MLX5_RX_HASH_DST_PORT_UDP	= 1 << 7
};

struct mlx5_ib_create_qp_rss {
	__u64	rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
	__u8	rx_hash_function; /* enum mlx5_rx_hash_function_flags */
	__u8	rx_key_len; /* valid only for Toeplitz */
	__u8	reserved[6];
	__u8	rx_hash_key[128]; /* valid only for Toeplitz */
	__u32	comp_mask;
	__u32	reserved1;
};

struct mlx5_ib_create_qp_resp {
	__u32	bfreg_index;
};

struct mlx5_ib_alloc_mw {
	__u32	comp_mask;
	__u8	num_klms;
	__u8	reserved1;
	__u16	reserved2;
};

struct mlx5_ib_create_wq {
	__u64	buf_addr;
	__u64	db_addr;
	__u32	rq_wqe_count;
	__u32	rq_wqe_shift;
	__u32	user_index;
	__u32	flags;
	__u32	comp_mask;
	__u32	reserved;
};

struct mlx5_ib_create_ah_resp {
	__u32	response_length;
	__u8	dmac[ETH_ALEN];
	__u8	reserved[6];
};

struct mlx5_ib_create_wq_resp {
	__u32	response_length;
	__u32	reserved;
};

struct mlx5_ib_create_rwq_ind_tbl_resp {
	__u32	response_length;
	__u32	reserved;
};

struct mlx5_ib_modify_wq {
	__u32	comp_mask;
	__u32	reserved;
};
#endif /* MLX5_ABI_USER_H */