/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H

#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>

#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0x80
#else
#error Host endianness not defined
#endif

/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)

#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))

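/*
 * A worked example of the offset arithmetic above (illustrative numbers
 * only): the device format is big-endian within each 32-bit dword, so a
 * field's shift is counted down from the dword's most significant bit.
 * For a 4-bit field at PRM bit offset 20, __mlx5_dw_off() yields dword 0
 * (20 / 32) and __mlx5_dw_bit_off() yields 32 - 4 - (20 & 0x1f) = 8,
 * i.e. the field occupies bits 11:8 of the big-endian dword.
 */
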
/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
	u32 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
	MLX5_SET(typ, p, fld[idx], v); \
} while (0)

#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))

#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})

#define __MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld, v); \
} while (0)

#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld[idx], v); \
} while (0)

#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))

#define MLX5_GET64_PR(typ, p, fld) ({ \
	u64 ___t = MLX5_GET64(typ, p, fld); \
	pr_debug(#fld " = 0x%llx\n", ___t); \
	___t; \
})

#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
__mlx5_mask16(typ, fld))

#define MLX5_SET16(typ, p, fld, v) do { \
	u16 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \
	*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
	cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
		     (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
		     << __mlx5_16_bit_off(typ, fld))); \
} while (0)

/* Big endian getters */
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
	__mlx5_64_off(typ, fld)))

#define MLX5_GET_BE(type_t, typ, p, fld) ({ \
		type_t tmp; \
		switch (sizeof(tmp)) { \
		case sizeof(u8): \
			tmp = (__force type_t)MLX5_GET(typ, p, fld); \
			break; \
		case sizeof(u16): \
			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
			break; \
		case sizeof(u32): \
			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
			break; \
		case sizeof(u64): \
			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
			break; \
		} \
		tmp; \
		})

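/*
 * Usage sketch for the accessors above (illustrative, not a definition
 * from this file; the struct, field and opcode names come from
 * mlx5_ifc.h):
 *
 *	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
 *	u32 opcode;
 *
 *	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
 *	opcode = MLX5_GET(query_hca_cap_in, in, opcode);
 *
 * MLX5_SET() masks the value to the field width and byte-swaps only the
 * dword it touches, so mixed host/device-endian buffers never occur.
 */
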
enum mlx5_inline_modes {
	MLX5_INLINE_MODE_NONE,
	MLX5_INLINE_MODE_L2,
	MLX5_INLINE_MODE_IP,
	MLX5_INLINE_MODE_TCP_UDP,
};

enum {
	MLX5_MAX_COMMANDS		= 32,
	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
	MLX5_PCI_CMD_XPORT		= 7,
	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
	MLX5_MAX_PSVS			= 4,
};

enum {
	MLX5_EXTENDED_UD_AV		= 0x80000000,
};

enum {
	MLX5_CQ_STATE_ARMED		= 9,
	MLX5_CQ_STATE_ALWAYS_ARMED	= 0xb,
	MLX5_CQ_STATE_FIRED		= 0xa,
};

enum {
	MLX5_STAT_RATE_OFFSET		= 5,
};

enum {
	MLX5_INLINE_SEG	= 0x80000000,
};

enum {
	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
};

enum {
	MLX5_MIN_PKEY_TABLE_SIZE = 128,
	MLX5_MAX_LOG_PKEY_TABLE  = 5,
};

enum {
	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
};

enum {
	MLX5_PFAULT_SUBTYPE_WQE = 0,
	MLX5_PFAULT_SUBTYPE_RDMA = 1,
};

enum wqe_page_fault_type {
	MLX5_WQE_PF_TYPE_RMP = 0,
	MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
	MLX5_WQE_PF_TYPE_RESP = 2,
	MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
};

enum {
	MLX5_PERM_LOCAL_READ	= 1 << 2,
	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
	MLX5_PERM_REMOTE_READ	= 1 << 4,
	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
	MLX5_PERM_ATOMIC	= 1 << 6,
	MLX5_PERM_UMR_EN	= 1 << 7,
};

enum {
	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
	MLX5_PCIE_CTRL_RELAXED_ORDERING	= 1 << 2,
	MLX5_PCIE_CTRL_NO_SNOOP		= 1 << 3,
	MLX5_PCIE_CTRL_TLP_PROCE_EN	= 1 << 6,
	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
};

enum {
	MLX5_EN_RD	= (u64)1,
	MLX5_EN_WR	= (u64)2
};

enum {
	MLX5_ADAPTER_PAGE_SHIFT		= 12,
	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
};

enum {
	MLX5_BFREGS_PER_UAR		= 4,
	MLX5_MAX_UARS			= 1 << 8,
	MLX5_NON_FP_BFREGS_PER_UAR	= 2,
	MLX5_FP_BFREGS_PER_UAR		= MLX5_BFREGS_PER_UAR -
					  MLX5_NON_FP_BFREGS_PER_UAR,
	MLX5_MAX_BFREGS			= MLX5_MAX_UARS *
					  MLX5_NON_FP_BFREGS_PER_UAR,
	MLX5_UARS_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
	MLX5_NON_FP_BFREGS_IN_PAGE	= MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
	MLX5_MIN_DYN_BFREGS		= 512,
	MLX5_MAX_DYN_BFREGS		= 1024,
};

enum {
	MLX5_MKEY_MASK_LEN		= 1ull << 0,
	MLX5_MKEY_MASK_PAGE_SIZE	= 1ull << 1,
	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
	MLX5_MKEY_MASK_PD		= 1ull << 7,
	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
	MLX5_MKEY_MASK_EN_SIGERR	= 1ull << 9,
	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
	MLX5_MKEY_MASK_KEY		= 1ull << 13,
	MLX5_MKEY_MASK_QPN		= 1ull << 14,
	MLX5_MKEY_MASK_LR		= 1ull << 17,
	MLX5_MKEY_MASK_LW		= 1ull << 18,
	MLX5_MKEY_MASK_RR		= 1ull << 19,
	MLX5_MKEY_MASK_RW		= 1ull << 20,
	MLX5_MKEY_MASK_A		= 1ull << 21,
	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
	MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE	= 1ull << 25,
	MLX5_MKEY_MASK_FREE			= 1ull << 29,
	MLX5_MKEY_MASK_RELAXED_ORDERING_READ	= 1ull << 47,
};

enum {
	MLX5_UMR_TRANSLATION_OFFSET_EN	= (1 << 4),

	MLX5_UMR_CHECK_NOT_FREE		= (1 << 5),
	MLX5_UMR_CHECK_FREE		= (2 << 5),

	MLX5_UMR_INLINE			= (1 << 7),
};

#define MLX5_UMR_MTT_ALIGNMENT 0x40
#define MLX5_UMR_MTT_MASK      (MLX5_UMR_MTT_ALIGNMENT - 1)
#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT

#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)

enum {
	MLX5_EVENT_QUEUE_TYPE_QP = 0,
	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
	MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};

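/*
 * For orientation, the UAR/BFREG constants above resolve as follows (a
 * sketch of the arithmetic, not new definitions): each 4K UAR page holds
 * MLX5_BFREGS_PER_UAR (4) blue-flame registers, two of them
 * non-fast-path.  On a kernel with 4K PAGE_SIZE, MLX5_UARS_IN_PAGE is 1
 * and MLX5_NON_FP_BFREGS_IN_PAGE is 2; with 64K pages they become 16
 * and 32.
 */
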
/* mlx5 components can subscribe to any one of these events via
 * mlx5_eq_notifier_register API.
 */
enum mlx5_event {
	/* Special value to subscribe to any event */
	MLX5_EVENT_TYPE_NOTIFY_ANY	   = 0x0,
	/* HW events enum start: comp events are not subscribable */
	MLX5_EVENT_TYPE_COMP		   = 0x0,
	/* HW Async events enum start: subscribable events */
	MLX5_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX5_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX5_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX5_EVENT_TYPE_SRQ_LAST_WQE	   = 0x13,
	MLX5_EVENT_TYPE_SRQ_RQ_LIMIT	   = 0x14,

	MLX5_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX5_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX5_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX5_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX5_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,

	MLX5_EVENT_TYPE_INTERNAL_ERROR	   = 0x08,
	MLX5_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX5_EVENT_TYPE_GPIO_EVENT	   = 0x15,
	MLX5_EVENT_TYPE_PORT_MODULE_EVENT  = 0x16,
	MLX5_EVENT_TYPE_TEMP_WARN_EVENT	   = 0x17,
	MLX5_EVENT_TYPE_XRQ_ERROR	   = 0x18,
	MLX5_EVENT_TYPE_REMOTE_CONFIG	   = 0x19,
	MLX5_EVENT_TYPE_GENERAL_EVENT	   = 0x22,
	MLX5_EVENT_TYPE_MONITOR_COUNTER	   = 0x24,
	MLX5_EVENT_TYPE_PPS_EVENT	   = 0x25,

	MLX5_EVENT_TYPE_DB_BF_CONGESTION   = 0x1a,
	MLX5_EVENT_TYPE_STALL_EVENT	   = 0x1b,

	MLX5_EVENT_TYPE_CMD		   = 0x0a,
	MLX5_EVENT_TYPE_PAGE_REQUEST	   = 0xb,

	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,

	MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
	MLX5_EVENT_TYPE_VHCA_STATE_CHANGE  = 0xf,

	MLX5_EVENT_TYPE_DCT_DRAINED	   = 0x1c,
	MLX5_EVENT_TYPE_DCT_KEY_VIOLATION  = 0x1d,

	MLX5_EVENT_TYPE_FPGA_ERROR	   = 0x20,
	MLX5_EVENT_TYPE_FPGA_QP_ERROR	   = 0x21,

	MLX5_EVENT_TYPE_DEVICE_TRACER	   = 0x26,

	MLX5_EVENT_TYPE_MAX		   = 0x100,
};

enum mlx5_driver_event {
	MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
};

enum {
	MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
	MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
};

enum {
	MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
	MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
	MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7,
	MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,
};

enum {
	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
	MLX5_PORT_CHANGE_SUBTYPE_LID		= 6,
	MLX5_PORT_CHANGE_SUBTYPE_PKEY		= 7,
	MLX5_PORT_CHANGE_SUBTYPE_GUID		= 8,
	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG	= 9,
};

enum {
	MLX5_DEV_CAP_FLAG_XRC		= 1LL << 3,
	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL << 8,
	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL << 9,
	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
	MLX5_DEV_CAP_FLAG_BLOCK_MCAST	= 1LL << 23,
	MLX5_DEV_CAP_FLAG_ON_DMND_PG	= 1LL << 24,
	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
};

enum {
	MLX5_ROCE_VERSION_1		= 0,
	MLX5_ROCE_VERSION_2		= 2,
};

enum {
	MLX5_ROCE_VERSION_1_CAP		= 1 << MLX5_ROCE_VERSION_1,
	MLX5_ROCE_VERSION_2_CAP		= 1 << MLX5_ROCE_VERSION_2,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4		= 0,
	MLX5_ROCE_L3_TYPE_IPV6		= 1,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4_CAP	= 1 << 1,
	MLX5_ROCE_L3_TYPE_IPV6_CAP	= 1 << 2,
};

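/*
 * Subscription sketch for the async events above (illustrative; struct
 * mlx5_nb, MLX5_NB_INIT() and mlx5_eq_notifier_register() are assumed
 * from linux/mlx5/eq.h, and the handler follows the usual notifier
 * convention of receiving the raw struct mlx5_eqe in @data):
 *
 *	static int port_event(struct notifier_block *nb,
 *			      unsigned long type, void *data)
 *	{
 *		struct mlx5_eqe *eqe = data;
 *
 *		return eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE ?
 *		       NOTIFY_OK : NOTIFY_DONE;
 *	}
 *
 *	MLX5_NB_INIT(&nb, port_event, PORT_CHANGE);
 *	mlx5_eq_notifier_register(mdev, &nb);
 */
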
enum {
	MLX5_OPCODE_NOP			= 0x00,
	MLX5_OPCODE_SEND_INVAL		= 0x01,
	MLX5_OPCODE_RDMA_WRITE		= 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX5_OPCODE_SEND		= 0x0a,
	MLX5_OPCODE_SEND_IMM		= 0x0b,
	MLX5_OPCODE_LSO			= 0x0e,
	MLX5_OPCODE_RDMA_READ		= 0x10,
	MLX5_OPCODE_ATOMIC_CS		= 0x11,
	MLX5_OPCODE_ATOMIC_FA		= 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
	MLX5_OPCODE_BIND_MW		= 0x18,
	MLX5_OPCODE_CONFIG_CMD		= 0x1f,
	MLX5_OPCODE_ENHANCED_MPSW	= 0x29,

	MLX5_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX5_RECV_OPCODE_SEND		= 0x01,
	MLX5_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX5_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX5_CQE_OPCODE_ERROR		= 0x1e,
	MLX5_CQE_OPCODE_RESIZE		= 0x16,

	MLX5_OPCODE_SET_PSV		= 0x20,
	MLX5_OPCODE_GET_PSV		= 0x21,
	MLX5_OPCODE_CHECK_PSV		= 0x22,
	MLX5_OPCODE_DUMP		= 0x23,
	MLX5_OPCODE_RGET_PSV		= 0x26,
	MLX5_OPCODE_RCHECK_PSV		= 0x27,

	MLX5_OPCODE_UMR			= 0x25,

};

enum {
	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
	MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2,
};

enum {
	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
	MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
};

struct mlx5_wqe_tls_static_params_seg {
	u8     ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
};

struct mlx5_wqe_tls_progress_params_seg {
	__be32 tis_tir_num;
	u8     ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
};

enum {
	MLX5_SET_PORT_RESET_QKEY	= 0,
	MLX5_SET_PORT_GUID0		= 16,
	MLX5_SET_PORT_NODE_GUID		= 17,
	MLX5_SET_PORT_SYS_GUID		= 18,
	MLX5_SET_PORT_GID_TABLE		= 19,
	MLX5_SET_PORT_PKEY_TABLE	= 20,
};

enum {
	MLX5_BW_NO_LIMIT   = 0,
	MLX5_100_MBPS_UNIT = 3,
	MLX5_GBPS_UNIT	   = 4,
};

enum {
	MLX5_MAX_PAGE_SHIFT		= 31
};

enum {
	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
};

enum {
	/*
	 * Max wqe size for rdma read is 512 bytes, so this
	 * limits our max_sge_rd as the wqe needs to fit:
	 * - ctrl segment (16 bytes)
	 * - rdma segment (16 bytes)
	 * - scatter elements (16 bytes each)
	 */
	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
};

enum mlx5_odp_transport_cap_bits {
	MLX5_ODP_SUPPORT_SEND	 = 1 << 31,
	MLX5_ODP_SUPPORT_RECV	 = 1 << 30,
	MLX5_ODP_SUPPORT_WRITE	 = 1 << 29,
	MLX5_ODP_SUPPORT_READ	 = 1 << 28,
};

struct mlx5_odp_caps {
	char reserved[0x10];
	struct {
		__be32	rc_odp_caps;
		__be32	uc_odp_caps;
		__be32	ud_odp_caps;
	} per_transport_caps;
	char reserved2[0xe4];
};

struct mlx5_cmd_layout {
	u8		type;
	u8		rsvd0[3];
	__be32		inlen;
	__be64		in_ptr;
	__be32		in[4];
	__be32		out[4];
	__be64		out_ptr;
	__be32		outlen;
	u8		token;
	u8		sig;
	u8		rsvd1;
	u8		status_own;
};

enum mlx5_fatal_assert_bit_offsets {
	MLX5_RFR_OFFSET = 31,
};

struct health_buffer {
	__be32		assert_var[5];
	__be32		rsvd0[3];
	__be32		assert_exit_ptr;
	__be32		assert_callra;
	__be32		rsvd1[2];
	__be32		fw_ver;
	__be32		hw_id;
	__be32		rfr;
	u8		irisc_index;
	u8		synd;
	__be16		ext_synd;
};

enum mlx5_initializing_bit_offsets {
	MLX5_FW_RESET_SUPPORTED_OFFSET = 30,
};

enum mlx5_cmd_addr_l_sz_offset {
	MLX5_NIC_IFC_OFFSET = 8,
};

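/*
 * A minimal check sketch for the ODP capability words above (the helper
 * name is hypothetical): each per-transport word is big-endian, so it is
 * byte-swapped before testing the MLX5_ODP_SUPPORT_* bits:
 *
 *	static bool rc_odp_write_supported(const struct mlx5_odp_caps *caps)
 *	{
 *		return be32_to_cpu(caps->per_transport_caps.rc_odp_caps) &
 *		       MLX5_ODP_SUPPORT_WRITE;
 *	}
 */
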
struct mlx5_init_seg {
	__be32			fw_rev;
	__be32			cmdif_rev_fw_sub;
	__be32			rsvd0[2];
	__be32			cmdq_addr_h;
	__be32			cmdq_addr_l_sz;
	__be32			cmd_dbell;
	__be32			rsvd1[120];
	__be32			initializing;
	struct health_buffer	health;
	__be32			rsvd2[880];
	__be32			internal_timer_h;
	__be32			internal_timer_l;
	__be32			rsvd3[2];
	__be32			health_counter;
	__be32			rsvd4[11];
	__be32			real_time_h;
	__be32			real_time_l;
	__be32			rsvd5[1006];
	__be64			ieee1588_clk;
	__be32			ieee1588_clk_type;
	__be32			clr_intx;
};

struct mlx5_eqe_comp {
	__be32	reserved[6];
	__be32	cqn;
};

struct mlx5_eqe_qp_srq {
	__be32	reserved1[5];
	u8	type;
	u8	reserved2[3];
	__be32	qp_srq_n;
};

struct mlx5_eqe_cq_err {
	__be32	cqn;
	u8	reserved1[7];
	u8	syndrome;
};

struct mlx5_eqe_xrq_err {
	__be32	reserved1[5];
	__be32	type_xrqn;
	__be32	reserved2;
};

struct mlx5_eqe_port_state {
	u8	reserved0[8];
	u8	port;
};

struct mlx5_eqe_gpio {
	__be32	reserved0[2];
	__be64	gpio_event;
};

struct mlx5_eqe_congestion {
	u8	type;
	u8	rsvd0;
	u8	congestion_level;
};

struct mlx5_eqe_stall_vl {
	u8	rsvd0[3];
	u8	port_vl;
};

struct mlx5_eqe_cmd {
	__be32	vector;
	__be32	rsvd[6];
};

struct mlx5_eqe_page_req {
	__be16		ec_function;
	__be16		func_id;
	__be32		num_pages;
	__be32		rsvd1[5];
};

struct mlx5_eqe_page_fault {
	__be32 bytes_committed;
	union {
		struct {
			u16     reserved1;
			__be16  wqe_index;
			u16	reserved2;
			__be16  packet_length;
			__be32  token;
			u8	reserved4[8];
			__be32  pftype_wq;
		} __packed wqe;
		struct {
			__be32  r_key;
			u16     reserved1;
			__be16  packet_length;
			__be32  rdma_op_len;
			__be64  rdma_va;
			__be32  pftype_token;
		} __packed rdma;
	} __packed;
} __packed;

struct mlx5_eqe_vport_change {
	u8		rsvd0[2];
	__be16		vport_num;
	__be32		rsvd1[6];
} __packed;

struct mlx5_eqe_port_module {
	u8        reserved_at_0[1];
	u8        module;
	u8        reserved_at_2[1];
	u8        module_status;
	u8        reserved_at_4[2];
	u8        error_type;
} __packed;

struct mlx5_eqe_pps {
	u8		rsvd0[3];
	u8		pin;
	u8		rsvd1[4];
	union {
		struct {
			__be32		time_sec;
			__be32		time_nsec;
		};
		struct {
			__be64		time_stamp;
		};
	};
	u8		rsvd2[12];
} __packed;

struct mlx5_eqe_dct {
	__be32  reserved[6];
	__be32  dctn;
};

struct mlx5_eqe_temp_warning {
	__be64 sensor_warning_msb;
	__be64 sensor_warning_lsb;
} __packed;

#define SYNC_RST_STATE_MASK	0xf

enum sync_rst_state_type {
	MLX5_SYNC_RST_STATE_RESET_REQUEST	= 0x0,
	MLX5_SYNC_RST_STATE_RESET_NOW		= 0x1,
	MLX5_SYNC_RST_STATE_RESET_ABORT		= 0x2,
};

struct mlx5_eqe_sync_fw_update {
	u8 reserved_at_0[3];
	u8 sync_rst_state;
};

struct mlx5_eqe_vhca_state {
	__be16 ec_function;
	__be16 function_id;
} __packed;

union ev_data {
	__be32				raw[7];
	struct mlx5_eqe_cmd		cmd;
	struct mlx5_eqe_comp		comp;
	struct mlx5_eqe_qp_srq		qp_srq;
	struct mlx5_eqe_cq_err		cq_err;
	struct mlx5_eqe_port_state	port;
	struct mlx5_eqe_gpio		gpio;
	struct mlx5_eqe_congestion	cong;
	struct mlx5_eqe_stall_vl	stall_vl;
	struct mlx5_eqe_page_req	req_pages;
	struct mlx5_eqe_page_fault	page_fault;
	struct mlx5_eqe_vport_change	vport_change;
	struct mlx5_eqe_port_module	port_module;
	struct mlx5_eqe_pps		pps;
	struct mlx5_eqe_dct		dct;
	struct mlx5_eqe_temp_warning	temp_warning;
	struct mlx5_eqe_xrq_err		xrq_err;
	struct mlx5_eqe_sync_fw_update	sync_fw_update;
	struct mlx5_eqe_vhca_state	vhca_state;
} __packed;

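/*
 * struct mlx5_init_seg above is mapped from BAR 0 and read with
 * big-endian MMIO accessors.  A sketch, assuming the mlx5_core
 * convention of keeping the mapping in dev->iseg and splitting fw_rev
 * into a minor:major pair:
 *
 *	u32 fw_rev = ioread32be(&dev->iseg->fw_rev);
 *	u16 major  = fw_rev & 0xffff;
 *	u16 minor  = fw_rev >> 16;
 *	u32 health = ioread32be(&dev->iseg->health_counter);
 */
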
struct mlx5_eqe {
	u8		rsvd0;
	u8		type;
	u8		rsvd1;
	u8		sub_type;
	__be32		rsvd2[7];
	union ev_data	data;
	__be16		rsvd3;
	u8		signature;
	u8		owner;
} __packed;

struct mlx5_cmd_prot_block {
	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
	u8		rsvd0[48];
	__be64		next;
	__be32		block_num;
	u8		rsvd1;
	u8		token;
	u8		ctrl_sig;
	u8		sig;
};

enum {
	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
};

struct mlx5_err_cqe {
	u8	rsvd0[32];
	__be32	srqn;
	u8	rsvd1[18];
	u8	vendor_err_synd;
	u8	syndrome;
	__be32	s_wqe_opcode_qpn;
	__be16	wqe_counter;
	u8	signature;
	u8	op_own;
};

struct mlx5_cqe64 {
	u8		tls_outer_l3_tunneled;
	u8		rsvd0;
	__be16		wqe_id;
	u8		lro_tcppsh_abort_dupack;
	u8		lro_min_ttl;
	__be16		lro_tcp_win;
	__be32		lro_ack_seq_num;
	__be32		rss_hash_result;
	u8		rss_hash_type;
	u8		ml_path;
	u8		rsvd20[2];
	__be16		check_sum;
	__be16		slid;
	__be32		flags_rqpn;
	u8		hds_ip_ext;
	u8		l4_l3_hdr_type;
	__be16		vlan_info;
	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
	union {
		__be32 immediate;
		__be32 inval_rkey;
		__be32 pkey;
		__be32 ft_metadata;
	};
	u8		rsvd40[4];
	__be32		byte_cnt;
	__be32		timestamp_h;
	__be32		timestamp_l;
	__be32		sop_drop_qpn;
	__be16		wqe_counter;
	u8		signature;
	u8		op_own;
};

struct mlx5_mini_cqe8 {
	union {
		__be32 rx_hash_result;
		struct {
			__be16 checksum;
			__be16 stridx;
		};
		struct {
			__be16 wqe_counter;
			u8  s_wqe_opcode;
			u8  reserved;
		} s_wqe_info;
	};
	__be32 byte_cnt;
};

enum {
	MLX5_NO_INLINE_DATA,
	MLX5_INLINE_DATA32_SEG,
	MLX5_INLINE_DATA64_SEG,
	MLX5_COMPRESSED,
};

enum {
	MLX5_CQE_FORMAT_CSUM = 0x1,
	MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
};

#define MLX5_MINI_CQE_ARRAY_SIZE 8

static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
{
	return (cqe->op_own >> 2) & 0x3;
}

static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
{
	return cqe->op_own >> 4;
}

static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
}

static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}

static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_l3_hdr_type >> 2) & 0x3;
}

static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
	return cqe->tls_outer_l3_tunneled & 0x1;
}

static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
{
	return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}

static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
	return cqe->l4_l3_hdr_type & 0x1;
}

static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
{
	u32 hi, lo;

	hi = be32_to_cpu(cqe->timestamp_h);
	lo = be32_to_cpu(cqe->timestamp_l);

	return (u64)lo | ((u64)hi << 32);
}

static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
{
	return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
}

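/*
 * Consumer-side sketch (illustrative): op_own packs the owner bit in
 * bit 0, the CQE format in bits 3:2 and the opcode in the top four
 * bits, which is exactly what the helpers above decode.
 * decompress_mini_cqes() is a hypothetical helper:
 *
 *	if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)
 *		return decompress_mini_cqes(cqe);
 *
 *	switch (get_cqe_opcode(cqe)) {
 *	...
 *	}
 *	ts = get_cqe_ts(cqe);
 */
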
#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE	(9)
#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE	(6)

struct mpwrq_cqe_bc {
	__be16	filler_consumed_strides;
	__be16	byte_cnt;
};

static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return be16_to_cpu(bc->byte_cnt);
}

static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
{
	return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return mpwrq_get_cqe_bc_consumed_strides(bc);
}

static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->wqe_counter);
}

enum {
	CQE_L4_HDR_TYPE_NONE			= 0x0,
	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
	CQE_L4_HDR_TYPE_UDP			= 0x2,
	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
};

enum {
	CQE_RSS_HTYPE_IP	= 0x3 << 2,
	/* cqe->rss_hash_type[3:2] - IP destination selected for hash
	 * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
	 */
	CQE_RSS_HTYPE_L4	= 0x3 << 6,
	/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
	 * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
	 */
};

enum {
	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
};

enum {
	CQE_L2_OK	= 1 << 0,
	CQE_L3_OK	= 1 << 1,
	CQE_L4_OK	= 1 << 2,
};

enum {
	CQE_TLS_OFFLOAD_NOT_DECRYPTED		= 0x0,
	CQE_TLS_OFFLOAD_DECRYPTED		= 0x1,
	CQE_TLS_OFFLOAD_RESYNC			= 0x2,
	CQE_TLS_OFFLOAD_ERROR			= 0x3,
};

struct mlx5_sig_err_cqe {
	u8		rsvd0[16];
	__be32		expected_trans_sig;
	__be32		actual_trans_sig;
	__be32		expected_reftag;
	__be32		actual_reftag;
	__be16		syndrome;
	u8		rsvd22[2];
	__be32		mkey;
	__be64		err_offset;
	u8		rsvd30[8];
	__be32		qpn;
	u8		rsvd38[2];
	u8		signature;
	u8		op_own;
};

struct mlx5_wqe_srq_next_seg {
	u8	rsvd0[2];
	__be16	next_wqe_index;
	u8	signature;
	u8	rsvd1[11];
};

union mlx5_ext_cqe {
	struct ib_grh	grh;
	u8		inl[64];
};

struct mlx5_cqe128 {
	union mlx5_ext_cqe	inl_grh;
	struct mlx5_cqe64	cqe64;
};

enum {
	MLX5_MKEY_STATUS_FREE	= 1 << 6,
};

enum {
	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
	MLX5_MKEY_BSF_EN	= 1 << 30,
};

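/*
 * Striding-RQ sketch for the mpwrq helpers above (illustrative;
 * advance_wq() and handle_rx() are hypothetical): for multi-packet WQEs
 * the CQE byte_cnt dword is reinterpreted as struct mpwrq_cqe_bc, and
 * filler CQEs only release strides without carrying a packet:
 *
 *	u16 strides = mpwrq_get_cqe_consumed_strides(cqe);
 *
 *	if (mpwrq_is_filler_cqe(cqe))
 *		advance_wq(strides);
 *	else
 *		handle_rx(mpwrq_get_cqe_stride_index(cqe),
 *			  mpwrq_get_cqe_byte_cnt(cqe), strides);
 */
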
struct mlx5_mkey_seg {
	/* This is a two bit field occupying bits 31-30.
	 * bit 31 is always 0,
	 * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that
	 * do not have translation
	 */
	u8		status;
	u8		pcie_control;
	u8		flags;
	u8		version;
	__be32		qpn_mkey7_0;
	u8		rsvd1[4];
	__be32		flags_pd;
	__be64		start_addr;
	__be64		len;
	__be32		bsfs_octo_size;
	u8		rsvd2[16];
	__be32		xlt_oct_size;
	u8		rsvd3[3];
	u8		log2_page_size;
	u8		rsvd4[4];
};

#define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

enum {
	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 << 0
};

enum {
	VPORT_STATE_DOWN		= 0x0,
	VPORT_STATE_UP			= 0x1,
};

enum {
	MLX5_VPORT_ADMIN_STATE_DOWN  = 0x0,
	MLX5_VPORT_ADMIN_STATE_UP    = 0x1,
	MLX5_VPORT_ADMIN_STATE_AUTO  = 0x2,
};

enum {
	MLX5_L3_PROT_TYPE_IPV4		= 0,
	MLX5_L3_PROT_TYPE_IPV6		= 1,
};

enum {
	MLX5_L4_PROT_TYPE_TCP		= 0,
	MLX5_L4_PROT_TYPE_UDP		= 1,
};

enum {
	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
};

enum {
	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
	MLX5_MATCH_MISC_PARAMETERS_2	= 1 << 3,
	MLX5_MATCH_MISC_PARAMETERS_3	= 1 << 4,
	MLX5_MATCH_MISC_PARAMETERS_4	= 1 << 5,
};

enum {
	MLX5_FLOW_TABLE_TYPE_NIC_RCV	= 0,
	MLX5_FLOW_TABLE_TYPE_ESWITCH	= 4,
};

enum {
	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT	= 0,
	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE	= 1,
	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR		= 2,
};

enum mlx5_list_type {
	MLX5_NVPRT_LIST_TYPE_UC   = 0x0,
	MLX5_NVPRT_LIST_TYPE_MC   = 0x1,
	MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
};

enum {
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM    = 0x1,
};

enum mlx5_wol_mode {
	MLX5_WOL_DISABLE	= 0,
	MLX5_WOL_SECURED_MAGIC	= 1 << 1,
	MLX5_WOL_MAGIC		= 1 << 2,
	MLX5_WOL_ARP		= 1 << 3,
	MLX5_WOL_BROADCAST	= 1 << 4,
	MLX5_WOL_MULTICAST	= 1 << 5,
	MLX5_WOL_UNICAST	= 1 << 6,
	MLX5_WOL_PHY_ACTIVITY	= 1 << 7,
};

enum mlx5_mpls_supported_fields {
	MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0,
	MLX5_FIELD_SUPPORT_MPLS_EXP   = 1 << 1,
	MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2,
	MLX5_FIELD_SUPPORT_MPLS_TTL   = 1 << 3
};

enum mlx5_flex_parser_protos {
	MLX5_FLEX_PROTO_GENEVE	    = 1 << 3,
	MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
	MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
};

/* MLX5 DEV CAPs */

/* TODO: EAT.ME */
enum mlx5_cap_mode {
	HCA_CAP_OPMOD_GET_MAX	= 0,
	HCA_CAP_OPMOD_GET_CUR	= 1,
};

enum mlx5_cap_type {
	MLX5_CAP_GENERAL = 0,
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_ODP,
	MLX5_CAP_ATOMIC,
	MLX5_CAP_ROCE,
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	MLX5_CAP_ESWITCH,
	MLX5_CAP_RESERVED,
	MLX5_CAP_VECTOR_CALC,
	MLX5_CAP_QOS,
	MLX5_CAP_DEBUG,
	MLX5_CAP_RESERVED_14,
	MLX5_CAP_DEV_MEM,
	MLX5_CAP_RESERVED_16,
	MLX5_CAP_TLS,
	MLX5_CAP_VDPA_EMULATION = 0x13,
	MLX5_CAP_DEV_EVENT = 0x14,
	MLX5_CAP_IPSEC,
	/* NUM OF CAP Types */
	MLX5_CAP_NUM
};

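/*
 * Capability pages are queried per mlx5_cap_type, once for the device
 * maximum (HCA_CAP_OPMOD_GET_MAX) and once for the currently enabled
 * values (HCA_CAP_OPMOD_GET_CUR).  A sketch of the QUERY_HCA_CAP op_mod
 * packing, which is a driver convention rather than a definition in
 * this file:
 *
 *	MLX5_SET(query_hca_cap_in, in, op_mod,
 *		 MLX5_CAP_ODP << 1 | HCA_CAP_OPMOD_GET_CUR);
 */
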
enum mlx5_pcam_reg_groups {
	MLX5_PCAM_REGS_5000_TO_507F	= 0x0,
};

enum mlx5_pcam_feature_groups {
	MLX5_PCAM_FEATURE_ENHANCED_FEATURES	= 0x0,
};

enum mlx5_mcam_reg_groups {
	MLX5_MCAM_REGS_FIRST_128	= 0x0,
	MLX5_MCAM_REGS_0x9080_0x90FF	= 0x1,
	MLX5_MCAM_REGS_0x9100_0x917F	= 0x2,
	MLX5_MCAM_REGS_NUM		= 0x3,
};

enum mlx5_mcam_feature_groups {
	MLX5_MCAM_FEATURE_ENHANCED_FEATURES	= 0x0,
};

enum mlx5_qcam_reg_groups {
	MLX5_QCAM_REGS_FIRST_128	= 0x0,
};

enum mlx5_qcam_feature_groups {
	MLX5_QCAM_FEATURE_ENHANCED_FEATURES	= 0x0,
};

/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_GEN_64(mdev, cap) \
	MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_GEN_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_ETH(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ETH_MAX(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)

#define MLX5_CAP_ROCE(mdev, cap) \
	MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ROCE_MAX(mdev, cap) \
	MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ATOMIC(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)

#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)

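/*
 * Getter usage sketch (illustrative; the capability field names are
 * defined by mlx5_ifc.h, not by this header):
 *
 *	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
 *	    MLX5_CAP_ETH(mdev, csum_cap))
 *		...
 */
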
#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)

#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_eswitch_cap, \
		(mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_MAX(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP_ODP(mdev, cap)\
	MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)

#define MLX5_CAP_ODP_MAX(mdev, cap)\
	MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap)

#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
	MLX5_GET(vector_calc_cap, \
		 mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)

#define MLX5_CAP_QOS(mdev, cap)\
	MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)

#define MLX5_CAP_DEBUG(mdev, cap)\
	MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)

#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_PCAM_REG(mdev, reg) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)

#define MLX5_CAP_MCAM_REG(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
		 mng_access_reg_cap_mask.access_regs.reg)

#define MLX5_CAP_MCAM_REG1(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
		 mng_access_reg_cap_mask.access_regs1.reg)

#define MLX5_CAP_MCAM_REG2(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
		 mng_access_reg_cap_mask.access_regs2.reg)

#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_QCAM_REG(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)

#define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)

#define MLX5_CAP_FPGA(mdev, cap) \
	MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP64_FPGA(mdev, cap) \
	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP_DEV_MEM(mdev, cap)\
	MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)

#define MLX5_CAP64_DEV_MEM(mdev, cap)\
	MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)

#define MLX5_CAP_TLS(mdev, cap) \
	MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap)

#define MLX5_CAP_DEV_EVENT(mdev, cap)\
	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)

#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
	MLX5_GET(virtio_emulation_cap, \
		(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)

#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
	MLX5_GET64(virtio_emulation_cap, \
		(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)

#define MLX5_CAP_IPSEC(mdev, cap)\
	MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap)

enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};

enum {
	MLX5_IEEE_802_3_COUNTERS_GROUP		= 0x0,
	MLX5_RFC_2863_COUNTERS_GROUP		= 0x1,
	MLX5_RFC_2819_COUNTERS_GROUP		= 0x2,
	MLX5_RFC_3635_COUNTERS_GROUP		= 0x3,
	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP	= 0x5,
	MLX5_PER_PRIORITY_COUNTERS_GROUP	= 0x10,
	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP	= 0x11,
	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP	= 0x12,
	MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP	= 0x13,
	MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP	= 0x16,
	MLX5_INFINIBAND_PORT_COUNTERS_GROUP	= 0x20,
};

enum {
	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP	= 0x0,
};

static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
		return 0;
	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}

#define MLX5_BY_PASS_NUM_REGULAR_PRIOS		16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS	16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS	1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)

#endif /* MLX5_DEVICE_H */