1 /* 2 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"

/* Command interface revision implemented by this driver. */
enum {
	CMD_IF_REV = 5,
};

/* Command completion reporting modes. */
enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

/* Delivery status values reported by HW in the command descriptor. */
enum {
	MLX5_CMD_DELIVERY_STAT_OK = 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};

/* Allocate and initialize a command work entry.
 * Callback (asynchronous) callers may run in atomic context, hence
 * GFP_ATOMIC there; synchronous callers may sleep (GFP_KERNEL).
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct mlx5_cmd_work_ent *
cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
	      struct mlx5_cmd_msg *out, void *uout, int uout_size,
	      mlx5_cmd_cbk_t cbk, void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->idx = -EINVAL;	/* no HW command slot assigned yet */
	ent->in = in;
	ent->out = out;
	ent->uout = uout;
	ent->uout_size = uout_size;
	ent->callback = cbk;
	ent->context = context;
	ent->cmd = cmd;
	ent->page_queue = page_queue;
	refcount_set(&ent->refcnt, 1);	/* dropped by cmd_ent_put() */

	return ent;
}

/* Release a command work entry (counterpart of cmd_alloc_ent()). */
static void cmd_free_ent(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

/* Return the next command token; 0 is skipped (reserved). */
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

/* Claim a free command slot index, or -ENOMEM when all slots are busy.
 * A set bit in cmd->bitmask means the slot is free.
 */
static int cmd_alloc_index(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

/* Return a command slot index to the free pool. */
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
{
	refcount_inc(&ent->refcnt);
}

/* Drop a reference; on the last put release the HW slot (if one was
 * assigned), signal the matching semaphore and free the entry.
 */
static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
{
	if (!refcount_dec_and_test(&ent->refcnt))
		return;

	if (ent->idx >= 0) {
		struct mlx5_cmd *cmd = ent->cmd;

		cmd_free_index(cmd, ent->idx);
		up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
	}

	cmd_free_ent(ent);
}

/* Command layout descriptor for slot @idx in the command buffer. */
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

/* Number of extra mailbox blocks needed beyond the inline data area. */
static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
{
	int size = msg->len;
	int blen = size - min_t(int, sizeof(msg->first.data), size);

	return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
}

/* XOR-fold @len bytes starting at @offset of @buf into one byte. */
static u8 xor8_buf(void *buf, size_t offset, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;
	int end = len + offset;

	for (i = offset; i < end; i++)
		sum ^= ptr[i];

	return sum;
}

/* Verify the control and whole-block signatures of a mailbox block.
 * A validly signed region XORs to 0xff. Returns 0 or -EINVAL.
 */
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
	int xor_len = sizeof(*block) - sizeof(block->data) - 1;

	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

/* Compute the control and whole-block signatures of a mailbox block. */
static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}

/* Sign every mailbox block chained to @msg. */
static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int i = 0;

	for (i = 0; i < n && next; i++) {
		calc_block_sig(next->buf);
		next = next->next;
	}
}

/* Sign the command layout and, when checksumming is enabled (@csum),
 * the input and output mailbox chains as well.
 */
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (csum) {
		calc_chain_sig(ent->in);
		calc_chain_sig(ent->out);
	}
}

/* Poll (with rescheduling) until FW hands descriptor ownership back to
 * SW, or until the FW command timeout plus one second of grace elapses.
 */
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_core_dev *dev =
		container_of(ent->cmd, struct mlx5_core_dev, cmd);
	u64 cmd_to_ms = mlx5_tout_ms(dev, CMD);
	unsigned long poll_end;
	u8 own;

	/* configured FW command timeout + 1s of grace */
	poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000);

	do {
		own = READ_ONCE(ent->lay->status_own);
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		cond_resched();
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

/* Verify the layout signature and every output mailbox block signature.
 * Returns 0 when everything checks out, -EINVAL otherwise.
 */
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int n = mlx5_calc_cmd_blocks(ent->out);
	int err;
	u8 sig;
	int i = 0;

	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	for (i = 0; i < n && next; i++) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

/* Hex-dump @size bytes of @buf for command tracing, 16 bytes per line. */
static void dump_buf(void *buf, int size, int data_only, int offset, int idx)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
			 be32_to_cpu(p[0]), be32_to_cpu(p[1]),
			 be32_to_cpu(p[2]), be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

/* Choose the return value / status / syndrome reported for opcode @op
 * when the command is never sent to FW (internal error or teardown).
 * Destroy/teardown-style commands are faked as successful so cleanup
 * flows can progress; the rest fail with a driver-generated syndrome.
 */
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_XRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
	case MLX5_CMD_OP_DEALLOC_MEMIC:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
	case MLX5_CMD_OP_DEALLOC_SF:
	case MLX5_CMD_OP_DESTROY_UCTX:
	case MLX5_CMD_OP_DESTROY_UMEM:
	case MLX5_CMD_OP_MODIFY_RQT:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_SET_MONITOR_COUNTER:
	case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_QUERY_RQT:

	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_CREATE_QP:
	case MLX5_CMD_OP_FPGA_MODIFY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_UCTX:
	case MLX5_CMD_OP_CREATE_UMEM:
	case MLX5_CMD_OP_ALLOC_MEMIC:
	case MLX5_CMD_OP_MODIFY_XRQ:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_QUERY_VHCA_STATE:
	case MLX5_CMD_OP_MODIFY_VHCA_STATE:
	case MLX5_CMD_OP_ALLOC_SF:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}

/* Human-readable name for a FW command opcode. */
const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
	MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_LAG);
	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
	MLX5_COMMAND_STR_CASE(QUERY_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
	MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
	MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
	MLX5_COMMAND_STR_CASE(CREATE_XRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRQ);
	MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
	MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
	MLX5_COMMAND_STR_CASE(CREATE_UCTX);
	MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
	MLX5_COMMAND_STR_CASE(CREATE_UMEM);
	MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
	MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
	MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
	MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
	MLX5_COMMAND_STR_CASE(ALLOC_SF);
	MLX5_COMMAND_STR_CASE(DEALLOC_SF);
	default: return "unknown command opcode";
	}
}

/* Human-readable description of a FW command status code. */
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

/* Map a FW command status code to an errno. */
static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:
							return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}

/* Layout of the common command output header (status + syndrome). */
struct mlx5_ifc_mbox_out_bits {
	u8	status[0x8];
	u8	reserved_at_8[0x18];

	u8	syndrome[0x20];

	u8	reserved_at_40[0x40];
};

/* Layout of the common command input header (opcode/uid/op_mod). */
struct mlx5_ifc_mbox_in_bits {
	u8	opcode[0x10];
	u8	uid[0x10];

	u8	reserved_at_20[0x10];
	u8	op_mod[0x10];

	u8	reserved_at_40[0x40];
};

/* Extract the FW status and syndrome from a command output mailbox. */
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
	*status = MLX5_GET(mbox_out, out, status);
	*syndrome = MLX5_GET(mbox_out, out, syndrome);
}

/* Translate the FW status reported in @out into an errno.
 * Kernel-issued failing commands (uid == 0) are logged at error level
 * (rate limited), except DESTROY_MKEY; user commands log at debug.
 */
static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
	u32 syndrome;
	u8 status;
	u16 opcode;
	u16 op_mod;
	u16 uid;

	mlx5_cmd_mbox_status(out, &status, &syndrome);
	if (!status)
		return 0;

	opcode = MLX5_GET(mbox_in, in, opcode);
	op_mod = MLX5_GET(mbox_in, in, op_mod);
	uid = MLX5_GET(mbox_in, in, uid);

	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
		mlx5_core_err_rl(dev,
				 "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
				 mlx5_command_str(opcode), opcode, op_mod,
				 cmd_status_str(status), status, syndrome);
	else
		mlx5_core_dbg(dev,
			      "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
			      mlx5_command_str(opcode),
			      opcode, op_mod,
			      cmd_status_str(status),
			      status,
			      syndrome);

	return cmd_status_to_err(status);
}

/* Debug-dump a command: the layout descriptor (or its inline data in
 * data-only mode) followed by the chained mailbox blocks of the input
 * (@input != 0) or output message.
 */
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int data_only;
	u32 offset = 0;
	int dump_len;
	int i;

	mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx);
	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "cmd[%d]: dump command data %s(0x%x) %s\n",
				   ent->idx, mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n",
			      ent->idx, mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx);
		offset += sizeof(*ent->lay);
	}

	for (i = 0; i < n && next; i++) {
		if (data_only) {
			/* last block may hold less than a full data block */
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset, ent->idx);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx);
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset,
				 ent->idx);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");

	mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
}

/* Opcode stored in the inline data area of a command message. */
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	return MLX5_GET(mbox_in, in->first.data, opcode);
}

static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);

/* Delayed-work handler for an async command that did not complete in
 * time: try EQ recovery first; if the entry is still pending, force
 * its completion with -ETIMEDOUT.
 */
static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	mlx5_cmd_eq_recover(dev);

	/* Maybe got handled by eq recover ?
	 */
	if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
			       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
		goto out; /* phew, already handled */
	}

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
		       ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);

out:
	cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);

/* Is @opcode currently permitted? During special flows only a single
 * opcode may be allowed through (cmd->allowed_opcode).
 */
static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
{
	if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
		return true;

	return cmd->allowed_opcode == opcode;
}

/* True when the command interface cannot reach FW: PCI channel offline,
 * CMDIF not up, or the device is in internal error state.
 */
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
{
	return pci_channel_offline(dev->pdev) ||
	       dev->cmd.state != MLX5_CMDIF_STATE_UP ||
	       dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR;
}

/* Workqueue handler that posts one command to FW: acquires the proper
 * semaphore and a HW slot, builds the command layout, signs it, rings
 * the doorbell, and in polling mode also waits for and completes the
 * command.
 */
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	bool poll_cmd = ent->polling;
	struct mlx5_cmd_layout *lay;
	struct mlx5_core_dev *dev;
	unsigned long cb_timeout;
	struct semaphore *sem;
	unsigned long flags;
	int alloc_ret;
	int cmd_mode;

	dev = container_of(cmd, struct mlx5_core_dev, cmd);
	cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));

	complete(&ent->handling);
	/* page commands have a dedicated semaphore/slot */
	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		alloc_ret = cmd_alloc_index(cmd);
		if (alloc_ret < 0) {
			mlx5_core_err_rl(dev, "failed to allocate command entry\n");
			if (ent->callback) {
				ent->callback(-EAGAIN, ent->context);
				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);
				cmd_ent_put(ent);
			} else {
				ent->ret = -EAGAIN;
				complete(&ent->done);
			}
			up(sem);
			return;
		}
		ent->idx = alloc_ret;
	} else {
		/* the page queue always uses the last (reserved) slot */
		ent->idx = cmd->max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	cmd_mode = cmd->mode;

	/* extra ref is dropped by cb_timeout_handler() */
	if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
		cmd_ent_get(ent);
	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);

	/* Skip sending command to fw if internal error */
	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
		u8 status = 0;
		u32 drv_synd;

		ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
		MLX5_SET(mbox_out, ent->out, status, status);
		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);

		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
		return;
	}

	cmd_ent_get(ent); /* for the _real_ FW event on completion */
	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	/* if not in polling don't use ent after this point */
	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
	}
}

/* Human-readable string for a HW delivery status code. */
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

enum {
	MLX5_CMD_TIMEOUT_RECOVER_MSEC = 5 * 1000,
};

static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
					  struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC);

	mlx5_cmd_eq_recover(dev);

	/*
Re-wait on the ent->done after executing the recovery flow. If the 1050 * recovery flow (or any other recovery flow running simultaneously) 1051 * has recovered an EQE, it should cause the entry to be completed by 1052 * the command interface. 1053 */ 1054 if (wait_for_completion_timeout(&ent->done, timeout)) { 1055 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx, 1056 mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); 1057 return; 1058 } 1059 1060 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx, 1061 mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); 1062 1063 ent->ret = -ETIMEDOUT; 1064 mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); 1065 } 1066 1067 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) 1068 { 1069 unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD)); 1070 struct mlx5_cmd *cmd = &dev->cmd; 1071 int err; 1072 1073 if (!wait_for_completion_timeout(&ent->handling, timeout) && 1074 cancel_work_sync(&ent->work)) { 1075 ent->ret = -ECANCELED; 1076 goto out_err; 1077 } 1078 if (cmd->mode == CMD_MODE_POLLING || ent->polling) 1079 wait_for_completion(&ent->done); 1080 else if (!wait_for_completion_timeout(&ent->done, timeout)) 1081 wait_func_handle_exec_timeout(dev, ent); 1082 1083 out_err: 1084 err = ent->ret; 1085 1086 if (err == -ETIMEDOUT) { 1087 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 1088 mlx5_command_str(msg_to_opcode(ent->in)), 1089 msg_to_opcode(ent->in)); 1090 } else if (err == -ECANCELED) { 1091 mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", 1092 mlx5_command_str(msg_to_opcode(ent->in)), 1093 msg_to_opcode(ent->in)); 1094 } 1095 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", 1096 err, deliv_status_to_str(ent->status), ent->status); 1097 1098 return err; 1099 } 1100 1101 /* Notes: 1102 * 1. Callback functions may not sleep 1103 * 2. 
page queue commands do not support asynchrous completion 1104 */ 1105 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, 1106 struct mlx5_cmd_msg *out, void *uout, int uout_size, 1107 mlx5_cmd_cbk_t callback, 1108 void *context, int page_queue, u8 *status, 1109 u8 token, bool force_polling) 1110 { 1111 struct mlx5_cmd *cmd = &dev->cmd; 1112 struct mlx5_cmd_work_ent *ent; 1113 struct mlx5_cmd_stats *stats; 1114 int err = 0; 1115 s64 ds; 1116 u16 op; 1117 1118 if (callback && page_queue) 1119 return -EINVAL; 1120 1121 ent = cmd_alloc_ent(cmd, in, out, uout, uout_size, 1122 callback, context, page_queue); 1123 if (IS_ERR(ent)) 1124 return PTR_ERR(ent); 1125 1126 /* put for this ent is when consumed, depending on the use case 1127 * 1) (!callback) blocking flow: by caller after wait_func completes 1128 * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled 1129 */ 1130 1131 ent->token = token; 1132 ent->polling = force_polling; 1133 1134 init_completion(&ent->handling); 1135 if (!callback) 1136 init_completion(&ent->done); 1137 1138 INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler); 1139 INIT_WORK(&ent->work, cmd_work_handler); 1140 if (page_queue) { 1141 cmd_work_handler(&ent->work); 1142 } else if (!queue_work(cmd->wq, &ent->work)) { 1143 mlx5_core_warn(dev, "failed to queue work\n"); 1144 err = -ENOMEM; 1145 goto out_free; 1146 } 1147 1148 if (callback) 1149 goto out; /* mlx5_cmd_comp_handler() will put(ent) */ 1150 1151 err = wait_func(dev, ent); 1152 if (err == -ETIMEDOUT || err == -ECANCELED) 1153 goto out_free; 1154 1155 ds = ent->ts2 - ent->ts1; 1156 op = MLX5_GET(mbox_in, in->first.data, opcode); 1157 if (op < MLX5_CMD_OP_MAX) { 1158 stats = &cmd->stats[op]; 1159 spin_lock_irq(&stats->lock); 1160 stats->sum += ds; 1161 ++stats->n; 1162 spin_unlock_irq(&stats->lock); 1163 } 1164 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, 1165 "fw exec time for %s is %lld nsec\n", 1166 mlx5_command_str(op), ds); 1167 *status 
= ent->status; 1168 1169 out_free: 1170 cmd_ent_put(ent); 1171 out: 1172 return err; 1173 } 1174 1175 static ssize_t dbg_write(struct file *filp, const char __user *buf, 1176 size_t count, loff_t *pos) 1177 { 1178 struct mlx5_core_dev *dev = filp->private_data; 1179 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1180 char lbuf[3]; 1181 int err; 1182 1183 if (!dbg->in_msg || !dbg->out_msg) 1184 return -ENOMEM; 1185 1186 if (count < sizeof(lbuf) - 1) 1187 return -EINVAL; 1188 1189 if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1)) 1190 return -EFAULT; 1191 1192 lbuf[sizeof(lbuf) - 1] = 0; 1193 1194 if (strcmp(lbuf, "go")) 1195 return -EINVAL; 1196 1197 err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen); 1198 1199 return err ? err : count; 1200 } 1201 1202 static const struct file_operations fops = { 1203 .owner = THIS_MODULE, 1204 .open = simple_open, 1205 .write = dbg_write, 1206 }; 1207 1208 static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size, 1209 u8 token) 1210 { 1211 struct mlx5_cmd_prot_block *block; 1212 struct mlx5_cmd_mailbox *next; 1213 int copy; 1214 1215 if (!to || !from) 1216 return -ENOMEM; 1217 1218 copy = min_t(int, size, sizeof(to->first.data)); 1219 memcpy(to->first.data, from, copy); 1220 size -= copy; 1221 from += copy; 1222 1223 next = to->next; 1224 while (size) { 1225 if (!next) { 1226 /* this is a BUG */ 1227 return -ENOMEM; 1228 } 1229 1230 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); 1231 block = next->buf; 1232 memcpy(block->data, from, copy); 1233 from += copy; 1234 size -= copy; 1235 block->token = token; 1236 next = next->next; 1237 } 1238 1239 return 0; 1240 } 1241 1242 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) 1243 { 1244 struct mlx5_cmd_prot_block *block; 1245 struct mlx5_cmd_mailbox *next; 1246 int copy; 1247 1248 if (!to || !from) 1249 return -ENOMEM; 1250 1251 copy = min_t(int, size, sizeof(from->first.data)); 1252 memcpy(to, from->first.data, 
copy); 1253 size -= copy; 1254 to += copy; 1255 1256 next = from->next; 1257 while (size) { 1258 if (!next) { 1259 /* this is a BUG */ 1260 return -ENOMEM; 1261 } 1262 1263 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); 1264 block = next->buf; 1265 1266 memcpy(to, block->data, copy); 1267 to += copy; 1268 size -= copy; 1269 next = next->next; 1270 } 1271 1272 return 0; 1273 } 1274 1275 static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev, 1276 gfp_t flags) 1277 { 1278 struct mlx5_cmd_mailbox *mailbox; 1279 1280 mailbox = kmalloc(sizeof(*mailbox), flags); 1281 if (!mailbox) 1282 return ERR_PTR(-ENOMEM); 1283 1284 mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags, 1285 &mailbox->dma); 1286 if (!mailbox->buf) { 1287 mlx5_core_dbg(dev, "failed allocation\n"); 1288 kfree(mailbox); 1289 return ERR_PTR(-ENOMEM); 1290 } 1291 mailbox->next = NULL; 1292 1293 return mailbox; 1294 } 1295 1296 static void free_cmd_box(struct mlx5_core_dev *dev, 1297 struct mlx5_cmd_mailbox *mailbox) 1298 { 1299 dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); 1300 kfree(mailbox); 1301 } 1302 1303 static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, 1304 gfp_t flags, int size, 1305 u8 token) 1306 { 1307 struct mlx5_cmd_mailbox *tmp, *head = NULL; 1308 struct mlx5_cmd_prot_block *block; 1309 struct mlx5_cmd_msg *msg; 1310 int err; 1311 int n; 1312 int i; 1313 1314 msg = kzalloc(sizeof(*msg), flags); 1315 if (!msg) 1316 return ERR_PTR(-ENOMEM); 1317 1318 msg->len = size; 1319 n = mlx5_calc_cmd_blocks(msg); 1320 1321 for (i = 0; i < n; i++) { 1322 tmp = alloc_cmd_box(dev, flags); 1323 if (IS_ERR(tmp)) { 1324 mlx5_core_warn(dev, "failed allocating block\n"); 1325 err = PTR_ERR(tmp); 1326 goto err_alloc; 1327 } 1328 1329 block = tmp->buf; 1330 tmp->next = head; 1331 block->next = cpu_to_be64(tmp->next ? 
tmp->next->dma : 0); 1332 block->block_num = cpu_to_be32(n - i - 1); 1333 block->token = token; 1334 head = tmp; 1335 } 1336 msg->next = head; 1337 return msg; 1338 1339 err_alloc: 1340 while (head) { 1341 tmp = head->next; 1342 free_cmd_box(dev, head); 1343 head = tmp; 1344 } 1345 kfree(msg); 1346 1347 return ERR_PTR(err); 1348 } 1349 1350 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, 1351 struct mlx5_cmd_msg *msg) 1352 { 1353 struct mlx5_cmd_mailbox *head = msg->next; 1354 struct mlx5_cmd_mailbox *next; 1355 1356 while (head) { 1357 next = head->next; 1358 free_cmd_box(dev, head); 1359 head = next; 1360 } 1361 kfree(msg); 1362 } 1363 1364 static ssize_t data_write(struct file *filp, const char __user *buf, 1365 size_t count, loff_t *pos) 1366 { 1367 struct mlx5_core_dev *dev = filp->private_data; 1368 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1369 void *ptr; 1370 1371 if (*pos != 0) 1372 return -EINVAL; 1373 1374 kfree(dbg->in_msg); 1375 dbg->in_msg = NULL; 1376 dbg->inlen = 0; 1377 ptr = memdup_user(buf, count); 1378 if (IS_ERR(ptr)) 1379 return PTR_ERR(ptr); 1380 dbg->in_msg = ptr; 1381 dbg->inlen = count; 1382 1383 *pos = count; 1384 1385 return count; 1386 } 1387 1388 static ssize_t data_read(struct file *filp, char __user *buf, size_t count, 1389 loff_t *pos) 1390 { 1391 struct mlx5_core_dev *dev = filp->private_data; 1392 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1393 1394 if (!dbg->out_msg) 1395 return -ENOMEM; 1396 1397 return simple_read_from_buffer(buf, count, pos, dbg->out_msg, 1398 dbg->outlen); 1399 } 1400 1401 static const struct file_operations dfops = { 1402 .owner = THIS_MODULE, 1403 .open = simple_open, 1404 .write = data_write, 1405 .read = data_read, 1406 }; 1407 1408 static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count, 1409 loff_t *pos) 1410 { 1411 struct mlx5_core_dev *dev = filp->private_data; 1412 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1413 char outlen[8]; 1414 int err; 1415 1416 err = 
snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); 1417 if (err < 0) 1418 return err; 1419 1420 return simple_read_from_buffer(buf, count, pos, outlen, err); 1421 } 1422 1423 static ssize_t outlen_write(struct file *filp, const char __user *buf, 1424 size_t count, loff_t *pos) 1425 { 1426 struct mlx5_core_dev *dev = filp->private_data; 1427 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1428 char outlen_str[8] = {0}; 1429 int outlen; 1430 void *ptr; 1431 int err; 1432 1433 if (*pos != 0 || count > 6) 1434 return -EINVAL; 1435 1436 kfree(dbg->out_msg); 1437 dbg->out_msg = NULL; 1438 dbg->outlen = 0; 1439 1440 if (copy_from_user(outlen_str, buf, count)) 1441 return -EFAULT; 1442 1443 err = sscanf(outlen_str, "%d", &outlen); 1444 if (err < 0) 1445 return err; 1446 1447 ptr = kzalloc(outlen, GFP_KERNEL); 1448 if (!ptr) 1449 return -ENOMEM; 1450 1451 dbg->out_msg = ptr; 1452 dbg->outlen = outlen; 1453 1454 *pos = count; 1455 1456 return count; 1457 } 1458 1459 static const struct file_operations olfops = { 1460 .owner = THIS_MODULE, 1461 .open = simple_open, 1462 .write = outlen_write, 1463 .read = outlen_read, 1464 }; 1465 1466 static void set_wqname(struct mlx5_core_dev *dev) 1467 { 1468 struct mlx5_cmd *cmd = &dev->cmd; 1469 1470 snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", 1471 dev_name(dev->device)); 1472 } 1473 1474 static void clean_debug_files(struct mlx5_core_dev *dev) 1475 { 1476 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1477 1478 if (!mlx5_debugfs_root) 1479 return; 1480 1481 mlx5_cmdif_debugfs_cleanup(dev); 1482 debugfs_remove_recursive(dbg->dbg_root); 1483 } 1484 1485 static void create_debugfs_files(struct mlx5_core_dev *dev) 1486 { 1487 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1488 1489 dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root); 1490 1491 debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops); 1492 debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops); 1493 debugfs_create_file("out_len", 0600, 
dbg->dbg_root, dev, &olfops); 1494 debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status); 1495 debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops); 1496 1497 mlx5_cmdif_debugfs_init(dev); 1498 } 1499 1500 void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode) 1501 { 1502 struct mlx5_cmd *cmd = &dev->cmd; 1503 int i; 1504 1505 for (i = 0; i < cmd->max_reg_cmds; i++) 1506 down(&cmd->sem); 1507 down(&cmd->pages_sem); 1508 1509 cmd->allowed_opcode = opcode; 1510 1511 up(&cmd->pages_sem); 1512 for (i = 0; i < cmd->max_reg_cmds; i++) 1513 up(&cmd->sem); 1514 } 1515 1516 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) 1517 { 1518 struct mlx5_cmd *cmd = &dev->cmd; 1519 int i; 1520 1521 for (i = 0; i < cmd->max_reg_cmds; i++) 1522 down(&cmd->sem); 1523 down(&cmd->pages_sem); 1524 1525 cmd->mode = mode; 1526 1527 up(&cmd->pages_sem); 1528 for (i = 0; i < cmd->max_reg_cmds; i++) 1529 up(&cmd->sem); 1530 } 1531 1532 static int cmd_comp_notifier(struct notifier_block *nb, 1533 unsigned long type, void *data) 1534 { 1535 struct mlx5_core_dev *dev; 1536 struct mlx5_cmd *cmd; 1537 struct mlx5_eqe *eqe; 1538 1539 cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb); 1540 dev = container_of(cmd, struct mlx5_core_dev, cmd); 1541 eqe = data; 1542 1543 mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false); 1544 1545 return NOTIFY_OK; 1546 } 1547 void mlx5_cmd_use_events(struct mlx5_core_dev *dev) 1548 { 1549 MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD); 1550 mlx5_eq_notifier_register(dev, &dev->cmd.nb); 1551 mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS); 1552 } 1553 1554 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) 1555 { 1556 mlx5_cmd_change_mod(dev, CMD_MODE_POLLING); 1557 mlx5_eq_notifier_unregister(dev, &dev->cmd.nb); 1558 } 1559 1560 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1561 { 1562 unsigned long flags; 1563 1564 if (msg->parent) { 1565 spin_lock_irqsave(&msg->parent->lock, 
flags); 1566 list_add_tail(&msg->list, &msg->parent->head); 1567 spin_unlock_irqrestore(&msg->parent->lock, flags); 1568 } else { 1569 mlx5_free_cmd_msg(dev, msg); 1570 } 1571 } 1572 1573 static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) 1574 { 1575 struct mlx5_cmd *cmd = &dev->cmd; 1576 struct mlx5_cmd_work_ent *ent; 1577 mlx5_cmd_cbk_t callback; 1578 void *context; 1579 int err; 1580 int i; 1581 s64 ds; 1582 struct mlx5_cmd_stats *stats; 1583 unsigned long flags; 1584 unsigned long vector; 1585 1586 /* there can be at most 32 command queues */ 1587 vector = vec & 0xffffffff; 1588 for (i = 0; i < (1 << cmd->log_sz); i++) { 1589 if (test_bit(i, &vector)) { 1590 ent = cmd->ent_arr[i]; 1591 1592 /* if we already completed the command, ignore it */ 1593 if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, 1594 &ent->state)) { 1595 /* only real completion can free the cmd slot */ 1596 if (!forced) { 1597 mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n", 1598 ent->idx); 1599 cmd_ent_put(ent); 1600 } 1601 continue; 1602 } 1603 1604 if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work)) 1605 cmd_ent_put(ent); /* timeout work was canceled */ 1606 1607 if (!forced || /* Real FW completion */ 1608 pci_channel_offline(dev->pdev) || /* FW is inaccessible */ 1609 dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 1610 cmd_ent_put(ent); 1611 1612 ent->ts2 = ktime_get_ns(); 1613 memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); 1614 dump_command(dev, ent, 0); 1615 if (!ent->ret) { 1616 if (!cmd->checksum_disabled) 1617 ent->ret = verify_signature(ent); 1618 else 1619 ent->ret = 0; 1620 if (vec & MLX5_TRIGGERED_CMD_COMP) 1621 ent->status = MLX5_DRIVER_STATUS_ABORTED; 1622 else 1623 ent->status = ent->lay->status_own >> 1; 1624 1625 mlx5_core_dbg(dev, "command completed. 
ret 0x%x, delivery status %s(0x%x)\n", 1626 ent->ret, deliv_status_to_str(ent->status), ent->status); 1627 } 1628 1629 if (ent->callback) { 1630 ds = ent->ts2 - ent->ts1; 1631 if (ent->op < MLX5_CMD_OP_MAX) { 1632 stats = &cmd->stats[ent->op]; 1633 spin_lock_irqsave(&stats->lock, flags); 1634 stats->sum += ds; 1635 ++stats->n; 1636 spin_unlock_irqrestore(&stats->lock, flags); 1637 } 1638 1639 callback = ent->callback; 1640 context = ent->context; 1641 err = ent->ret; 1642 if (!err) { 1643 err = mlx5_copy_from_msg(ent->uout, 1644 ent->out, 1645 ent->uout_size); 1646 1647 err = err ? err : mlx5_cmd_check(dev, 1648 ent->in->first.data, 1649 ent->uout); 1650 } 1651 1652 mlx5_free_cmd_msg(dev, ent->out); 1653 free_msg(dev, ent->in); 1654 1655 err = err ? err : ent->status; 1656 /* final consumer is done, release ent */ 1657 cmd_ent_put(ent); 1658 callback(err, context); 1659 } else { 1660 /* release wait_func() so mlx5_cmd_invoke() 1661 * can make the final ent_put() 1662 */ 1663 complete(&ent->done); 1664 } 1665 } 1666 } 1667 } 1668 1669 void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev) 1670 { 1671 struct mlx5_cmd *cmd = &dev->cmd; 1672 unsigned long bitmask; 1673 unsigned long flags; 1674 u64 vector; 1675 int i; 1676 1677 /* wait for pending handlers to complete */ 1678 mlx5_eq_synchronize_cmd_irq(dev); 1679 spin_lock_irqsave(&dev->cmd.alloc_lock, flags); 1680 vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1); 1681 if (!vector) 1682 goto no_trig; 1683 1684 bitmask = vector; 1685 /* we must increment the allocated entries refcount before triggering the completions 1686 * to guarantee pending commands will not get freed in the meanwhile. 1687 * For that reason, it also has to be done inside the alloc_lock. 
	 */
	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
		cmd_ent_get(cmd->ent_arr[i]);
	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
	mlx5_cmd_comp_handler(dev, vector, true);
	/* drop the temporary refs taken under alloc_lock above */
	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
		cmd_ent_put(cmd->ent_arr[i]);
	return;

no_trig:
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}

/* Drain the command interface: grab every semaphore slot, forcing any
 * in-flight commands to complete via triggered completions first, then
 * release all slots so the interface is usable again.
 */
void mlx5_cmd_flush(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		while (down_trylock(&cmd->sem))
			mlx5_cmd_trigger_completions(dev);

	while (down_trylock(&cmd->pages_sem))
		mlx5_cmd_trigger_completions(dev);

	/* Unlock cmdif */
	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

/* Map a command delivery status to a negative errno (0 on success/abort). */
static int status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
	case MLX5_DRIVER_STATUS_ABORTED:
		return 0;
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return -EBADR;
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return -EFAULT; /* Bad address */
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return -ENOMSG;
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return -EIO;
	default:
		return -EINVAL;
	}
}

/* Get an input message: reuse one from the smallest size-matching cache when
 * available, otherwise allocate a fresh message.
 */
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct cmd_msg_cache *ch = NULL;
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	/* messages that fit entirely in the inline first block skip the cache */
	if (in_size <= 16)
		goto cache_miss;

	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
		ch = &cmd->cache[i];
		if (in_size > ch->max_inbox_size)
			continue;
		spin_lock_irq(&ch->lock);
		if (list_empty(&ch->head)) {
			spin_unlock_irq(&ch->lock);
			continue;
		}
		msg = list_entry(ch->head.next, typeof(*msg), list);
		/* For cached lists, we must explicitly state what is
		 * the real size
		 */
		msg->len = in_size;
		list_del(&msg->list);
		spin_unlock_irq(&ch->lock);
		break;
	}

	if (!IS_ERR(msg))
		return msg;

cache_miss:
	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
	return msg;
}

/* MANAGE_PAGES commands run on the dedicated page queue slot. */
static int is_manage_pages(void *in)
{
	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

/* Common implementation behind mlx5_cmd_exec(), mlx5_cmd_exec_cb() and
 * mlx5_cmd_exec_polling(): marshal in/out buffers into command messages and
 * invoke the command, synchronously or with @callback.
 */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context,
		    bool force_polling)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;
	u16 opcode;
	u8 token;

	opcode = MLX5_GET(mbox_in, in, opcode);
	/* device down or opcode filtered: synthesize an error completion */
	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) {
		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, drv_synd);
		return err;
	}

	pages_queue = is_manage_pages(in);
	/* callbacks may run from atomic context */
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status, token, force_polling);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	/* in the callback case the completion handler owns the messages */
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err;

	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx)
{
	ctx->dev = dev;
	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
	atomic_set(&ctx->num_inflight, 1);
	init_waitqueue_head(&ctx->wait);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);

/**
 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
 * @ctx: The ctx to clean
 *
 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
 * the call mlx5_cmd_cleanup_async_ctx().
 */
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
	/* drop the initial ref from mlx5_cmd_init_async_ctx() and wait for
	 * every in-flight callback to finish
	 */
	atomic_dec(&ctx->num_inflight);
	wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);

/* Completion trampoline: run the user callback, then drop the inflight ref
 * and wake a possible waiter in mlx5_cmd_cleanup_async_ctx().
 */
static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
	struct mlx5_async_work *work = _work;
	struct mlx5_async_ctx *ctx = work->ctx;

	work->user_callback(status, work);
	if (atomic_dec_and_test(&ctx->num_inflight))
		wake_up(&ctx->wait);
}

int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work)
{
	int ret;

	work->ctx = ctx;
	work->user_callback = callback;
	/* num_inflight == 0 means the ctx is already being cleaned up */
	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
		return -EIO;
	ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
		       mlx5_cmd_exec_cb_handler, work, false);
	/* on failure the handler never runs, so drop the ref here */
	if (ret && atomic_dec_and_test(&ctx->num_inflight))
		wake_up(&ctx->wait);

	return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size)
{
	int err;

	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);

	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;
	int i;

	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
		ch = &dev->cmd.cache[i];
		list_for_each_entry_safe(msg, n, &ch->head, list) {
			list_del(&msg->list);
			mlx5_free_cmd_msg(dev, msg);
		}
	}
}

/* entries per cache bucket, smallest message size first */
static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
	512, 32, 16, 8, 2
};

/* max inbox size per bucket: 16 inline bytes plus N data blocks */
static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
	16 + MLX5_CMD_DATA_BLOCK_SIZE,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};

static void create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	int i;
	int k;

	/* Initialize and fill the caches with initial entries */
	for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
		ch = &cmd->cache[k];
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->head);
		ch->num_ent = cmd_cache_num_ent[k];
		ch->max_inbox_size = cmd_cache_ent_size[k];
		for (i = 0; i < ch->num_ent; i++) {
			/* best effort: a partially filled cache is fine */
			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
						 ch->max_inbox_size, 0);
			if (IS_ERR(msg))
				break;
			msg->parent = ch;
			list_add_tail(&msg->list, &ch->head);
		}
	}
}

/* Allocate the 4K-aligned DMA page holding the command queue descriptors. */
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	/* not naturally aligned: over-allocate and align by hand */
	dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
						2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

/* Firmware's command-interface revision (high 16 bits of cmdif_rev_fw_sub). */
static u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}

int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		mlx5_core_err(dev,
			      "Driver cmdif rev(%d) differs from firmware's(%d)\n",
			      CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
	if (!cmd->stats)
		return -ENOMEM;

	cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
	if (!cmd->pool) {
		err = -ENOMEM;
		goto dma_pool_err;
	}

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	/* low byte of cmdq_addr_l_sz encodes queue log size and stride */
	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
			      1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	/* the whole queue (entries * stride) must fit in one adapter page */
	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		mlx5_core_err(dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->state = MLX5_CMDIF_STATE_DOWN;
	cmd->checksum_disabled = 1;
	/* one slot is reserved for the page (MANAGE_PAGES) queue */
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
			      CMD_IF_REV, cmd->cmdif_rev);
		err = -EOPNOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < MLX5_CMD_OP_MAX; i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	/* low 12 bits of the queue address must be zero (4K aligned) */
	if (cmd_l & 0xfff) {
		mlx5_core_err(dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;
	cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;

	create_msg_cache(dev);

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		mlx5_core_err(dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	create_debugfs_files(dev);

	return 0;

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	dma_pool_destroy(cmd->pool);
dma_pool_err:
	kvfree(cmd->stats);
	return err;
}

/* Tear down the command interface; exact reverse order of mlx5_cmd_init(). */
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	dma_pool_destroy(cmd->pool);
	kvfree(cmd->stats);
}

void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
			enum mlx5_cmdif_state cmdif_state)
{
	dev->cmd.state = cmdif_state;
}