1 /* 2 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */

#include <linux/highmem.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"
#define CREATE_TRACE_POINTS
#include "diag/cmd_tracepoint.h"

/* Command interface revision this driver speaks. */
enum {
	CMD_IF_REV = 5,
};

/* Command completion detection: busy-poll vs. EQE-driven events. */
enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

/* Delivery status codes HW reports for a command descriptor. */
enum {
	MLX5_CMD_DELIVERY_STAT_OK = 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};

/* Allocate and initialize a work entry describing one FW command.
 * GFP_ATOMIC is used when a completion callback is supplied
 * (NOTE(review): presumably the async path can be entered from atomic
 * context — confirm against callers).  Returns ERR_PTR(-ENOMEM) on
 * allocation failure; released via cmd_ent_put().
 */
static struct mlx5_cmd_work_ent *
cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
	      struct mlx5_cmd_msg *out, void *uout, int uout_size,
	      mlx5_cmd_cbk_t cbk, void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->idx = -EINVAL; /* no HW command slot assigned yet */
	ent->in = in;
	ent->out = out;
	ent->uout = uout;
	ent->uout_size = uout_size;
	ent->callback = cbk;
	ent->context = context;
	ent->cmd = cmd;
	ent->page_queue = page_queue;
	refcount_set(&ent->refcnt, 1);

	return ent;
}

/* Free an entry allocated by cmd_alloc_ent(). */
static void cmd_free_ent(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

/* Return the next command token; wraps in 1..255, never returns 0. */
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

/* Claim a free command-queue slot (a set bit means the slot is free).
 * Returns the slot index, or -ENOMEM when all regular slots are busy.
 */
static int cmd_alloc_index(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

/* Release a slot claimed by cmd_alloc_index(); caller holds alloc_lock. */
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
	lockdep_assert_held(&cmd->alloc_lock);
	set_bit(idx, &cmd->bitmask);
}

static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
{
	refcount_inc(&ent->refcnt);
}

/* Drop a reference; on the final put, return the HW slot to the pool,
 * release the corresponding throttling semaphore and free the entry.
 * alloc_lock is held across the refcount decrement so slot free and
 * slot allocation are serialized against each other.
 */
static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd *cmd = ent->cmd;
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	if (!refcount_dec_and_test(&ent->refcnt))
		goto out;

	if (ent->idx >= 0) {
		cmd_free_index(cmd, ent->idx);
		up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
	}

	cmd_free_ent(ent);
out:
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

/* Command layout (descriptor) of slot @idx within the command buffer. */
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

/* Number of external mailbox blocks needed for the part of @msg that
 * does not fit into the descriptor's inline data area.
 */
static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
{
	int size = msg->len;
	int blen = size - min_t(int, sizeof(msg->first.data), size);

	return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
}

/* XOR of @len bytes starting at @buf + @offset (8-bit signature helper). */
static u8 xor8_buf(void *buf, size_t offset, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;
	int end = len + offset;

	for (i = offset; i < end; i++)
		sum ^= ptr[i];

	return sum;
}

/* Validate both signatures of a mailbox block: the control-field region
 * and the whole block.  A correctly signed region XORs to 0xff.
 * Returns 0 on success, -EHWPOISON on mismatch.
 */
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
	int xor_len = sizeof(*block) - sizeof(block->data) - 1;

	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
		return -EHWPOISON;

	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
		return -EHWPOISON;

	return 0;
}

/* Compute and store both signatures of a mailbox block; the regions are
 * the counterparts of those checked by verify_block_sig().
 */
static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}

/* Sign every mailbox block in a message chain. */
static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int i = 0;

	for (i = 0; i < n && next; i++) {
		calc_block_sig(next->buf);
		next = next->next;
	}
}

/* Sign the command descriptor and, when checksumming is enabled, the
 * input and output mailbox chains as well.
 */
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (csum) {
		calc_chain_sig(ent->in);
		calc_chain_sig(ent->out);
	}
}

static void
poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd);
	u64 cmd_to_ms = mlx5_tout_ms(dev, CMD);
	unsigned long poll_end;
	u8 own;

	/* FW command timeout plus a one second grace period */
	poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000);

	do {
		own = READ_ONCE(ent->lay->status_own);
		if (!(own & CMD_OWNER_HW)) {
			/* ownership returned to SW: command completed */
			ent->ret = 0;
			return;
		}
		cond_resched();
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

/* Verify the descriptor signature and the signature of every output
 * mailbox block.  Returns 0 on success, -EHWPOISON on any mismatch.
 */
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int n = mlx5_calc_cmd_blocks(ent->out);
	int err;
	u8 sig;
	int i = 0;

	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EHWPOISON;

	for (i = 0; i < n && next; i++) {
		err = verify_block_sig(next->buf);
		if (err)
			return -EHWPOISON;

		next = next->next;
	}

	return 0;
}

/* Hex-dump @size bytes of @buf to the debug log, 16 bytes per row,
 * tagged with the command index; @offset seeds the printed byte offset.
 */
static void dump_buf(void *buf, int size, int data_only, int offset, int idx)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
			 be32_to_cpu(p[0]), be32_to_cpu(p[1]),
			 be32_to_cpu(p[2]), be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

/* Emulate a command return value while the device is in internal error
 * or going through reset: teardown/destroy/dealloc style opcodes report
 * success so cleanup flows can proceed, while create/query style opcodes
 * report MLX5_DRIVER_STATUS_ABORTED with -ENOLINK.
 */
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_XRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
	case MLX5_CMD_OP_DEALLOC_MEMIC:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
	case MLX5_CMD_OP_DEALLOC_SF:
	case MLX5_CMD_OP_DESTROY_UCTX:
	case MLX5_CMD_OP_DESTROY_UMEM:
	case MLX5_CMD_OP_MODIFY_RQT:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_SET_MONITOR_COUNTER:
	case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_QUERY_RQT:

	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_CREATE_QP:
	case MLX5_CMD_OP_FPGA_MODIFY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_UCTX:
	case MLX5_CMD_OP_CREATE_UMEM:
	case MLX5_CMD_OP_ALLOC_MEMIC:
	case MLX5_CMD_OP_MODIFY_XRQ:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_QUERY_VHCA_STATE:
	case MLX5_CMD_OP_MODIFY_VHCA_STATE:
	case MLX5_CMD_OP_ALLOC_SF:
	case MLX5_CMD_OP_SUSPEND_VHCA:
	case MLX5_CMD_OP_RESUME_VHCA:
	case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
	case MLX5_CMD_OP_SAVE_VHCA_STATE:
	case MLX5_CMD_OP_LOAD_VHCA_STATE:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -ENOLINK;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}

/* Map a command opcode to its human-readable name. */
const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
	MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_LAG);
	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
	MLX5_COMMAND_STR_CASE(QUERY_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
	MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
	MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
	MLX5_COMMAND_STR_CASE(CREATE_XRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRQ);
	MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
	MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
	MLX5_COMMAND_STR_CASE(CREATE_UCTX);
	MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
	MLX5_COMMAND_STR_CASE(CREATE_UMEM);
	MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
	MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
	MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
	MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
	MLX5_COMMAND_STR_CASE(ALLOC_SF);
	MLX5_COMMAND_STR_CASE(DEALLOC_SF);
	MLX5_COMMAND_STR_CASE(SUSPEND_VHCA);
	MLX5_COMMAND_STR_CASE(RESUME_VHCA);
	MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE);
	MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
	MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
	default: return "unknown command opcode";
	}
}

/* Translate a FW command status code to a human-readable string. */
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case
MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

/* Translate a FW command status code to a negative errno (0 for OK). */
static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}

/* Common header layout of every command output mailbox. */
struct mlx5_ifc_mbox_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];

	u8 syndrome[0x20];

	u8 reserved_at_40[0x40];
};

/* Common header layout of every command input mailbox. */
struct mlx5_ifc_mbox_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];

	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];

	u8 reserved_at_40[0x40];
};

/* Rate-limited log of a failed command: opcode name, status string and
 * syndrome taken from the output mailbox header.
 */
void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
	u8 status = MLX5_GET(mbox_out, out, status);

	mlx5_core_err_rl(dev,
			 "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
			 mlx5_command_str(opcode), opcode, op_mod,
			 cmd_status_str(status), status, syndrome, cmd_status_to_err(status));
}
EXPORT_SYMBOL(mlx5_cmd_out_err);

/* Print the failure only for kernel-issued commands (uid == 0), and not
 * for DESTROY_MKEY.  NOTE(review): motive for the DESTROY_MKEY carve-out
 * is not visible in this file — confirm before relying on it.
 */
static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
	u16 opcode, op_mod;
	u16 uid;

	opcode = MLX5_GET(mbox_in, in, opcode);
	op_mod = MLX5_GET(mbox_in, in, op_mod);
	uid = MLX5_GET(mbox_in, in, uid);

	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
		mlx5_cmd_out_err(dev, opcode, op_mod, out);
}

/* Convert a command execution result to the errno callers should see:
 * -ENXIO aborts are translated into an emulated FW status, delivery
 * errors pass through, and otherwise the output mailbox status decides.
 */
int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
{
	/* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */
	if (err == -ENXIO) {
		u16 opcode = MLX5_GET(mbox_in, in, opcode);
		u32 syndrome;
		u8 status;

		/* PCI Error, emulate command return status, for smooth reset */
		err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, syndrome);
		if (!err)
			return 0;
	}

	/* driver or FW delivery error */
	if (err != -EREMOTEIO && err)
		return err;

	/* check outbox status */
	err = cmd_status_to_err(MLX5_GET(mbox_out, out, status));
	if (err)
		cmd_status_print(dev, in, out);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_check);

/* Dump a command's input or output (descriptor plus mailbox chain) to
 * the debug log; @input selects which direction to dump.
 */
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ?
ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int data_only;
	u32 offset = 0;
	int dump_len;
	int i;

	mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx);
	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "cmd[%d]: dump command data %s(0x%x) %s\n",
				   ent->idx, mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n",
			      ent->idx, mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	/* inline part: data only, or the whole descriptor */
	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx);
		offset += sizeof(*ent->lay);
	}

	/* walk the external mailbox chain */
	for (i = 0; i < n && next; i++) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset, ent->idx);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx);
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset,
				 ent->idx);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");

	mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
}

/* Opcode of the command carried in @in (read from the inline header). */
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	return MLX5_GET(mbox_in, in->first.data, opcode);
}

static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);

/* Delayed-work handler fired when an async command's timeout expires:
 * try EQ recovery first, and only force completion (-ETIMEDOUT) if the
 * entry is still pending afterwards.
 */
static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	mlx5_cmd_eq_recover(dev);

	/* Maybe got handled by eq recover ? */
	if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
			       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
		goto out; /* phew, already handled */
	}

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
		       ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);

out:
	cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);

/* True when @opcode may be issued under the current opcode filter. */
static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
{
	if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
		return true;

	return cmd->allowed_opcode == opcode;
}

/* Command interface unusable: PCI channel offline, cmdif not UP, or the
 * device is in internal error state.
 */
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
{
	return pci_channel_offline(dev->pdev) ||
	       dev->cmd.state != MLX5_CMDIF_STATE_UP ||
	       dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR;
}

/* Workqueue handler that issues one queued command to FW: claim a slot,
 * fill the descriptor and mailbox pointers, sign it, hand ownership to
 * HW and ring the doorbell.  In polling mode it also waits for and
 * completes the command inline.
 */
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	bool poll_cmd = ent->polling;
	struct mlx5_cmd_layout *lay;
	struct mlx5_core_dev *dev;
	unsigned long cb_timeout;
	struct semaphore *sem;
	unsigned long flags;
	int alloc_ret;
	int cmd_mode;

	dev = container_of(cmd, struct mlx5_core_dev, cmd);
	cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));

	complete(&ent->handling);
	/* page commands are throttled separately from regular commands */
	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		alloc_ret = cmd_alloc_index(cmd);
		if (alloc_ret < 0) {
			mlx5_core_err_rl(dev, "failed to allocate command entry\n");
			if (ent->callback) {
				/* async path: report -EAGAIN and free everything here */
				ent->callback(-EAGAIN, ent->context);
				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);
				cmd_ent_put(ent);
			} else {
				ent->ret = -EAGAIN;
				complete(&ent->done);
			}
			up(sem);
			return;
		}
		ent->idx = alloc_ret;
	} else {
		/* page commands use the dedicated last slot */
		ent->idx = cmd->max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	cmd_mode = cmd->mode;

	/* extra ref for the timeout work, only if it was actually queued */
	if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
		cmd_ent_get(ent);
	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);

	cmd_ent_get(ent); /* for the _real_ FW event on completion */
	/* Skip sending command to fw if internal error */
	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
		ent->ret = -ENXIO;
		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
		return;
	}

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	/* if not in polling don't use ent after this point */
	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
	}
}

/* Map a HW delivery status to a negative errno (0 for OK/aborted). */
static int deliv_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
	case MLX5_DRIVER_STATUS_ABORTED:
		return 0;
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return -EBADR;
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return -EFAULT; /* Bad address */
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return -ENOMSG;
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return -EIO;
	default:
		return -EINVAL;
	}
}

/* Map a HW delivery status to a human-readable string. */
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

enum {
	/* extra grace period granted after EQ recovery before giving up */
	MLX5_CMD_TIMEOUT_RECOVER_MSEC	= 5 * 1000,
};

/* Last-chance path after a blocking command timed out: run command-EQ
 * recovery and re-wait briefly; if the entry is still not completed,
 * mark it timed out and force-complete it.
 */
static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
					  struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC);

	mlx5_cmd_eq_recover(dev);

	/* Re-wait on the ent->done after executing the recovery flow. If the
	 * recovery flow (or any other recovery flow running simultaneously)
	 * has recovered an EQE, it should cause the entry to be completed by
	 * the command interface.
	 */
	if (wait_for_completion_timeout(&ent->done, timeout)) {
		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
			       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
		return;
	}

	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
		       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));

	ent->ret = -ETIMEDOUT;
	mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
}

/* Block until the command entry completes, is canceled before it was ever
 * handled, or times out. Returns ent->ret (0 on successful delivery).
 */
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	/* If the work never even started (stuck behind the semaphore) and we
	 * managed to cancel it, the entry was never submitted to FW.
	 */
	if (!wait_for_completion_timeout(&ent->handling, timeout) &&
	    cancel_work_sync(&ent->work)) {
		ent->ret = -ECANCELED;
		goto out_err;
	}
	if (cmd->mode == CMD_MODE_POLLING || ent->polling)
		wait_for_completion(&ent->done);
	else if (!wait_for_completion_timeout(&ent->done, timeout))
		wait_func_handle_exec_timeout(dev, ent);

out_err:
	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	} else if (err == -ECANCELED) {
		mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchrous completion
 *
 * return value in case (!callback):
 *	ret < 0 : Command execution couldn't be submitted by driver
 *	ret > 0 : Command execution couldn't be performed by firmware
 *	ret == 0: Command was executed by FW, Caller must check FW outbox status.
 *
 * return value in case (callback):
 *	ret < 0 : Command execution couldn't be submitted by driver
 *	ret == 0: Command will be submitted to FW for execution
 *		  and the callback will be called for further status updates
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue,
			   u8 token, bool force_polling)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	u8 status = 0;
	int err = 0;
	s64 ds;
	u16 op;

	/* page queue commands do not support async completion */
	if (callback && page_queue)
		return -EINVAL;

	ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
			    callback, context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	/* put for this ent is when consumed, depending on the use case
	 * 1) (!callback) blocking flow: by caller after wait_func completes
	 * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled
	 */

	ent->token = token;
	ent->polling = force_polling;

	init_completion(&ent->handling);
	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		/* page queue commands run synchronously in this context */
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -EALREADY;
		goto out_free;
	}

	if (callback)
		return 0; /* mlx5_cmd_comp_handler() will put(ent) */

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT || err == -ECANCELED)
		goto out_free;

	/* account FW execution time in the per-opcode stats */
	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < MLX5_CMD_OP_MAX) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), ds);

out_free:
	status = ent->status;
	cmd_ent_put(ent);
	return err ? : status;
}

/* debugfs "run" write handler: writing "go" executes the staged command
 * (in/out buffers must have been set up via the other debugfs files).
 */
static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (count < sizeof(lbuf) - 1)
		return -EINVAL;

	if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

/* Scatter a flat input buffer into a command message (first.data, then the
 * chained mailbox blocks). @token is stamped into every data block.
 */
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
			    u8 token)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		block->token = token;
		next = next->next;
	}

	return 0;
}

/* Gather a command message (first.data plus chained blocks) back into a
 * flat output buffer. Inverse of mlx5_copy_to_msg().
 */
static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

/* Allocate one mailbox whose buffer comes from the cmd DMA pool. */
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
				       &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	mailbox->next = NULL;

	return mailbox;
}

/* Return a mailbox's DMA buffer to the pool and free the mailbox. */
static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

/* Allocate a command message big enough for @size bytes: a msg header plus
 * a chain of mailboxes built back-to-front so each block can point at the
 * DMA address of its successor.
 */
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size,
					       u8 token)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	msg->len = size;
	n = mlx5_calc_cmd_blocks(msg);

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		block->token = token;
		head = tmp;
	}
	msg->next = head;
	return msg;

err_alloc:
	/* unwind the partially built chain */
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

/* Free a command message and its whole mailbox chain. */
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

/* debugfs "in" write handler: stage the raw inbox for a later "go". */
static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;
	ptr = memdup_user(buf, count);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;
}

/* debugfs "out" read handler: dump the staged outbox buffer. */
static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!dbg->out_msg)
		return -ENOMEM;

	return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
				       dbg->outlen);
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

/* debugfs "out_len" read handler: report the configured outbox length. */
static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char
	outlen[8];
	int err;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, outlen, err);
}

/* debugfs "out_len" write handler: (re)allocate the staged outbox buffer. */
static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8] = {0};
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err != 1)
		return -EINVAL;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

/* Build the per-device command workqueue name. */
static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(dev->device));
}

/* Tear down the cmd debugfs hierarchy (no-op if debugfs is disabled). */
static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

/* Create the "cmd" debugfs directory and its control files. */
static void create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev));

	debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
	debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
	debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
	debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
	debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);

	mlx5_cmdif_debugfs_init(dev);
}

/* Restrict the command interface to a single opcode (or re-open it with
 * CMD_ALLOWED_OPCODE_ALL). Quiesces all slots by taking every semaphore
 * before flipping the setting.
 */
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);

	cmd->allowed_opcode = opcode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

/* Switch between polling and event completion modes, with all command
 * slots quiesced for the duration of the switch.
 */
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);

	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

/* EQ notifier: dispatch a command-completion EQE to the completion handler. */
static int cmd_comp_notifier(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_core_dev *dev;
	struct mlx5_cmd *cmd;
	struct mlx5_eqe *eqe;

	cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
	dev = container_of(cmd, struct mlx5_core_dev, cmd);
	eqe = data;

	mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);

	return NOTIFY_OK;
}

/* Enable event-driven command completions. */
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
	mlx5_eq_notifier_register(dev, &dev->cmd.nb);
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

/* Fall back to polled command completions. */
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
	mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
}

/* Return a message to its cache (if it came from one) or free it. */
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->parent) {
		spin_lock_irqsave(&msg->parent->lock, flags);
		list_add_tail(&msg->list, &msg->parent->head);
		spin_unlock_irqrestore(&msg->parent->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

/* Completion handler: walk the completion vector, finish each entry's
 * bookkeeping and either invoke its async callback or wake its waiter.
 * @forced means completions triggered by the driver (error/timeout),
 * not a real FW event.
 */
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			ent = cmd->ent_arr[i];

			/* if we already completed the command, ignore it */
			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
						&ent->state)) {
				/* only real completion can free the cmd slot */
				if (!forced) {
					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
						      ent->idx);
					cmd_ent_put(ent);
				}
				continue;
			}

			if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
				cmd_ent_put(ent); /* timeout work was canceled */

			if (!forced || /* Real FW completion */
			    mlx5_cmd_is_down(dev) || /* No real FW completion is expected */
			    !opcode_allowed(cmd, ent->op))
				cmd_ent_put(ent);

			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);

			if (vec & MLX5_TRIGGERED_CMD_COMP)
				ent->ret = -ENXIO;

			if (!ent->ret) { /* Command completed by FW */
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);

				ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < MLX5_CMD_OP_MAX) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret ? : ent->status;
				if (err > 0) /* Failed in FW, command didn't execute */
					err = deliv_status_to_err(err);

				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				/* final consumer is done, release ent */
				cmd_ent_put(ent);
				callback(err, context);
			} else {
				/* release wait_func() so mlx5_cmd_invoke()
				 * can make the final ent_put()
				 */
				complete(&ent->done);
			}
		}
	}
}

/* Force-complete every outstanding command entry (used on teardown or
 * internal error, when no FW completions will arrive).
 */
static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	unsigned long bitmask;
	unsigned long flags;
	u64 vector;
	int i;

	/* wait for pending handlers to complete */
	mlx5_eq_synchronize_cmd_irq(dev);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	if (!vector)
		goto no_trig;

	bitmask = vector;
	/* we must increment the allocated entries refcount before triggering the completions
	 * to guarantee pending commands will not get freed in the meanwhile.
	 * For that reason, it also has to be done inside the alloc_lock.
	 */
	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
		cmd_ent_get(cmd->ent_arr[i]);
	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
	mlx5_cmd_comp_handler(dev, vector, true);
	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
		cmd_ent_put(cmd->ent_arr[i]);
	return;

no_trig:
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}

/* Drain the command interface: repeatedly trigger forced completions until
 * every semaphore can be taken, then release them all so the cmdif is
 * usable again.
 */
void mlx5_cmd_flush(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++) {
		while (down_trylock(&cmd->sem)) {
			mlx5_cmd_trigger_completions(dev);
			cond_resched();
		}
	}

	while (down_trylock(&cmd->pages_sem)) {
		mlx5_cmd_trigger_completions(dev);
		cond_resched();
	}

	/* Unlock cmdif */
	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

/* Get an input message: from the smallest fitting cache when possible,
 * otherwise freshly allocated.
 */
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct cmd_msg_cache *ch = NULL;
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	/* tiny messages fit entirely in first.data; no cache needed */
	if (in_size <= 16)
		goto cache_miss;

	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
		ch = &cmd->cache[i];
		if (in_size > ch->max_inbox_size)
			continue;
		spin_lock_irq(&ch->lock);
		if (list_empty(&ch->head)) {
			spin_unlock_irq(&ch->lock);
			continue;
		}
		msg = list_entry(ch->head.next, typeof(*msg), list);
		/* For cached lists, we must explicitly state what is
		 * the real size
		 */
		msg->len = in_size;
		list_del(&msg->list);
		spin_unlock_irq(&ch->lock);
		break;
	}

	if (!IS_ERR(msg))
		return msg;

cache_miss:
	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
	return msg;
}

/* MANAGE_PAGES commands are routed through the dedicated page queue. */
static int is_manage_pages(void *in)
{
	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. Page queue commands do not support asynchrous completion
 */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context,
		    bool force_polling)
{
	u16 opcode = MLX5_GET(mbox_in, in, opcode);
	struct mlx5_cmd_msg *inb, *outb;
	int pages_queue;
	gfp_t gfp;
	u8 token;
	int err;

	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
		return -ENXIO;

	pages_queue = is_manage_pages(in);
	/* async callers may be in atomic context */
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, token, force_polling);
	if (callback)
		return err;

	if (err > 0) /* Failed in FW, command didn't execute */
		err = deliv_status_to_err(err);

	if (err)
		goto out_out;

	/* command completed by FW */
	err = mlx5_copy_from_msg(out, outb, out_size);
out_out:
	mlx5_free_cmd_msg(dev, outb);
out_in:
	free_msg(dev, inb);
	return err;
}

/* Emit a tracepoint for a command that failed with bad FW status. */
static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
	u8 status = MLX5_GET(mbox_out, out, status);

	trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod,
		       cmd_status_str(status), status, syndrome,
		       cmd_status_to_err(status));
}

/* Record a command failure in the per-opcode statistics. */
static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
			   u32 syndrome, int err)
{
	struct mlx5_cmd_stats *stats;

	if (!err)
		return;

	stats = &dev->cmd.stats[opcode];
	spin_lock_irq(&stats->lock);
	stats->failed++;
	if (err < 0)
		stats->last_failed_errno = -err;
	if (err == -EREMOTEIO) {
		stats->failed_mbox_status++;
		stats->last_failed_mbox_status = status;
		stats->last_failed_syndrome = syndrome;
	}
	spin_unlock_irq(&stats->lock);
}

/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out)
{
	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
	u8 status = MLX5_GET(mbox_out, out, status);

	if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */
		err = -EIO;

	if (!err && status != MLX5_CMD_STAT_OK) {
		err = -EREMOTEIO;
		mlx5_cmd_err_trace(dev, opcode, op_mod, out);
	}

	cmd_status_log(dev, opcode, status, syndrome, err);
	return err;
}

/**
 * mlx5_cmd_do - Executes a fw command, wait for completion.
 * Unlike mlx5_cmd_exec, this function will not translate or intercept
 * outbox.status and will return -EREMOTEIO when
 * outbox.status != MLX5_CMD_STAT_OK
 *
 * @dev: mlx5 core device
 * @in: inbox mlx5_ifc command buffer
 * @in_size: inbox buffer size
 * @out: outbox mlx5_ifc buffer
 * @out_size: outbox size
 *
 * @return:
 * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK.
 *              Caller must check FW outbox status.
 *   0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK.
 * < 0 : Command execution couldn't be performed by firmware or driver
 */
int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
{
	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
	u16 opcode = MLX5_GET(mbox_in, in, opcode);
	u16 op_mod = MLX5_GET(mbox_in, in, op_mod);

	return cmd_status_err(dev, err, opcode, op_mod, out);
}
EXPORT_SYMBOL(mlx5_cmd_do);

/**
 * mlx5_cmd_exec - Executes a fw command, wait for completion
 *
 * @dev: mlx5 core device
 * @in: inbox mlx5_ifc command buffer
 * @in_size: inbox buffer size
 * @out: outbox mlx5_ifc buffer
 * @out_size: outbox size
 *
 * @return: 0 if no error, FW command execution was successful
 *          and outbox status is ok.
 */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err = mlx5_cmd_do(dev, in, in_size, out, out_size);

	return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

/**
 * mlx5_cmd_exec_polling - Executes a fw command, poll for completion
 *	Needed for driver force teardown, when command completion EQ
 *	will not be available to complete the command
 *
 * @dev: mlx5 core device
 * @in: inbox mlx5_ifc command buffer
 * @in_size: inbox buffer size
 * @out: outbox mlx5_ifc buffer
 * @out_size: outbox size
 *
 * @return: 0 if no error, FW command execution was successful
 *          and outbox status is ok.
 */
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size)
{
	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
	u16 opcode = MLX5_GET(mbox_in, in, opcode);
	u16 op_mod = MLX5_GET(mbox_in, in, op_mod);

	err = cmd_status_err(dev, err, opcode, op_mod, out);
	return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);

/* Initialize a context for tracking in-flight async commands. */
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx)
{
	ctx->dev = dev;
	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
	atomic_set(&ctx->num_inflight, 1);
	init_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);

/**
 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
 * @ctx: The ctx to clean
 *
 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
 * the call mlx5_cleanup_async_ctx().
 */
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
	/* drop the initial reference; wait only if work is still in flight */
	if (!atomic_dec_and_test(&ctx->num_inflight))
		wait_for_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);

/* Internal completion shim: translate status, run the user callback and
 * drop the in-flight reference taken in mlx5_cmd_exec_cb().
 */
static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
	struct mlx5_async_work *work = _work;
	struct mlx5_async_ctx *ctx;

	ctx = work->ctx;
	status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
	work->user_callback(status, work);
	if (atomic_dec_and_test(&ctx->num_inflight))
		complete(&ctx->inflight_done);
}

/* Submit a command asynchronously; @callback runs from the completion path. */
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work)
{
	int ret;

	work->ctx = ctx;
	work->user_callback = callback;
	work->opcode = MLX5_GET(mbox_in, in, opcode);
	work->op_mod = MLX5_GET(mbox_in, in, op_mod);
	work->out = out;
	/* refuse new work once cleanup dropped the count to zero */
	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
		return -EIO;
	ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
		       mlx5_cmd_exec_cb_handler, work, false);
	if (ret && atomic_dec_and_test(&ctx->num_inflight))
		complete(&ctx->inflight_done);

	return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

/* Free every message still sitting in the per-size message caches. */
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;
	int i;

	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
		ch = &dev->cmd.cache[i];
		list_for_each_entry_safe(msg, n, &ch->head, list) {
			list_del(&msg->list);
			mlx5_free_cmd_msg(dev, msg);
		}
	}
}

/* Number of pre-allocated messages per cache bucket (small to large). */
static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
	512, 32, 16, 8, 2
};

/* Max inbox size served by each cache bucket: 16 header bytes plus a
 * number of data blocks.
 */
static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
	16 + MLX5_CMD_DATA_BLOCK_SIZE,
	16 +
	MLX5_CMD_DATA_BLOCK_SIZE * 2,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};

/* Pre-populate the per-size message caches (best effort; allocation
 * failures just leave a bucket partially filled).
 */
static void create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	int i;
	int k;

	/* Initialize and fill the caches with initial entries */
	for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
		ch = &cmd->cache[k];
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->head);
		ch->num_ent = cmd_cache_num_ent[k];
		ch->max_inbox_size = cmd_cache_ent_size[k];
		for (i = 0; i < ch->num_ent; i++) {
			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
						 ch->max_inbox_size, 0);
			if (IS_ERR(msg))
				break;
			msg->parent = ch;
			list_add_tail(&msg->list, &ch->head);
		}
	}
}

/* Allocate the 4K-aligned DMA page that holds the command queue; if the
 * first allocation happens to be unaligned, retry with a double-sized
 * buffer and align within it.
 */
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
						2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

/* Release the command queue DMA page. */
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

/* Read the command interface revision from the init segment. */
static u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}

/* Initialize the command interface: verify cmdif revision, allocate stats,
 * the mailbox DMA pool and the command queue page, program the queue
 * address into the device, set up caches, workqueue and debugfs.
 */
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		mlx5_core_err(dev,
			      "Driver cmdif rev(%d) differs from firmware's(%d)\n",
			      CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
	if (!cmd->stats)
		return -ENOMEM;

	cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
	if (!cmd->pool) {
		err = -ENOMEM;
		goto dma_pool_err;
	}

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	/* queue size and stride are advertised by FW in the init segment */
	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
			      1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		mlx5_core_err(dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->state = MLX5_CMDIF_STATE_DOWN;
	cmd->checksum_disabled = 1;
	/* last slot is reserved for page-queue commands */
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
			      CMD_IF_REV, cmd->cmdif_rev);
		err = -EOPNOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < MLX5_CMD_OP_MAX; i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		mlx5_core_err(dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;
	cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;

	create_msg_cache(dev);

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		mlx5_core_err(dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	create_debugfs_files(dev);

	return 0;

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	dma_pool_destroy(cmd->pool);
dma_pool_err:
	kvfree(cmd->stats);
	return err;
}

/* Tear down everything mlx5_cmd_init() created, in reverse order. */
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	dma_pool_destroy(cmd->pool);
	kvfree(cmd->stats);
}

void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
			enum
mlx5_cmdif_state cmdif_state) 2290 { 2291 dev->cmd.state = cmdif_state; 2292 } 2293