1 /* 2 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"

/* Command interface revision this driver implements. */
enum {
	CMD_IF_REV = 5,
};

/* Completion detection mode: busy-poll the descriptor vs. EQE events. */
enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

/*
 * Sizes/counts for the two cached mailbox-chain lists.
 * NOTE(review): LONG_LIST_SIZE appears sized for a 2GB transfer described by
 * 8-byte entries plus headers — confirm against the cache users.
 */
enum {
	NUM_LONG_LISTS	  = 2,
	NUM_MED_LISTS	  = 64,
	LONG_LIST_SIZE	  = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	  = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

/* Delivery status codes reported by the HW in the command descriptor. */
enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};

/*
 * Allocate and initialize a command work entry tying together the input and
 * output messages, the caller's output buffer and an optional completion
 * callback.  Uses GFP_ATOMIC when a callback is given, since async callers
 * may be in atomic context.  Returns ERR_PTR(-ENOMEM) on failure.
 */
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue = page_queue;

	return ent;
}

/*
 * Hand out the next command token under token_lock, skipping 0 so a valid
 * token is always non-zero.  The token is echoed by the HW and used to match
 * responses to requests.
 */
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

/*
 * Claim a free command-queue slot index from the bitmask, or -ENOMEM if all
 * regular slots are busy.  A set bit means the slot is free.
 */
static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

/* Return a command-queue slot to the free bitmask. */
static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

/* Address of slot idx's command descriptor in the command buffer. */
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

/* XOR of all bytes in buf; used for the 8-bit command checksums. */
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

/*
 * Validate a mailbox block's checksums: the control fields (excluding the
 * data area and the trailing signature byte) and then the whole block must
 * each XOR to 0xff.  Returns 0 on success, -EINVAL on a bad signature.
 */
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

/*
 * Stamp a mailbox block with the command token and, when csum is enabled,
 * the two complement-XOR signatures verify_block_sig() later checks.
 */
static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}

/* Apply calc_block_sig() to every mailbox block chained to msg. */
static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token, csum);
		next = next->next;
	}
}

/* Sign the command descriptor and both the input and output chains. */
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}

/*
 * Busy-wait (with sleeps) for the HW to clear the ownership bit of ent's
 * descriptor.  Sets ent->ret to 0 on completion or -ETIMEDOUT after the
 * command timeout plus one second of grace.
 */
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

/* Release a work entry allocated by alloc_cmd(). */
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

/*
 * Verify the descriptor signature and every output mailbox block signature
 * for a completed command.  Returns 0 if all checksums are intact.
 */
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

/*
 * Hex-dump size bytes of buf to the debug log, 16 bytes per line, labelling
 * each line with its running offset.  Values are printed big-endian as the
 * HW sees them.
 */
static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

/* Synthetic status/syndrome used when the driver aborts a command itself. */
enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};

/*
 * Decide how to complete command op when the device is in an internal error
 * state and the command cannot actually reach the FW.
 *
 * Teardown/destroy/dealloc-style commands are faked as successful (their
 * object is effectively gone anyway) so cleanup paths can make progress;
 * create/query-style commands are failed with a driver-private status and
 * syndrome and -EIO; unknown opcodes get -EINVAL.
 */
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETTACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:

	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:

	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}

/* Human-readable name for a FW command opcode (for logs and debugfs). */
const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	default: return "unknown command opcode";
	}
}

/*
 * Dump a command's descriptor and mailbox chain to the debug log; input
 * selects the input or output side.  With the MLX5_CMD_DATA debug mask set,
 * only the payload data is dumped.
 */
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	u32 offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ?
			      "INPUT" : "OUTPUT");

	/* Inline descriptor bytes first, then walk the mailbox chain. */
	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}

/* Extract the (big-endian) FW opcode from a command input message. */
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}

/*
 * Delayed-work timeout for commands issued with a callback: mark the entry
 * timed out and force-complete it through the normal completion path.
 */
static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
		       mlx5_command_str(msg_to_opcode(ent->in)),
		       msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}

/*
 * Workqueue handler that actually submits a command to the HW: claim a
 * queue slot (the dedicated last slot for page commands), fill in and sign
 * the descriptor, then ring the doorbell.  In polling mode it also waits
 * for and completes the command inline.
 */
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;
	unsigned long flags;

	/* The semaphore bounds in-flight commands; page commands have their own. */
	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		/* Page-queue commands always use the reserved last slot. */
		ent->idx = cmd->max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();

	/* Arm the timeout watchdog for async (callback) commands. */
	if (ent->callback)
		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb(); 685 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); 686 mmiowb(); 687 /* if not in polling don't use ent after this point */ 688 if (cmd->mode == CMD_MODE_POLLING) { 689 poll_timeout(ent); 690 /* make sure we read the descriptor after ownership is SW */ 691 rmb(); 692 mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 693 } 694 } 695 696 static const char *deliv_status_to_str(u8 status) 697 { 698 switch (status) { 699 case MLX5_CMD_DELIVERY_STAT_OK: 700 return "no errors"; 701 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: 702 return "signature error"; 703 case MLX5_CMD_DELIVERY_STAT_TOK_ERR: 704 return "token error"; 705 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: 706 return "bad block number"; 707 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: 708 return "output pointer not aligned to block size"; 709 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: 710 return "input pointer not aligned to block size"; 711 case MLX5_CMD_DELIVERY_STAT_FW_ERR: 712 return "firmware internal error"; 713 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: 714 return "command input length error"; 715 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: 716 return "command ouput length error"; 717 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: 718 return "reserved fields not cleared"; 719 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: 720 return "bad command descriptor type"; 721 default: 722 return "unknown status code"; 723 } 724 } 725 726 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) 727 { 728 unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); 729 struct mlx5_cmd *cmd = &dev->cmd; 730 int err; 731 732 if (cmd->mode == CMD_MODE_POLLING) { 733 wait_for_completion(&ent->done); 734 } else if (!wait_for_completion_timeout(&ent->done, timeout)) { 735 ent->ret = -ETIMEDOUT; 736 mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 737 } 738 739 err = ent->ret; 740 741 if (err == -ETIMEDOUT) { 742 mlx5_core_warn(dev, "%s(0x%x) timeout. 
Will cause a leak of a command resource\n", 743 mlx5_command_str(msg_to_opcode(ent->in)), 744 msg_to_opcode(ent->in)); 745 } 746 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", 747 err, deliv_status_to_str(ent->status), ent->status); 748 749 return err; 750 } 751 752 static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out) 753 { 754 return &out->syndrome; 755 } 756 757 static u8 *get_status_ptr(struct mlx5_outbox_hdr *out) 758 { 759 return &out->status; 760 } 761 762 /* Notes: 763 * 1. Callback functions may not sleep 764 * 2. page queue commands do not support asynchrous completion 765 */ 766 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, 767 struct mlx5_cmd_msg *out, void *uout, int uout_size, 768 mlx5_cmd_cbk_t callback, 769 void *context, int page_queue, u8 *status) 770 { 771 struct mlx5_cmd *cmd = &dev->cmd; 772 struct mlx5_cmd_work_ent *ent; 773 struct mlx5_cmd_stats *stats; 774 int err = 0; 775 s64 ds; 776 u16 op; 777 778 if (callback && page_queue) 779 return -EINVAL; 780 781 ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context, 782 page_queue); 783 if (IS_ERR(ent)) 784 return PTR_ERR(ent); 785 786 if (!callback) 787 init_completion(&ent->done); 788 789 INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler); 790 INIT_WORK(&ent->work, cmd_work_handler); 791 if (page_queue) { 792 cmd_work_handler(&ent->work); 793 } else if (!queue_work(cmd->wq, &ent->work)) { 794 mlx5_core_warn(dev, "failed to queue work\n"); 795 err = -ENOMEM; 796 goto out_free; 797 } 798 799 if (callback) 800 goto out; 801 802 err = wait_func(dev, ent); 803 if (err == -ETIMEDOUT) 804 goto out_free; 805 806 ds = ent->ts2 - ent->ts1; 807 op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); 808 if (op < ARRAY_SIZE(cmd->stats)) { 809 stats = &cmd->stats[op]; 810 spin_lock_irq(&stats->lock); 811 stats->sum += ds; 812 ++stats->n; 813 spin_unlock_irq(&stats->lock); 814 } 815 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, 816 "fw 
exec time for %s is %lld nsec\n", 817 mlx5_command_str(op), ds); 818 *status = ent->status; 819 820 out_free: 821 free_cmd(ent); 822 out: 823 return err; 824 } 825 826 static ssize_t dbg_write(struct file *filp, const char __user *buf, 827 size_t count, loff_t *pos) 828 { 829 struct mlx5_core_dev *dev = filp->private_data; 830 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 831 char lbuf[3]; 832 int err; 833 834 if (!dbg->in_msg || !dbg->out_msg) 835 return -ENOMEM; 836 837 if (copy_from_user(lbuf, buf, sizeof(lbuf))) 838 return -EFAULT; 839 840 lbuf[sizeof(lbuf) - 1] = 0; 841 842 if (strcmp(lbuf, "go")) 843 return -EINVAL; 844 845 err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen); 846 847 return err ? err : count; 848 } 849 850 851 static const struct file_operations fops = { 852 .owner = THIS_MODULE, 853 .open = simple_open, 854 .write = dbg_write, 855 }; 856 857 static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) 858 { 859 struct mlx5_cmd_prot_block *block; 860 struct mlx5_cmd_mailbox *next; 861 int copy; 862 863 if (!to || !from) 864 return -ENOMEM; 865 866 copy = min_t(int, size, sizeof(to->first.data)); 867 memcpy(to->first.data, from, copy); 868 size -= copy; 869 from += copy; 870 871 next = to->next; 872 while (size) { 873 if (!next) { 874 /* this is a BUG */ 875 return -ENOMEM; 876 } 877 878 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); 879 block = next->buf; 880 memcpy(block->data, from, copy); 881 from += copy; 882 size -= copy; 883 next = next->next; 884 } 885 886 return 0; 887 } 888 889 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) 890 { 891 struct mlx5_cmd_prot_block *block; 892 struct mlx5_cmd_mailbox *next; 893 int copy; 894 895 if (!to || !from) 896 return -ENOMEM; 897 898 copy = min_t(int, size, sizeof(from->first.data)); 899 memcpy(to, from->first.data, copy); 900 size -= copy; 901 to += copy; 902 903 next = from->next; 904 while (size) { 905 if (!next) { 906 /* 
 this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

/*
 * Allocate one DMA-coherent mailbox from the command pci_pool and zero its
 * protection-block header area.  Returns ERR_PTR(-ENOMEM) on failure.
 */
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}

/* Return a mailbox's DMA buffer to the pool and free its metadata. */
static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

/*
 * Build a command message big enough for @size bytes: whatever does not fit
 * in the inline area is spread over DMA mailbox blocks chained (in reverse
 * build order) via their big-endian next pointers and numbered by
 * block_num.  On failure the partial chain is torn down and an ERR_PTR is
 * returned.
 */
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	/* Bytes that must live in mailbox blocks, rounded up to whole blocks. */
	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		/* Prepend, so the chain ends up in ascending block order. */
		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

/* Free a command message and its whole mailbox chain. */
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

/*
 * debugfs "in" writer: stage a raw command input buffer copied from user
 * space, replacing any previously staged one.
 */
static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;
	int err;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		err = -EFAULT;
		goto out;
	}
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;

out:
	kfree(ptr);
	return err;
}

/* debugfs "in" reader: return the staged output buffer (single read). */
static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t
outlen_read(struct file *filp, char __user *buf, size_t count, 1077 loff_t *pos) 1078 { 1079 struct mlx5_core_dev *dev = filp->private_data; 1080 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1081 char outlen[8]; 1082 int err; 1083 1084 if (*pos) 1085 return 0; 1086 1087 err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); 1088 if (err < 0) 1089 return err; 1090 1091 if (copy_to_user(buf, &outlen, err)) 1092 return -EFAULT; 1093 1094 *pos += err; 1095 1096 return err; 1097 } 1098 1099 static ssize_t outlen_write(struct file *filp, const char __user *buf, 1100 size_t count, loff_t *pos) 1101 { 1102 struct mlx5_core_dev *dev = filp->private_data; 1103 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1104 char outlen_str[8]; 1105 int outlen; 1106 void *ptr; 1107 int err; 1108 1109 if (*pos != 0 || count > 6) 1110 return -EINVAL; 1111 1112 kfree(dbg->out_msg); 1113 dbg->out_msg = NULL; 1114 dbg->outlen = 0; 1115 1116 if (copy_from_user(outlen_str, buf, count)) 1117 return -EFAULT; 1118 1119 outlen_str[7] = 0; 1120 1121 err = sscanf(outlen_str, "%d", &outlen); 1122 if (err < 0) 1123 return err; 1124 1125 ptr = kzalloc(outlen, GFP_KERNEL); 1126 if (!ptr) 1127 return -ENOMEM; 1128 1129 dbg->out_msg = ptr; 1130 dbg->outlen = outlen; 1131 1132 *pos = count; 1133 1134 return count; 1135 } 1136 1137 static const struct file_operations olfops = { 1138 .owner = THIS_MODULE, 1139 .open = simple_open, 1140 .write = outlen_write, 1141 .read = outlen_read, 1142 }; 1143 1144 static void set_wqname(struct mlx5_core_dev *dev) 1145 { 1146 struct mlx5_cmd *cmd = &dev->cmd; 1147 1148 snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", 1149 dev_name(&dev->pdev->dev)); 1150 } 1151 1152 static void clean_debug_files(struct mlx5_core_dev *dev) 1153 { 1154 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1155 1156 if (!mlx5_debugfs_root) 1157 return; 1158 1159 mlx5_cmdif_debugfs_cleanup(dev); 1160 debugfs_remove_recursive(dbg->dbg_root); 1161 } 1162 1163 static int 
create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	/* debugfs disabled or unavailable: silently succeed */
	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	/* NOTE(review): "in" is created read-only (0400) and "out"
	 * write-only (0200), yet dfops' write handler fills the inbox and
	 * its read handler returns the outbox — the two modes look
	 * swapped; confirm against the intended debugfs interface.
	 */
	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	/* "run" triggers execution via 'fops', defined elsewhere in this file */
	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	/* removes the whole dbg_root tree, including files created above */
	clean_debug_files(dev);
	return err;
}

/* Switch the command interface between polling and event-driven completion.
 * Acquiring every regular command slot plus the page semaphore guarantees
 * no command is in flight while 'mode' changes.
 */
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	/* drain: take all regular slots, then the dedicated pages slot */
	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);

	cmd->mode = mode;

	/* release in reverse order */
	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}

/* Return a command message to its cache list if it came from one,
 * otherwise free it outright.
 */
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

/* Completion handler for the command interface. 'vec' is a bitmap of
 * completed command-queue entries; MLX5_TRIGGERED_CMD_COMP marks forced
 * completions driven by the timeout/teardown path rather than firmware.
 */
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			/* async entries arm a timeout work; disarm it now */
			if (ent->callback)
				cancel_delayed_work(&ent->cb_timeout_work);
			/* page-request commands use a dedicated semaphore */
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				if (vec & MLX5_TRIGGERED_CMD_COMP)
					ent->status = MLX5_DRIVER_STATUS_ABORTED;
				else
					/* low bit of status_own is ownership */
					ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			/* release the queue slot before invoking the callback */
			free_ent(cmd, ent->idx);

			if (ent->callback) {
				/* per-opcode latency accounting */
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				/* async path owns both messages; free them here */
				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				err = err ?
				      err : ent->status;
				/* free the entry before the callback may resubmit */
				free_cmd(ent);
				callback(err, context);
			} else {
				/* synchronous caller is waiting in wait_func() */
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}

/* Get an inbox message of at least in_size bytes, preferring the
 * pre-allocated med/large cache lists; falls back to a fresh allocation
 * when the matching list is empty or the size fits no list.
 */
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}

/* Extract the (big-endian) command opcode from an inbox header. */
static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode);
}

/* Page-management commands go through a dedicated queue slot. */
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

/* Common execution path for both the blocking (callback == NULL) and the
 * asynchronous command variants.
 */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;

	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		/* device unusable: synthesize a failure outbox and bail */
		err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
		*get_synd_ptr(out) = cpu_to_be32(drv_synd);
		*get_status_ptr(out) = status;
		return err;
	}

	pages_queue = is_manage_pages(in);
	/* async callers may run in atomic context */
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	/* async path: mlx5_cmd_comp_handler() frees both messages instead */
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}

/* Blocking command execution; sleeps until the command completes. */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

/* Asynchronous command execution; 'callback' runs from the completion path. */
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

/* Free every message still sitting on the two cache lists.
 * No locking: only called from init-failure and teardown paths.
 */
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

/* Pre-allocate the med/large message cache lists used by alloc_msg(). */
static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		/* tag the message so free_msg() returns it to this list */
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	/* frees whatever was queued before the failure */
	destroy_msg_cache(dev);
	return err;
}

/* Allocate the 4K-aligned DMA buffer that backs the command queue.
 * First try a plain page-sized allocation; if the DMA API hands back an
 * unaligned buffer, retry with 2*page-1 bytes and align within it.
 */
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* CPU and DMA addresses are aligned identically here — the
	 * coherent allocation keeps them congruent modulo the page size.
	 */
	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma =
		   ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

/* Release the command queue DMA buffer allocated by alloc_cmd_page(). */
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

/* Initialize the command interface: verify the firmware command interface
 * revision, allocate the command queue page and mailbox pool, read the
 * queue geometry from the init segment, program the queue address into
 * firmware, and create the message cache, workqueue and debugfs files.
 */
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	/* queue geometry lives in the low byte of cmdq_addr_l_sz */
	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	/* all entries must fit inside the single command queue page */
	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	/* one slot is reserved for the page-request queue */
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. 
driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	/* hardware requires a 4K-aligned queue address; the low 12 bits of
	 * cmdq_addr_l_sz carry the queue size fields instead
	 */
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

/* Tear down everything created by mlx5_cmd_init(), in reverse order. */
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

/* Human-readable name for a firmware command-completion status code. */
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

/* Map a firmware command-completion status code to a Linux errno. */
static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case
	     MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}

/* this will be available till all the commands use set/get macros */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	return cmd_status_to_err(hdr->status);
}

/* Variant taking a raw outbox pointer instead of mlx5_outbox_hdr:
 * the first big-endian dword carries status in its top byte, and the
 * syndrome follows in the next dword.
 */
int mlx5_cmd_status_to_err_v2(void *ptr)
{
	u32 syndrome;
	u8 status;

	/* status = most significant byte of the first big-endian dword */
	status = be32_to_cpu(*(__be32 *)ptr) >> 24;
	if (!status)
		return 0;

	syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(status), status, syndrome);

	return cmd_status_to_err(status);
}