// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
	struct mlx5_core_dev *mdev;
	u32 obj_id;
	u32 dinlen; /* destroy inbox length */
	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
};

struct devx_umem {
	struct mlx5_core_dev *mdev;
	struct ib_umem *umem;
	u32 page_offset;
	int page_shift;
	int ncont;
	u32 dinlen;
	u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};

struct devx_umem_reg_cmd {
	void *in;
	u32 inlen;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};

static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
{
	return to_mucontext(ib_uverbs_get_ucontext(file));
}

int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	u64 general_obj_types;
	void *hdr;
	int err;

	hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);

	general_obj_types = MLX5_CAP_GEN_64(dev->mdev, general_obj_types);
	if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) ||
	    !(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return 0;
}

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
			  struct mlx5_ib_ucontext *context)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_DESTROY_TIR:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
				    obj_id);
		return true;

	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
				    table_id);
		return true;
	default:
		return false;
	}
}

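/*
 * Check that the object id carried inside a modify/query/arm command mailbox
 * matches the id of the devx object the method was invoked on, so a command
 * cannot be redirected at an object the handle does not refer to.
 */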
static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u32 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = MLX5_GET(query_mkey_in, in, mkey_index);
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = MLX5_GET(query_cq_in, in, cqn);
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = MLX5_GET(modify_cq_in, in, cqn);
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = MLX5_GET(query_sq_in, in, sqn);
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = MLX5_GET(modify_sq_in, in, sqn);
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = MLX5_GET(query_rq_in, in, rqn);
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = MLX5_GET(modify_rq_in, in, rqn);
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = MLX5_GET(query_rmp_in, in, rmpn);
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = MLX5_GET(modify_rmp_in, in, rmpn);
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = MLX5_GET(query_rqt_in, in, rqtn);
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = MLX5_GET(modify_rqt_in, in, rqtn);
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = MLX5_GET(query_tir_in, in, tirn);
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = MLX5_GET(modify_tir_in, in, tirn);
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = MLX5_GET(query_tis_in, in, tisn);
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = MLX5_GET(modify_tis_in, in, tisn);
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = MLX5_GET(query_flow_table_in, in, table_id);
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = MLX5_GET(modify_flow_table_in, in, table_id);
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = MLX5_GET(query_flow_group_in, in, group_id);
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = MLX5_GET(query_fte_in, in, flow_index);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = MLX5_GET(set_fte_in, in, flow_index);
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = MLX5_GET(query_q_counter_in, in, counter_set_id);
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = MLX5_GET(query_flow_counter_in, in, flow_counter_id);
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = MLX5_GET(query_scheduling_element_in, in,
				  scheduling_element_id);
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = MLX5_GET(modify_scheduling_element_in, in,
				  scheduling_element_id);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = MLX5_GET(query_l2_table_entry_in, in, table_index);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = MLX5_GET(query_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = MLX5_GET(rst2init_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = MLX5_GET(init2rtr_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = MLX5_GET(rtr2rts_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = MLX5_GET(rts2rts_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = MLX5_GET(sqerr2rts_qp_in, in, qpn);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = MLX5_GET(qp_2err_in, in, qpn);
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = MLX5_GET(qp_2rst_in, in, qpn);
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = MLX5_GET(query_dct_in, in, dctn);
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
		obj_id = MLX5_GET(query_xrq_in, in, xrqn);
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = MLX5_GET(query_xrc_srq_in, in, xrc_srqn);
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = MLX5_GET(arm_xrc_srq_in, in, xrc_srqn);
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = MLX5_GET(query_srq_in, in, srqn);
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = MLX5_GET(arm_rq_in, in, srq_number);
		break;
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = MLX5_GET(drain_dct_in, in, dctn);
		break;
	case MLX5_CMD_OP_ARM_XRQ:
		obj_id = MLX5_GET(arm_xrq_in, in, xrqn);
		break;
	default:
		return false;
	}

	if (obj_id == obj->obj_id)
		return true;

	return false;
}

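/*
 * Whitelists of the command opcodes accepted by the DEVX object methods:
 * create commands may only be issued through OBJ_CREATE, modify commands
 * through OBJ_MODIFY and query commands through OBJ_QUERY. Any opcode not
 * listed here is rejected by the corresponding handler with -EINVAL.
 */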
static bool devx_is_obj_create_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
		return true;
	default:
		return false;
	}
}

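/*
 * General HCA commands (queries that are not tied to a particular devx
 * object) which may be issued through the DEVX_OTHER method.
 */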
static bool devx_is_general_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
		return true;
	default:
		return false;
	}
}

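/*
 * Translate a user supplied completion vector index into the matching
 * device EQ number and return it to user space.
 */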
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(file);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}

/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, and then in this case other
 * users may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation), no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(file);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;

	c = devx_ufile2uctx(file);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (!c->devx_uid)
		return -EPERM;

	/* Only whitelisted general HCA commands are allowed for this method. */
	if (!devx_is_general_cmd(cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}

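/*
 * Derive the destroy/dealloc command mailbox (din/dinlen) and the object id
 * from a successfully executed create command, so the object can later be
 * torn down by devx_obj_cleanup() without further input from user space.
 */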
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;

	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	default:
		/* The opcode must match one of those handled in
		 * devx_is_obj_create_cmd().
		 */
		WARN_ON(true);
		break;
	}
}

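/* Destroy the underlying firmware object using the saved destroy mailbox. */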
static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj = uobject->object;
	int ret;

	ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	kfree(obj);
	return ret;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	struct devx_obj *obj;
	int err;

	if (!c->devx_uid)
		return -EPERM;

	if (!devx_is_obj_create_cmd(cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		goto obj_free;

	uobj->object = obj;
	obj->mdev = dev->mdev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, &obj->obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto obj_free;

	return 0;

obj_free:
	kfree(obj);
	return err;
}

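/*
 * Modify or query an existing devx object. The command opcode must be on
 * the corresponding whitelist and the object id carried in the mailbox must
 * match the object referenced by the handle.
 */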
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct devx_obj *obj = uobj->object;
	void *cmd_out;
	int err;

	if (!c->devx_uid)
		return -EPERM;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(obj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(obj->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct devx_obj *obj = uobj->object;
	void *cmd_out;
	int err;

	if (!c->devx_uid)
		return -EPERM;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(obj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(obj->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

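/*
 * Pin the user memory described by the UMEM_REG attributes and compute the
 * page shift, page offset and number of compound pages needed to build the
 * UMEM creation command.
 */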
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_SUPPORTED);
	if (err)
		return err;

	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(ucontext, addr, size, access, 0);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);

	if (!npages) {
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;

	return 0;
}

static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd)
{
	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		     (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
	return PTR_ERR_OR_ZERO(cmd->in);
}

static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(general_obj_in_cmd_hdr, cmd->in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd->in, obj_type, MLX5_OBJ_TYPE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int err;

	if (!c->devx_uid)
		return -EPERM;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
	if (err)
		goto err_umem_release;

	devx_umem_reg_cmd_build(dev, obj, &cmd);

	MLX5_SET(general_obj_in_cmd_hdr, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
	if (err)
		goto err_umem_destroy;

	return 0;

err_umem_destroy:
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}

static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(err, why, uobject))
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}

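/*
 * Attribute and method specifications exposing the DEVX handlers above
 * through the uverbs ioctl() interface.
 */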
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

DECLARE_UVERBS_OBJECT_TREE(devx_objects,
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX),
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ),
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM));

const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void)
{
	return &devx_objects;
}