// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include <linux/xarray.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

static void dispatch_event_fd(struct list_head *fd_list, const void *data);

enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
	DEVX_OBJ_FLAGS_DCT = 1 << 1,
	DEVX_OBJ_FLAGS_CQ = 1 << 2,
};

struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct ib_uobject *fd_uobj;
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};

struct devx_async_event_data {
	struct list_head list; /* headed in ev_file->event_list */
	struct mlx5_ib_uapi_devx_async_event_hdr hdr;
};

/* first level XA value data structure */
struct devx_event {
	struct xarray object_ids; /* second XA level, Key = object id */
	struct list_head unaffiliated_list;
};

/* second level XA value data structure */
struct devx_obj_event {
	struct rcu_head rcu;
	struct list_head obj_sub_list;
};

struct devx_event_subscription {
	struct list_head file_list; /* headed in ev_file->
				     * subscribed_events_list
				     */
	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
				   * devx_obj_event->obj_sub_list
				   */
	struct list_head obj_list; /* headed in devx_object */
	struct list_head event_list; /* headed in ev_file->event_list or in
				      * temp list via subscription
				      */

	u8 is_cleaned:1;
	u32 xa_key_level1;
	u32 xa_key_level2;
	struct rcu_head rcu;
	u64 cookie;
	struct devx_async_event_file *ev_file;
	struct file *filp; /* Upon hot unplug we need a direct access to */
	struct eventfd_ctx *eventfd;
};

struct devx_async_event_file {
	struct ib_uobject uobj;
	/* Head of events that are subscribed to this FD */
	struct list_head subscribed_events_list;
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	struct mlx5_ib_dev *dev;
	u8 omit_data:1;
	u8 is_overflow_err:1;
	u8 is_destroyed:1;
};

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
	struct mlx5_ib_dev	*ib_dev;
	u64			obj_id;
	u32			dinlen; /* destroy inbox length */
	u32			dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
	u32			flags;
	union {
		struct mlx5_ib_devx_mr	devx_mr;
		struct mlx5_core_dct	core_dct;
		struct mlx5_core_cq	core_cq;
		u32			flow_counter_bulk_size;
	};
	struct list_head event_sub; /* holds devx_event_subscription entries */
};

struct devx_umem {
	struct mlx5_core_dev	*mdev;
	struct ib_umem		*umem;
	u32			page_offset;
	int			page_shift;
	int			ncont;
	u32			dinlen;
	u32			dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};

struct devx_umem_reg_cmd {
	void	*in;
	u32	inlen;
	u32	out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};

static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
	return to_mucontext(ib_uverbs_get_ucontext(attrs));
}
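/*
 * Create a firmware user context (UCTX) and return its UID. The UID tags
 * every devx command issued on behalf of this user context; the RAW_TX and
 * INTERNAL_DEV_RES capability bits are granted only when the caller holds
 * the corresponding kernel capability and the device reports support.
 */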
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *uctx;
	int err;
	u16 uid;
	u32 cap = 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
		return -EINVAL;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	if (is_user && capable(CAP_NET_RAW) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
		cap |= MLX5_UCTX_CAP_RAW_TX;
	if (is_user && capable(CAP_SYS_RAWIO) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return uid;
}

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_DESTROY_TIR:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
				    obj_id);
		return true;

	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
				    table_id);
		return true;
	default:
		return false;
	}
}

bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {

		if (offset && offset >= devx_obj->flow_counter_bulk_size)
			return false;

		*counter_id = MLX5_GET(dealloc_flow_counter_in,
				       devx_obj->dinbox,
				       flow_counter_id);
		*counter_id += offset;
		return true;
	}

	return false;
}

static bool is_legacy_unaffiliated_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return true;
	default:
		return false;
	}
}

static bool is_legacy_obj_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_COMP:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return true;
	default:
		return false;
	}
}
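/*
 * The helpers below recover an object type from the creation opcode that
 * was encoded into obj_id, so event subscriptions can be keyed and matched
 * against incoming EQEs.
 */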
static u16 get_legacy_obj_type(u16 opcode)
{
	switch (opcode) {
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_EVENT_QUEUE_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_EVENT_QUEUE_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_EVENT_QUEUE_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return 0;
	}
}

static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
{
	u16 opcode;

	opcode = (obj->obj_id >> 32) & 0xffff;

	if (is_legacy_obj_event_num(event_num))
		return get_legacy_obj_type(opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		return (obj->obj_id >> 48);
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_OBJ_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_OBJ_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_OBJ_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_OBJ_TYPE_DCT;
	case MLX5_CMD_OP_CREATE_TIR:
		return MLX5_OBJ_TYPE_TIR;
	case MLX5_CMD_OP_CREATE_TIS:
		return MLX5_OBJ_TYPE_TIS;
	case MLX5_CMD_OP_CREATE_PSV:
		return MLX5_OBJ_TYPE_PSV;
	case MLX5_OBJ_TYPE_MKEY:
		return MLX5_OBJ_TYPE_MKEY;
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_OBJ_TYPE_RMP;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return MLX5_OBJ_TYPE_XRC_SRQ;
	case MLX5_CMD_OP_CREATE_XRQ:
		return MLX5_OBJ_TYPE_XRQ;
	case MLX5_CMD_OP_CREATE_RQT:
		return MLX5_OBJ_TYPE_RQT;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		return MLX5_OBJ_TYPE_FLOW_COUNTER;
	case MLX5_CMD_OP_CREATE_CQ:
		return MLX5_OBJ_TYPE_CQ;
	default:
		return 0;
	}
}

static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
{
	switch (event_type) {
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return eqe->data.qp_srq.type;
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return 0;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
	}
}

static u32 get_dec_obj_id(u64 obj_id)
{
	return (obj_id & 0xffffffff);
}
/*
 * As the obj_id in the firmware is not globally unique the object type
 * must be considered upon checking for a valid object id.
 * For that the opcode of the creator command is encoded as part of the obj_id.
 */
static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
}
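/*
 * Worked example: a CQ whose cqn is 0x2a is encoded as
 * ((u64)MLX5_CMD_OP_CREATE_CQ << 32) | 0x2a, so another object type that
 * happens to get the same firmware object number still yields a different
 * 64-bit id.
 */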
static u64 devx_get_obj_id(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u64 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_type) << 16,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in,
						 mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in,
						 group_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in,
						 flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in,
						 counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in,
						 flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
				(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
				 MLX5_GET(query_packet_reformat_context_in,
					  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}
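/*
 * Check that the object id carried in a modify/query mailbox belongs to the
 * uobject the method was invoked on. The expected creator opcode is derived
 * from the uobject type, so ids are only compared within one namespace.
 */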
static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
				 struct ib_uobject *uobj, const void *in)
{
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		u16 opcode;

		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
		enum ib_qp_type qp_type = qp->ibqp.qp_type;

		if (qp_type == IB_QPT_RAW_PACKET ||
		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
							 &qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp_type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;

		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

	default:
		return false;
	}
}
static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;

	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}

	default:
		return;
	}
}
static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	case MLX5_CMD_OP_CREATE_PSV:
	{
		u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);

		if (num_psv == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
		return true;
	default:
		return false;
	}
}
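/*
 * Whitelisted query commands may fall back to the device-wide whitelist UID
 * when the context has no devx UID of its own; every other command requires
 * the context's devx UID.
 */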
static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}

static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	/* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
	     opcode < MLX5_CMD_OP_GENERAL_END))
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_LAG:
		return true;
	default:
		return false;
	}
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}

/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, in which case other users
 * may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation), no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;
	int uid;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Only whitelisted general HCA commands are allowed for this method. */
	if (!devx_is_general_cmd(cmd_in, dev))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}
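/*
 * Derive the matching destroy/dealloc mailbox for a create command and store
 * it in din/dinlen, so teardown can be issued later without re-parsing the
 * create mailbox.
 */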
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;

	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port,
			 *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	case MLX5_CMD_OP_CREATE_PSV:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_PSV);
		MLX5_SET(destroy_psv_in, din, psvn,
			 MLX5_GET(create_psv_out, out, psv0_index));
		break;
	default:
		/* The entry must match one of the devx_is_obj_create_cmd() commands */
		WARN_ON(true);
		break;
	}
}
static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);

	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey,
			       GFP_KERNEL));
}

static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
			MLX5_FLD_SZ_BYTES(create_mkey_in,
					  memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
	    access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}
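/*
 * Unlink one subscription from the two-level event xarray. The level-2
 * entry is freed via RCU once its subscriber list drains; the level-1
 * entry is intentionally kept for reuse.
 */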
static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
				      struct devx_event_subscription *sub)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	if (sub->is_cleaned)
		return;

	sub->is_cleaned = 1;
	list_del_rcu(&sub->xa_list);

	if (list_empty(&sub->obj_list))
		return;

	list_del_rcu(&sub->obj_list);
	/* check whether key level 1 for this obj_sub_list is empty */
	event = xa_load(&dev->devx_event_table.event_xa,
			sub->xa_key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 sub->xa_key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}
static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct mlx5_devx_event_table *devx_event_table;
	struct devx_obj *obj = uobject->object;
	struct devx_event_subscription *sub_entry, *tmp;
	struct mlx5_ib_dev *dev;
	int ret;

	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		/*
		 * The pagefault_single_data_segment() does commands against
		 * the mmkey, we must wait for that to stop before freeing the
		 * mkey, as another allocation could get the same mkey #.
		 */
		xa_erase(&obj->ib_dev->odp_mkeys,
			 mlx5_base_mkey(obj->devx_mr.mmkey.key));
		synchronize_srcu(&dev->odp_srcu);
	}

	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
				    obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	devx_event_table = &dev->devx_event_table;

	mutex_lock(&devx_event_table->event_xa_lock);
	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
		devx_cleanup_subscription(dev, sub_entry);
	mutex_unlock(&devx_event_table->event_xa_lock);

	kfree(obj);
	return ret;
}

static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
	struct mlx5_devx_event_table *table;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u32 obj_id = mcq->cqn;

	table = &obj->ib_dev->devx_event_table;
	rcu_read_lock();
	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
	if (!event)
		goto out;

	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event)
		goto out;

	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
out:
	rcu_read_unlock();
}
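/*
 * DEVX object creation: tag the user-supplied mailbox with the UID,
 * validate or mark umem usage, execute the command, and precompute the
 * destroy mailbox. DCTs and CQs additionally go through mlx5_core so their
 * firmware events can be delivered (see devx_cq_comp() above).
 */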
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	u16 obj_type = 0;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
		obj->flags |= DEVX_OBJ_FLAGS_DCT;
		err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
					   cmd_in, cmd_in_len,
					   cmd_out, cmd_out_len);
	} else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
		obj->flags |= DEVX_OBJ_FLAGS_CQ;
		obj->core_cq.comp = devx_cq_comp;
		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
					  cmd_in, cmd_in_len, cmd_out,
					  cmd_out_len);
	} else {
		err = mlx5_cmd_exec(dev->mdev, cmd_in,
				    cmd_in_len,
				    cmd_out, cmd_out_len);
	}

	if (err)
		goto obj_free;

	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
		u8 bulk = MLX5_GET(alloc_flow_counter_in,
				   cmd_in,
				   flow_counter_bulk);
		obj->flow_counter_bulk_size = 128UL * bulk;
	}

	uobj->object = obj;
	INIT_LIST_HEAD(&obj->event_sub);
	obj->ib_dev = dev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
			     cmd_out, cmd_out_len);
	if (err)
		goto obj_destroy;

	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}
	return 0;

obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
			      sizeof(out));
obj_free:
	kfree(obj);
	return err;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	void *cmd_out;
	int err;
	int uid;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	void *cmd_out;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

struct devx_async_event_queue {
	spinlock_t		lock;
	wait_queue_head_t	poll_wait;
	struct list_head	event_list;
	atomic_t		bytes_in_use;
	u8			is_destroyed:1;
};

struct devx_async_cmd_event_file {
	struct ib_uobject		uobj;
	struct devx_async_event_queue	ev_queue;
	struct mlx5_async_ctx		async_ctx;
};

static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	atomic_set(&ev_queue->bytes_in_use, 0);
	ev_queue->is_destroyed = 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_async_cmd_event_file *ev_file;

	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);

	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
			       uobj);
	devx_init_event_queue(&ev_file->ev_queue);
	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
	return 0;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
	struct devx_async_event_file *ev_file;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 flags;
	int err;

	err = uverbs_get_flags32(&flags, attrs,
		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);

	if (err)
		return err;

	ev_file = container_of(uobj, struct devx_async_event_file,
			       uobj);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
		ev_file->omit_data = 1;
	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
	ev_file->dev = dev;
	get_device(&dev->ib_dev.dev);
	return 0;
}

static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct ib_uobject *fd_uobj = async_data->fd_uobj;
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_event_queue *ev_queue;
	unsigned long flags;

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);
	ev_queue = &ev_file->ev_queue;

	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	fput(fd_uobj->object);
}

#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
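/*
 * Asynchronous object query: the command completes into the FD's event
 * queue; MAX_ASYNC_BYTES_IN_USE bounds the total output buffered per FD so
 * userspace cannot pin unbounded kernel memory in pending completions.
 */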
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->fd_uobj = fd_uobj;

	get_file(fd_uobj->object);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
		    uverbs_attr_get_len(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
		    async_data->hdr.out_data,
		    async_data->cmd_out_len,
		    devx_query_callback, &async_data->cb_work);

	if (err)
		goto cb_err;

	return 0;

cb_err:
	fput(fd_uobj->object);
free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}
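/*
 * Subscriptions live in a two-level xarray: level 1 is keyed by event type
 * (plus object type for affiliated events), level 2 by object id. The
 * alloc/dealloc pair below manages those entries; level-1 entries persist
 * for reuse, as noted in the comments.
 */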
static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   bool is_level2,
			   u32 key_level2)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	/* Level 1 is valid for future use, no need to free */
	if (!is_level2)
		return;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids,
				key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level2,
			 u32 key_level2)
{
	struct devx_obj_event *obj_event;
	struct devx_event *event;
	int err;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	if (!event) {
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		INIT_LIST_HEAD(&event->unaffiliated_list);
		xa_init(&event->object_ids);

		err = xa_insert(&devx_event_table->event_xa,
				key_level1,
				event,
				GFP_KERNEL);
		if (err) {
			kfree(event);
			return err;
		}
	}

	if (!is_level2)
		return 0;

	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* Level1 is valid for future use, no need to free */
			return -ENOMEM;

		err = xa_insert(&event->object_ids,
				key_level2,
				obj_event,
				GFP_KERNEL);
		if (err)
			return err;
		INIT_LIST_HEAD(&obj_event->obj_sub_list);
	}

	return 0;
}

static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
				   struct devx_obj *obj)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (obj) {
			if (!is_legacy_obj_event_num(event_type_num_list[i]))
				return false;
		} else if (!is_legacy_unaffiliated_event_num(
				event_type_num_list[i])) {
			return false;
		}
	}

	return true;
}

#define MAX_SUPP_EVENT_NUM 255
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{
	__be64 *aff_events;
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
	int i;

	if (MLX5_CAP_GEN(dev, event_cap)) {
		aff_events = MLX5_CAP_DEV_EVENT(dev,
						user_affiliated_events);
		unaff_events = MLX5_CAP_DEV_EVENT(dev,
						  user_unaffiliated_events);
	} else {
		return is_valid_events_legacy(num_events, event_type_num_list,
					      obj);
	}

	for (i = 0; i < num_events; i++) {
		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
			return false;

		mask_entry = event_type_num_list[i] / 64;
		mask_bit = event_type_num_list[i] % 64;

		if (obj) {
			/* CQ completion */
			if (event_type_num_list[i] == 0)
				continue;

			if (!(be64_to_cpu(aff_events[mask_entry]) &
					(1ull << mask_bit)))
				return false;

			continue;
		}

		if (!(be64_to_cpu(unaff_events[mask_entry]) &
				(1ull << mask_bit)))
			return false;
	}

	return true;
}
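
/*
 * Subscribe to one or more device events on behalf of a DEVX object, or,
 * when no object handle is given, to unaffiliated events. The handler works
 * in two phases under event_xa_lock: first every XArray entry and
 * subscription structure is allocated, then the subscriptions are linked
 * into the file, XArray and object lists, so the publish phase cannot fail
 * midway and no partially visible subscription is ever left behind.
 */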
#define MAX_NUM_EVENTS 16
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	struct ib_uobject *fd_uobj;
	struct devx_obj *obj = NULL;
	struct devx_async_event_file *ev_file;
	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
	u16 *event_type_num_list;
	struct devx_event_subscription *event_sub, *tmp_sub;
	struct list_head sub_list;
	int redirect_fd;
	bool use_eventfd = false;
	int num_events;
	int num_alloc_xa_entries = 0;
	u16 obj_type = 0;
	u64 cookie = 0;
	u32 obj_id = 0;
	int err;
	int i;

	if (!c->devx_uid)
		return -EINVAL;

	if (!IS_ERR(devx_uobj)) {
		obj = (struct devx_obj *)devx_uobj->object;
		if (obj)
			obj_id = get_dec_obj_id(obj->obj_id);
	}

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_event_file,
			       uobj);

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
		err = uverbs_copy_from(&redirect_fd, attrs,
				       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
		if (err)
			return err;

		use_eventfd = true;
	}

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
		if (use_eventfd)
			return -EINVAL;

		err = uverbs_copy_from(&cookie, attrs,
				       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
		if (err)
			return err;
	}

	num_events = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		sizeof(u16));

	if (num_events < 0)
		return num_events;

	if (num_events > MAX_NUM_EVENTS)
		return -EINVAL;

	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);

	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
		return -EINVAL;

	INIT_LIST_HEAD(&sub_list);

	/* Protect from concurrent subscriptions to same XA entries to allow
	 * both to succeed
	 */
	mutex_lock(&devx_event_table->event_xa_lock);
	for (i = 0; i < num_events; i++) {
		u32 key_level1;

		if (obj)
			obj_type = get_dec_obj_type(obj,
						    event_type_num_list[i]);
		key_level1 = event_type_num_list[i] | obj_type << 16;

		err = subscribe_event_xa_alloc(devx_event_table,
					       key_level1,
					       obj,
					       obj_id);
		if (err)
			goto err;

		num_alloc_xa_entries++;
		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
		if (!event_sub) {
			err = -ENOMEM;
			goto err;
		}

		list_add_tail(&event_sub->event_list, &sub_list);
		if (use_eventfd) {
			event_sub->eventfd =
				eventfd_ctx_fdget(redirect_fd);

			if (IS_ERR(event_sub->eventfd)) {
				err = PTR_ERR(event_sub->eventfd);
				event_sub->eventfd = NULL;
				goto err;
			}
		}

		event_sub->cookie = cookie;
		event_sub->ev_file = ev_file;
		event_sub->filp = fd_uobj->object;
		/* May be needed upon cleanup the devx object/subscription */
		event_sub->xa_key_level1 = key_level1;
		event_sub->xa_key_level2 = obj_id;
		INIT_LIST_HEAD(&event_sub->obj_list);
	}

	/* Once all the allocations and the XA data insertions were done we
	 * can go ahead and add all the subscriptions to the relevant lists
	 * without concern of a failure.
	 */
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		struct devx_event *event;
		struct devx_obj_event *obj_event;

		list_del_init(&event_sub->event_list);

		spin_lock_irq(&ev_file->lock);
		list_add_tail_rcu(&event_sub->file_list,
				  &ev_file->subscribed_events_list);
		spin_unlock_irq(&ev_file->lock);

		event = xa_load(&devx_event_table->event_xa,
				event_sub->xa_key_level1);
		WARN_ON(!event);

		if (!obj) {
			list_add_tail_rcu(&event_sub->xa_list,
					  &event->unaffiliated_list);
			continue;
		}

		obj_event = xa_load(&event->object_ids, obj_id);
		WARN_ON(!obj_event);
		list_add_tail_rcu(&event_sub->xa_list,
				  &obj_event->obj_sub_list);
		list_add_tail_rcu(&event_sub->obj_list,
				  &obj->event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return 0;

err:
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		list_del(&event_sub->event_list);

		subscribe_event_xa_dealloc(devx_event_table,
					   event_sub->xa_key_level1,
					   obj,
					   obj_id);

		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);

		kfree(event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return err;
}
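
/*
 * Pin the user memory described by the ADDR/LEN/ACCESS attributes.
 * mlx5_ib_cont_pages() picks a page shift the device can use for the
 * pinned range and counts how many pages of that size it spans (ncont);
 * page_offset is the start offset within the first such page.
 */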
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
	if (err)
		return err;

	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);

	if (!npages) {
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;

	return 0;
}

static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd)
{
	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		(MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
	return PTR_ERR_OR_ZERO(cmd->in);
}

static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
}
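
/*
 * Register a umem with the device: pin the pages, build a CREATE_UMEM
 * command carrying one MTT entry per page, execute it under the caller's
 * devx uid and return the firmware object id to userspace. The matching
 * destroy command is precomputed into dinbox, so teardown needs no state
 * beyond the object itself.
 */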
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int err;

	if (!c->devx_uid)
		return -EINVAL;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
	if (err)
		goto err_umem_release;

	devx_umem_reg_cmd_build(dev, obj, &cmd);

	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
			     sizeof(obj_id));
	if (err)
		goto err_umem_destroy;

	return 0;

err_umem_destroy:
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out,
		      sizeof(cmd.out));
err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}

static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why,
			     struct uverbs_attr_bundle *attrs)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
			    sizeof(out));
	if (ib_is_destroy_retryable(err, why, uobject))
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}

static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
				  unsigned long event_type)
{
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;

	if (!MLX5_CAP_GEN(dev, event_cap))
		return is_legacy_unaffiliated_event_num(event_type);

	unaff_events = MLX5_CAP_DEV_EVENT(dev,
					  user_unaffiliated_events);
	WARN_ON(event_type > MAX_SUPP_EVENT_NUM);

	mask_entry = event_type / 64;
	mask_bit = event_type % 64;

	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
		return false;

	return true;
}
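
/*
 * Extract the object number from an EQE. Most affiliated events carry it
 * in a type-specific 24-bit field (QP/SRQ, XRQ, DCT or CQ number); any
 * other event type is expected to use the generic affiliated event header
 * layout.
 */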
static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
{
	struct mlx5_eqe *eqe = data;
	u32 obj_id = 0;

	switch (event_type) {
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
		break;
	default:
		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
		break;
	}

	return obj_id;
}

static int deliver_event(struct devx_event_subscription *event_sub,
			 const void *data)
{
	struct devx_async_event_file *ev_file;
	struct devx_async_event_data *event_data;
	unsigned long flags;

	ev_file = event_sub->ev_file;

	if (ev_file->omit_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		if (!list_empty(&event_sub->event_list)) {
			spin_unlock_irqrestore(&ev_file->lock, flags);
			return 0;
		}

		list_add_tail(&event_sub->event_list, &ev_file->event_list);
		spin_unlock_irqrestore(&ev_file->lock, flags);
		wake_up_interruptible(&ev_file->poll_wait);
		return 0;
	}

	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
			     GFP_ATOMIC);
	if (!event_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		ev_file->is_overflow_err = 1;
		spin_unlock_irqrestore(&ev_file->lock, flags);
		return -ENOMEM;
	}

	event_data->hdr.cookie = event_sub->cookie;
	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));

	spin_lock_irqsave(&ev_file->lock, flags);
	list_add_tail(&event_data->list, &ev_file->event_list);
	spin_unlock_irqrestore(&ev_file->lock, flags);
	wake_up_interruptible(&ev_file->poll_wait);

	return 0;
}

static void dispatch_event_fd(struct list_head *fd_list,
			      const void *data)
{
	struct devx_event_subscription *item;

	list_for_each_entry_rcu(item, fd_list, xa_list) {
		if (!get_file_rcu(item->filp))
			continue;

		if (item->eventfd) {
			eventfd_signal(item->eventfd, 1);
			fput(item->filp);
			continue;
		}

		deliver_event(item, data);
		fput(item->filp);
	}
}
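
/*
 * EQ notifier callback; it may run in atomic context, hence the
 * GFP_ATOMIC allocation in deliver_event() and the RCU-protected list
 * walks. Unaffiliated events are dispatched straight from the level-1
 * entry; affiliated ones need a second lookup by the object id recovered
 * from the EQE.
 */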
static int devx_event_notifier(struct notifier_block *nb,
			       unsigned long event_type, void *data)
{
	struct mlx5_devx_event_table *table;
	struct mlx5_ib_dev *dev;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u16 obj_type = 0;
	bool is_unaffiliated;
	u32 obj_id;

	/* Explicit filtering to kernel events which may occur frequently */
	if (event_type == MLX5_EVENT_TYPE_CMD ||
	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
		return NOTIFY_OK;

	table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
	dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);

	if (!is_unaffiliated)
		obj_type = get_event_obj_type(event_type, data);

	rcu_read_lock();
	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
	if (!event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	if (is_unaffiliated) {
		dispatch_event_fd(&event->unaffiliated_list, data);
		rcu_read_unlock();
		return NOTIFY_OK;
	}

	obj_id = devx_get_obj_id_from_event(event_type, data);
	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	dispatch_event_fd(&obj_event->obj_sub_list, data);

	rcu_read_unlock();
	return NOTIFY_OK;
}

void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;

	xa_init(&table->event_xa);
	mutex_init(&table->event_xa_lock);
	MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
	mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
}

void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	struct devx_event_subscription *sub, *tmp;
	struct devx_event *event;
	void *entry;
	unsigned long id;

	mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
	mutex_lock(&dev->devx_event_table.event_xa_lock);
	xa_for_each(&table->event_xa, id, entry) {
		event = entry;
		list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list,
					 xa_list)
			devx_cleanup_subscription(dev, sub);
		kfree(entry);
	}
	mutex_unlock(&dev->devx_event_table.event_xa_lock);
	xa_destroy(&table->event_xa);
}
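
/*
 * read() for the async command-completion FD. Blocks (unless O_NONBLOCK)
 * until a completed command is queued, then copies the whole event
 * (header plus command output) in one go; a buffer too small for the
 * next event fails with -ENOSPC rather than returning a partial event,
 * and -EIO signals that the queue has been torn down.
 */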
static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
			    ev_queue->poll_wait,
			    (!list_empty(&ev_queue->event_list) ||
			     ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		if (list_empty(&ev_queue->event_list) &&
		    ev_queue->is_destroyed)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next,
			   struct devx_async_data, list);
	eventsz = event->cmd_out_len +
		  sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);

	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}

	list_del(ev_queue->event_list.next);
	spin_unlock_irq(&ev_queue->lock);

	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}

static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct devx_async_cmd_event_file *comp_ev_file = container_of(
		uobj, struct devx_async_cmd_event_file, uobj);
	struct devx_async_data *entry, *tmp;

	spin_lock_irq(&comp_ev_file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp,
				 &comp_ev_file->ev_queue.event_list, list)
		kvfree(entry);
	spin_unlock_irq(&comp_ev_file->ev_queue.lock);

	uverbs_close_fd(filp);
	return 0;
}

static __poll_t devx_async_cmd_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (ev_queue->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static const struct file_operations devx_async_cmd_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_cmd_event_read,
	.poll	 = devx_async_cmd_event_poll,
	.release = devx_async_cmd_event_close,
	.llseek	 = no_llseek,
};
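
/*
 * read() for the async event FD. In omit_data mode only the 64-bit cookie
 * of each signalled subscription is returned and duplicate signals are
 * collapsed; otherwise a full header plus raw EQE is copied out and the
 * queued copy freed. A pending overflow is reported once as -EOVERFLOW
 * before any further events are delivered.
 */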
static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
				     size_t count, loff_t *pos)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub;
	struct devx_async_event_data *uninitialized_var(event);
	int ret = 0;
	size_t eventsz;
	bool omit_data;
	void *event_data;

	omit_data = ev_file->omit_data;

	spin_lock_irq(&ev_file->lock);

	if (ev_file->is_overflow_err) {
		ev_file->is_overflow_err = 0;
		spin_unlock_irq(&ev_file->lock);
		return -EOVERFLOW;
	}

	if (ev_file->is_destroyed) {
		spin_unlock_irq(&ev_file->lock);
		return -EIO;
	}

	while (list_empty(&ev_file->event_list)) {
		spin_unlock_irq(&ev_file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_file->poll_wait,
			    (!list_empty(&ev_file->event_list) ||
			     ev_file->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_file->lock);
		if (ev_file->is_destroyed) {
			spin_unlock_irq(&ev_file->lock);
			return -EIO;
		}
	}

	if (omit_data) {
		event_sub = list_first_entry(&ev_file->event_list,
					     struct devx_event_subscription,
					     event_list);
		eventsz = sizeof(event_sub->cookie);
		event_data = &event_sub->cookie;
	} else {
		event = list_first_entry(&ev_file->event_list,
					 struct devx_async_event_data, list);
		eventsz = sizeof(struct mlx5_eqe) +
			  sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
		event_data = &event->hdr;
	}

	if (eventsz > count) {
		spin_unlock_irq(&ev_file->lock);
		return -EINVAL;
	}

	if (omit_data)
		list_del_init(&event_sub->event_list);
	else
		list_del(&event->list);

	spin_unlock_irq(&ev_file->lock);

	if (copy_to_user(buf, event_data, eventsz))
		/* This points to an application issue, not a kernel concern */
		ret = -EFAULT;
	else
		ret = eventsz;

	if (!omit_data)
		kfree(event);
	return ret;
}

static __poll_t devx_async_event_poll(struct file *filp,
				      struct poll_table_struct *wait)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_file->poll_wait, wait);

	spin_lock_irq(&ev_file->lock);
	if (ev_file->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_file->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_file->lock);

	return pollflags;
}

static int devx_async_event_close(struct inode *inode, struct file *filp)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub, *event_sub_tmp;
	struct devx_async_event_data *entry, *tmp;
	struct mlx5_ib_dev *dev = ev_file->dev;

	mutex_lock(&dev->devx_event_table.event_xa_lock);
	/* delete the subscriptions which are related to this FD */
	list_for_each_entry_safe(event_sub, event_sub_tmp,
				 &ev_file->subscribed_events_list, file_list) {
		devx_cleanup_subscription(dev, event_sub);
		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);

		list_del_rcu(&event_sub->file_list);
		/* subscription may not be used by the read API any more */
		kfree_rcu(event_sub, rcu);
	}

	mutex_unlock(&dev->devx_event_table.event_xa_lock);

	/* free the pending events allocation */
	if (!ev_file->omit_data) {
		spin_lock_irq(&ev_file->lock);
		list_for_each_entry_safe(entry, tmp,
					 &ev_file->event_list, list)
			kfree(entry); /* read can't come any more */
		spin_unlock_irq(&ev_file->lock);
	}

	uverbs_close_fd(filp);
	put_device(&dev->ib_dev.dev);
	return 0;
}

static const struct file_operations devx_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_event_read,
	.poll	 = devx_async_event_poll,
	.release = devx_async_event_close,
	.llseek	 = no_llseek,
};

static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
						enum rdma_remove_reason why)
{
	struct devx_async_cmd_event_file *comp_ev_file =
		container_of(uobj, struct devx_async_cmd_event_file,
			     uobj);
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;

	spin_lock_irq(&ev_queue->lock);
	ev_queue->is_destroyed = 1;
	spin_unlock_irq(&ev_queue->lock);

	if (why == RDMA_REMOVE_DRIVER_REMOVE)
		wake_up_interruptible(&ev_queue->poll_wait);

	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
	return 0;
}

static int devx_hot_unplug_async_event_file(struct ib_uobject *uobj,
					    enum rdma_remove_reason why)
{
	struct devx_async_event_file *ev_file =
		container_of(uobj, struct devx_async_event_file,
			     uobj);

	spin_lock_irq(&ev_file->lock);
	ev_file->is_destroyed = 1;
	spin_unlock_irq(&ev_file->lock);

	wake_up_interruptible(&ev_file->poll_wait);
	return 0;
}
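
/*
 * The declarations below describe the DEVX ioctl surface to the uverbs
 * core: per-method attribute specifications plus the object types that
 * own them (IDR-backed objects and FD-backed async command/event
 * channels, together with their cleanup and hot-unplug handlers).
 */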
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
			     u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		       MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		       UVERBS_ACCESS_READ,
		       UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		       UVERBS_ACCESS_READ,
		       UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ,
			UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
			   UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
			   UA_MANDATORY,
			   UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
			   UVERBS_ATTR_TYPE(u64),
			   UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
			   UVERBS_ATTR_TYPE(u32),
			   UA_OPTIONAL));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_hot_unplug_async_cmd_event_file,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
			     enum mlx5_ib_uapi_devx_create_event_channel_flags,
			     UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
			     devx_hot_unplug_async_event_file,
			     &devx_async_event_fops, "[devx_async_event]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
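
/*
 * DEVX is exposed only when the device can hand out user contexts
 * (log_max_uctx is non-zero); the same capability gates every object
 * tree chained below.
 */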
static bool devx_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}

const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_OBJ,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_UMEM,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};