Lines Matching +full:sub +full:- +full:mailboxes

1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
43 struct list_head list; /* headed in ev_file->event_list */
60 struct list_head file_list; /* headed in ev_file->subscribed_events_list */
63 struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
64 * devx_obj_event->obj_sub_list
67 struct list_head event_list; /* headed in ev_file->event_list or in a temporary list during subscription */
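
The comments above (lines 43-67) place one subscription on several lists at once; a sketch of the resulting topology, using only names that appear in these matches (illustrative, not a source excerpt):

    /*
     *  devx_async_event_file.subscribed_events_list
     *      `-> devx_event_subscription.file_list
     *  devx_event.unaffiliated_list  -or-  devx_obj_event.obj_sub_list
     *      `-> devx_event_subscription.xa_list
     *  devx_async_event_file.event_list       (only while an event is pending)
     *      `-> devx_event_subscription.event_list
     */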
122 if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx)) in mlx5_ib_devx_create()
123 return -EINVAL; in mlx5_ib_devx_create()
127 (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX)) in mlx5_ib_devx_create()
130 (MLX5_CAP_GEN(dev->mdev, uctx_cap) & in mlx5_ib_devx_create()
137 err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)); in mlx5_ib_devx_create()
153 mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)); in mlx5_ib_devx_destroy()
210 opcode = (obj->obj_id >> 32) & 0xffff; in get_dec_obj_type()
217 return (obj->obj_id >> 48); in get_dec_obj_type()
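
Taken with the encoding at line 1560 below (get_enc_obj_id(opcode | obj_type << 16, obj_id)), the two extractions above imply a 64-bit key layout. A standalone sketch of that packing; get_enc_obj_id()'s body is not among the matched lines, so the shift-and-or form here is an assumption:

    #include <stdint.h>

    /* bits 31:0  hardware object number
     * bits 47:32 creation command opcode
     * bits 63:48 general-object type (folded in as opcode | obj_type << 16)
     */
    static uint64_t enc_obj_id(uint32_t opcode, uint32_t obj_num)
    {
            return ((uint64_t)opcode << 32) | obj_num;
    }

    static uint16_t dec_opcode(uint64_t key)   { return (key >> 32) & 0xffff; }
    static uint16_t dec_obj_type(uint64_t key) { return key >> 48; }
    static uint32_t dec_obj_num(uint64_t key)  { return (uint32_t)key; }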
264 return eqe->data.qp_srq.type; in get_event_obj_type()
272 return MLX5_GET(affiliated_event_header, &eqe->data, obj_type); in get_event_obj_type()
594 struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata); in devx_is_valid_obj_id()
603 to_mcq(uobj->object)->mcq.cqn) == in devx_is_valid_obj_id()
608 struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq); in devx_is_valid_obj_id()
611 switch (srq->common.res) { in devx_is_valid_obj_id()
619 if (!dev->mdev->issi) in devx_is_valid_obj_id()
626 to_msrq(uobj->object)->msrq.srqn) == in devx_is_valid_obj_id()
632 struct mlx5_ib_qp *qp = to_mqp(uobj->object); in devx_is_valid_obj_id()
634 if (qp->type == IB_QPT_RAW_PACKET || in devx_is_valid_obj_id()
635 (qp->flags & IB_QP_CREATE_SOURCE_QPN)) { in devx_is_valid_obj_id()
637 &qp->raw_packet_qp; in devx_is_valid_obj_id()
638 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; in devx_is_valid_obj_id()
639 struct mlx5_ib_sq *sq = &raw_packet_qp->sq; in devx_is_valid_obj_id()
642 rq->base.mqp.qpn) == obj_id || in devx_is_valid_obj_id()
644 sq->base.mqp.qpn) == obj_id || in devx_is_valid_obj_id()
646 rq->tirn) == obj_id || in devx_is_valid_obj_id()
648 sq->tisn) == obj_id); in devx_is_valid_obj_id()
651 if (qp->type == MLX5_IB_QPT_DCT) in devx_is_valid_obj_id()
653 qp->dct.mdct.mqp.qpn) == obj_id; in devx_is_valid_obj_id()
655 qp->ibqp.qp_num) == obj_id; in devx_is_valid_obj_id()
660 to_mrwq(uobj->object)->core_qp.qpn) == in devx_is_valid_obj_id()
665 to_mrwq_ind_table(uobj->object)->rqtn) == in devx_is_valid_obj_id()
671 struct devx_obj *devx_uobj = uobj->object; in devx_is_valid_obj_id()
674 devx_uobj->flow_counter_bulk_size) { in devx_is_valid_obj_id()
677 end = devx_uobj->obj_id + in devx_is_valid_obj_id()
678 devx_uobj->flow_counter_bulk_size; in devx_is_valid_obj_id()
679 return devx_uobj->obj_id <= obj_id && end > obj_id; in devx_is_valid_obj_id()
682 return devx_uobj->obj_id == obj_id; in devx_is_valid_obj_id()
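
The bulk branch above treats a flow-counter object as covering the half-open ID range [obj_id, obj_id + flow_counter_bulk_size); non-bulk objects fall through to the exact match on line 682. For example:

    /* base obj_id = 0x10, flow_counter_bulk_size = 4:
     *   end = 0x14, so IDs 0x10..0x13 validate; 0x14 does not (end is exclusive)
     */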
936 if (c->devx_uid) in devx_get_uid()
937 return c->devx_uid; in devx_get_uid()
939 dev = to_mdev(c->ibucontext.device); in devx_get_uid()
940 if (dev->devx_whitelist_uid) in devx_get_uid()
941 return dev->devx_whitelist_uid; in devx_get_uid()
943 return -EOPNOTSUPP; in devx_get_uid()
946 if (!c->devx_uid) in devx_get_uid()
947 return -EINVAL; in devx_get_uid()
949 return c->devx_uid; in devx_get_uid()
957 if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) && in devx_is_general_cmd()
998 return -EFAULT; in UVERBS_HANDLER()
1003 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1005 err = mlx5_comp_eqn_get(dev->mdev, user_vector, &dev_eqn); in UVERBS_HANDLER()
1011 return -EFAULT; in UVERBS_HANDLER()
1027 * mailboxes (except tagging them with UID), we expose to the user its UAR
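
This line belongs to a longer security comment: devx forwards user-built command mailboxes essentially untouched, and the one field the kernel stamps is the uid. A hedged one-liner of what that stamping looks like (field path per the mlx5_ifc general-object header; treat it as illustrative):

    MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);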
1047 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1051 return -EFAULT; in UVERBS_HANDLER()
1053 dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true); in UVERBS_HANDLER()
1059 return -EFAULT; in UVERBS_HANDLER()
1080 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1088 return -EINVAL; in UVERBS_HANDLER()
1095 err = mlx5_cmd_do(dev->mdev, cmd_in, in UVERBS_HANDLER()
1098 if (err && err != -EREMOTEIO) in UVERBS_HANDLER()
1313 struct mlx5_ib_mkey *mkey = &obj->mkey; in devx_handle_mkey_indirect()
1319 mkey->key = mlx5_idx_to_mkey( in devx_handle_mkey_indirect()
1321 mkey->type = MLX5_MKEY_INDIRECT_DEVX; in devx_handle_mkey_indirect()
1322 mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size); in devx_handle_mkey_indirect()
1323 init_waitqueue_head(&mkey->wait); in devx_handle_mkey_indirect()
1339 return -EINVAL; in devx_handle_mkey_create()
1349 obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY; in devx_handle_mkey_create()
1358 struct devx_event_subscription *sub) in devx_cleanup_subscription() argument
1363 if (sub->is_cleaned) in devx_cleanup_subscription()
1366 sub->is_cleaned = 1; in devx_cleanup_subscription()
1367 list_del_rcu(&sub->xa_list); in devx_cleanup_subscription()
1369 if (list_empty(&sub->obj_list)) in devx_cleanup_subscription()
1372 list_del_rcu(&sub->obj_list); in devx_cleanup_subscription()
1374 event = xa_load(&dev->devx_event_table.event_xa, in devx_cleanup_subscription()
1375 sub->xa_key_level1); in devx_cleanup_subscription()
1378 xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2); in devx_cleanup_subscription()
1379 if (list_empty(&xa_val_level2->obj_sub_list)) { in devx_cleanup_subscription()
1380 xa_erase(&event->object_ids, in devx_cleanup_subscription()
1381 sub->xa_key_level2); in devx_cleanup_subscription()
1392 struct devx_obj *obj = uobject->object; in devx_obj_cleanup()
1397 dev = mlx5_udata_to_mdev(&attrs->driver_udata); in devx_obj_cleanup()
1398 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY && in devx_obj_cleanup()
1399 xa_erase(&obj->ib_dev->odp_mkeys, in devx_obj_cleanup()
1400 mlx5_base_mkey(obj->mkey.key))) in devx_obj_cleanup()
1406 mlx5r_deref_wait_odp_mkey(&obj->mkey); in devx_obj_cleanup()
1408 if (obj->flags & DEVX_OBJ_FLAGS_DCT) in devx_obj_cleanup()
1409 ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); in devx_obj_cleanup()
1410 else if (obj->flags & DEVX_OBJ_FLAGS_CQ) in devx_obj_cleanup()
1411 ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq); in devx_obj_cleanup()
1413 ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, in devx_obj_cleanup()
1414 obj->dinlen, out, sizeof(out)); in devx_obj_cleanup()
1418 devx_event_table = &dev->devx_event_table; in devx_obj_cleanup()
1420 mutex_lock(&devx_event_table->event_xa_lock); in devx_obj_cleanup()
1421 list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list) in devx_obj_cleanup()
1423 mutex_unlock(&devx_event_table->event_xa_lock); in devx_obj_cleanup()
1435 u32 obj_id = mcq->cqn; in devx_cq_comp()
1437 table = &obj->ib_dev->devx_event_table; in devx_cq_comp()
1439 event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP); in devx_cq_comp()
1443 obj_event = xa_load(&event->object_ids, obj_id); in devx_cq_comp()
1447 dispatch_event_fd(&obj_event->obj_sub_list, eqe); in devx_cq_comp()
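
devx_cq_comp() above is one instance of the file's two-level dispatch: the outer xarray is keyed by event type (combined with the object type where relevant, as on line 2531), the inner one by object ID. Condensed sketch with names from this file, RCU and error paths elided:

    event = xa_load(&table->event_xa, event_type | (obj_type << 16));
    if (!event)
            return;                 /* no subscribers for this event type */
    obj_event = xa_load(&event->object_ids, obj_id);
    if (obj_event)
            dispatch_event_fd(&obj_event->obj_sub_list, data);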
1454 if (!MLX5_CAP_GEN(dev->mdev, apu) || in is_apu_cq()
1473 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1474 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1484 return -EINVAL; in UVERBS_HANDLER()
1491 return -EINVAL; in UVERBS_HANDLER()
1499 return -ENOMEM; in UVERBS_HANDLER()
1511 obj->flags |= DEVX_OBJ_FLAGS_DCT; in UVERBS_HANDLER()
1512 err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in, in UVERBS_HANDLER()
1516 obj->flags |= DEVX_OBJ_FLAGS_CQ; in UVERBS_HANDLER()
1517 obj->core_cq.comp = devx_cq_comp; in UVERBS_HANDLER()
1518 err = mlx5_create_cq(dev->mdev, &obj->core_cq, in UVERBS_HANDLER()
1522 err = mlx5_cmd_do(dev->mdev, cmd_in, cmd_in_len, in UVERBS_HANDLER()
1526 if (err == -EREMOTEIO) in UVERBS_HANDLER()
1544 obj->flow_counter_bulk_size = bulk; in UVERBS_HANDLER()
1547 uobj->object = obj; in UVERBS_HANDLER()
1548 INIT_LIST_HEAD(&obj->event_sub); in UVERBS_HANDLER()
1549 obj->ib_dev = dev; in UVERBS_HANDLER()
1550 devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, in UVERBS_HANDLER()
1552 WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32)); in UVERBS_HANDLER()
1560 obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id); in UVERBS_HANDLER()
1562 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { in UVERBS_HANDLER()
1570 if (obj->flags & DEVX_OBJ_FLAGS_DCT) in UVERBS_HANDLER()
1571 mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); in UVERBS_HANDLER()
1572 else if (obj->flags & DEVX_OBJ_FLAGS_CQ) in UVERBS_HANDLER()
1573 mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq); in UVERBS_HANDLER()
1575 mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out, in UVERBS_HANDLER()
1591 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1592 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1598 return -EINVAL; in UVERBS_HANDLER()
1605 return -EINVAL; in UVERBS_HANDLER()
1608 return -EINVAL; in UVERBS_HANDLER()
1617 err = mlx5_cmd_do(mdev->mdev, cmd_in, in UVERBS_HANDLER()
1620 if (err && err != -EREMOTEIO) in UVERBS_HANDLER()
1638 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1642 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1645 return -EINVAL; in UVERBS_HANDLER()
1652 return -EINVAL; in UVERBS_HANDLER()
1655 return -EINVAL; in UVERBS_HANDLER()
1662 err = mlx5_cmd_do(mdev->mdev, cmd_in, in UVERBS_HANDLER()
1665 if (err && err != -EREMOTEIO) in UVERBS_HANDLER()
1690 spin_lock_init(&ev_queue->lock); in devx_init_event_queue()
1691 INIT_LIST_HEAD(&ev_queue->event_list); in devx_init_event_queue()
1692 init_waitqueue_head(&ev_queue->poll_wait); in devx_init_event_queue()
1693 atomic_set(&ev_queue->bytes_in_use, 0); in devx_init_event_queue()
1694 ev_queue->is_destroyed = 0; in devx_init_event_queue()
1704 struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata); in UVERBS_HANDLER()
1708 devx_init_event_queue(&ev_file->ev_queue); in UVERBS_HANDLER()
1709 mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx); in UVERBS_HANDLER()
1720 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1721 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1734 spin_lock_init(&ev_file->lock); in UVERBS_HANDLER()
1735 INIT_LIST_HEAD(&ev_file->event_list); in UVERBS_HANDLER()
1736 init_waitqueue_head(&ev_file->poll_wait); in UVERBS_HANDLER()
1738 ev_file->omit_data = 1; in UVERBS_HANDLER()
1739 INIT_LIST_HEAD(&ev_file->subscribed_events_list); in UVERBS_HANDLER()
1740 ev_file->dev = dev; in UVERBS_HANDLER()
1741 get_device(&dev->ib_dev.dev); in UVERBS_HANDLER()
1749 struct devx_async_cmd_event_file *ev_file = async_data->ev_file; in devx_query_callback()
1750 struct devx_async_event_queue *ev_queue = &ev_file->ev_queue; in devx_query_callback()
1758 spin_lock_irqsave(&ev_queue->lock, flags); in devx_query_callback()
1759 list_add_tail(&async_data->list, &ev_queue->event_list); in devx_query_callback()
1760 spin_unlock_irqrestore(&ev_queue->lock, flags); in devx_query_callback()
1762 wake_up_interruptible(&ev_queue->poll_wait); in devx_query_callback()
1777 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1781 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1786 return -EINVAL; in UVERBS_HANDLER()
1793 return -EINVAL; in UVERBS_HANDLER()
1801 return -EINVAL; in UVERBS_HANDLER()
1811 if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) > in UVERBS_HANDLER()
1813 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use); in UVERBS_HANDLER()
1814 return -EAGAIN; in UVERBS_HANDLER()
1820 err = -ENOMEM; in UVERBS_HANDLER()
1824 err = uverbs_copy_from(&async_data->hdr.wr_id, attrs, in UVERBS_HANDLER()
1829 async_data->cmd_out_len = cmd_out_len; in UVERBS_HANDLER()
1830 async_data->mdev = mdev; in UVERBS_HANDLER()
1831 async_data->ev_file = ev_file; in UVERBS_HANDLER()
1834 err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in, in UVERBS_HANDLER()
1837 async_data->hdr.out_data, in UVERBS_HANDLER()
1838 async_data->cmd_out_len, in UVERBS_HANDLER()
1839 devx_query_callback, &async_data->cb_work); in UVERBS_HANDLER()
1849 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use); in UVERBS_HANDLER()
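
Lines 1811-1814 and 1849 show the output-buffer budgeting for async queries: the handler charges cmd_out_len against a per-queue budget up front and rolls it back on overcommit or on any later failure; the read side (line 2647) releases the charge once the event is consumed. Minimal restatement; the cap's name is assumed, since its line is not among the matches:

    if (atomic_add_return(cmd_out_len, &ev_queue->bytes_in_use) >
        MAX_ASYNC_BYTES_IN_USE /* assumed constant */) {
            atomic_sub(cmd_out_len, &ev_queue->bytes_in_use);  /* roll back */
            return -EAGAIN;
    }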
1866 event = xa_load(&devx_event_table->event_xa, key_level1); in subscribe_event_xa_dealloc()
1869 xa_val_level2 = xa_load(&event->object_ids, in subscribe_event_xa_dealloc()
1871 if (list_empty(&xa_val_level2->obj_sub_list)) { in subscribe_event_xa_dealloc()
1872 xa_erase(&event->object_ids, in subscribe_event_xa_dealloc()
1888 event = xa_load(&devx_event_table->event_xa, key_level1); in subscribe_event_xa_alloc()
1892 return -ENOMEM; in subscribe_event_xa_alloc()
1894 INIT_LIST_HEAD(&event->unaffiliated_list); in subscribe_event_xa_alloc()
1895 xa_init(&event->object_ids); in subscribe_event_xa_alloc()
1897 err = xa_insert(&devx_event_table->event_xa, in subscribe_event_xa_alloc()
1910 obj_event = xa_load(&event->object_ids, key_level2); in subscribe_event_xa_alloc()
1915 return -ENOMEM; in subscribe_event_xa_alloc()
1917 err = xa_insert(&event->object_ids, in subscribe_event_xa_alloc()
1925 INIT_LIST_HEAD(&obj_event->obj_sub_list); in subscribe_event_xa_alloc()
2005 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
2006 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
2010 struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table; in UVERBS_HANDLER()
2023 if (!c->devx_uid) in UVERBS_HANDLER()
2024 return -EINVAL; in UVERBS_HANDLER()
2027 obj = (struct devx_obj *)devx_uobj->object; in UVERBS_HANDLER()
2029 obj_id = get_dec_obj_id(obj->obj_id); in UVERBS_HANDLER()
2053 return -EINVAL; in UVERBS_HANDLER()
2069 return -EINVAL; in UVERBS_HANDLER()
2074 if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj)) in UVERBS_HANDLER()
2075 return -EINVAL; in UVERBS_HANDLER()
2082 mutex_lock(&devx_event_table->event_xa_lock); in UVERBS_HANDLER()
2100 err = -ENOMEM; in UVERBS_HANDLER()
2104 list_add_tail(&event_sub->event_list, &sub_list); in UVERBS_HANDLER()
2105 uverbs_uobject_get(&ev_file->uobj); in UVERBS_HANDLER()
2107 event_sub->eventfd = in UVERBS_HANDLER()
2110 if (IS_ERR(event_sub->eventfd)) { in UVERBS_HANDLER()
2111 err = PTR_ERR(event_sub->eventfd); in UVERBS_HANDLER()
2112 event_sub->eventfd = NULL; in UVERBS_HANDLER()
2117 event_sub->cookie = cookie; in UVERBS_HANDLER()
2118 event_sub->ev_file = ev_file; in UVERBS_HANDLER()
2120 event_sub->xa_key_level1 = key_level1; in UVERBS_HANDLER()
2121 event_sub->xa_key_level2 = obj_id; in UVERBS_HANDLER()
2122 INIT_LIST_HEAD(&event_sub->obj_list); in UVERBS_HANDLER()
2133 list_del_init(&event_sub->event_list); in UVERBS_HANDLER()
2135 spin_lock_irq(&ev_file->lock); in UVERBS_HANDLER()
2136 list_add_tail_rcu(&event_sub->file_list, in UVERBS_HANDLER()
2137 &ev_file->subscribed_events_list); in UVERBS_HANDLER()
2138 spin_unlock_irq(&ev_file->lock); in UVERBS_HANDLER()
2140 event = xa_load(&devx_event_table->event_xa, in UVERBS_HANDLER()
2141 event_sub->xa_key_level1); in UVERBS_HANDLER()
2145 list_add_tail_rcu(&event_sub->xa_list, in UVERBS_HANDLER()
2146 &event->unaffiliated_list); in UVERBS_HANDLER()
2150 obj_event = xa_load(&event->object_ids, obj_id); in UVERBS_HANDLER()
2152 list_add_tail_rcu(&event_sub->xa_list, in UVERBS_HANDLER()
2153 &obj_event->obj_sub_list); in UVERBS_HANDLER()
2154 list_add_tail_rcu(&event_sub->obj_list, in UVERBS_HANDLER()
2155 &obj->event_sub); in UVERBS_HANDLER()
2158 mutex_unlock(&devx_event_table->event_xa_lock); in UVERBS_HANDLER()
2163 list_del(&event_sub->event_list); in UVERBS_HANDLER()
2166 event_sub->xa_key_level1, in UVERBS_HANDLER()
2170 if (event_sub->eventfd) in UVERBS_HANDLER()
2171 eventfd_ctx_put(event_sub->eventfd); in UVERBS_HANDLER()
2172 uverbs_uobject_put(&event_sub->ev_file->uobj); in UVERBS_HANDLER()
2176 mutex_unlock(&devx_event_table->event_xa_lock); in UVERBS_HANDLER()
2190 return -EFAULT; in devx_umem_get()
2192 err = ib_check_mr_access(&dev->ib_dev, access_flags); in devx_umem_get()
2203 return -EFAULT; in devx_umem_get()
2206 &dev->ib_dev, addr, size, dmabuf_fd, access_flags); in devx_umem_get()
2209 obj->umem = &umem_dmabuf->umem; in devx_umem_get()
2211 obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access_flags); in devx_umem_get()
2212 if (IS_ERR(obj->umem)) in devx_umem_get()
2213 return PTR_ERR(obj->umem); in devx_umem_get()
2226 pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length), in devx_umem_find_best_pgsize()
2243 (umem->length % page_size) != 0) && in devx_umem_find_best_pgsize()
2281 page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap); in devx_umem_reg_cmd_alloc()
2283 return -EINVAL; in devx_umem_reg_cmd_alloc()
2285 cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) + in devx_umem_reg_cmd_alloc()
2287 ib_umem_num_dma_blocks(obj->umem, page_size)); in devx_umem_reg_cmd_alloc()
2288 cmd->in = uverbs_zalloc(attrs, cmd->inlen); in devx_umem_reg_cmd_alloc()
2289 if (IS_ERR(cmd->in)) in devx_umem_reg_cmd_alloc()
2290 return PTR_ERR(cmd->in); in devx_umem_reg_cmd_alloc()
2292 umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem); in devx_umem_reg_cmd_alloc()
2295 MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM); in devx_umem_reg_cmd_alloc()
2297 ib_umem_num_dma_blocks(obj->umem, page_size)); in devx_umem_reg_cmd_alloc()
2299 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT); in devx_umem_reg_cmd_alloc()
2301 ib_umem_dma_offset(obj->umem, page_size)); in devx_umem_reg_cmd_alloc()
2303 if (mlx5_umem_needs_ats(dev, obj->umem, access)) in devx_umem_reg_cmd_alloc()
2306 mlx5_ib_populate_pas(obj->umem, page_size, mtt, in devx_umem_reg_cmd_alloc()
2307 (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) | in devx_umem_reg_cmd_alloc()
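
A worked example for the log_page_size computation at line 2299, assuming MLX5_ADAPTER_PAGE_SHIFT has its usual value of 12 (4 KiB device pages):

    /* field = order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT
     *   4 KiB  -> 12 - 12 = 0
     *   64 KiB -> 16 - 12 = 4
     *   2 MiB  -> 21 - 12 = 9
     * i.e. the umem block size is recorded as a shift relative to 4 KiB.
     */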
2321 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
2322 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
2326 if (!c->devx_uid) in UVERBS_HANDLER()
2327 return -EINVAL; in UVERBS_HANDLER()
2340 return -ENOMEM; in UVERBS_HANDLER()
2342 err = devx_umem_get(dev, &c->ibucontext, attrs, obj, access_flags); in UVERBS_HANDLER()
2350 MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid); in UVERBS_HANDLER()
2351 err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out, in UVERBS_HANDLER()
2356 obj->mdev = dev->mdev; in UVERBS_HANDLER()
2357 uobj->object = obj; in UVERBS_HANDLER()
2358 devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id); in UVERBS_HANDLER()
2366 ib_umem_release(obj->umem); in UVERBS_HANDLER()
2376 struct devx_umem *obj = uobject->object; in devx_umem_cleanup()
2380 err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); in devx_umem_cleanup()
2384 ib_umem_release(obj->umem); in devx_umem_cleanup()
2428 obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; in devx_get_obj_id_from_event()
2431 obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff; in devx_get_obj_id_from_event()
2435 obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; in devx_get_obj_id_from_event()
2438 obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; in devx_get_obj_id_from_event()
2441 obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id); in devx_get_obj_id_from_event()
2455 ev_file = event_sub->ev_file; in deliver_event()
2457 if (ev_file->omit_data) { in deliver_event()
2458 spin_lock_irqsave(&ev_file->lock, flags); in deliver_event()
2459 if (!list_empty(&event_sub->event_list) || in deliver_event()
2460 ev_file->is_destroyed) { in deliver_event()
2461 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2465 list_add_tail(&event_sub->event_list, &ev_file->event_list); in deliver_event()
2466 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2467 wake_up_interruptible(&ev_file->poll_wait); in deliver_event()
2474 spin_lock_irqsave(&ev_file->lock, flags); in deliver_event()
2475 ev_file->is_overflow_err = 1; in deliver_event()
2476 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2477 return -ENOMEM; in deliver_event()
2480 event_data->hdr.cookie = event_sub->cookie; in deliver_event()
2481 memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe)); in deliver_event()
2483 spin_lock_irqsave(&ev_file->lock, flags); in deliver_event()
2484 if (!ev_file->is_destroyed) in deliver_event()
2485 list_add_tail(&event_data->list, &ev_file->event_list); in deliver_event()
2488 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2489 wake_up_interruptible(&ev_file->poll_wait); in deliver_event()
2500 if (item->eventfd) in dispatch_event_fd()
2501 eventfd_signal(item->eventfd, 1); in dispatch_event_fd()
2525 is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type); in devx_event_notifier()
2531 event = xa_load(&table->event_xa, event_type | (obj_type << 16)); in devx_event_notifier()
2538 dispatch_event_fd(&event->unaffiliated_list, data); in devx_event_notifier()
2544 obj_event = xa_load(&event->object_ids, obj_id); in devx_event_notifier()
2550 dispatch_event_fd(&obj_event->obj_sub_list, data); in devx_event_notifier()
2558 struct mlx5_devx_event_table *table = &dev->devx_event_table; in mlx5_ib_devx_init()
2563 dev->devx_whitelist_uid = uid; in mlx5_ib_devx_init()
2564 xa_init(&table->event_xa); in mlx5_ib_devx_init()
2565 mutex_init(&table->event_xa_lock); in mlx5_ib_devx_init()
2566 MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY); in mlx5_ib_devx_init()
2567 mlx5_eq_notifier_register(dev->mdev, &table->devx_nb); in mlx5_ib_devx_init()
2575 struct mlx5_devx_event_table *table = &dev->devx_event_table; in mlx5_ib_devx_cleanup()
2576 struct devx_event_subscription *sub, *tmp; in mlx5_ib_devx_cleanup() local
2581 if (dev->devx_whitelist_uid) { in mlx5_ib_devx_cleanup()
2582 mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb); in mlx5_ib_devx_cleanup()
2583 mutex_lock(&dev->devx_event_table.event_xa_lock); in mlx5_ib_devx_cleanup()
2584 xa_for_each(&table->event_xa, id, entry) { in mlx5_ib_devx_cleanup()
2587 sub, tmp, &event->unaffiliated_list, xa_list) in mlx5_ib_devx_cleanup()
2588 devx_cleanup_subscription(dev, sub); in mlx5_ib_devx_cleanup()
2591 mutex_unlock(&dev->devx_event_table.event_xa_lock); in mlx5_ib_devx_cleanup()
2592 xa_destroy(&table->event_xa); in mlx5_ib_devx_cleanup()
2594 mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid); in mlx5_ib_devx_cleanup()
2601 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data; in devx_async_cmd_event_read()
2602 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; in devx_async_cmd_event_read()
2607 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2609 while (list_empty(&ev_queue->event_list)) { in devx_async_cmd_event_read()
2610 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2612 if (filp->f_flags & O_NONBLOCK) in devx_async_cmd_event_read()
2613 return -EAGAIN; in devx_async_cmd_event_read()
2616 ev_queue->poll_wait, in devx_async_cmd_event_read()
2617 (!list_empty(&ev_queue->event_list) || in devx_async_cmd_event_read()
2618 ev_queue->is_destroyed))) { in devx_async_cmd_event_read()
2619 return -ERESTARTSYS; in devx_async_cmd_event_read()
2622 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2623 if (ev_queue->is_destroyed) { in devx_async_cmd_event_read()
2624 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2625 return -EIO; in devx_async_cmd_event_read()
2629 event = list_entry(ev_queue->event_list.next, in devx_async_cmd_event_read()
2631 eventsz = event->cmd_out_len + in devx_async_cmd_event_read()
2635 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2636 return -ENOSPC; in devx_async_cmd_event_read()
2639 list_del(ev_queue->event_list.next); in devx_async_cmd_event_read()
2640 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2642 if (copy_to_user(buf, &event->hdr, eventsz)) in devx_async_cmd_event_read()
2643 ret = -EFAULT; in devx_async_cmd_event_read()
2647 atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use); in devx_async_cmd_event_read()
2655 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data; in devx_async_cmd_event_poll()
2656 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; in devx_async_cmd_event_poll()
2659 poll_wait(filp, &ev_queue->poll_wait, wait); in devx_async_cmd_event_poll()
2661 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_poll()
2662 if (ev_queue->is_destroyed) in devx_async_cmd_event_poll()
2664 else if (!list_empty(&ev_queue->event_list)) in devx_async_cmd_event_poll()
2666 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_poll()
2682 struct devx_async_event_file *ev_file = filp->private_data; in devx_async_event_read()
2690 omit_data = ev_file->omit_data; in devx_async_event_read()
2692 spin_lock_irq(&ev_file->lock); in devx_async_event_read()
2694 if (ev_file->is_overflow_err) { in devx_async_event_read()
2695 ev_file->is_overflow_err = 0; in devx_async_event_read()
2696 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2697 return -EOVERFLOW; in devx_async_event_read()
2701 while (list_empty(&ev_file->event_list)) { in devx_async_event_read()
2702 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2704 if (filp->f_flags & O_NONBLOCK) in devx_async_event_read()
2705 return -EAGAIN; in devx_async_event_read()
2707 if (wait_event_interruptible(ev_file->poll_wait, in devx_async_event_read()
2708 (!list_empty(&ev_file->event_list) || in devx_async_event_read()
2709 ev_file->is_destroyed))) { in devx_async_event_read()
2710 return -ERESTARTSYS; in devx_async_event_read()
2713 spin_lock_irq(&ev_file->lock); in devx_async_event_read()
2714 if (ev_file->is_destroyed) { in devx_async_event_read()
2715 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2716 return -EIO; in devx_async_event_read()
2721 event_sub = list_first_entry(&ev_file->event_list, in devx_async_event_read()
2724 eventsz = sizeof(event_sub->cookie); in devx_async_event_read()
2725 event_data = &event_sub->cookie; in devx_async_event_read()
2727 event = list_first_entry(&ev_file->event_list, in devx_async_event_read()
2731 event_data = &event->hdr; in devx_async_event_read()
2735 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2736 return -EINVAL; in devx_async_event_read()
2740 list_del_init(&event_sub->event_list); in devx_async_event_read()
2742 list_del(&event->list); in devx_async_event_read()
2744 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2748 ret = -EFAULT; in devx_async_event_read()
2760 struct devx_async_event_file *ev_file = filp->private_data; in devx_async_event_poll()
2763 poll_wait(filp, &ev_file->poll_wait, wait); in devx_async_event_poll()
2765 spin_lock_irq(&ev_file->lock); in devx_async_event_poll()
2766 if (ev_file->is_destroyed) in devx_async_event_poll()
2768 else if (!list_empty(&ev_file->event_list)) in devx_async_event_poll()
2770 spin_unlock_irq(&ev_file->lock); in devx_async_event_poll()
2780 if (event_sub->eventfd) in devx_free_subscription()
2781 eventfd_ctx_put(event_sub->eventfd); in devx_free_subscription()
2782 uverbs_uobject_put(&event_sub->ev_file->uobj); in devx_free_subscription()
2800 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; in devx_async_cmd_event_destroy_uobj()
2803 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_destroy_uobj()
2804 ev_queue->is_destroyed = 1; in devx_async_cmd_event_destroy_uobj()
2805 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_destroy_uobj()
2806 wake_up_interruptible(&ev_queue->poll_wait); in devx_async_cmd_event_destroy_uobj()
2808 mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx); in devx_async_cmd_event_destroy_uobj()
2810 spin_lock_irq(&comp_ev_file->ev_queue.lock); in devx_async_cmd_event_destroy_uobj()
2812 &comp_ev_file->ev_queue.event_list, list) { in devx_async_cmd_event_destroy_uobj()
2813 list_del(&entry->list); in devx_async_cmd_event_destroy_uobj()
2816 spin_unlock_irq(&comp_ev_file->ev_queue.lock); in devx_async_cmd_event_destroy_uobj()
2826 struct mlx5_ib_dev *dev = ev_file->dev; in devx_async_event_destroy_uobj()
2828 spin_lock_irq(&ev_file->lock); in devx_async_event_destroy_uobj()
2829 ev_file->is_destroyed = 1; in devx_async_event_destroy_uobj()
2832 if (ev_file->omit_data) { in devx_async_event_destroy_uobj()
2835 list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list, in devx_async_event_destroy_uobj()
2837 list_del_init(&event_sub->event_list); in devx_async_event_destroy_uobj()
2842 list_for_each_entry_safe(entry, tmp, &ev_file->event_list, in devx_async_event_destroy_uobj()
2844 list_del(&entry->list); in devx_async_event_destroy_uobj()
2849 spin_unlock_irq(&ev_file->lock); in devx_async_event_destroy_uobj()
2850 wake_up_interruptible(&ev_file->poll_wait); in devx_async_event_destroy_uobj()
2852 mutex_lock(&dev->devx_event_table.event_xa_lock); in devx_async_event_destroy_uobj()
2855 &ev_file->subscribed_events_list, file_list) { in devx_async_event_destroy_uobj()
2857 list_del_rcu(&event_sub->file_list); in devx_async_event_destroy_uobj()
2859 call_rcu(&event_sub->rcu, devx_free_subscription); in devx_async_event_destroy_uobj()
2861 mutex_unlock(&dev->devx_event_table.event_xa_lock); in devx_async_event_destroy_uobj()
2863 put_device(&dev->ib_dev.dev); in devx_async_event_destroy_uobj()
3079 return MLX5_CAP_GEN(dev->mdev, log_max_uctx); in devx_is_supported()