
Searched refs:ucmd (Results 1 – 25 of 37) sorted by relevance


/openbmc/linux/drivers/iommu/iommufd/
ioas.c
37 int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd) in iommufd_ioas_alloc_ioctl() argument
39 struct iommu_ioas_alloc *cmd = ucmd->cmd; in iommufd_ioas_alloc_ioctl()
46 ioas = iommufd_ioas_alloc(ucmd->ictx); in iommufd_ioas_alloc_ioctl()
51 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); in iommufd_ioas_alloc_ioctl()
54 iommufd_object_finalize(ucmd->ictx, &ioas->obj); in iommufd_ioas_alloc_ioctl()
58 iommufd_object_abort_and_destroy(ucmd->ictx, &ioas->obj); in iommufd_ioas_alloc_ioctl()
62 int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd) in iommufd_ioas_iova_ranges() argument
65 struct iommu_ioas_iova_ranges *cmd = ucmd->cmd; in iommufd_ioas_iova_ranges()
74 ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id); in iommufd_ioas_iova_ranges()
101 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); in iommufd_ioas_iova_ranges()
[all …]
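The ioas.c hits above are the kernel half of IOMMU_IOAS_ALLOC: the handler reads ucmd->cmd and answers through iommufd_ucmd_respond(). For context, a minimal userspace sketch of the calling side follows, assuming the iommufd UAPI from <linux/iommufd.h> and a /dev/iommu node on a kernel built with iommufd; it is illustrative, not taken from the tree.

/* Minimal userspace sketch: exercise the IOMMU_IOAS_ALLOC path handled by
 * iommufd_ioas_alloc_ioctl() above. Assumes <linux/iommufd.h> and /dev/iommu
 * are available; not tested against any particular kernel version. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/iommufd.h>

int main(void)
{
        struct iommu_ioas_alloc cmd;
        int fd = open("/dev/iommu", O_RDWR);

        if (fd < 0) {
                perror("open /dev/iommu");
                return 1;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.size = sizeof(cmd);  /* the driver reads this first field as the caller's struct size */

        /* On success the kernel fills out_ioas_id and copies the struct back
         * via iommufd_ucmd_respond(ucmd, sizeof(*cmd)). */
        if (ioctl(fd, IOMMU_IOAS_ALLOC, &cmd)) {
                perror("IOMMU_IOAS_ALLOC");
                close(fd);
                return 1;
        }

        printf("allocated IOAS id %u\n", cmd.out_ioas_id);
        close(fd);
        return 0;
}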
iommufd_private.h
103 static inline int iommufd_ucmd_respond(struct iommufd_ucmd *ucmd, in iommufd_ucmd_respond() argument
106 if (copy_to_user(ucmd->ubuffer, ucmd->cmd, in iommufd_ucmd_respond()
107 min_t(size_t, ucmd->user_size, cmd_len))) in iommufd_ucmd_respond()
213 int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd);
215 int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd);
216 int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd);
217 int iommufd_ioas_map(struct iommufd_ucmd *ucmd);
218 int iommufd_ioas_copy(struct iommufd_ucmd *ucmd);
219 int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd);
220 int iommufd_ioas_option(struct iommufd_ucmd *ucmd);
[all …]
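The iommufd_ucmd_respond() hit shows the copy-back is capped at min(user_size, cmd_len), so a caller whose struct is shorter than the kernel's never has bytes written past its buffer. A small userspace analogue of that behaviour follows; the struct and function names are invented for illustration and memcpy() stands in for copy_to_user().

/* Userspace analogue of iommufd_ucmd_respond(): write back at most
 * min(user_size, cmd_len) bytes, so an older caller with a shorter struct
 * never sees memory past its buffer overwritten. */
#include <stdio.h>
#include <string.h>

struct old_abi { unsigned int size; unsigned int out_id; };                        /* what the caller knows */
struct new_abi { unsigned int size; unsigned int out_id; unsigned int extra; };    /* what the kernel knows */

static void respond(void *ubuffer, size_t user_size, const void *cmd, size_t cmd_len)
{
        size_t n = user_size < cmd_len ? user_size : cmd_len;  /* the min_t(size_t, ...) */

        memcpy(ubuffer, cmd, n);  /* stands in for copy_to_user() */
}

int main(void)
{
        struct new_abi kernel_view = { sizeof(struct new_abi), 42, 0xdead };
        struct old_abi caller_view = { sizeof(struct old_abi), 0 };

        respond(&caller_view, sizeof(caller_view), &kernel_view, sizeof(kernel_view));
        printf("out_id=%u (the extra field is never copied)\n", caller_view.out_id);
        return 0;
}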
selftest.c
87 void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd, in iommufd_test_syz_conv_iova_id() argument
96 ioas = iommufd_get_ioas(ucmd->ictx, ioas_id); in iommufd_test_syz_conv_iova_id()
340 get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, in get_md_pagetable() argument
346 obj = iommufd_get_object(ucmd->ictx, mockpt_id, in get_md_pagetable()
419 static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd, in iommufd_test_mock_domain() argument
428 sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST); in iommufd_test_mock_domain()
432 sobj->idev.ictx = ucmd->ictx; in iommufd_test_mock_domain()
441 idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev, in iommufd_test_mock_domain()
457 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); in iommufd_test_mock_domain()
460 iommufd_object_finalize(ucmd->ictx, &sobj->obj); in iommufd_test_mock_domain()
[all …]
main.c
207 static int iommufd_destroy(struct iommufd_ucmd *ucmd) in iommufd_destroy() argument
209 struct iommu_destroy *cmd = ucmd->cmd; in iommufd_destroy()
212 obj = iommufd_object_remove(ucmd->ictx, cmd->id, false); in iommufd_destroy()
280 static int iommufd_option(struct iommufd_ucmd *ucmd) in iommufd_option() argument
282 struct iommu_option *cmd = ucmd->cmd; in iommufd_option()
290 rc = iommufd_option_rlimit_mode(cmd, ucmd->ictx); in iommufd_option()
293 rc = iommufd_ioas_option(ucmd); in iommufd_option()
300 if (copy_to_user(&((struct iommu_option __user *)ucmd->ubuffer)->val64, in iommufd_option()
327 int (*execute)(struct iommufd_ucmd *ucmd);
371 struct iommufd_ucmd ucmd = {}; in iommufd_fops_ioctl() local
[all …]
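The main.c hits show the dispatch side: each ioctl maps to an execute callback that takes the already-copied-in struct iommufd_ucmd. The sketch below reproduces that table-driven pattern in plain C; the table layout, command numbers and handlers are illustrative, not the kernel's actual ones.

/* Sketch of the dispatch pattern suggested by main.c: a table maps each
 * ioctl number to a handler taking the copied-in command. Illustrative only. */
#include <stdio.h>

struct ucmd { void *cmd; size_t user_size; };

struct ioctl_op {
        unsigned int ioctl_num;
        size_t min_size;                        /* reject commands shorter than this */
        int (*execute)(struct ucmd *ucmd);      /* mirrors main.c's callback field */
};

static int do_destroy(struct ucmd *ucmd) { (void)ucmd; return 0; }
static int do_option(struct ucmd *ucmd)  { (void)ucmd; return 0; }

static const struct ioctl_op ops[] = {
        { 0x100, 8, do_destroy },
        { 0x101, 16, do_option },
};

static int dispatch(unsigned int nr, struct ucmd *ucmd)
{
        for (size_t i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
                if (ops[i].ioctl_num != nr)
                        continue;
                if (ucmd->user_size < ops[i].min_size)
                        return -1;      /* too short: -EINVAL in the kernel */
                return ops[i].execute(ucmd);
        }
        return -1;                      /* unknown ioctl */
}

int main(void)
{
        struct ucmd ucmd = { 0, 16 };
        printf("dispatch rc=%d\n", dispatch(0x101, &ucmd));
        return 0;
}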
hw_pagetable.c
136 int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) in iommufd_hwpt_alloc() argument
138 struct iommu_hwpt_alloc *cmd = ucmd->cmd; in iommufd_hwpt_alloc()
147 idev = iommufd_get_device(ucmd, cmd->dev_id); in iommufd_hwpt_alloc()
151 ioas = iommufd_get_ioas(ucmd->ictx, cmd->pt_id); in iommufd_hwpt_alloc()
158 hwpt = iommufd_hw_pagetable_alloc(ucmd->ictx, ioas, idev, false); in iommufd_hwpt_alloc()
165 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); in iommufd_hwpt_alloc()
168 iommufd_object_finalize(ucmd->ictx, &hwpt->obj); in iommufd_hwpt_alloc()
172 iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj); in iommufd_hwpt_alloc()
vfio_compat.c
123 int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd) in iommufd_vfio_ioas() argument
125 struct iommu_vfio_ioas *cmd = ucmd->cmd; in iommufd_vfio_ioas()
132 ioas = get_compat_ioas(ucmd->ictx); in iommufd_vfio_ioas()
137 return iommufd_ucmd_respond(ucmd, sizeof(*cmd)); in iommufd_vfio_ioas()
140 ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id); in iommufd_vfio_ioas()
143 xa_lock(&ucmd->ictx->objects); in iommufd_vfio_ioas()
144 ucmd->ictx->vfio_ioas = ioas; in iommufd_vfio_ioas()
145 xa_unlock(&ucmd->ictx->objects); in iommufd_vfio_ioas()
150 xa_lock(&ucmd->ictx->objects); in iommufd_vfio_ioas()
151 ucmd->ictx->vfio_ioas = NULL; in iommufd_vfio_ioas()
[all …]
/openbmc/linux/drivers/infiniband/hw/mana/
wq.c
14 struct mana_ib_create_wq ucmd = {}; in mana_ib_create_wq() local
19 if (udata->inlen < sizeof(ucmd)) in mana_ib_create_wq()
22 err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); in mana_ib_create_wq()
33 ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr); in mana_ib_create_wq()
35 umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size, in mana_ib_create_wq()
46 wq->wq_buf_size = ucmd.wq_buf_size; in mana_ib_create_wq()
qp.c
104 struct mana_ib_create_qp_rss ucmd = {}; in mana_ib_create_qp_rss() local
121 if (!udata || udata->inlen < sizeof(ucmd)) in mana_ib_create_qp_rss()
124 ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); in mana_ib_create_qp_rss()
154 if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) { in mana_ib_create_qp_rss()
157 ucmd.rx_hash_function); in mana_ib_create_qp_rss()
162 port = ucmd.port; in mana_ib_create_qp_rss()
172 ucmd.rx_hash_function, port); in mana_ib_create_qp_rss()
227 ucmd.rx_hash_key_len, in mana_ib_create_qp_rss()
228 ucmd.rx_hash_key); in mana_ib_create_qp_rss()
271 struct mana_ib_create_qp ucmd = {}; in mana_ib_create_qp_raw() local
[all …]
cq.c
13 struct mana_ib_create_cq ucmd = {}; in mana_ib_create_cq() local
19 if (udata->inlen < sizeof(ucmd)) in mana_ib_create_cq()
22 err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); in mana_ib_create_cq()
35 cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, in mana_ib_create_cq()
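The mana wq.c/qp.c/cq.c hits all follow the same copy-in idiom: zero-initialize the kernel-side ucmd, then ib_copy_from_udata() at most min(sizeof(ucmd), udata->inlen) bytes, so the copy never exceeds the kernel's struct and any field userspace did not supply stays zero. A userspace analogue, with invented names:

/* Analogue of the copy-in pattern above: zero-fill the kernel-side struct,
 * then copy only min(sizeof(ucmd), inlen) bytes from the user buffer. */
#include <stdio.h>
#include <string.h>

struct create_wq_ucmd {
        unsigned long long wq_buf_addr;
        unsigned long long wq_buf_size;
        unsigned int       new_flag;    /* field an older caller does not know about */
};

static void copy_from_udata(void *dst, const void *src, size_t inlen, size_t dst_size)
{
        memset(dst, 0, dst_size);                               /* the `ucmd = {}` initializer */
        memcpy(dst, src, inlen < dst_size ? inlen : dst_size);  /* capped at the kernel's struct */
}

int main(void)
{
        /* An "old" caller passes only the first two fields. */
        unsigned long long old_req[2] = { 0x1000, 4096 };
        struct create_wq_ucmd ucmd;

        copy_from_udata(&ucmd, old_req, sizeof(old_req), sizeof(ucmd));
        printf("addr=%#llx size=%llu new_flag=%u\n",
               ucmd.wq_buf_addr, ucmd.wq_buf_size, ucmd.new_flag);
        return 0;
}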
/openbmc/linux/drivers/infiniband/hw/mthca/
mthca_provider.c
395 struct mthca_create_srq ucmd; in mthca_create_srq() local
405 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) in mthca_create_srq()
409 context->db_tab, ucmd.db_index, in mthca_create_srq()
410 ucmd.db_page); in mthca_create_srq()
415 srq->mr.ibmr.lkey = ucmd.lkey; in mthca_create_srq()
416 srq->db_index = ucmd.db_index; in mthca_create_srq()
424 context->db_tab, ucmd.db_index); in mthca_create_srq()
460 struct mthca_create_qp ucmd; in mthca_create_qp() local
474 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) in mthca_create_qp()
479 ucmd.sq_db_index, in mthca_create_qp()
[all …]
/openbmc/linux/drivers/infiniband/hw/mlx5/
qp.c
434 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) in set_rq_size() argument
452 if (ucmd) { in set_rq_size()
453 qp->rq.wqe_cnt = ucmd->rq_wqe_count; in set_rq_size()
454 if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift)) in set_rq_size()
456 qp->rq.wqe_shift = ucmd->rq_wqe_shift; in set_rq_size()
638 struct mlx5_ib_create_qp *ucmd, in set_user_buf_size() argument
650 if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) { in set_user_buf_size()
652 ucmd in set_user_buf_size()
882 create_user_rq(struct mlx5_ib_dev * dev,struct ib_pd * pd,struct ib_udata * udata,struct mlx5_ib_rwq * rwq,struct mlx5_ib_create_wq * ucmd) create_user_rq() argument
946 _create_user_qp(struct mlx5_ib_dev * dev,struct ib_pd * pd,struct mlx5_ib_qp * qp,struct ib_udata * udata,struct ib_qp_init_attr * attr,u32 ** in,struct mlx5_ib_create_qp_resp * resp,int * inlen,struct mlx5_ib_qp_base * base,struct mlx5_ib_create_qp * ucmd) _create_user_qp() argument
1708 void *ucmd; global() member
1720 struct mlx5_ib_create_qp_rss *ucmd = params->ucmd; create_rss_raw_qp_tir() local
2073 struct mlx5_ib_create_qp *ucmd = params->ucmd; create_dci() local
2234 struct mlx5_ib_create_qp *ucmd = params->ucmd; create_user_qp() local
2734 struct mlx5_ib_create_qp *ucmd = params->ucmd; create_dct() local
2873 process_vendor_flags(struct mlx5_ib_dev * dev,struct mlx5_ib_qp * qp,void * ucmd,struct ib_qp_init_attr * attr) process_vendor_flags() argument
3046 size_t ucmd = sizeof(struct mlx5_ib_create_qp); process_udata_size() local
3177 struct mlx5_ib_create_qp *ucmd = params->ucmd; get_qp_uidx() local
4110 __mlx5_ib_modify_qp(struct ib_qp * ibqp,const struct ib_qp_attr * attr,int attr_mask,enum ib_qp_state cur_state,enum ib_qp_state new_state,const struct mlx5_ib_modify_qp * ucmd,struct mlx5_ib_modify_qp_resp * resp,struct ib_udata * udata) __mlx5_ib_modify_qp() argument
4496 mlx5_ib_modify_dct(struct ib_qp * ibqp,struct ib_qp_attr * attr,int attr_mask,struct mlx5_ib_modify_qp * ucmd,struct ib_udata * udata) mlx5_ib_modify_dct() argument
4664 struct mlx5_ib_modify_qp ucmd = {}; mlx5_ib_modify_qp() local
5315 set_user_rq_size(struct mlx5_ib_dev * dev,struct ib_wq_init_attr * wq_init_attr,struct mlx5_ib_create_wq * ucmd,struct mlx5_ib_rwq * rwq) set_user_rq_size() argument
5354 struct mlx5_ib_create_wq ucmd = {}; prepare_user_rq() local
5592 struct mlx5_ib_modify_wq ucmd = {}; mlx5_ib_modify_wq() local
[all …]
cq.c
719 struct mlx5_ib_create_cq ucmd = {}; in create_cq_user() local
730 ucmdlen = min(udata->inlen, sizeof(ucmd)); in create_cq_user()
734 if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) in create_cq_user()
737 if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD | in create_cq_user()
742 if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) || in create_cq_user()
743 ucmd.reserved0 || ucmd.reserved1) in create_cq_user()
746 *cqe_size = ucmd.cqe_size; in create_cq_user()
749 ib_umem_get(&dev->ib_dev, ucmd.buf_addr, in create_cq_user()
750 entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE); in create_cq_user()
764 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db); in create_cq_user()
[all …]
srq.c
48 struct mlx5_ib_create_srq ucmd = {}; in create_srq_user() local
55 ucmdlen = min(udata->inlen, sizeof(ucmd)); in create_srq_user()
57 if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) { in create_srq_user()
62 if (ucmd.reserved0 || ucmd.reserved1) in create_srq_user()
65 if (udata->inlen > sizeof(ucmd) && in create_srq_user()
66 !ib_is_udata_cleared(udata, sizeof(ucmd), in create_srq_user()
67 udata->inlen - sizeof(ucmd))) in create_srq_user()
71 err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx); in create_srq_user()
76 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); in create_srq_user()
78 srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0); in create_srq_user()
[all …]
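The mlx5 srq.c hit adds a second guard: if userspace passed more bytes than the kernel-side struct, ib_is_udata_cleared() requires the excess to be all zero, otherwise the request asks for something this kernel cannot honour. A plain-memory analogue of that check:

/* Analogue of the ib_is_udata_cleared() check in srq.c: every byte past the
 * size this kernel understands must be zero. Plain memory stands in for the
 * user buffer. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_udata_cleared(const unsigned char *udata, size_t offset, size_t len)
{
        for (size_t i = 0; i < len; i++)
                if (udata[offset + i] != 0)
                        return false;
        return true;
}

int main(void)
{
        unsigned char udata[32];
        size_t known = 24;      /* sizeof(ucmd) as this kernel knows it */

        memset(udata, 0, sizeof(udata));
        printf("clean tail: %d\n", is_udata_cleared(udata, known, sizeof(udata) - known));

        udata[28] = 1;          /* a flag this kernel cannot honour */
        printf("dirty tail: %d\n", is_udata_cleared(udata, known, sizeof(udata) - known));
        return 0;
}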
mlx5_ib.h
1549 struct mlx5_ib_create_qp *ucmd, in get_qp_user_index() argument
1555 if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version && in get_qp_user_index()
1556 (ucmd->uidx == MLX5_IB_DEFAULT_UIDX)) in get_qp_user_index()
1559 if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version) in get_qp_user_index()
1562 return verify_assign_uidx(cqe_version, ucmd->uidx, user_index); in get_qp_user_index()
1566 struct mlx5_ib_create_srq *ucmd, in get_srq_user_index() argument
1572 if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version && in get_srq_user_index()
1573 (ucmd->uidx == MLX5_IB_DEFAULT_UIDX)) in get_srq_user_index()
1576 if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version) in get_srq_user_index()
1579 return verify_assign_uidx(cqe_version, ucmd->uidx, user_index); in get_srq_user_index()
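get_qp_user_index() and get_srq_user_index() above decide whether the uidx field was supplied at all by comparing offsetofend(typeof(*ucmd), uidx) with the caller's inlen. offsetofend() is a kernel macro from include/linux/stddef.h, so the self-contained sketch below defines an equivalent locally; the struct layout is illustrative.

/* Sketch of the offsetofend()-vs-inlen test: a field only "exists" in the
 * request if userspace's inlen covers it. */
#include <stddef.h>
#include <stdio.h>

#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct create_qp_ucmd {
        unsigned long long buf_addr;
        unsigned int       rq_wqe_count;
        unsigned int       uidx;        /* appended in a later ABI revision */
};

static int field_supplied(size_t inlen)
{
        return offsetofend(struct create_qp_ucmd, uidx) <= inlen;
}

int main(void)
{
        printf("inlen=12 -> uidx supplied? %d\n", field_supplied(12));
        printf("inlen=%zu -> uidx supplied? %d\n",
               sizeof(struct create_qp_ucmd), field_supplied(sizeof(struct create_qp_ucmd)));
        return 0;
}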
counters.h
15 struct mlx5_ib_create_flow *ucmd);
/openbmc/linux/drivers/infiniband/hw/hns/
hns_roce_cq.c
299 struct hns_roce_ib_create_cq *ucmd) in get_cq_ucmd() argument
304 ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd))); in get_cq_ucmd()
314 struct hns_roce_ib_create_cq *ucmd) in set_cq_param() argument
330 struct hns_roce_ib_create_cq *ucmd) in set_cqe_size() argument
339 if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) { in set_cqe_size()
340 if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE && in set_cqe_size()
341 ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) { in set_cqe_size()
343 "invalid cqe size %u.\n", ucmd->cqe_size); in set_cqe_size()
347 hr_cq->cqe_size = ucmd->cqe_size; in set_cqe_size()
362 struct hns_roce_ib_create_cq ucmd = {}; in hns_roce_create_cq() local
[all …]
hns_roce_qp.c
613 struct hns_roce_ib_create_qp *ucmd) in check_sq_size_with_integrity() argument
619 if (ucmd->log_sq_stride > max_sq_stride || in check_sq_size_with_integrity()
620 ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { in check_sq_size_with_integrity()
636 struct hns_roce_ib_create_qp *ucmd) in set_user_sq_size() argument
642 if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || in set_user_sq_size()
646 ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); in set_user_sq_size()
655 hr_qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
802 struct hns_roce_ib_create_qp *ucmd) in user_qp_has_sdb() argument
807 udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr)); in user_qp_has_sdb()
860 struct hns_roce_ib_create_qp *ucmd, in alloc_user_qp_db() argument
[all …]
hns_roce_srq.c
345 struct hns_roce_ib_create_srq ucmd = {}; in alloc_srq_buf() local
349 ret = ib_copy_from_udata(&ucmd, udata, in alloc_srq_buf()
350 min(udata->inlen, sizeof(ucmd))); in alloc_srq_buf()
359 ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr); in alloc_srq_buf()
363 ret = alloc_srq_wqe_buf(hr_dev, srq, udata, ucmd.buf_addr); in alloc_srq_buf()
/openbmc/linux/drivers/infiniband/hw/mlx4/
qp.c
448 struct mlx4_ib_create_qp *ucmd) in set_user_sq_size() argument
453 if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || in set_user_sq_size()
456 if (ucmd->log_sq_stride > in set_user_sq_size()
458 ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE) in set_user_sq_size()
461 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; in set_user_sq_size()
462 qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
554 struct mlx4_ib_create_qp_rss *ucmd) in set_qp_rss() argument
559 if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) && in set_qp_rss()
561 memcpy(rss_ctx->rss_key, ucmd->rx_hash_key, in set_qp_rss()
568 if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 | in set_qp_rss()
[all …]
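set_user_sq_size() here (and the hns_roce_qp.c variant above) runs the user-supplied log2 count through check_shl_overflow() before using 1 << log_sq_bb_count as a queue size. check_shl_overflow() comes from <linux/overflow.h>; the stand-in below only covers the unsigned 32-bit case and uses invented values.

/* Approximation of the check_shl_overflow() guard: reject a user-supplied
 * log2 WQE count whose shift would overflow before trusting it as a size. */
#include <stdint.h>
#include <stdio.h>

static int shl_overflows_u32(uint32_t value, unsigned int shift, uint32_t *out)
{
        if (shift >= 32 || value > (UINT32_MAX >> shift))
                return 1;               /* would overflow: caller returns -EINVAL */
        *out = value << shift;
        return 0;
}

int main(void)
{
        uint32_t wqe_cnt;
        unsigned int log_sq_bb_count = 10;      /* as read from the user's ucmd */

        if (shl_overflows_u32(1, log_sq_bb_count, &wqe_cnt))
                printf("rejected: log_sq_bb_count=%u overflows\n", log_sq_bb_count);
        else
                printf("sq.wqe_cnt = %u\n", wqe_cnt);

        if (shl_overflows_u32(1, 40, &wqe_cnt))
                printf("rejected: log_sq_bb_count=40 overflows\n");
        return 0;
}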
srq.c
112 struct mlx4_ib_create_srq ucmd; in mlx4_ib_create_srq() local
114 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) in mlx4_ib_create_srq()
118 ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0); in mlx4_ib_create_srq()
132 err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db); in mlx4_ib_create_srq()
cq.c
205 struct mlx4_ib_create_cq ucmd; in mlx4_ib_create_cq() local
207 if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { in mlx4_ib_create_cq()
212 buf_addr = (void *)(unsigned long)ucmd.buf_addr; in mlx4_ib_create_cq()
214 ucmd.buf_addr, entries); in mlx4_ib_create_cq()
218 err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db); in mlx4_ib_create_cq()
317 struct mlx4_ib_resize_cq ucmd; in mlx4_alloc_resize_umem() local
323 if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) in mlx4_alloc_resize_umem()
331 ucmd.buf_addr, entries); in mlx4_alloc_resize_umem()
/openbmc/linux/drivers/dma-buf/
dma-heap.c
126 static long dma_heap_ioctl(struct file *file, unsigned int ucmd, in dma_heap_ioctl() argument
133 int nr = _IOC_NR(ucmd); in dma_heap_ioctl()
145 out_size = _IOC_SIZE(ucmd); in dma_heap_ioctl()
147 if ((ucmd & kcmd & IOC_IN) == 0) in dma_heap_ioctl()
149 if ((ucmd & kcmd & IOC_OUT) == 0) in dma_heap_ioctl()
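In dma-heap.c the ucmd parameter is the raw ioctl command word rather than a struct: the driver recovers the command index, payload size and direction bits from it with the _IOC_* macros and compares them against its own definition. The same decoding works from userspace with <linux/ioctl.h>; the command defined below is made up for the example.

/* Decode an ioctl command word the way dma_heap_ioctl() does. The example
 * command and payload struct are invented for illustration. */
#include <stdio.h>
#include <linux/ioctl.h>

struct example_payload { unsigned long long len; unsigned int fd_flags; };

#define EXAMPLE_IOC_ALLOC _IOWR('E', 0x0, struct example_payload)

int main(void)
{
        unsigned int ucmd = EXAMPLE_IOC_ALLOC;

        printf("nr=%u size=%u in=%d out=%d\n",
               _IOC_NR(ucmd),                           /* command index within the 'E' group */
               _IOC_SIZE(ucmd),                         /* sizeof(struct example_payload) */
               (_IOC_DIR(ucmd) & _IOC_WRITE) != 0,      /* userspace -> kernel payload */
               (_IOC_DIR(ucmd) & _IOC_READ) != 0);      /* kernel -> userspace payload */
        return 0;
}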
/openbmc/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_srq.c
109 struct pvrdma_create_srq ucmd; in pvrdma_create_srq() local
144 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { in pvrdma_create_srq()
149 srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); in pvrdma_create_srq()
pvrdma_cq.c
116 struct pvrdma_create_cq ucmd; in pvrdma_create_cq() local
136 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { in pvrdma_create_cq()
141 cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size, in pvrdma_create_cq()
/openbmc/linux/arch/mips/cavium-octeon/
octeon-platform.c
36 u32 ucmd; in octeon2_usb_reset() local
43 ucmd = cvmx_read64_uint32(CVMX_UAHCX_EHCI_USBCMD); in octeon2_usb_reset()
44 ucmd &= ~CMD_RUN; in octeon2_usb_reset()
45 cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd); in octeon2_usb_reset()
47 ucmd |= CMD_RESET; in octeon2_usb_reset()
48 cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd); in octeon2_usb_reset()
49 ucmd = cvmx_read64_uint32(CVMX_UAHCX_OHCI_USBCMD); in octeon2_usb_reset()
50 ucmd |= CMD_RUN; in octeon2_usb_reset()
51 cvmx_write64_uint32(CVMX_UAHCX_OHCI_USBCMD, ucmd); in octeon2_usb_reset()
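In octeon-platform.c, ucmd is simply a USB command register value being read, modified and written back: clear the EHCI run bit, assert reset, then start the OHCI controller. The sketch below performs the same bit manipulation on an ordinary variable; the bit positions match the usual EHCI layout but should be treated as illustrative, and real code goes through the MMIO accessors rather than plain loads and stores.

/* Read-modify-write sequence from octeon2_usb_reset(), on a plain value. */
#include <stdint.h>
#include <stdio.h>

#define CMD_RUN   (1u << 0)
#define CMD_RESET (1u << 1)

int main(void)
{
        uint32_t ucmd = CMD_RUN;        /* pretend value read from USBCMD */

        ucmd &= ~CMD_RUN;               /* stop the host controller */
        printf("after stop:  %#x\n", ucmd);

        ucmd |= CMD_RESET;              /* request a controller reset */
        printf("after reset: %#x\n", ucmd);

        ucmd |= CMD_RUN;                /* restart (the OHCI side in the driver) */
        printf("running:     %#x\n", ucmd);
        return 0;
}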
