/openbmc/qemu/replay/

replay-input.c
  25   InputMultiTouchEvent *mtt;    in replay_save_input_event() local
  63   mtt = evt->u.mtt.data;    in replay_save_input_event()
  64   replay_put_dword(mtt->type);    in replay_save_input_event()
  65   replay_put_qword(mtt->slot);    in replay_save_input_event()
  66   replay_put_qword(mtt->tracking_id);    in replay_save_input_event()
  67   replay_put_dword(mtt->axis);    in replay_save_input_event()
  68   replay_put_qword(mtt->value);    in replay_save_input_event()
  85   InputMultiTouchEvent mtt;    in replay_read_input_event() local
  123  evt.u.mtt.data = &mtt;    in replay_read_input_event()
  124  evt.u.mtt.data->type = (InputMultiTouchType)replay_get_dword();    in replay_read_input_event()
  [all …]
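The replay-input.c hits show the field order used when a multi-touch event goes into the replay stream: type as a dword, then slot and tracking_id as qwords, axis as a dword, and value as a qword. Below is a small standalone sketch of that layout; the struct, the writer stubs, and save_mtt_event() are stand-ins for illustration, not QEMU's actual replay_put_dword()/replay_put_qword() implementations.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for QEMU's replay_put_dword()/replay_put_qword(). */
static void put_dword(uint32_t v) { printf("dword %u\n", v); }
static void put_qword(uint64_t v) { printf("qword %llu\n", (unsigned long long)v); }

/* Illustrative struct mirroring the fields touched in replay_save_input_event(). */
struct mtt_event {
	uint32_t type;
	int64_t  slot;
	int64_t  tracking_id;
	uint32_t axis;
	int64_t  value;
};

/* Same field order as lines 64-68 of replay-input.c above. */
static void save_mtt_event(const struct mtt_event *mtt)
{
	put_dword(mtt->type);
	put_qword(mtt->slot);
	put_qword(mtt->tracking_id);
	put_dword(mtt->axis);
	put_qword(mtt->value);
}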
/openbmc/linux/drivers/infiniband/hw/mthca/

mthca_mr.c
  209  struct mthca_mtt *mtt;    in __mthca_alloc_mtt() local
  215  mtt = kmalloc(sizeof *mtt, GFP_KERNEL);    in __mthca_alloc_mtt()
  216  if (!mtt)    in __mthca_alloc_mtt()
  219  mtt->buddy = buddy;    in __mthca_alloc_mtt()
  220  mtt->order = 0;    in __mthca_alloc_mtt()
  222  ++mtt->order;    in __mthca_alloc_mtt()
  224  mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);    in __mthca_alloc_mtt()
  225  if (mtt->first_seg == -1) {    in __mthca_alloc_mtt()
  226  kfree(mtt);    in __mthca_alloc_mtt()
  230  return mtt;    in __mthca_alloc_mtt()
  [all …]

mthca_provider.c
  863  mr->mtt = mthca_alloc_mtt(dev, n);    in mthca_reg_user_mr()
  864  if (IS_ERR(mr->mtt)) {    in mthca_reg_user_mr()
  865  err = PTR_ERR(mr->mtt);    in mthca_reg_user_mr()
  887  err = mthca_write_mtt(dev, mr->mtt, n, pages, i);    in mthca_reg_user_mr()
  896  err = mthca_write_mtt(dev, mr->mtt, n, pages, i);    in mthca_reg_user_mr()
  911  mthca_free_mtt(dev, mr->mtt);    in mthca_reg_user_mr()

mthca_provider.h
  76   struct mthca_mtt *mtt;    member

mthca_dev.h
  467  void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt);
  468  int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
/openbmc/linux/drivers/infiniband/hw/erdma/

erdma_verbs.c
  25   struct erdma_mtt *mtt = mem->mtt;    in assemble_qbuf_mtt_for_cmd() local
  28   *addr0 = mtt->buf_dma;    in assemble_qbuf_mtt_for_cmd()
  32   *addr0 = mtt->buf[0];    in assemble_qbuf_mtt_for_cmd()
  33   memcpy(addr1, mtt->buf + 1, MTT_SIZE(mem->mtt_nents - 1));    in assemble_qbuf_mtt_for_cmd()
  143  if (mr->mem.mtt->continuous) {    in regmr_cmd()
  144  req.phy_addr[0] = mr->mem.mtt->buf_dma;    in regmr_cmd()
  147  req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist);    in regmr_cmd()
  148  mtt_level = mr->mem.mtt->level;    in regmr_cmd()
  151  memcpy(req.phy_addr, mr->mem.mtt->buf,    in regmr_cmd()
  174  if (!mr->mem.mtt->continuous && mr->mem.mtt->level > 1) {    in regmr_cmd()
  [all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlx4/

mr.c
  195  struct mlx4_mtt *mtt)    in mlx4_mtt_init() argument
  200  mtt->order = -1;    in mlx4_mtt_init()
  201  mtt->page_shift = MLX4_ICM_PAGE_SHIFT;    in mlx4_mtt_init()
  204  mtt->page_shift = page_shift;    in mlx4_mtt_init()
  206  for (mtt->order = 0, i = 1; i < npages; i <<= 1)    in mlx4_mtt_init()
  207  ++mtt->order;    in mlx4_mtt_init()
  209  mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);    in mlx4_mtt_init()
  210  if (mtt->offset == -1)    in mlx4_mtt_init()
  251  void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)    in mlx4_mtt_cleanup() argument
  253  if (mtt->order < 0)    in mlx4_mtt_cleanup()
  [all …]
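The mlx4_mtt_init() hits above (lines 206-207) show how the MTT order is derived: npages is rounded up to the next power of two and the number of doublings is counted, i.e. ceil(log2(npages)). A minimal standalone sketch of that computation follows; mtt_order() is a hypothetical helper and the example values are made up for illustration.

#include <stdio.h>

/* Hypothetical helper: the same ceil(log2(npages)) loop that
 * mlx4_mtt_init() uses to size the buddy allocation for an MTT range. */
static int mtt_order(int npages)
{
	int order = 0;
	int i;

	for (i = 1; i < npages; i <<= 1)
		++order;

	return order;
}

int main(void)
{
	/* 1000 pages round up to 1024 = 2^10, so the order is 10. */
	printf("order for 1000 pages: %d\n", mtt_order(1000));
	/* A single page fits in an order-0 block. */
	printf("order for 1 page: %d\n", mtt_order(1));
	return 0;
}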
resource_tracker.c
  110   struct res_mtt *mtt;    member
  158   struct res_mtt *mtt;    member
  170   struct res_mtt *mtt;    member
  181   struct res_mtt *mtt;    member
  193   struct res_mtt *mtt;    member
  456   dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;    in mlx4_init_quotas()
  468   dev->quotas.mtt =    in mlx4_init_quotas()
  2745  int size, struct res_mtt *mtt)    in check_mtt_range() argument
  2747  int res_start = mtt->com.res_id;    in check_mtt_range()
  2748  int res_size = (1 << mtt->order);    in check_mtt_range()
  [all …]

cq.c
  192  int entries, struct mlx4_mtt *mtt)    in mlx4_cq_resize() argument
  205  cq_context->log_page_size = mtt->page_shift - 12;    in mlx4_cq_resize()
  206  mtt_addr = mlx4_mtt_addr(dev, mtt);    in mlx4_cq_resize()
  343  struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,    in mlx4_cq_alloc() argument
  385  cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;    in mlx4_cq_alloc()
  387  mtt_addr = mlx4_mtt_addr(dev, mtt);    in mlx4_cq_alloc()

srq.c
  163  struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)    in mlx4_srq_alloc() argument
  193  srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;    in mlx4_srq_alloc()
  195  mtt_addr = mlx4_mtt_addr(dev, mtt);    in mlx4_srq_alloc()

qp.c
  92   static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,    in __mlx4_qp_modify() argument
  169  u64 mtt_addr = mlx4_mtt_addr(dev, mtt);    in __mlx4_qp_modify()
  172  context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;    in __mlx4_qp_modify()
  213  int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,    in mlx4_qp_modify() argument
  219  return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,    in mlx4_qp_modify()
  919  int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,    in mlx4_qp_to_ready() argument
  937  err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],    in mlx4_qp_to_ready()

alloc.c
  790  &wqres->mtt);    in mlx4_alloc_hwq_res()
  794  err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);    in mlx4_alloc_hwq_res()
  801  mlx4_mtt_cleanup(dev, &wqres->mtt);    in mlx4_alloc_hwq_res()
  814  mlx4_mtt_cleanup(dev, &wqres->mtt);    in mlx4_free_hwq_res()
/openbmc/qemu/ui/

input.c
  184  InputMultiTouchEvent *mtt;    in qemu_input_event_trace() local
  224  mtt = evt->u.mtt.data;    in qemu_input_event_trace()
  225  name = InputAxis_str(mtt->axis);    in qemu_input_event_trace()
  226  trace_input_event_mtt(idx, name, mtt->value);    in qemu_input_event_trace()
  517  InputMultiTouchEvent mtt = {    in qemu_input_queue_mtt() local
  524  .u.mtt.data = &mtt,    in qemu_input_queue_mtt()
  533  InputMultiTouchEvent mtt = {    in qemu_input_queue_mtt_abs() local
  544  .u.mtt.data = &mtt,    in qemu_input_queue_mtt_abs()
/openbmc/linux/drivers/infiniband/hw/mlx4/

srq.c
  124  PAGE_SHIFT, &srq->mtt);    in mlx4_ib_create_srq()
  128  err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);    in mlx4_ib_create_srq()
  164  &srq->mtt);    in mlx4_ib_create_srq()
  168  err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);    in mlx4_ib_create_srq()
  186  &srq->mtt, srq->db.dma, &srq->msrq);    in mlx4_ib_create_srq()
  210  mlx4_mtt_cleanup(dev->dev, &srq->mtt);    in mlx4_ib_create_srq()
  274  mlx4_mtt_cleanup(dev->dev, &msrq->mtt);    in mlx4_ib_destroy_srq()

cq.c
  113  &buf->mtt);    in mlx4_ib_alloc_cq_buf()
  117  err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);    in mlx4_ib_alloc_cq_buf()
  124  mlx4_mtt_cleanup(dev->dev, &buf->mtt);    in mlx4_ib_alloc_cq_buf()
  153  err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);    in mlx4_ib_get_cq_umem()
  158  err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);    in mlx4_ib_get_cq_umem()
  165  mlx4_mtt_cleanup(dev->dev, &buf->mtt);    in mlx4_ib_get_cq_umem()
  247  err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,    in mlx4_ib_create_cq()
  277  mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);    in mlx4_ib_create_cq()
  383  struct mlx4_mtt mtt;    in mlx4_ib_resize_cq() local
  421  mtt = cq->buf.mtt;    in mlx4_ib_resize_cq()
  [all …]

mr.c
  95   struct mlx4_mtt *mtt,    in mlx4_ib_umem_write_mtt_block() argument
  134  err = mlx4_write_mtt(dev->dev, mtt, *start_index,    in mlx4_ib_umem_write_mtt_block()
  182  int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,    in mlx4_ib_umem_write_mtt() argument
  200  mtt_shift = mtt->page_shift;    in mlx4_ib_umem_write_mtt()
  214  err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,    in mlx4_ib_umem_write_mtt()
  232  err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,    in mlx4_ib_umem_write_mtt()
  241  err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);    in mlx4_ib_umem_write_mtt()
  433  err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);    in mlx4_ib_reg_user_mr()
  526  err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);    in mlx4_ib_rereg_user_mr()

mlx4_ib.h
  107  struct mlx4_mtt mtt;    member
  342  struct mlx4_mtt mtt;    member
  385  struct mlx4_mtt mtt;    member
  755  int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
/openbmc/qemu/hw/input/

virtio-input-hid.c
  91   InputMultiTouchEvent *mtt;    in virtio_input_handle_event() local
  149  mtt = evt->u.mtt.data;    in virtio_input_handle_event()
  150  if (mtt->type == INPUT_MULTI_TOUCH_TYPE_DATA) {    in virtio_input_handle_event()
  152  event.code = cpu_to_le16(axismap_tch[mtt->axis]);    in virtio_input_handle_event()
  153  event.value = cpu_to_le32(mtt->value);    in virtio_input_handle_event()
  158  event.value = cpu_to_le32(mtt->slot);    in virtio_input_handle_event()
  162  event.value = cpu_to_le32(mtt->tracking_id);    in virtio_input_handle_event()
/openbmc/linux/drivers/infiniband/hw/mlx5/

umr.c
  619  struct mlx5_mtt *mtt;    in mlx5r_umr_update_mr_pas() local
  628  mtt = mlx5r_umr_create_xlt(    in mlx5r_umr_update_mr_pas()
  630  sizeof(*mtt), flags);    in mlx5r_umr_update_mr_pas()
  631  if (!mtt)    in mlx5r_umr_update_mr_pas()
  641  cur_mtt = mtt;    in mlx5r_umr_update_mr_pas()
  643  if (cur_mtt == (void *)mtt + sg.length) {    in mlx5r_umr_update_mr_pas()
  656  cur_mtt = mtt;    in mlx5r_umr_update_mr_pas()
  669  final_size = (void *)cur_mtt - (void *)mtt;    in mlx5r_umr_update_mr_pas()
  679  mlx5r_umr_unmap_free_xlt(dev, mtt, &sg);    in mlx5r_umr_update_mr_pas()

odp.c
  133  struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);    in populate_klm() local
  136  if (mtt) {    in populate_klm()
  137  pklm->key = cpu_to_be32(mtt->ibmr.lkey);    in populate_klm()
  536  struct mlx5_ib_mr *mtt;    in mlx5_ib_free_odp_mr() local
  543  xa_for_each(&mr->implicit_children, idx, mtt) {    in mlx5_ib_free_odp_mr()
  545  mlx5_ib_dereg_mr(&mtt->ibmr, NULL);    in mlx5_ib_free_odp_mr()
  623  struct mlx5_ib_mr *mtt;    in pagefault_implicit_mr() local
  627  mtt = xa_load(&imr->implicit_children, idx);    in pagefault_implicit_mr()
  628  if (unlikely(!mtt)) {    in pagefault_implicit_mr()
  630  mtt = implicit_get_child_mr(imr, idx);    in pagefault_implicit_mr()
  [all …]
/openbmc/linux/include/linux/mlx4/

device.h
  684   struct mlx4_mtt mtt;    member
  689   struct mlx4_mtt mtt;    member
  835   int mtt;    member
  1121  struct mlx4_mtt *mtt);
  1122  void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
  1123  u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
  1133  int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
  1135  int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
  1146  int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
  1159  struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq);
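The device.h prototypes above, together with the mlx4_alloc_hwq_res()/mlx4_free_hwq_res() hits in alloc.c, outline the usual MTT lifecycle: reserve a range with mlx4_mtt_init(), point it at a buffer's pages with mlx4_buf_write_mtt() (or mlx4_write_mtt() for an explicit page list), feed mlx4_mtt_addr() into the CQ/SRQ/QP context, and release it with mlx4_mtt_cleanup(). Below is a minimal sketch of that sequence, assuming a struct mlx4_buf that has already been allocated; the function name example_map_buf_mtt() is made up for illustration and error handling is trimmed to the essentials.

#include <linux/mlx4/device.h>

/* Hedged sketch: map an already-allocated mlx4_buf through an MTT range,
 * mirroring the init -> write -> cleanup order seen in mlx4_alloc_hwq_res(). */
static int example_map_buf_mtt(struct mlx4_dev *dev, struct mlx4_buf *buf,
			       struct mlx4_mtt *mtt)
{
	int err;

	/* Reserve enough MTT entries for buf->npages pages of buf->page_shift. */
	err = mlx4_mtt_init(dev, buf->npages, buf->page_shift, mtt);
	if (err)
		return err;

	/* Write the buffer's DMA addresses into the reserved entries. */
	err = mlx4_buf_write_mtt(dev, mtt, buf);
	if (err) {
		mlx4_mtt_cleanup(dev, mtt);
		return err;
	}

	/* mlx4_mtt_addr(dev, mtt) now yields the address to place in the
	 * CQ/SRQ/QP hardware context (see cq.c, srq.c and qp.c above). */
	return 0;
}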
qp.h
  478  int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
  486  int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,

cq.h
  185  int entries, struct mlx4_mtt *mtt);
/openbmc/linux/drivers/vdpa/mlx5/core/

mr.c
  35  static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)    in populate_mtts() argument
  48  mtt[j++] = cpu_to_be64(dma_addr);    in populate_mtts()
  59  inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);    in create_direct_mr()
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/diag/

fw_tracer.c
  181  __be64 *mtt;    in mlx5_fw_tracer_create_mkey() local
  186  sizeof(*mtt) * round_up(TRACER_BUFFER_PAGE_NUM, 2);    in mlx5_fw_tracer_create_mkey()
  194  mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);    in mlx5_fw_tracer_create_mkey()
  196  mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE);    in mlx5_fw_tracer_create_mkey()