
Searched refs:sq (Results 1 – 25 of 256) sorted by relevance


/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_tx.c 52 mlx5e_dma_get(sq, --sq->dma_fifo_pc); in mlx5e_dma_unmap_wqe_err()
348 if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) { in mlx5e_tx_check_stop()
363 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); in mlx5e_tx_flush()
370 wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); in mlx5e_tx_flush()
371 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl); in mlx5e_tx_flush()
423 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg); in mlx5e_txwqe_complete()
582 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); in mlx5e_tx_mpwqe_session_complete()
634 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); in mlx5e_sq_xmit_mpwqe()
639 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); in mlx5e_sq_xmit_mpwqe()
781 mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) && in mlx5e_txqsq_wake()
[all …]
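
The en_tx.c hits all exercise one idiom: a free-running producer counter (sq->pc) is mapped to a ring slot with mlx5_wq_cyc_ctr2ix(), room is checked against the consumer counter (sq->cc) before posting, and mlx5e_notify_hw() rings the doorbell. A minimal user-space sketch of that counter-to-index scheme, using illustrative names rather than the driver's types:

    /* Sketch of the free-running counter ring behind the mlx5e SQ code.
     * RING_SIZE must be a power of two so (ctr & (RING_SIZE - 1)) is a
     * cheap modulo; pc and cc wrap naturally as unsigned integers. */
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8
    #define CTR2IX(ctr) ((uint16_t)(ctr) & (RING_SIZE - 1))

    int main(void)
    {
        uint16_t pc = 0, cc = 0;                  /* producer/consumer counters */
        int ring[RING_SIZE];

        for (int i = 0; i < 20; i++) {
            if ((uint16_t)(pc - cc) == RING_SIZE) /* no room: would "stop" the queue */
                cc++;                             /* pretend a completion freed a slot */
            ring[CTR2IX(pc)] = i;                 /* pi = ctr2ix(pc), then post */
            pc++;                                 /* doorbell would follow */
        }
        printf("pc=%u cc=%u in-flight=%u\n", pc, cc, (uint16_t)(pc - cc));
        return 0;
    }

Because the ring size is a power of two the counters are never reset; unsigned wraparound keeps (pc - cc) equal to the number of in-flight entries.
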
en_txrx.c 58 net_dim(&sq->dim, dim_sample); in mlx5e_handle_tx_dim()
75 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_trigger_irq()
77 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); in mlx5e_trigger_irq()
84 nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); in mlx5e_trigger_irq()
85 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); in mlx5e_trigger_irq()
159 if (sq) in mlx5e_napi_poll()
220 mlx5e_handle_tx_dim(&c->sq[i]); in mlx5e_napi_poll()
221 mlx5e_cq_arm(&c->sq[i].cq); in mlx5e_napi_poll()
227 if (sq) { in mlx5e_napi_poll()
228 mlx5e_handle_tx_dim(sq); in mlx5e_napi_poll()
[all …]
/openbmc/linux/drivers/nvme/target/
fabrics-cmd-auth.c 21 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid); in nvmet_auth_expired_work()
23 sq->dhchap_tid = -1; in nvmet_auth_expired_work()
30 sq->authenticated = false; in nvmet_auth_sq_init()
165 req->sq->dhchap_c2); in nvmet_auth_reply()
242 if (!req->sq->qid) { in nvmet_execute_auth_send()
278 req->sq->dhchap_step = in nvmet_execute_auth_send()
330 req->sq->dhchap_status, req->sq->dhchap_step); in nvmet_execute_auth_send()
388 __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, in nvmet_auth_challenge()
475 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step); in nvmet_execute_auth_receive()
503 ctrl->cntlid, req->sq->qid, req->sq->dhchap_status); in nvmet_execute_auth_receive()
[all …]
auth.c 225 kfree(sq->dhchap_c1); in nvmet_auth_sq_free()
226 sq->dhchap_c1 = NULL; in nvmet_auth_sq_free()
227 kfree(sq->dhchap_c2); in nvmet_auth_sq_free()
228 sq->dhchap_c2 = NULL; in nvmet_auth_sq_free()
229 kfree(sq->dhchap_skey); in nvmet_auth_sq_free()
230 sq->dhchap_skey = NULL; in nvmet_auth_sq_free()
321 ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, in nvmet_auth_host_hash()
322 req->sq->dhchap_tid); in nvmet_auth_host_hash()
513 req->sq->dhchap_skey = kzalloc(req->sq->dhchap_skey_len, GFP_KERNEL); in nvmet_auth_ctrl_sesskey()
514 if (!req->sq->dhchap_skey) in nvmet_auth_ctrl_sesskey()
[all …]
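
Every kfree() in nvmet_auth_sq_free() is paired with a NULL store, so re-running the teardown or re-keying the same sq cannot double-free. The same defensive idiom in user-space C, with free_and_null() as a hypothetical helper (not a kernel API):

    #include <stdlib.h>

    /* Free *p and clear the caller's pointer, mirroring the kfree()/NULL
     * pairs above; free(NULL) is a defined no-op, so hitting the same
     * slot twice is harmless. */
    static void free_and_null(void **p)
    {
        free(*p);
        *p = NULL;
    }

    int main(void)
    {
        void *dhchap_c1 = malloc(32);   /* stand-in for sq->dhchap_c1 */
        free_and_null(&dhchap_c1);
        free_and_null(&dhchap_c1);      /* second call is a no-op */
        return 0;
    }
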
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c 322 mlx5e_post_nop(wq, sq->sqn, &sq->pc); in mlx5e_xdpsq_get_next_pi()
339 pi = mlx5e_xdpsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs); in mlx5e_xdp_mpwqe_session_start()
385 if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, in mlx5e_xmit_xdp_frame_check_mpwqe()
466 if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, stop_room))) { in mlx5e_xmit_xdp_frame_check_stop_room()
601 sq->pc++; in mlx5e_xmit_xdp_frame()
706 sqcc = sq->cc; in mlx5e_poll_xdpsq_cq()
732 mlx5e_dump_error_cqe(&sq->cq, sq->sqn, in mlx5e_poll_xdpsq_cq()
750 sq->cc = sqcc; in mlx5e_poll_xdpsq_cq()
763 while (sq->cc != sq->pc) { in mlx5e_free_xdpsq_descs()
767 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); in mlx5e_free_xdpsq_descs()
[all …]
reporter_tx.c 30 if (sq->cc == sq->pc) in mlx5e_wait_for_sq_flush()
38 sq->sqn, sq->cc, sq->pc); in mlx5e_wait_for_sq_flush()
45 WARN_ONCE(sq->cc != sq->pc, in mlx5e_reset_txqsq_cc_pc()
47 sq->sqn, sq->cc, sq->pc); in mlx5e_reset_txqsq_cc_pc()
48 sq->cc = 0; in mlx5e_reset_txqsq_cc_pc()
82 sq = ctx; in mlx5e_tx_reporter_err_cqe_recover()
143 sq = to_ctx->sq; in mlx5e_tx_reporter_timeout_recover()
458 struct mlx5e_txqsq *sq = &c->sq[tc]; in mlx5e_tx_reporter_diagnose() local
597 struct mlx5e_txqsq *sq = &c->sq[tc]; in mlx5e_tx_reporter_dump_all_sqs() local
657 to_ctx.sq = sq; in mlx5e_reporter_tx_timeout()
[all …]
xdp.h 106 void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
108 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
109 void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
153 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) in mlx5e_xmit_xdp_doorbell() argument
155 if (sq->doorbell_cseg) { in mlx5e_xmit_xdp_doorbell()
156 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg); in mlx5e_xmit_xdp_doorbell()
157 sq->doorbell_cseg = NULL; in mlx5e_xmit_xdp_doorbell()
166 u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc; in mlx5e_xdp_get_inline_state()
195 mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, in mlx5e_xdp_mpwqe_add_dseg() argument
199 struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_xdp_mpwqe_add_dseg()
[all …]
txrx.h 119 #define MLX5E_TX_FETCH_WQE(sq, pi) \ argument
170 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_txqsq_get_next_pi()
178 wi = &sq->db.wqe_info[pi]; in mlx5e_txqsq_get_next_pi()
186 mlx5e_post_nop(wq, sq->sqn, &sq->pc); in mlx5e_txqsq_get_next_pi()
188 sq->stats->nop += contig_wqebbs; in mlx5e_txqsq_get_next_pi()
232 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_icosq_get_next_pi()
240 wi = &sq->db.wqe_info[pi]; in mlx5e_icosq_get_next_pi()
249 mlx5e_post_nop(wq, sq->sqn, &sq->pc); in mlx5e_icosq_get_next_pi()
287 return &sq->db.dma_fifo[i & sq->dma_fifo_mask]; in mlx5e_dma_get()
294 struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++); in mlx5e_dma_push()
[all …]
qos.c 79 struct mlx5e_txqsq *sq; in mlx5e_open_qos_sq() local
116 sq = kzalloc(sizeof(*sq), GFP_KERNEL); in mlx5e_open_qos_sq()
118 if (!sq) in mlx5e_open_qos_sq()
143 kfree(sq); in mlx5e_open_qos_sq()
157 struct mlx5e_txqsq *sq; in mlx5e_activate_qos_sq() local
169 priv->txq2sq[qid] = sq; in mlx5e_activate_qos_sq()
185 struct mlx5e_txqsq *sq; in mlx5e_deactivate_qos_sq() local
208 struct mlx5e_txqsq *sq; in mlx5e_close_qos_sq() local
223 mlx5e_close_txqsq(sq); in mlx5e_close_qos_sq()
225 kfree(sq); in mlx5e_close_qos_sq()
[all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
tx.c 49 u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); in mlx5e_xsk_tx_post_err()
56 nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); in mlx5e_xsk_tx_post_err()
58 sq->doorbell_cseg = &nopwqe->ctrl; in mlx5e_xsk_tx_post_err()
63 struct xsk_buff_pool *pool = sq->xsk_pool; in mlx5e_xsk_tx()
74 sq); in mlx5e_xsk_tx()
103 if (sq->mpwqe.wqe) in mlx5e_xsk_tx()
104 mlx5e_xdp_mpwqe_complete(sq); in mlx5e_xsk_tx()
106 mlx5e_xsk_tx_post_err(sq, &xdpi); in mlx5e_xsk_tx()
115 if (sq->mpwqe.wqe) in mlx5e_xsk_tx()
116 mlx5e_xdp_mpwqe_complete(sq); in mlx5e_xsk_tx()
[all …]
/openbmc/linux/sound/oss/dmasound/
dmasound_core.c 480 sq->max_count = sq->numBufs ; in sq_setup()
481 sq->max_active = sq->numBufs ; in sq_setup()
482 sq->block_size = sq->bufSize; in sq_setup()
484 sq->user_frags = sq->numBufs ; in sq_setup()
485 sq->user_frag_size = sq->bufSize ; in sq_setup()
506 if ( sq->block_size <= 0 || sq->block_size > sq->bufSize) { in sq_setup()
510 sq->block_size = sq->bufSize ; in sq_setup()
513 sq->max_count = sq->user_frags ; in sq_setup()
515 sq->max_active = (sq->max_active <= sq->max_count) ? in sq_setup()
522 sq->max_active = sq->numBufs ; in sq_setup()
[all …]
/openbmc/linux/drivers/net/ethernet/marvell/octeontx2/nic/
qos_sq.c 76 sq = &qset->sq[qidx]; in otx2_qos_sq_aura_pool_init()
77 sq->sqb_count = 0; in otx2_qos_sq_aura_pool_init()
78 sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL); in otx2_qos_sq_aura_pool_init()
89 sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; in otx2_qos_sq_aura_pool_init()
106 sq->sqb_count = 0; in otx2_qos_sq_aura_pool_init()
124 sq = &qset->sq[qidx]; in otx2_qos_sq_free_sqbs()
125 if (!sq->sqb_ptrs) in otx2_qos_sq_free_sqbs()
140 sq = &qset->sq[qidx]; in otx2_qos_sq_free_sqbs()
143 kfree(sq->sg); in otx2_qos_sq_free_sqbs()
147 memset((void *)sq, 0, sizeof(*sq)); in otx2_qos_sq_free_sqbs()
[all …]
otx2_txrx.c 463 sq = &pfvf->qset.sq[qidx]; in otx2_tx_napi_handler()
609 sq->head &= (sq->sqe_cnt - 1); in otx2_sqe_flush()
622 sq->sg[sq->head].num_segs = 0; in otx2_sqe_add_sg()
651 sq->sg[sq->head].num_segs++; in otx2_sqe_add_sg()
654 sq->sg[sq->head].skb = (u64)skb; in otx2_sqe_add_sg()
1152 free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1); in otx2_sq_append_skb()
1263 sq = &pfvf->qset.sq[qidx]; in otx2_cleanup_tx_cqes()
1328 sq = &pfvf->qset.sq[sq_idx]; in otx2_free_pending_sqe()
1366 sq->sg[sq->head].size[0] = len; in otx2_xdp_sqe_add_sg()
1367 sq->sg[sq->head].num_segs = 1; in otx2_xdp_sqe_add_sg()
[all …]
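
The otx2_sq_append_skb() hit (source line 1152) is the standard free-slot computation for a power-of-two ring in which head chases cons_head; the -1 keeps one slot permanently unused so that head == cons_head can only mean "empty". Worked through with made-up values:

    /* Free-descriptor math from otx2_sq_append_skb(), with illustrative
     * numbers. sqe_cnt must be a power of two; one slot stays unused by
     * design so "full" and "empty" are distinguishable. */
    #include <stdio.h>

    int main(void)
    {
        unsigned sqe_cnt = 16, head = 10, cons_head = 4;
        unsigned free_desc = (cons_head - head - 1 + sqe_cnt) & (sqe_cnt - 1);

        /* (4 - 10 - 1 + 16) & 15 = 9: slots 10..15 and 0..2 are writable;
         * slot 3 is reserved so head can never catch cons_head while full. */
        printf("free_desc = %u\n", free_desc);
        return 0;
    }
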
/openbmc/linux/drivers/net/ethernet/cavium/thunder/
nicvf_queues.c 514 sq->desc = sq->dmem.base; in nicvf_init_snd_queue()
587 sq->tso_hdrs, sq->tso_hdrs_phys); in nicvf_free_snd_queue()
593 while (sq->head != sq->tail) { in nicvf_free_snd_queue()
594 skb = (struct sk_buff *)sq->skbuff[sq->head]; in nicvf_free_snd_queue()
613 nicvf_unmap_sndq_buffers(nic, sq, sq->head, in nicvf_free_snd_queue()
620 sq->head &= (sq->dmem.q_len - 1); in nicvf_free_snd_queue()
866 sq = &qs->sq[qidx]; in nicvf_snd_queue_config()
885 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; in nicvf_snd_queue_config()
1129 sq->tail &= (sq->dmem.q_len - 1); in nicvf_get_sq_desc()
1150 sq->head &= (sq->dmem.q_len - 1); in nicvf_put_sq_desc()
[all …]
/openbmc/linux/drivers/net/ethernet/intel/ice/
ice_controlq.c 77 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) in ice_check_sq_alive()
78 return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | in ice_check_sq_alive()
106 cq->sq.desc_buf.va, cq->sq.desc_buf.pa); in ice_alloc_ctrlq_sq_ring()
239 cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; in ice_alloc_sq_bufs()
260 cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa); in ice_alloc_sq_bufs()
860 struct ice_ctl_q_ring *sq = &cq->sq; in ice_clean_sq() local
942 return rd32(hw, cq->sq.head) == cq->sq.next_to_use; in ice_sq_done()
1035 dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use]; in ice_sq_send_cmd()
1055 if (cq->sq.next_to_use == cq->sq.count) in ice_sq_send_cmd()
1057 wr32(hw, cq->sq.tail, cq->sq.next_to_use); in ice_sq_send_cmd()
[all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ktls_rx.c 149 mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info, in post_static_params()
160 sq->pc += num_wqebbs; in post_static_params()
180 mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, in post_progress_params()
191 sq->pc += num_wqebbs; in post_progress_params()
205 sq = &c->async_icosq; in post_rx_param_wqes()
215 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); in post_rx_param_wqes()
312 sq->pc++; in resync_post_get_progress_params()
313 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); in resync_post_get_progress_params()
346 sq = &c->async_icosq; in resync_handle_work()
370 sq = &c->async_icosq; in resync_handle_seq_match()
[all …]
ktls_tx.c 559 mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info, in post_static_params()
564 sq->pc += num_wqebbs; in post_static_params()
578 mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0, in post_progress_params()
581 sq->pc += num_wqebbs; in post_progress_params()
589 tx_fill_wi(sq, pi, 1, 0, NULL); in tx_post_fence_nop()
591 mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); in tx_post_fence_nop()
605 tx_post_fence_nop(sq); in mlx5e_ktls_tx_post_param_wqes()
726 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); in tx_post_resync_dump()
745 dseg->lkey = sq->mkey_be; in tx_post_resync_dump()
763 stats = sq->stats; in mlx5e_ktls_tx_handle_resync_dump_comp()
[all …]
/openbmc/linux/drivers/infiniband/hw/erdma/
erdma_cmdq.c 91 struct erdma_cmdq_sq *sq = &cmdq->sq; in erdma_cmdq_sq_init() local
95 sq->depth = cmdq->max_outstandings * sq->wqebb_cnt; in erdma_cmdq_sq_init()
99 sq->qbuf = in erdma_cmdq_sq_init()
102 if (!sq->qbuf) in erdma_cmdq_sq_init()
105 sq->db_record = (u64 *)(sq->qbuf + buf_size); in erdma_cmdq_sq_init()
223 cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr); in erdma_cmdq_init()
248 cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr); in erdma_cmdq_destroy()
275 wqe = get_queue_entry(cmdq->sq.qbuf, cmdq->sq.pi, cmdq->sq.depth, in push_cmdq_sqe()
279 cmdq->sq.pi += cmdq->sq.wqebb_cnt; in push_cmdq_sqe()
307 sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth, in erdma_poll_single_cmd_completion()
[all …]
/openbmc/linux/tools/testing/selftests/net/
io_uring_zerocopy_tx.c 108 struct io_uring_sq sq; member
194 munmap(sq->khead, sq->ring_sz); in io_uring_mmap()
238 struct io_uring_sq *sq = &ring->sq; in io_uring_submit() local
244 if (*sq->khead != *sq->ktail) { in io_uring_submit()
248 if (sq->sqe_head == sq->sqe_tail) in io_uring_submit()
251 ktail = *sq->ktail; in io_uring_submit()
252 to_submit = sq->sqe_tail - sq->sqe_head; in io_uring_submit()
255 sq->array[ktail++ & mask] = sq->sqe_head++ & mask; in io_uring_submit()
293 struct io_uring_sq *sq = &ring->sq; in io_uring_get_sqe() local
295 if (sq->sqe_tail + 1 - sq->sqe_head > *sq->kring_entries) in io_uring_get_sqe()
[all …]
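
The selftest drives the io_uring submission queue by hand: it mmaps khead/ktail, writes SQE indices into the masked array, and advances the tail itself. With liburing the same submit path collapses to a few calls; a minimal sketch, assuming liburing is installed (link with -luring):

    /* Minimal io_uring round trip via liburing: queue one NOP SQE,
     * submit it, and reap its completion. Sketch only. */
    #include <liburing.h>
    #include <stdio.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;

        if (io_uring_queue_init(8, &ring, 0) < 0)
            return 1;
        sqe = io_uring_get_sqe(&ring);      /* grab a free SQE slot */
        io_uring_prep_nop(sqe);             /* no-op request */
        io_uring_submit(&ring);             /* bump the SQ tail for the kernel */
        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
            printf("nop completed, res=%d\n", cqe->res);
            io_uring_cqe_seen(&ring, cqe);  /* advance the CQ head */
        }
        io_uring_queue_exit(&ring);
        return 0;
    }
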
/openbmc/linux/drivers/soc/qcom/
qmi_interface.c 168 struct sockaddr_qrtr sq; in qmi_send_new_lookup() local
178 sq.sq_family = qmi->sq.sq_family; in qmi_send_new_lookup()
179 sq.sq_node = qmi->sq.sq_node; in qmi_send_new_lookup()
182 msg.msg_name = &sq; in qmi_send_new_lookup()
243 sq.sq_family = qmi->sq.sq_family; in qmi_send_new_server()
244 sq.sq_node = qmi->sq.sq_node; in qmi_send_new_server()
247 msg.msg_name = &sq; in qmi_send_new_server()
462 qmi->sq = sq; in qmi_handle_net_reset()
528 struct msghdr msg = { .msg_name = &sq, .msg_namelen = sizeof(sq) }; in qmi_data_ready_work()
558 if (sq.sq_node == qmi->sq.sq_node && in qmi_data_ready_work()
[all …]
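
Both the QMI and qrtr hits fill a struct sockaddr_qrtr (a family/node/port triple) and pass it through msg.msg_name. A user-space sketch of addressing a QRTR datagram, assuming AF_QIPCRTR support in the kernel and libc; the node and port values here are made up:

    /* Send a datagram to a QRTR (node, port) address; values are
     * illustrative. Requires <linux/qrtr.h> and AF_QIPCRTR support. */
    #include <linux/qrtr.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        struct sockaddr_qrtr sq = {
            .sq_family = AF_QIPCRTR,
            .sq_node   = 0,          /* local node, made up */
            .sq_port   = 42,         /* hypothetical service port */
        };
        int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
        const char payload[] = "ping";

        if (fd < 0)
            return 1;
        sendto(fd, payload, sizeof(payload), 0,
               (struct sockaddr *)&sq, sizeof(sq));
        close(fd);
        return 0;
    }
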
/openbmc/linux/net/qrtr/
ns.c 53 struct sockaddr_qrtr sq; member
324 ret = say_hello(sq); in ctrl_cmd_hello()
336 struct sockaddr_qrtr sq; in ctrl_cmd_bye() local
365 sq.sq_node = srv->node; in ctrl_cmd_bye()
366 sq.sq_port = srv->port; in ctrl_cmd_bye()
388 struct sockaddr_qrtr sq; in ctrl_cmd_del_client() local
439 sq.sq_node = srv->node; in ctrl_cmd_del_client()
538 lookup->sq = *from; in ctrl_cmd_new_lookup()
624 sq.sq_node, sq.sq_port); in qrtr_ns_worker()
671 sq.sq_node, sq.sq_port); in qrtr_ns_worker()
[all …]
/openbmc/linux/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c 59 #define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi)) argument
61 #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask) argument
100 wq = sq->wq; in hinic_sq_prepare_ctxt()
226 if (!sq->saved_skb) in alloc_sq_skb_arr()
238 vfree(sq->saved_skb); in free_sq_skb_arr()
286 sq->hwif = hwif; in hinic_init_sq()
288 sq->wq = wq; in hinic_init_sq()
290 sq->irq = entry->vector; in hinic_init_sq()
307 free_sq_skb_arr(sq); in hinic_clean_sq()
617 struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); in sq_prepare_db()
[all …]
hinic_tx.c 503 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_lb_xmit_frame()
564 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_xmit_frame()
668 struct hinic_sq *sq = txq->sq; in free_all_tx_skbs() local
700 struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq); in free_tx_poll()
703 struct hinic_sq *sq = txq->sq; in free_tx_poll() local
745 hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) { in free_tx_poll()
804 struct hinic_sq *sq = txq->sq; in tx_request_irq() local
808 qp = container_of(sq, struct hinic_qp, sq); in tx_request_irq()
844 struct hinic_sq *sq = txq->sq; in tx_free_irq() local
861 struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); in hinic_init_txq()
[all …]
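
The hinic code repeatedly recovers the enclosing queue pair from its embedded send queue with container_of(txq->sq, struct hinic_qp, sq). The macro is plain pointer arithmetic over offsetof(); a user-space rendering of the same trick, with illustrative struct names:

    /* container_of in miniature: subtract the member's offset from the
     * member's address to get back to the enclosing structure. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct qp {                       /* stand-in for struct hinic_qp */
        int id;
        struct { int depth; } sq;     /* embedded send queue */
    };

    int main(void)
    {
        struct qp q = { .id = 7, .sq = { .depth = 256 } };
        struct qp *back = container_of(&q.sq, struct qp, sq);

        printf("recovered qp id=%d\n", back->id);   /* prints 7 */
        return 0;
    }
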
hinic_hw_qp.h 57 #define HINIC_MIN_TX_NUM_WQEBBS(sq) \ argument
58 (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
122 struct hinic_sq sq; member
133 struct hinic_sq *sq, u16 global_qid);
138 int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
142 void hinic_clean_sq(struct hinic_sq *sq);
149 int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
184 struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
189 void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
193 struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
[all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
aso.c 162 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5_aso_alloc_sq()
179 void *sqc_data, struct mlx5_aso *sq) in create_aso_sq() argument
186 sizeof(u64) * sq->wq_ctrl.buf.npages; in create_aso_sq()
195 MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); in create_aso_sq()
243 void *sqc_data, struct mlx5_aso *sq) in mlx5_aso_create_sq_rdy() argument
253 mlx5_core_destroy_sq(mdev, sq->sqn); in mlx5_aso_create_sq_rdy()
260 mlx5_wq_destroy(&sq->wq_ctrl); in mlx5_aso_free_sq()
265 mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn); in mlx5_aso_destroy_sq()
266 mlx5_aso_free_sq(sq); in mlx5_aso_destroy_sq()
270 u32 pdn, struct mlx5_aso *sq) in mlx5_aso_create_sq() argument
[all …]
